forked from TrueCloudLab/distribution

Compare commits: main...docker/1.1 (1 commit)
Commit: 0f2d99b13a

3644 changed files with 1160629 additions and 825371 deletions

@@ -1 +0,0 @@
bin/

.drone.yml | 38 (Normal file)
@@ -0,0 +1,38 @@
image: dmp42/go:stable

script:
  # To be spoofed back into the test image
  - go get github.com/modocache/gover

  - go get -t ./...

  # Go fmt
  - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
  # Go lint
  - test -z "$(golint ./... | tee /dev/stderr)"
  # Go vet
  - go vet ./...
  # Go test
  - go test -v -race -cover ./...
  # Helper to concatenate reports
  - gover
  # Send to coverall
  - goveralls -service drone.io -coverprofile=gover.coverprofile -repotoken {{COVERALLS_TOKEN}}

  # Do we want these as well?
  # - go get code.google.com/p/go.tools/cmd/goimports
  # - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
  # http://labix.org/gocheck

notify:
  email:
    recipients:
      - distribution@docker.com

  slack:
    team: docker
    channel: "#dt"
    username: mom
    token: {{SLACK_TOKEN}}
    on_success: true
    on_failure: true

.github/workflows/build.yml | 134 (vendored)
@@ -1,134 +0,0 @@
name: build

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  push:
    branches:
      - 'main'
      - 'release/*'
    tags:
      - 'v*'
  pull_request:

env:
  DOCKERHUB_SLUG: distribution/distribution

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        go:
          - 1.18
          - 1.19.10
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go }}
      -
        name: Test
        run: |
          make coverage
      -
        name: Codecov
        uses: codecov/codecov-action@v3
        with:
          directory: ./

  build:
    permissions:
      contents: write # to create GitHub release (softprops/action-gh-release)

    runs-on: ubuntu-latest
    needs:
      - test
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      -
        name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: |
            ${{ env.DOCKERHUB_SLUG }}
          ### versioning strategy
          ### push semver tag v3.2.1 on main (default branch)
          # distribution/distribution:3.2.1
          # distribution/distribution:3.2
          # distribution/distribution:3
          # distribution/distribution:latest
          ### push semver prelease tag v3.0.0-beta.1 on main (default branch)
          # distribution/distribution:3.0.0-beta.1
          ### push on main
          # distribution/distribution:edge
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=ref,event=pr
            type=edge
          labels: |
            org.opencontainers.image.title=Distribution
            org.opencontainers.image.description=The toolkit to pack, ship, store, and deliver container content
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Login to DockerHub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Build artifacts
        uses: docker/bake-action@v2
        with:
          targets: artifact-all
      -
        name: Move artifacts
        run: |
          mv ./bin/**/* ./bin/
      -
        name: Upload artifacts
        uses: actions/upload-artifact@v3
        with:
          name: registry
          path: ./bin/*
          if-no-files-found: error
      -
        name: Build image
        uses: docker/bake-action@v2
        with:
          files: |
            ./docker-bake.hcl
            ${{ steps.meta.outputs.bake-file }}
          targets: image-all
          push: ${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') }}
      -
        name: GitHub Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
        with:
          draft: true
          files: |
            bin/*.tar.gz
            bin/*.sha256
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/codeql-analysis.yml | 55 (vendored)
@@ -1,55 +0,0 @@
name: CodeQL

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  schedule:
    - cron: '0 12 * * 6'
  push:
    branches:
      - 'main'
      - 'release/*'
    tags:
      - 'v*'
  pull_request:

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  analyze:
    permissions:
      contents: read # to fetch code (actions/checkout)
      security-events: write # to upload SARIF results (github/codeql-action/analyze)

    name: Analyze
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        language:
          - go
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      -
        name: Checkout HEAD on PR
        if: ${{ github.event_name == 'pull_request' }}
        run: |
          git checkout HEAD^2
      -
        name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: ${{ matrix.language }}
      -
        name: Autobuild
        uses: github/codeql-action/autobuild@v2
      -
        name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2

.github/workflows/conformance.yml | 56 (vendored)
@@ -1,56 +0,0 @@
name: conformance

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  pull_request:
  push:

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  run-conformance-test:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      -
        name: Build image
        uses: docker/bake-action@v2
        with:
          targets: image-local
      -
        name: Start distribution server
        run: |
          IP=`hostname -I | awk '{print $1}'`
          echo "IP=$IP" >> $GITHUB_ENV
          echo "OCI_ROOT_URL=http://$IP:5000" >> $GITHUB_ENV
          DISTRIBUTION_REF="registry:local"
          docker run --rm -p 5000:5000 -e REGISTRY_STORAGE_DELETE_ENABLED=true -idt "registry:local"
      -
        name: Run OCI Distribution Spec conformance tests
        uses: opencontainers/distribution-spec@v1.0.1
        env:
          OCI_ROOT_URL: ${{ env.OCI_ROOT_URL }}
          OCI_NAMESPACE: oci-conformance/distribution-test
          OCI_TEST_PULL: 1
          OCI_TEST_PUSH: 1
          OCI_TEST_CONTENT_DISCOVERY: 1
          OCI_TEST_CONTENT_MANAGEMENT: 1
          OCI_HIDE_SKIPPED_WORKFLOWS: 1
      -
        name: Move test results
        run: mkdir -p .out/ && mv {report.html,junit.xml} .out/
      -
        name: Upload test results
        uses: actions/upload-artifact@v3
        with:
          name: oci-test-results-${{ github.sha }}
          path: .out/
          if-no-files-found: error

.github/workflows/e2e.yml | 42 (vendored)
@@ -1,42 +0,0 @@
name: e2e

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  push:
    branches:
      - 'main'
      - 'release/*'
  pull_request:

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  run-e2e-test:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      -
        name: Build image
        uses: docker/bake-action@v2
        with:
          targets: image-local
      -
        name: Start distribution server
        run: |
          IP=`hostname -I | awk '{print $1}'`
          echo "IP=$IP" >> $GITHUB_ENV
          echo '{"insecure-registries" : ["'$IP':5000"]}' | sudo tee /etc/docker/daemon.json
          sudo service docker restart
          docker run --rm -p 5000:5000 -p 5001:5001 -idt "registry:local"
      -
        name: Tests
        run: |
          bash ./tests/push.sh $IP

.github/workflows/fossa.yml | 25 (vendored)
@@ -1,25 +0,0 @@
name: FOSSA License Scanning

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  - pull_request
  - push

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  scan-license:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Run FOSSA scan and upload build data
        uses: fossa-contrib/fossa-action@v2
        with:
          fossa-api-key: cac3dc8d4f2ba86142f6c0f2199a160f

.github/workflows/scorecards.yml | 60 (vendored)
@@ -1,60 +0,0 @@
name: Scorecards supply-chain security
on:
  # Only the default branch is supported.
  branch_protection_rule:
  schedule:
    - cron: '26 0 * * 0'
  push:
    branches: [ "main" ]

# Declare default permissions as read only.
permissions: read-all

jobs:
  analysis:
    name: Scorecards analysis
    runs-on: ubuntu-latest
    permissions:
      # Needed to upload the results to code-scanning dashboard.
      security-events: write
      # Used to receive a badge.
      id-token: write

    steps:
      - name: "Checkout code"
        uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # tag=v3.0.0
        with:
          persist-credentials: false

      - name: "Run analysis"
        uses: ossf/scorecard-action@99c53751e09b9529366343771cc321ec74e9bd3d # tag=v2.0.6
        with:
          results_file: results.sarif
          results_format: sarif
          # (Optional) Read-only PAT token. Uncomment the `repo_token` line below if:
          # - you want to enable the Branch-Protection check on a *public* repository, or
          # - you are installing Scorecards on a *private* repository
          # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat.
          # repo_token: ${{ secrets.SCORECARD_READ_TOKEN }}

          # Publish the results for public repositories to enable scorecard badges. For more details, see
          # https://github.com/ossf/scorecard-action#publishing-results.
          # For private repositories, `publish_results` will automatically be set to `false`, regardless
          # of the value entered here.
          publish_results: true

      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
      # format to the repository Actions tab.
      - name: "Upload artifact"
        uses: actions/upload-artifact@6673cd052c4cd6fcf4b4e6e60ea986c889389535 # tag=v3.0.0
        with:
          name: SARIF file
          path: results.sarif
          retention-days: 5

      # Upload the results to GitHub's code scanning dashboard.
      - name: "Upload to code-scanning"
        uses: github/codeql-action/upload-sarif@5f532563584d71fdef14ee64d17bafb34f751ce5 # tag=v1.0.26
        with:
          sarif_file: results.sarif

.github/workflows/validate.yml | 38 (vendored)
@@ -1,38 +0,0 @@
name: validate

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  push:
    branches:
      - 'main'
      - 'release/*'
    tags:
      - 'v*'
  pull_request:

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  validate:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        target:
          - lint
          - validate-vendor
          - validate-git
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Run
        run: |
          make ${{ matrix.target }}
        env:
          COMMIT_RANGE: ${{ format('{0}..{1}', github.sha, 'HEAD') }}

.gitignore | 1 (vendored)
@@ -35,4 +35,3 @@ bin/*
# Editor/IDE specific files.
*.sublime-project
*.sublime-workspace
.idea/*

@@ -1,27 +0,0 @@
linters:
  enable:
    - staticcheck
    - unconvert
    - gofmt
    - goimports
    - revive
    - ineffassign
    - vet
    - unused
    - misspell
    - bodyclose
  disable:
    - errcheck

linters-settings:
  revive:
    rules:
      # TODO(thaJeztah): temporarily disabled the "unused-parameter" check.
      # It produces many warnings, and some of those may need to be looked at.
      - name: unused-parameter
        disabled: true

run:
  deadline: 2m
  skip-dirs:
    - vendor
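
For context on the `unused-parameter` rule that the linter configuration above temporarily disables: revive flags parameters that are declared but never used inside the function body. A minimal, hypothetical illustration (not code from this repository):

```go
package example

import "context"

// Purge removes a path. The ctx parameter is accepted for interface
// compatibility but never used in the body, so revive's unused-parameter
// rule would report it and suggest renaming it to `_`.
func Purge(ctx context.Context, path string) error {
    return remove(path)
}

func remove(path string) error { return nil }
```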

.mailmap | 30
@@ -1,9 +1,8 @@
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@users.noreply.github.com>
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@gmail.com>
Olivier Gambier <olivier@docker.com> Olivier Gambier <dmp42@users.noreply.github.com>
Brian Bland <brian.bland@docker.com> Brian Bland <r4nd0m1n4t0r@gmail.com>
Brian Bland <brian.bland@docker.com> Brian Bland <brian.t.bland@gmail.com>
Josh Hawn <josh.hawn@docker.com> Josh Hawn <jlhawn@berkeley.edu>
Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com>

@@ -12,21 +11,4 @@ Jessie Frazelle <jessie@docker.com> <jfrazelle@users.noreply.github.com>
Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
Omer Cohen <git@omer.io> Omer Cohen <git@omerc.net>
Eric Yang <windfarer@gmail.com> Eric Yang <Windfarer@users.noreply.github.com>
Nikita Tarasov <nikita@mygento.ru> Nikita <luckyraul@users.noreply.github.com>
Yu Wang <yuwa@microsoft.com> yuwaMSFT2 <yuwa@microsoft.com>
Yu Wang <yuwa@microsoft.com> Yu Wang (UC) <yuwa@microsoft.com>
Olivier Gambier <olivier@docker.com> dmp <dmp@loaner.local>
Olivier Gambier <olivier@docker.com> Olivier <o+github@gambier.email>
Olivier Gambier <olivier@docker.com> Olivier <dmp42@users.noreply.github.com>
Elsan Li 李楠 <elsanli@tencent.com> elsanli(李楠) <elsanli@tencent.com>
Rui Cao <ruicao@alauda.io> ruicao <ruicao@alauda.io>
Gwendolynne Barr <gwendolynne.barr@docker.com> gbarr01 <gwendolynne.barr@docker.com>
Haibing Zhou 周海兵 <zhouhaibing089@gmail.com> zhouhaibing089 <zhouhaibing089@gmail.com>
Feng Honglin <tifayuki@gmail.com> tifayuki <tifayuki@gmail.com>
Helen Xie <xieyulin821@harmonycloud.cn> Helen-xie <xieyulin821@harmonycloud.cn>
Mike Brown <brownwm@us.ibm.com> Mike Brown <mikebrow@users.noreply.github.com>
Manish Tomar <manish.tomar@docker.com> Manish Tomar <manishtomar@users.noreply.github.com>
Sakeven Jiang <jc5930@sina.cn> sakeven <jc5930@sina.cn>

ADOPTERS.md | 11
@@ -1,11 +0,0 @@
Docker Hub https://hub.docker.com/

GitLab Container Registry https://docs.gitlab.com/ee/user/packages/container_registry/

GitHub Container Registry https://docs.github.com/en/free-pro-team@latest/packages/guides/about-github-container-registry

Harbor, CNCF Graduated project https://goharbor.io/

VMware Harbor Registry https://docs.pivotal.io/partners/vmware-harbor/index.html

DigitalOcean Container Registry https://www.digitalocean.com/products/container-registry/

AUTHORS | 104 (Normal file)
@@ -0,0 +1,104 @@
Aaron Lehmann <aaron.lehmann@docker.com>
Aaron Vinson <avinson.public@gmail.com>
Adam Enger <adamenger@gmail.com>
Adrian Mouat <adrian.mouat@gmail.com>
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
Alex Chan <alex.chan@metaswitch.com>
Alex Elman <aelman@indeed.com>
amitshukla <ashukla73@hotmail.com>
Amy Lindburg <amy.lindburg@docker.com>
Andrew Meredith <andymeredith@gmail.com>
Andrey Kostov <kostov.andrey@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Anton Tiurin <noxiouz@yandex.ru>
Antonio Mercado <amercado@thinknode.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Baars <arthur@semmle.com>
Avi Miller <avi.miller@oracle.com>
Ayose Cazorla <ayosec@gmail.com>
BadZen <dave.trombley@gmail.com>
Ben Firshman <ben@firshman.co.uk>
bin liu <liubin0329@gmail.com>
Brian Bland <brian.bland@docker.com>
burnettk <burnettk@gmail.com>
Chris Dillon <squarism@gmail.com>
Daisuke Fujita <dtanshi45@gmail.com>
Darren Shepherd <darren@rancher.com>
Dave Trombley <dave.trombley@gmail.com>
Dave Tucker <dt@docker.com>
David Lawrence <david.lawrence@docker.com>
David Verhasselt <david@crowdway.com>
David Xia <dxia@spotify.com>
davidli <wenquan.li@hp.com>
Dejan Golja <dejan@golja.org>
Derek McGowan <derek@mcgstyle.net>
Diogo Mónica <diogo.monica@gmail.com>
Donald Huang <don.hcd@gmail.com>
Doug Davis <dug@us.ibm.com>
farmerworking <farmerworking@gmail.com>
Florentin Raud <florentin.raud@gmail.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
harche <p.harshal@gmail.com>
Henri Gomez <henri.gomez@gmail.com>
Hu Keping <hukeping@huawei.com>
Hua Wang <wanghua.humble@gmail.com>
Ian Babrou <ibobrik@gmail.com>
Jack Griffin <jackpg14@gmail.com>
Jason Freidman <jason.freidman@gmail.com>
Jeff Nickoloff <jeff@allingeek.com>
Jessie Frazelle <jessie@docker.com>
Jianqing Wang <tsing@jianqing.org>
Jon Poler <jonathan.poler@apcera.com>
Jonathan Boulle <jonathanboulle@gmail.com>
Jordan Liggitt <jliggitt@redhat.com>
Josh Hawn <josh.hawn@docker.com>
Julien Fernandez <julien.fernandez@gmail.com>
Kelsey Hightower <kelsey.hightower@gmail.com>
Kenneth Lim <kennethlimcp@gmail.com>
Li Yi <denverdino@gmail.com>
Louis Kottmann <louis.kottmann@gmail.com>
Luke Carpenter <x@rubynerd.net>
Mary Anthony <mary@docker.com>
Matt Bentley <mbentley@mbentley.net>
Matt Moore <mattmoor@google.com>
Matt Robenolt <matt@ydekproductions.com>
Michael Prokop <mika@grml.org>
Miquel Sabaté <msabate@suse.com>
Morgan Bauer <mbauer@us.ibm.com>
moxiegirl <mary@docker.com>
Nathan Sullivan <nathan@nightsys.net>
nevermosby <robolwq@qq.com>
Nghia Tran <tcnghia@gmail.com>
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
Oilbeater <liumengxinfly@gmail.com>
Olivier Gambier <olivier@docker.com>
Olivier Jacques <olivier.jacques@hp.com>
Patrick Devine <patrick.devine@docker.com>
Philip Misiowiec <philip@atlashealth.com>
Richard Scothern <richard.scothern@docker.com>
Rusty Conover <rusty@luckydinosaur.com>
Sebastiaan van Stijn <github@gone.nl>
Sharif Nassar <sharif@mrwacky.com>
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
Shreyas Karnik <karnik.shreyas@gmail.com>
Simon Thulbourn <simon+github@thulbourn.com>
Spencer Rinehart <anubis@overthemonkey.com>
Stephen J Day <stephen.day@docker.com>
Sungho Moon <sungho.moon@navercorp.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Baubeau <sbaubeau@redhat.com>
Ted Reed <ted.reed@gmail.com>
tgic <farmer1992@gmail.com>
Thomas Sjögren <konstruktoid@users.noreply.github.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tonis Tiigi <tonistiigi@gmail.com>
Troels Thomsen <troels@thomsen.io>
Vincent Batts <vbatts@redhat.com>
Vincent Demeester <vincent@sbr.pm>
Vincent Giersch <vincent.giersch@ovh.net>
W. Trevor King <wking@tremily.us>
xg.song <xg.song@venusource.com>
xiekeyang <xiekeyang@huawei.com>
Yann ROBERT <yann.robert@anantaplex.fr>
yuzou <zouyu7@huawei.com>

BUILDING.md | 107
@@ -1,107 +0,0 @@
# Building the registry source

## Use-case

This is useful if you intend to actively work on the registry.

### Alternatives

Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).

People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.

OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).

### Gotchas

You are expected to know your way around with go & git.

If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.

## Configure the development environment

The first prerequisite of properly building distribution targets is to have a Go
development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
environment.

Next, fetch the code from the repository using git:

    git clone https://github.com/distribution/distribution
    cd distribution

If you are planning to create a pull request with changes, you may want to clone directly from your [fork](https://help.github.com/en/articles/about-forks).

## Build and run from source

First, build the binaries:

    $ make
    + bin/registry
    + bin/digest
    + bin/registry-api-descriptor-template
    + binaries

Now create the directory for the registry data (this might require you to set permissions properly)

    mkdir -p /var/lib/registry

... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location.

The `registry`
binary can then be run with the following:

    $ ./bin/registry --version
    ./bin/registry github.com/distribution/distribution/v3 v2.7.0-1993-g8857a194

The registry can be run with a development config using the following
incantation:

    $ ./bin/registry serve cmd/registry/config-dev.yml
    INFO[0000] debug server listening :5001
    WARN[0000] No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable. environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
    INFO[0000] endpoint local-5003 disabled, skipping environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
    INFO[0000] endpoint local-8083 disabled, skipping environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
    INFO[0000] using inmemory blob descriptor cache environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
    INFO[0000] providing prometheus metrics on /metrics
    INFO[0000] listening on [::]:5000 environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194

If it is working, one should see the above log messages.

### Build reference

The regular `go` commands, such as `go test`, should work per package.

A `Makefile` has been provided as a convenience to support repeatable builds.

Run `make` to build the binaries:

    $ make
    + bin/registry
    + bin/digest
    + bin/registry-api-descriptor-template
    + binaries

The above provides a repeatable build using the contents of the vendor
directory. We can verify this worked by running
the registry binary generated in the "./bin" directory:

    $ ./bin/registry --version
    ./bin/registry github.com/distribution/distribution v2.0.0-alpha.2-80-g16d8b2c.m

Run `make test` to run all of the tests.

Run `make validate` to run the validators, including the linter and vendor validation. You must have docker with the buildx plugin installed to run the validators.

### Optional build tags

Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
the environment variable `BUILDTAGS`.

<dl>
<dt>noresumabledigest</dt>
<dd>Compiles without resumable digest support</dd>
<dt>include_gcs</dt>
<dd>Adds support for <a href="https://cloud.google.com/storage">Google Cloud Storage</a></dd>
</dl>
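
To make the build-tag mechanism described in BUILDING.md concrete, here is a minimal, hypothetical sketch (not a file from this repository) of how a tag such as `include_gcs` gates an optional driver at compile time; building with `BUILDTAGS="include_gcs"` compiles the file, and building without the tag skips it entirely:

```go
//go:build include_gcs

// Package gcs is a hypothetical illustration of an optional storage driver
// that is only compiled in when the include_gcs build tag is set.
package gcs

import "log"

func init() {
    // Runs only in binaries built with -tags include_gcs.
    log.Println("Google Cloud Storage driver enabled")
}
```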

@@ -1,5 +0,0 @@
# Code of Conduct

We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).

Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct.

CONTRIBUTING.md | 149
@@ -1,15 +1,14 @@
# Contributing to the registry

## Before reporting an issue...

### If your problem is with...

- automated builds or your [Docker Hub](https://hub.docker.com/) account
  - Report it to [Hub Support](https://hub.docker.com/support/)
- Distributions of Docker for desktop or Linux
  - Report [Mac Desktop issues](https://github.com/docker/for-mac)
  - Report [Windows Desktop issues](https://github.com/docker/for-win)
  - Report [Linux issues](https://github.com/docker/for-linux)
- automated builds
- your account on the [Docker Hub](https://hub.docker.com/)
- any other [Docker Hub](https://hub.docker.com/) issue

Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)

### If you...

@@ -17,16 +16,10 @@
- can't figure out something
- are not sure what's going on or what your problem is

Please ask first in the [#distribution](https://cloud-native.slack.com/archives/C01GVR8SY4R) channel on CNCF community slack.
[Click here for an invite to the CNCF community slack](https://slack.cncf.io/)
Then please do not open an issue here yet - you should first try one of the following support forums:

### Reporting security issues

The maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!

Please **DO NOT** file a public issue, instead send your report privately to
[cncf-distribution-security@lists.cncf.io](mailto:cncf-distribution-security@lists.cncf.io).
- irc: #docker-distribution on freenode
- mailing-list: <distribution@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution

## Reporting an issue properly

@@ -34,7 +27,7 @@ By following these simple rules you will get better and faster feedback on your
- search the bugtracker for an already reported issue

### If you found an issue that describes your problem:

- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments

@@ -47,10 +40,10 @@ By following these simple rules you will get better and faster feedback on your
1. create a new issue, with a succinct title that describes your issue:
   - bad title: "It doesn't work with my docker"
   - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
2. copy the output of (or similar for other container tools):
2. copy the output of:
   - `docker version`
   - `docker info`
   - `docker exec <registry-container> registry --version`
   - `docker exec <registry-container> registry -version`
3. copy the command line you used to launch your Registry
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
5. reproduce your problem and get your docker daemon logs showing the error

@@ -58,72 +51,90 @@ By following these simple rules you will get better and faster feedback on your
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry

## Contributing Code

Contributions should be made via pull requests. Pull requests will be reviewed
by one or more maintainers or reviewers and merged when acceptable.
## Contributing a patch for a known bug, or a small correction

You should follow the basic GitHub workflow:

1. Use your own [fork](https://help.github.com/en/articles/about-forks)
2. Create your [change](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
3. Test your code
4. [Commit](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) your work, always [sign your commits](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages)
5. Push your change to your fork and create a [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork)
1. fork
2. commit a change
3. make sure the tests pass
4. PR

Refer to [containerd's contribution guide](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
for tips on creating a successful contribution.
Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:

## Sign your work
- configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
- sign your commits using `-s`: `git commit -s -m "My commit"`

The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
Some simple rules to ensure quick merge:

```
Developer Certificate of Origin
Version 1.1
- clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
- prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
- if you need to amend your PR following comments, please squash instead of adding more commits

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
## Contributing new features

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.

Developer's Certificate of Origin 1.1
If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.

By making a contribution to this project, I certify that:
Then you should submit your implementation, clearly linking to the issue (and possible proposal).

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or
Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or
It's mandatory to:

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.
- interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
- address maintainers' comments and modify your submission accordingly
- write tests for any new code

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```
Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.

Then you just add a line to every git commit message:
Have a look at a great, succesful contribution: the [Ceph driver PR](https://github.com/docker/distribution/pull/443)

    Signed-off-by: Joe Smith <joe.smith@email.com>
## Coding Style

Use your real name (sorry, no pseudonyms or anonymous contributions.)
Unless explicitly stated, we follow all coding guidelines from the Go
community. While some of these standards may seem arbitrary, they somehow seem
to result in a solid, consistent codebase.

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
It is possible that the code base does not currently comply with these
guidelines. We are not looking for a massive PR that fixes this, since that
goes against the spirit of the guidelines. All new contributions should make a
best effort to clean up and make the code base better than they left it.
Obviously, apply your best judgement. Remember, the goal here is to make the
code base easier for humans to navigate and understand. Always keep that in
mind when nudging others to comply.

The rules:

1. All code should be formatted with `gofmt -s`.
2. All code should pass the default levels of
   [`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective
   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
4. Comment the code. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare
   expectations, caveats and anything else that may be important. If a type
   gets exported, having the comments already there will ensure it's ready.
6. Variable name length should be proportional to its context and no longer.
   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
   In practice, short methods will have short variable names and globals will
   have longer names.
7. No underscores in package names. If you need a compound name, step back,
   and re-examine why you need a compound name. If you still think you need a
   compound name, lose the underscore.
8. No utils or helpers packages. If a function is not general enough to
   warrant its own package, it has not been written generally enough to be a
   part of a util package. Just leave it unexported and well-documented.
9. All tests should run with `go test` and outside tooling should not be
   required. No, we don't need another unit testing framework. Assertion
   packages are acceptable if they provide _real_ incremental value.
10. Even though we call these "rules" above, they are actually just
    guidelines. Since you've read all the rules, you now know that.

If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](http://golang.org/doc/effective_go.html). The
[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.
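
As a small, hypothetical illustration of the commenting and naming guidelines listed in CONTRIBUTING.md above (not code from this repository):

```go
// Package upload is a hypothetical example used only to illustrate the style rules.
package upload

// MaxRetries bounds how many times a blob push is retried. Documenting the
// why: retrying forever would hide persistent configuration errors.
const MaxRetries = 5

// Push runs fn and retries transient failures up to MaxRetries times.
// Short scope, short names: i and err are proportional to their context.
func Push(fn func() error) error {
    var err error
    for i := 0; i < MaxRetries; i++ {
        if err = fn(); err == nil {
            return nil
        }
    }
    return err
}
```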

Dockerfile | 65
@@ -1,60 +1,19 @@
# syntax=docker/dockerfile:1
FROM golang:1.5.3

ARG GO_VERSION=1.19.10
ARG ALPINE_VERSION=3.18
ARG XX_VERSION=1.2.1
RUN apt-get update && \
    apt-get install -y librados-dev apache2-utils && \
    rm -rf /var/lib/apt/lists/*

FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
COPY --from=xx / /
RUN apk add --no-cache bash coreutils file git
ENV GO111MODULE=auto
ENV CGO_ENABLED=0
WORKDIR /src
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH
ENV DOCKER_BUILDTAGS include_rados include_oss include_gcs

FROM base AS version
ARG PKG=github.com/distribution/distribution/v3
RUN --mount=target=. \
    VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \
    echo "-X ${PKG}/version.Version=${VERSION#v} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \
    echo -n "${VERSION}" | tee /tmp/.version;

FROM base AS build
ARG TARGETPLATFORM
ARG LDFLAGS="-s -w"
ARG BUILDTAGS="include_gcs"
RUN --mount=type=bind,target=/src,rw \
    --mount=type=cache,target=/root/.cache/go-build \
    --mount=target=/go/pkg/mod,type=cache \
    --mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=version \
    set -x ; xx-go build -tags "${BUILDTAGS}" -trimpath -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/registry ./cmd/registry \
    && xx-verify --static /usr/bin/registry

FROM scratch AS binary
COPY --from=build /usr/bin/registry /

FROM base AS releaser
ARG TARGETOS
ARG TARGETARCH
ARG TARGETVARIANT
WORKDIR /work
RUN --mount=from=binary,target=/build \
    --mount=type=bind,target=/src \
    --mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=version \
    VERSION=$(cat /tmp/.version) \
    && mkdir -p /out \
    && cp /build/registry /src/README.md /src/LICENSE . \
    && tar -czvf "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" * \
    && sha256sum -z "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" | awk '{ print $1 }' > "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz.sha256"

FROM scratch AS artifact
COPY --from=releaser /out /

FROM alpine:${ALPINE_VERSION}
RUN apk add --no-cache ca-certificates
WORKDIR $DISTRIBUTION_DIR
COPY . $DISTRIBUTION_DIR
COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
COPY --from=binary /registry /bin/registry
RUN make PREFIX=/go clean binaries

VOLUME ["/var/lib/registry"]
EXPOSE 5000
ENTRYPOINT ["registry"]
CMD ["serve", "/etc/docker/registry/config.yml"]
CMD ["/etc/docker/registry/config.yml"]
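
For context on the `-X ${PKG}/version.*` linker flags computed in the Dockerfile's `version` stage above, a minimal sketch of the kind of Go package those flags overwrite at link time (illustrative only; the repository's actual version package may differ):

```go
// Package version holds build-time metadata that the Dockerfile's ldflags overwrite.
package version

var (
    // Package is the canonical import path the binary was built from.
    Package = "github.com/distribution/distribution/v3"

    // Version and Revision default to placeholders and are replaced via
    // -ldflags "-X .../version.Version=... -X .../version.Revision=...".
    Version  = "v0.0.0+unknown"
    Revision = ""
)
```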

GOVERNANCE.md | 144
@@ -1,144 +0,0 @@
# distribution/distribution Project Governance

Distribution [Code of Conduct](./CODE-OF-CONDUCT.md) can be found here.

For specific guidance on practical contribution steps please
see our [CONTRIBUTING.md](./CONTRIBUTING.md) guide.

## Maintainership

There are different types of maintainers, with different responsibilities, but
all maintainers have 3 things in common:

1) They share responsibility in the project's success.
2) They have made a long-term, recurring time investment to improve the project.
3) They spend that time doing whatever needs to be done, not necessarily what
is the most interesting or fun.

Maintainers are often under-appreciated, because their work is harder to appreciate.
It's easy to appreciate a really cool and technically advanced feature. It's harder
to appreciate the absence of bugs, the slow but steady improvement in stability,
or the reliability of a release process. But those things distinguish a good
project from a great one.

## Reviewers

A reviewer is a core role within the project.
They share in reviewing issues and pull requests and their LGTM counts towards the
required LGTM count to merge a code change into the project.

Reviewers are part of the organization but do not have write access.
Becoming a reviewer is a core aspect in the journey to becoming a maintainer.

## Adding maintainers

Maintainers are first and foremost contributors that have shown they are
committed to the long term success of a project. Contributors wanting to become
maintainers are expected to be deeply involved in contributing code, pull
request review, and triage of issues in the project for more than three months.

Just contributing does not make you a maintainer, it is about building trust
with the current maintainers of the project and being a person that they can
depend on and trust to make decisions in the best interest of the project.

Periodically, the existing maintainers curate a list of contributors that have
shown regular activity on the project over the prior months. From this list,
maintainer candidates are selected and proposed in a pull request or a
maintainers communication channel.

After a candidate has been announced to the maintainers, the existing
maintainers are given five business days to discuss the candidate, raise
objections and cast their vote. Votes may take place on the communication
channel or via pull request comment. Candidates must be approved by at least 66%
of the current maintainers by adding their vote on the mailing list. The
reviewer role has the same process but only requires 33% of current maintainers.
Only maintainers of the repository that the candidate is proposed for are
allowed to vote.

If a candidate is approved, a maintainer will contact the candidate to invite
the candidate to open a pull request that adds the contributor to the
MAINTAINERS file. The voting process may take place inside a pull request if a
maintainer has already discussed the candidacy with the candidate and a
maintainer is willing to be a sponsor by opening the pull request. The candidate
becomes a maintainer once the pull request is merged.

## Stepping down policy

Life priorities, interests, and passions can change. If you're a maintainer but
feel you must remove yourself from the list, inform other maintainers that you
intend to step down, and if possible, help find someone to pick up your work.
At the very least, ensure your work can be continued where you left off.

After you've informed other maintainers, create a pull request to remove
yourself from the MAINTAINERS file.

## Removal of inactive maintainers

Similar to the procedure for adding new maintainers, existing maintainers can
be removed from the list if they do not show significant activity on the
project. Periodically, the maintainers review the list of maintainers and their
activity over the last three months.

If a maintainer has shown insufficient activity over this period, a neutral
person will contact the maintainer to ask if they want to continue being
a maintainer. If the maintainer decides to step down as a maintainer, they
open a pull request to be removed from the MAINTAINERS file.

If the maintainer wants to remain a maintainer, but is unable to perform the
required duties they can be removed with a vote of at least 66% of the current
maintainers. In this case, maintainers should first propose the change to
maintainers via the maintainers communication channel, then open a pull request
for voting. The voting period is five business days. The voting pull request
should not come as a surpise to any maintainer and any discussion related to
performance must not be discussed on the pull request.

## How are decisions made?

Docker distribution is an open-source project with an open design philosophy.
This means that the repository is the source of truth for EVERY aspect of the
project, including its philosophy, design, road map, and APIs. *If it's part of
the project, it's in the repo. If it's in the repo, it's part of the project.*

As a result, all decisions can be expressed as changes to the repository. An
implementation change is a change to the source code. An API change is a change
to the API specification. A philosophy change is a change to the philosophy
manifesto, and so on.

All decisions affecting distribution, big and small, follow the same 3 steps:

* Step 1: Open a pull request. Anyone can do this.

* Step 2: Discuss the pull request. Anyone can do this.

* Step 3: Merge or refuse the pull request. Who does this depends on the nature
of the pull request and which areas of the project it affects.

## Helping contributors with the DCO

The [DCO or `Sign your work`](./CONTRIBUTING.md#sign-your-work)
requirement is not intended as a roadblock or speed bump.

Some contributors are not as familiar with `git`, or have used a web
based editor, and thus asking them to `git commit --amend -s` is not the best
way forward.

In this case, maintainers can update the commits based on clause (c) of the DCO.
The most trivial way for a contributor to allow the maintainer to do this, is to
add a DCO signature in a pull requests's comment, or a maintainer can simply
note that the change is sufficiently trivial that it does not substantially
change the existing contribution - i.e., a spelling change.

When you add someone's DCO, please also add your own to keep a log.

## I'm a maintainer. Should I make pull requests too?

Yes. Nobody should ever push to master directly. All changes should be
made through a pull request.

## Conflict Resolution

If you have a technical dispute that you feel has reached an impasse with a
subset of the community, any contributor may open an issue, specifically
calling for a resolution vote of the current core maintainers to resolve the
dispute. The same voting quorums required (2/3) for adding and removing
maintainers will apply to conflict resolution.

Godeps/Godeps.json | 181 (generated, Normal file)
@@ -0,0 +1,181 @@
{
  "ImportPath": "github.com/docker/distribution",
  "GoVersion": "go1.4.2",
  "Packages": [
    "./..."
  ],
  "Deps": [
    {
      "ImportPath": "golang.org/x/oauth2",
      "Rev": "8914e5017ca260f2a3a1575b1e6868874050d95e"
    },
    {
      "ImportPath": "golang.org/x/oauth2/google",
      "Rev": "8914e5017ca260f2a3a1575b1e6868874050d95e"
    },
    {
      "ImportPath": "google.golang.org/api/storage/v1",
      "Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121"
    },
    {
      "ImportPath": "google.golang.org/cloud",
      "Rev": "2400193c85c3561d13880d34e0e10c4315bb02af"
    },
    {
      "ImportPath": "google.golang.org/api",
      "Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121"
    },
    {
      "ImportPath": "google.golang.org/grpc",
      "Rev": "91c8b79535eb6045d70ec671d302213f88a3ab95"
    },
    {
      "ImportPath": "github.com/bradfitz/http2",
      "Rev": "f8202bc903bda493ebba4aa54922d78430c2c42f"
    },
    {
      "ImportPath": "github.com/golang/protobuf",
      "Rev": "0f7a9caded1fb3c9cc5a9b4bcf2ff633cc8ae644"
    },
    {
      "ImportPath": "google.golang.org/cloud/storage",
      "Rev": "2400193c85c3561d13880d34e0e10c4315bb02af"
    },
    {
      "ImportPath": "github.com/AdRoll/goamz/aws",
      "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1"
    },
    {
      "ImportPath": "github.com/AdRoll/goamz/cloudfront",
      "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1"
    },
    {
      "ImportPath": "github.com/AdRoll/goamz/s3",
      "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1"
    },
    {
      "ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
      "Rev": "97d9593768bbbbd316f9c055dfc5f780933cd7fc"
    },
    {
      "ImportPath": "github.com/Sirupsen/logrus",
      "Comment": "v0.7.3",
      "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d"
    },
    {
      "ImportPath": "github.com/bugsnag/bugsnag-go",
      "Comment": "v1.0.2-5-gb1d1530",
      "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274"
    },
    {
      "ImportPath": "github.com/bugsnag/osext",
      "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702"
    },
    {
      "ImportPath": "github.com/bugsnag/panicwrap",
      "Rev": "e2c28503fcd0675329da73bf48b33404db873782"
    },
    {
      "ImportPath": "github.com/denverdino/aliyungo/oss",
      "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502"
    },
    {
      "ImportPath": "github.com/denverdino/aliyungo/util",
      "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502"
    },
    {
      "ImportPath": "github.com/denverdino/aliyungo/common",
      "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502"
    },
    {
      "ImportPath": "github.com/docker/libtrust",
      "Rev": "fa567046d9b14f6aa788882a950d69651d230b21"
    },
    {
      "ImportPath": "github.com/garyburd/redigo/internal",
      "Rev": "535138d7bcd717d6531c701ef5933d98b1866257"
    },
    {
      "ImportPath": "github.com/garyburd/redigo/redis",
      "Rev": "535138d7bcd717d6531c701ef5933d98b1866257"
    },
    {
      "ImportPath": "github.com/gorilla/context",
      "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a"
    },
    {
      "ImportPath": "github.com/gorilla/handlers",
      "Rev": "60c7bfde3e33c201519a200a4507a158cc03a17b"
    },
    {
      "ImportPath": "github.com/gorilla/mux",
      "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf"
    },
    {
      "ImportPath": "github.com/inconshreveable/mousetrap",
      "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
    },
    {
      "ImportPath": "github.com/mitchellh/mapstructure",
      "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef"
    },
    {
      "ImportPath": "github.com/ncw/swift",
      "Rev": "c54732e87b0b283d1baf0a18db689d0aea460ba3"
    },
    {
      "ImportPath": "github.com/noahdesu/go-ceph/rados",
      "Comment": "v.0.3.0-29-gb15639c",
      "Rev": "b15639c44c05368348355229070361395d9152ee"
    },
    {
      "ImportPath": "github.com/spf13/cobra",
      "Rev": "312092086bed4968099259622145a0c9ae280064"
    },
    {
      "ImportPath": "github.com/spf13/pflag",
      "Rev": "5644820622454e71517561946e3d94b9f9db6842"
    },
    {
      "ImportPath": "github.com/stevvooe/resumable",
      "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4"
    },
    {
      "ImportPath": "github.com/yvasiyarov/go-metrics",
      "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e"
    },
    {
      "ImportPath": "github.com/yvasiyarov/gorelic",
      "Comment": "v0.0.6-8-ga9bba5b",
      "Rev": "a9bba5b9ab508a086f9a12b8c51fab68478e2128"
    },
    {
      "ImportPath": "github.com/yvasiyarov/newrelic_platform_go",
      "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6"
    },
    {
      "ImportPath": "golang.org/x/crypto/bcrypt",
      "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b"
    },
    {
      "ImportPath": "golang.org/x/crypto/blowfish",
      "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b"
    },
    {
      "ImportPath": "golang.org/x/net/context",
      "Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5"
    },
    {
      "ImportPath": "golang.org/x/net/trace",
      "Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5"
    },
    {
      "ImportPath": "gopkg.in/check.v1",
      "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673"
    },
    {
      "ImportPath": "gopkg.in/yaml.v2",
      "Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420"
    }
  ]
}
5 Godeps/Readme generated Normal file
@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
2 Godeps/_workspace/.gitignore generated vendored Normal file
@@ -0,0 +1,2 @@
/pkg
/bin
74 Godeps/_workspace/src/github.com/AdRoll/goamz/aws/attempt.go generated vendored Normal file
@@ -0,0 +1,74 @@
package aws

import (
    "time"
)

// AttemptStrategy represents a strategy for waiting for an action
// to complete successfully. This is an internal type used by the
// implementation of other goamz packages.
type AttemptStrategy struct {
    Total time.Duration // total duration of attempt.
    Delay time.Duration // interval between each try in the burst.
    Min   int           // minimum number of retries; overrides Total
}

type Attempt struct {
    strategy AttemptStrategy
    last     time.Time
    end      time.Time
    force    bool
    count    int
}

// Start begins a new sequence of attempts for the given strategy.
func (s AttemptStrategy) Start() *Attempt {
    now := time.Now()
    return &Attempt{
        strategy: s,
        last:     now,
        end:      now.Add(s.Total),
        force:    true,
    }
}

// Next waits until it is time to perform the next attempt or returns
// false if it is time to stop trying.
func (a *Attempt) Next() bool {
    now := time.Now()
    sleep := a.nextSleep(now)
    if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
        return false
    }
    a.force = false
    if sleep > 0 && a.count > 0 {
        time.Sleep(sleep)
        now = time.Now()
    }
    a.count++
    a.last = now
    return true
}

func (a *Attempt) nextSleep(now time.Time) time.Duration {
    sleep := a.strategy.Delay - now.Sub(a.last)
    if sleep < 0 {
        return 0
    }
    return sleep
}

// HasNext returns whether another attempt will be made if the current
// one fails. If it returns true, the following call to Next is
// guaranteed to return true.
func (a *Attempt) HasNext() bool {
    if a.force || a.strategy.Min > a.count {
        return true
    }
    now := time.Now()
    if now.Add(a.nextSleep(now)).Before(a.end) {
        a.force = true
        return true
    }
    return false
}
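The AttemptStrategy/Attempt pair above drives the retry and polling loops used elsewhere in goamz. A minimal usage sketch follows; it is illustrative only and not part of the diff, and it assumes the vendored package is importable as github.com/AdRoll/goamz/aws and that checkBucket is a hypothetical callback that may fail transiently.

package main

import (
    "fmt"
    "time"

    "github.com/AdRoll/goamz/aws"
)

func main() {
    strategy := aws.AttemptStrategy{
        Total: 5 * time.Second,        // give up roughly 5s after Start
        Delay: 200 * time.Millisecond, // pause between tries
        Min:   3,                      // but always allow at least 3 tries
    }
    var err error
    for attempt := strategy.Start(); attempt.Next(); {
        err = checkBucket() // hypothetical operation that may fail transiently
        if err == nil {
            break
        }
        if !attempt.HasNext() {
            break // this was the last try; keep the final error
        }
    }
    fmt.Println("final error:", err)
}

// checkBucket is a stand-in for a real AWS call.
func checkBucket() error { return nil }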
57 Godeps/_workspace/src/github.com/AdRoll/goamz/aws/attempt_test.go generated vendored Normal file
@@ -0,0 +1,57 @@
package aws_test

import (
    "github.com/AdRoll/goamz/aws"
    "gopkg.in/check.v1"
    "time"
)

func (S) TestAttemptTiming(c *check.C) {
    testAttempt := aws.AttemptStrategy{
        Total: 0.25e9,
        Delay: 0.1e9,
    }
    want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}
    got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing
    t0 := time.Now()
    for a := testAttempt.Start(); a.Next(); {
        got = append(got, time.Now().Sub(t0))
    }
    got = append(got, time.Now().Sub(t0))
    c.Assert(got, check.HasLen, len(want))
    const margin = 0.01e9
    for i, got := range want {
        lo := want[i] - margin
        hi := want[i] + margin
        if got < lo || got > hi {
            c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds())
        }
    }
}

func (S) TestAttemptNextHasNext(c *check.C) {
    a := aws.AttemptStrategy{}.Start()
    c.Assert(a.Next(), check.Equals, true)
    c.Assert(a.Next(), check.Equals, false)

    a = aws.AttemptStrategy{}.Start()
    c.Assert(a.Next(), check.Equals, true)
    c.Assert(a.HasNext(), check.Equals, false)
    c.Assert(a.Next(), check.Equals, false)

    a = aws.AttemptStrategy{Total: 2e8}.Start()
    c.Assert(a.Next(), check.Equals, true)
    c.Assert(a.HasNext(), check.Equals, true)
    time.Sleep(2e8)
    c.Assert(a.HasNext(), check.Equals, true)
    c.Assert(a.Next(), check.Equals, true)
    c.Assert(a.Next(), check.Equals, false)

    a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start()
    time.Sleep(1e8)
    c.Assert(a.Next(), check.Equals, true)
    c.Assert(a.HasNext(), check.Equals, true)
    c.Assert(a.Next(), check.Equals, true)
    c.Assert(a.HasNext(), check.Equals, false)
    c.Assert(a.Next(), check.Equals, false)
}
629 Godeps/_workspace/src/github.com/AdRoll/goamz/aws/aws.go generated vendored Normal file
@@ -0,0 +1,629 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
//   https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
//
package aws

import (
    "encoding/json"
    "encoding/xml"
    "errors"
    "fmt"
    "io/ioutil"
    "net"
    "net/http"
    "net/url"
    "os"
    "os/user"
    "path"
    "regexp"
    "strings"
    "time"
)

// Regular expressions for INI files
var (
    iniSectionRegexp = regexp.MustCompile(`^\s*\[([^\[\]]+)\]\s*$`)
    iniSettingRegexp = regexp.MustCompile(`^\s*(.+?)\s*=\s*(.*\S)\s*$`)
)

// Defines the valid signers
const (
    V2Signature      = iota
    V4Signature      = iota
    Route53Signature = iota
)

// Defines the service endpoint and correct Signer implementation to use
// to sign requests for this endpoint
type ServiceInfo struct {
    Endpoint string
    Signer   uint
}

// Region defines the URLs where AWS services may be accessed.
//
// See http://goo.gl/d8BP1 for more details.
type Region struct {
    Name                   string // the canonical name of this region.
    EC2Endpoint            ServiceInfo
    S3Endpoint             string
    S3BucketEndpoint       string // Not needed by AWS S3. Use ${bucket} for bucket name.
    S3LocationConstraint   bool   // true if this region requires a LocationConstraint declaration.
    S3LowercaseBucket      bool   // true if the region requires bucket names to be lower case.
    SDBEndpoint            string
    SNSEndpoint            string
    SQSEndpoint            string
    SESEndpoint            string
    IAMEndpoint            string
    ELBEndpoint            string
    KMSEndpoint            string
    DynamoDBEndpoint       string
    CloudWatchServicepoint ServiceInfo
    AutoScalingEndpoint    string
    RDSEndpoint            ServiceInfo
    KinesisEndpoint        string
    STSEndpoint            string
    CloudFormationEndpoint string
    ElastiCacheEndpoint    string
}

var Regions = map[string]Region{
    APNortheast.Name:  APNortheast,
    APSoutheast.Name:  APSoutheast,
    APSoutheast2.Name: APSoutheast2,
    EUCentral.Name:    EUCentral,
    EUWest.Name:       EUWest,
    USEast.Name:       USEast,
    USWest.Name:       USWest,
    USWest2.Name:      USWest2,
    USGovWest.Name:    USGovWest,
    SAEast.Name:       SAEast,
    CNNorth1.Name:     CNNorth1,
}

// Designates a signer interface suitable for signing AWS requests, params
// should be appropriately encoded for the request before signing.
//
// A signer should be initialized with Auth and the appropriate endpoint.
type Signer interface {
    Sign(method, path string, params map[string]string)
}

// An AWS Service interface with the API to query the AWS service
//
// Supplied as an easy way to mock out service calls during testing.
type AWSService interface {
    // Queries the AWS service at a given method/path with the params and
    // returns an http.Response and error
    Query(method, path string, params map[string]string) (*http.Response, error)
    // Builds an error given an XML payload in the http.Response, can be used
    // to process an error if the status code is not 200 for example.
    BuildError(r *http.Response) error
}

// Implements a Server Query/Post API to easily query AWS services and build
// errors when desired
type Service struct {
    service ServiceInfo
    signer  Signer
}

// Create a base set of params for an action
func MakeParams(action string) map[string]string {
    params := make(map[string]string)
    params["Action"] = action
    return params
}

// Create a new AWS server to handle making requests
func NewService(auth Auth, service ServiceInfo) (s *Service, err error) {
    var signer Signer
    switch service.Signer {
    case V2Signature:
        signer, err = NewV2Signer(auth, service)
    // case V4Signature:
    //     signer, err = NewV4Signer(auth, service, Regions["eu-west-1"])
    default:
        err = fmt.Errorf("Unsupported signer for service")
    }
    if err != nil {
        return
    }
    s = &Service{service: service, signer: signer}
    return
}

func (s *Service) Query(method, path string, params map[string]string) (resp *http.Response, err error) {
    params["Timestamp"] = time.Now().UTC().Format(time.RFC3339)
    u, err := url.Parse(s.service.Endpoint)
    if err != nil {
        return nil, err
    }
    u.Path = path

    s.signer.Sign(method, path, params)
    if method == "GET" {
        u.RawQuery = multimap(params).Encode()
        resp, err = http.Get(u.String())
    } else if method == "POST" {
        resp, err = http.PostForm(u.String(), multimap(params))
    }

    return
}

func (s *Service) BuildError(r *http.Response) error {
    errors := ErrorResponse{}
    xml.NewDecoder(r.Body).Decode(&errors)
    var err Error
    err = errors.Errors
    err.RequestId = errors.RequestId
    err.StatusCode = r.StatusCode
    if err.Message == "" {
        err.Message = r.Status
    }
    return &err
}

type ServiceError interface {
    error
    ErrorCode() string
}

type ErrorResponse struct {
    Errors    Error  `xml:"Error"`
    RequestId string // A unique ID for tracking the request
}

type Error struct {
    StatusCode int
    Type       string
    Code       string
    Message    string
    RequestId  string
}

func (err *Error) Error() string {
    return fmt.Sprintf("Type: %s, Code: %s, Message: %s",
        err.Type, err.Code, err.Message,
    )
}

func (err *Error) ErrorCode() string {
    return err.Code
}

type Auth struct {
    AccessKey, SecretKey string
    token                string
    expiration           time.Time
}

func (a *Auth) Token() string {
    if a.token == "" {
        return ""
    }
    if time.Since(a.expiration) >= -30*time.Second { //in an ideal world this should be zero assuming the instance is synching it's clock
        auth, err := GetAuth("", "", "", time.Time{})
        if err == nil {
            *a = auth
        }
    }
    return a.token
}

func (a *Auth) Expiration() time.Time {
    return a.expiration
}

// To be used with other APIs that return auth credentials such as STS
func NewAuth(accessKey, secretKey, token string, expiration time.Time) *Auth {
    return &Auth{
        AccessKey:  accessKey,
        SecretKey:  secretKey,
        token:      token,
        expiration: expiration,
    }
}

// ResponseMetadata
type ResponseMetadata struct {
    RequestId string // A unique ID for tracking the request
}

type BaseResponse struct {
    ResponseMetadata ResponseMetadata
}

var unreserved = make([]bool, 128)
var hex = "0123456789ABCDEF"

func init() {
    // RFC3986
    u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
    for _, c := range u {
        unreserved[c] = true
    }
}

func multimap(p map[string]string) url.Values {
    q := make(url.Values, len(p))
    for k, v := range p {
        q[k] = []string{v}
    }
    return q
}

type credentials struct {
    Code            string
    LastUpdated     string
    Type            string
    AccessKeyId     string
    SecretAccessKey string
    Token           string
    Expiration      string
}

// GetMetaData retrieves instance metadata about the current machine.
//
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.
func GetMetaData(path string) (contents []byte, err error) {
    c := http.Client{
        Transport: &http.Transport{
            Dial: func(netw, addr string) (net.Conn, error) {
                deadline := time.Now().Add(5 * time.Second)
                c, err := net.DialTimeout(netw, addr, time.Second*2)
                if err != nil {
                    return nil, err
                }
                c.SetDeadline(deadline)
                return c, nil
            },
        },
    }

    url := "http://169.254.169.254/latest/meta-data/" + path

    resp, err := c.Get(url)
    if err != nil {
        return
    }
    defer resp.Body.Close()

    if resp.StatusCode != 200 {
        err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
        return
    }

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return
    }
    return []byte(body), err
}

func GetRegion(regionName string) (region Region) {
    region = Regions[regionName]
    return
}

// GetInstanceCredentials creates an Auth based on the instance's role credentials.
// If the running instance is not in EC2 or does not have a valid IAM role, an error will be returned.
// For more info about setting up IAM roles, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
func GetInstanceCredentials() (cred credentials, err error) {
    credentialPath := "iam/security-credentials/"

    // Get the instance role
    role, err := GetMetaData(credentialPath)
    if err != nil {
        return
    }

    // Get the instance role credentials
    credentialJSON, err := GetMetaData(credentialPath + string(role))
    if err != nil {
        return
    }

    err = json.Unmarshal([]byte(credentialJSON), &cred)
    return
}

// GetAuth creates an Auth based on either passed in credentials,
// environment information or instance based role credentials.
func GetAuth(accessKey string, secretKey, token string, expiration time.Time) (auth Auth, err error) {
    // First try passed in credentials
    if accessKey != "" && secretKey != "" {
        return Auth{accessKey, secretKey, token, expiration}, nil
    }

    // Next try to get auth from the environment
    auth, err = EnvAuth()
    if err == nil {
        // Found auth, return
        return
    }

    // Next try getting auth from the instance role
    cred, err := GetInstanceCredentials()
    if err == nil {
        // Found auth, return
        auth.AccessKey = cred.AccessKeyId
        auth.SecretKey = cred.SecretAccessKey
        auth.token = cred.Token
        exptdate, err := time.Parse("2006-01-02T15:04:05Z", cred.Expiration)
        if err != nil {
            err = fmt.Errorf("Error Parsing expiration date: cred.Expiration :%s , error: %s \n", cred.Expiration, err)
        }
        auth.expiration = exptdate
        return auth, err
    }

    // Next try getting auth from the credentials file
    auth, err = CredentialFileAuth("", "", time.Minute*5)
    if err == nil {
        return
    }

    //err = errors.New("No valid AWS authentication found")
    err = fmt.Errorf("No valid AWS authentication found: %s", err)
    return auth, err
}

// EnvAuth creates an Auth based on environment information.
// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
// variables are used.
func EnvAuth() (auth Auth, err error) {
    auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
    if auth.AccessKey == "" {
        auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
    }

    auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
    if auth.SecretKey == "" {
        auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
    }
    if auth.AccessKey == "" {
        err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
    }
    if auth.SecretKey == "" {
        err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
    }
    return
}

// CredentialFileAuth creates and Auth based on a credentials file. The file
// contains various authentication profiles for use with AWS.
//
// The credentials file, which is used by other AWS SDKs, is documented at
// http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs
func CredentialFileAuth(filePath string, profile string, expiration time.Duration) (auth Auth, err error) {
    if profile == "" {
        profile = "default"
    }

    if filePath == "" {
        u, err := user.Current()
        if err != nil {
            return auth, err
        }

        filePath = path.Join(u.HomeDir, ".aws", "credentials")
    }

    // read the file, then parse the INI
    contents, err := ioutil.ReadFile(filePath)
    if err != nil {
        return
    }

    profiles := parseINI(string(contents))
    profileData, ok := profiles[profile]

    if !ok {
        err = errors.New("The credentials file did not contain the profile")
        return
    }

    keyId, ok := profileData["aws_access_key_id"]
    if !ok {
        err = errors.New("The credentials file did not contain required attribute aws_access_key_id")
        return
    }

    secretKey, ok := profileData["aws_secret_access_key"]
    if !ok {
        err = errors.New("The credentials file did not contain required attribute aws_secret_access_key")
        return
    }

    auth.AccessKey = keyId
    auth.SecretKey = secretKey

    if token, ok := profileData["aws_session_token"]; ok {
        auth.token = token
    }

    auth.expiration = time.Now().Add(expiration)

    return
}

// parseINI takes the contents of a credentials file and returns a map, whose keys
// are the various profiles, and whose values are maps of the settings for the
// profiles
func parseINI(fileContents string) map[string]map[string]string {
    profiles := make(map[string]map[string]string)

    lines := strings.Split(fileContents, "\n")

    var currentSection map[string]string
    for _, line := range lines {
        // remove comments, which start with a semi-colon
        if split := strings.Split(line, ";"); len(split) > 1 {
            line = split[0]
        }

        // check if the line is the start of a profile.
        //
        // for example:
        //     [default]
        //
        // otherwise, check for the proper setting
        //     property=value
        if sectMatch := iniSectionRegexp.FindStringSubmatch(line); len(sectMatch) == 2 {
            currentSection = make(map[string]string)
            profiles[sectMatch[1]] = currentSection
        } else if setMatch := iniSettingRegexp.FindStringSubmatch(line); len(setMatch) == 3 && currentSection != nil {
            currentSection[setMatch[1]] = setMatch[2]
        }
    }

    return profiles
}

// Encode takes a string and URI-encodes it in a way suitable
// to be used in AWS signatures.
func Encode(s string) string {
    encode := false
    for i := 0; i != len(s); i++ {
        c := s[i]
        if c > 127 || !unreserved[c] {
            encode = true
            break
        }
    }
    if !encode {
        return s
    }
    e := make([]byte, len(s)*3)
    ei := 0
    for i := 0; i != len(s); i++ {
        c := s[i]
        if c > 127 || !unreserved[c] {
            e[ei] = '%'
            e[ei+1] = hex[c>>4]
            e[ei+2] = hex[c&0xF]
            ei += 3
        } else {
            e[ei] = c
            ei += 1
        }
    }
    return string(e[:ei])
}

func dialTimeout(network, addr string) (net.Conn, error) {
    return net.DialTimeout(network, addr, time.Duration(2*time.Second))
}

func AvailabilityZone() string {
    transport := http.Transport{Dial: dialTimeout}
    client := http.Client{
        Transport: &transport,
    }
    resp, err := client.Get("http://169.254.169.254/latest/meta-data/placement/availability-zone")
    if err != nil {
        return "unknown"
    } else {
        defer resp.Body.Close()
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return "unknown"
        } else {
            return string(body)
        }
    }
}

func InstanceRegion() string {
    az := AvailabilityZone()
    if az == "unknown" {
        return az
    } else {
        region := az[:len(az)-1]
        return region
    }
}

func InstanceId() string {
    transport := http.Transport{Dial: dialTimeout}
    client := http.Client{
        Transport: &transport,
    }
    resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-id")
    if err != nil {
        return "unknown"
    } else {
        defer resp.Body.Close()
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return "unknown"
        } else {
            return string(body)
        }
    }
}

func InstanceType() string {
    transport := http.Transport{Dial: dialTimeout}
    client := http.Client{
        Transport: &transport,
    }
    resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-type")
    if err != nil {
        return "unknown"
    } else {
        defer resp.Body.Close()
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return "unknown"
        } else {
            return string(body)
        }
    }
}

func ServerLocalIp() string {
    transport := http.Transport{Dial: dialTimeout}
    client := http.Client{
        Transport: &transport,
    }
    resp, err := client.Get("http://169.254.169.254/latest/meta-data/local-ipv4")
    if err != nil {
        return "127.0.0.1"
    } else {
        defer resp.Body.Close()
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return "127.0.0.1"
        } else {
            return string(body)
        }
    }
}

func ServerPublicIp() string {
    transport := http.Transport{Dial: dialTimeout}
    client := http.Client{
        Transport: &transport,
    }
    resp, err := client.Get("http://169.254.169.254/latest/meta-data/public-ipv4")
    if err != nil {
        return "127.0.0.1"
    } else {
        defer resp.Body.Close()
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return "127.0.0.1"
        } else {
            return string(body)
        }
    }
}
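GetAuth above resolves credentials through a fixed fallback chain: explicit keys, then environment variables, then the EC2 instance role, and finally the shared ~/.aws/credentials file parsed by parseINI. A minimal sketch of calling it from application code follows; it is illustrative only, not part of the diff, and assumes the vendored package is importable as github.com/AdRoll/goamz/aws.

package main

import (
    "fmt"
    "time"

    "github.com/AdRoll/goamz/aws"
)

func main() {
    // Empty strings make GetAuth fall through to the environment, the
    // instance role, and then the credentials file, in that order.
    auth, err := aws.GetAuth("", "", "", time.Time{})
    if err != nil {
        fmt.Println("no credentials:", err)
        return
    }
    region := aws.GetRegion("us-east-1")
    fmt.Println("access key:", auth.AccessKey)
    fmt.Println("S3 endpoint:", region.S3Endpoint)
}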
140 Godeps/_workspace/src/github.com/AdRoll/goamz/aws/aws_test.go generated vendored Normal file
@@ -0,0 +1,140 @@
package aws_test

import (
    "github.com/AdRoll/goamz/aws"
    "gopkg.in/check.v1"
    "io/ioutil"
    "os"
    "strings"
    "testing"
    "time"
)

func Test(t *testing.T) {
    check.TestingT(t)
}

var _ = check.Suite(&S{})

type S struct {
    environ []string
}

func (s *S) SetUpSuite(c *check.C) {
    s.environ = os.Environ()
}

func (s *S) TearDownTest(c *check.C) {
    os.Clearenv()
    for _, kv := range s.environ {
        l := strings.SplitN(kv, "=", 2)
        os.Setenv(l[0], l[1])
    }
}

func (s *S) TestEnvAuthNoSecret(c *check.C) {
    os.Clearenv()
    _, err := aws.EnvAuth()
    c.Assert(err, check.ErrorMatches, "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
}

func (s *S) TestEnvAuthNoAccess(c *check.C) {
    os.Clearenv()
    os.Setenv("AWS_SECRET_ACCESS_KEY", "foo")
    _, err := aws.EnvAuth()
    c.Assert(err, check.ErrorMatches, "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
}

func (s *S) TestEnvAuth(c *check.C) {
    os.Clearenv()
    os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
    os.Setenv("AWS_ACCESS_KEY_ID", "access")
    auth, err := aws.EnvAuth()
    c.Assert(err, check.IsNil)
    c.Assert(auth, check.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}

func (s *S) TestEnvAuthAlt(c *check.C) {
    os.Clearenv()
    os.Setenv("AWS_SECRET_KEY", "secret")
    os.Setenv("AWS_ACCESS_KEY", "access")
    auth, err := aws.EnvAuth()
    c.Assert(err, check.IsNil)
    c.Assert(auth, check.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}

func (s *S) TestGetAuthStatic(c *check.C) {
    exptdate := time.Now().Add(time.Hour)
    auth, err := aws.GetAuth("access", "secret", "token", exptdate)
    c.Assert(err, check.IsNil)
    c.Assert(auth.AccessKey, check.Equals, "access")
    c.Assert(auth.SecretKey, check.Equals, "secret")
    c.Assert(auth.Token(), check.Equals, "token")
    c.Assert(auth.Expiration(), check.Equals, exptdate)
}

func (s *S) TestGetAuthEnv(c *check.C) {
    os.Clearenv()
    os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
    os.Setenv("AWS_ACCESS_KEY_ID", "access")
    auth, err := aws.GetAuth("", "", "", time.Time{})
    c.Assert(err, check.IsNil)
    c.Assert(auth, check.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}

func (s *S) TestEncode(c *check.C) {
    c.Assert(aws.Encode("foo"), check.Equals, "foo")
    c.Assert(aws.Encode("/"), check.Equals, "%2F")
}

func (s *S) TestRegionsAreNamed(c *check.C) {
    for n, r := range aws.Regions {
        c.Assert(n, check.Equals, r.Name)
    }
}

func (s *S) TestCredentialsFileAuth(c *check.C) {
    file, err := ioutil.TempFile("", "creds")

    if err != nil {
        c.Fatal(err)
    }

    iniFile := `

[default] ; comment 123
aws_access_key_id = keyid1 ;comment
aws_secret_access_key=key1

[profile2]
aws_access_key_id = keyid2 ;comment
aws_secret_access_key=key2
aws_session_token=token1

`
    _, err = file.WriteString(iniFile)
    if err != nil {
        c.Fatal(err)
    }

    err = file.Close()
    if err != nil {
        c.Fatal(err)
    }

    // check non-existant profile
    _, err = aws.CredentialFileAuth(file.Name(), "no profile", 30*time.Minute)
    c.Assert(err, check.Not(check.Equals), nil)

    defaultProfile, err := aws.CredentialFileAuth(file.Name(), "default", 30*time.Minute)
    c.Assert(err, check.Equals, nil)
    c.Assert(defaultProfile.AccessKey, check.Equals, "keyid1")
    c.Assert(defaultProfile.SecretKey, check.Equals, "key1")
    c.Assert(defaultProfile.Token(), check.Equals, "")

    profile2, err := aws.CredentialFileAuth(file.Name(), "profile2", 30*time.Minute)
    c.Assert(err, check.Equals, nil)
    c.Assert(profile2.AccessKey, check.Equals, "keyid2")
    c.Assert(profile2.SecretKey, check.Equals, "key2")
    c.Assert(profile2.Token(), check.Equals, "token1")
}
124 Godeps/_workspace/src/github.com/AdRoll/goamz/aws/client.go generated vendored Normal file
@@ -0,0 +1,124 @@
package aws

import (
    "math"
    "net"
    "net/http"
    "time"
)

type RetryableFunc func(*http.Request, *http.Response, error) bool
type WaitFunc func(try int)
type DeadlineFunc func() time.Time

type ResilientTransport struct {
    // Timeout is the maximum amount of time a dial will wait for
    // a connect to complete.
    //
    // The default is no timeout.
    //
    // With or without a timeout, the operating system may impose
    // its own earlier timeout. For instance, TCP timeouts are
    // often around 3 minutes.
    DialTimeout time.Duration

    // MaxTries, if non-zero, specifies the number of times we will retry on
    // failure. Retries are only attempted for temporary network errors or known
    // safe failures.
    MaxTries    int
    Deadline    DeadlineFunc
    ShouldRetry RetryableFunc
    Wait        WaitFunc
    transport   *http.Transport
}

// Convenience method for creating an http client
func NewClient(rt *ResilientTransport) *http.Client {
    rt.transport = &http.Transport{
        Dial: func(netw, addr string) (net.Conn, error) {
            c, err := net.DialTimeout(netw, addr, rt.DialTimeout)
            if err != nil {
                return nil, err
            }
            c.SetDeadline(rt.Deadline())
            return c, nil
        },
        Proxy: http.ProxyFromEnvironment,
    }
    // TODO: Would be nice is ResilientTransport allowed clients to initialize
    // with http.Transport attributes.
    return &http.Client{
        Transport: rt,
    }
}

var retryingTransport = &ResilientTransport{
    Deadline: func() time.Time {
        return time.Now().Add(5 * time.Second)
    },
    DialTimeout: 10 * time.Second,
    MaxTries:    3,
    ShouldRetry: awsRetry,
    Wait:        ExpBackoff,
}

// Exported default client
var RetryingClient = NewClient(retryingTransport)

func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) {
    return t.tries(req)
}

// Retry a request a maximum of t.MaxTries times.
// We'll only retry if the proper criteria are met.
// If a wait function is specified, wait that amount of time
// In between requests.
func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) {
    for try := 0; try < t.MaxTries; try += 1 {
        res, err = t.transport.RoundTrip(req)

        if !t.ShouldRetry(req, res, err) {
            break
        }
        if res != nil {
            res.Body.Close()
        }
        if t.Wait != nil {
            t.Wait(try)
        }
    }

    return
}

func ExpBackoff(try int) {
    time.Sleep(100 * time.Millisecond *
        time.Duration(math.Exp2(float64(try))))
}

func LinearBackoff(try int) {
    time.Sleep(time.Duration(try*100) * time.Millisecond)
}

// Decide if we should retry a request.
// In general, the criteria for retrying a request is described here
// http://docs.aws.amazon.com/general/latest/gr/api-retries.html
func awsRetry(req *http.Request, res *http.Response, err error) bool {
    retry := false

    // Retry if there's a temporary network error.
    if neterr, ok := err.(net.Error); ok {
        if neterr.Temporary() {
            retry = true
        }
    }

    // Retry if we get a 5xx series error.
    if res != nil {
        if res.StatusCode >= 500 && res.StatusCode < 600 {
            retry = true
        }
    }

    return retry
}
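ResilientTransport wraps http.Transport so that RoundTrip re-issues a request up to MaxTries times, using ShouldRetry to decide and Wait to pause between attempts. The sketch below shows two ways a caller might use it: the exported RetryingClient and a custom transport. It is illustrative only and not part of the diff; the import path, the custom retry criteria, and the example URL are assumptions.

package main

import (
    "fmt"
    "net/http"
    "time"

    "github.com/AdRoll/goamz/aws"
)

func main() {
    // The package-level client retries up to 3 times with exponential backoff.
    resp, err := aws.RetryingClient.Get("https://s3.amazonaws.com/")
    if err == nil {
        resp.Body.Close()
    }
    fmt.Println("default client:", err)

    // A custom transport: more tries, linear backoff, caller-supplied criteria
    // (awsRetry itself is unexported, so external code provides its own).
    custom := aws.NewClient(&aws.ResilientTransport{
        DialTimeout: 5 * time.Second,
        MaxTries:    5,
        Deadline:    func() time.Time { return time.Now().Add(10 * time.Second) },
        ShouldRetry: func(req *http.Request, res *http.Response, err error) bool {
            return err != nil || (res != nil && res.StatusCode >= 500)
        },
        Wait: aws.LinearBackoff,
    })
    _ = custom // use like any *http.Client
}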
29 Godeps/_workspace/src/github.com/AdRoll/goamz/aws/export_test.go generated vendored Normal file
@@ -0,0 +1,29 @@
package aws

import (
    "net/http"
    "time"
)

// V4Signer:
// Exporting methods for testing

func (s *V4Signer) RequestTime(req *http.Request) time.Time {
    return s.requestTime(req)
}

func (s *V4Signer) CanonicalRequest(req *http.Request) string {
    return s.canonicalRequest(req, "")
}

func (s *V4Signer) StringToSign(t time.Time, creq string) string {
    return s.stringToSign(t, creq)
}

func (s *V4Signer) Signature(t time.Time, sts string) string {
    return s.signature(t, sts)
}

func (s *V4Signer) Authorization(header http.Header, t time.Time, signature string) string {
    return s.authorization(header, t, signature)
}
265 Godeps/_workspace/src/github.com/AdRoll/goamz/aws/regions.go generated vendored Normal file
@@ -0,0 +1,265 @@
package aws

var USGovWest = Region{
    "us-gov-west-1",
    ServiceInfo{"https://ec2.us-gov-west-1.amazonaws.com", V2Signature},
    "https://s3-fips-us-gov-west-1.amazonaws.com",
    "",
    true,
    true,
    "",
    "https://sns.us-gov-west-1.amazonaws.com",
    "https://sqs.us-gov-west-1.amazonaws.com",
    "",
    "https://iam.us-gov.amazonaws.com",
    "https://elasticloadbalancing.us-gov-west-1.amazonaws.com",
    "",
    "https://dynamodb.us-gov-west-1.amazonaws.com",
    ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature},
    "https://autoscaling.us-gov-west-1.amazonaws.com",
    ServiceInfo{"https://rds.us-gov-west-1.amazonaws.com", V2Signature},
    "",
    "https://sts.amazonaws.com",
    "https://cloudformation.us-gov-west-1.amazonaws.com",
    "",
}

var USEast = Region{
    "us-east-1",
    ServiceInfo{"https://ec2.us-east-1.amazonaws.com", V2Signature},
    "https://s3-external-1.amazonaws.com",
    "",
    false,
    false,
    "https://sdb.amazonaws.com",
    "https://sns.us-east-1.amazonaws.com",
    "https://sqs.us-east-1.amazonaws.com",
    "https://email.us-east-1.amazonaws.com",
    "https://iam.amazonaws.com",
    "https://elasticloadbalancing.us-east-1.amazonaws.com",
    "https://kms.us-east-1.amazonaws.com",
    "https://dynamodb.us-east-1.amazonaws.com",
    ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature},
    "https://autoscaling.us-east-1.amazonaws.com",
    ServiceInfo{"https://rds.us-east-1.amazonaws.com", V2Signature},
    "https://kinesis.us-east-1.amazonaws.com",
    "https://sts.amazonaws.com",
    "https://cloudformation.us-east-1.amazonaws.com",
    "https://elasticache.us-east-1.amazonaws.com",
}

var USWest = Region{
    "us-west-1",
    ServiceInfo{"https://ec2.us-west-1.amazonaws.com", V2Signature},
    "https://s3-us-west-1.amazonaws.com",
    "",
    true,
    true,
    "https://sdb.us-west-1.amazonaws.com",
    "https://sns.us-west-1.amazonaws.com",
    "https://sqs.us-west-1.amazonaws.com",
    "",
    "https://iam.amazonaws.com",
    "https://elasticloadbalancing.us-west-1.amazonaws.com",
    "https://kms.us-west-1.amazonaws.com",
    "https://dynamodb.us-west-1.amazonaws.com",
    ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature},
    "https://autoscaling.us-west-1.amazonaws.com",
    ServiceInfo{"https://rds.us-west-1.amazonaws.com", V2Signature},
    "https://kinesis.us-west-1.amazonaws.com",
    "https://sts.amazonaws.com",
    "https://cloudformation.us-west-1.amazonaws.com",
    "https://elasticache.us-west-1.amazonaws.com",
}

var USWest2 = Region{
    "us-west-2",
    ServiceInfo{"https://ec2.us-west-2.amazonaws.com", V2Signature},
    "https://s3-us-west-2.amazonaws.com",
    "",
    true,
    true,
    "https://sdb.us-west-2.amazonaws.com",
    "https://sns.us-west-2.amazonaws.com",
    "https://sqs.us-west-2.amazonaws.com",
    "https://email.us-west-2.amazonaws.com",
    "https://iam.amazonaws.com",
    "https://elasticloadbalancing.us-west-2.amazonaws.com",
    "https://kms.us-west-2.amazonaws.com",
    "https://dynamodb.us-west-2.amazonaws.com",
    ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature},
    "https://autoscaling.us-west-2.amazonaws.com",
    ServiceInfo{"https://rds.us-west-2.amazonaws.com", V2Signature},
    "https://kinesis.us-west-2.amazonaws.com",
    "https://sts.amazonaws.com",
    "https://cloudformation.us-west-2.amazonaws.com",
    "https://elasticache.us-west-2.amazonaws.com",
}

var EUWest = Region{
    "eu-west-1",
    ServiceInfo{"https://ec2.eu-west-1.amazonaws.com", V2Signature},
    "https://s3-eu-west-1.amazonaws.com",
    "",
    true,
    true,
    "https://sdb.eu-west-1.amazonaws.com",
    "https://sns.eu-west-1.amazonaws.com",
    "https://sqs.eu-west-1.amazonaws.com",
    "https://email.eu-west-1.amazonaws.com",
    "https://iam.amazonaws.com",
    "https://elasticloadbalancing.eu-west-1.amazonaws.com",
    "https://kms.eu-west-1.amazonaws.com",
    "https://dynamodb.eu-west-1.amazonaws.com",
    ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature},
    "https://autoscaling.eu-west-1.amazonaws.com",
    ServiceInfo{"https://rds.eu-west-1.amazonaws.com", V2Signature},
    "https://kinesis.eu-west-1.amazonaws.com",
    "https://sts.amazonaws.com",
    "https://cloudformation.eu-west-1.amazonaws.com",
    "https://elasticache.eu-west-1.amazonaws.com",
}

var EUCentral = Region{
    "eu-central-1",
    ServiceInfo{"https://ec2.eu-central-1.amazonaws.com", V4Signature},
    "https://s3-eu-central-1.amazonaws.com",
    "",
    true,
    true,
    "https://sdb.eu-central-1.amazonaws.com",
    "https://sns.eu-central-1.amazonaws.com",
    "https://sqs.eu-central-1.amazonaws.com",
    "",
    "https://iam.amazonaws.com",
    "https://elasticloadbalancing.eu-central-1.amazonaws.com",
    "https://kms.eu-central-1.amazonaws.com",
    "https://dynamodb.eu-central-1.amazonaws.com",
    ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature},
    "https://autoscaling.eu-central-1.amazonaws.com",
    ServiceInfo{"https://rds.eu-central-1.amazonaws.com", V2Signature},
    "https://kinesis.eu-central-1.amazonaws.com",
    "https://sts.amazonaws.com",
    "https://cloudformation.eu-central-1.amazonaws.com",
    "",
}

var APSoutheast = Region{
    "ap-southeast-1",
    ServiceInfo{"https://ec2.ap-southeast-1.amazonaws.com", V2Signature},
    "https://s3-ap-southeast-1.amazonaws.com",
    "",
    true,
    true,
    "https://sdb.ap-southeast-1.amazonaws.com",
    "https://sns.ap-southeast-1.amazonaws.com",
    "https://sqs.ap-southeast-1.amazonaws.com",
    "",
    "https://iam.amazonaws.com",
    "https://elasticloadbalancing.ap-southeast-1.amazonaws.com",
    "https://kms.ap-southeast-1.amazonaws.com",
    "https://dynamodb.ap-southeast-1.amazonaws.com",
    ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature},
    "https://autoscaling.ap-southeast-1.amazonaws.com",
    ServiceInfo{"https://rds.ap-southeast-1.amazonaws.com", V2Signature},
    "https://kinesis.ap-southeast-1.amazonaws.com",
    "https://sts.amazonaws.com",
    "https://cloudformation.ap-southeast-1.amazonaws.com",
    "https://elasticache.ap-southeast-1.amazonaws.com",
}

var APSoutheast2 = Region{
    "ap-southeast-2",
    ServiceInfo{"https://ec2.ap-southeast-2.amazonaws.com", V2Signature},
    "https://s3-ap-southeast-2.amazonaws.com",
    "",
    true,
    true,
    "https://sdb.ap-southeast-2.amazonaws.com",
    "https://sns.ap-southeast-2.amazonaws.com",
    "https://sqs.ap-southeast-2.amazonaws.com",
    "",
    "https://iam.amazonaws.com",
    "https://elasticloadbalancing.ap-southeast-2.amazonaws.com",
    "https://kms.ap-southeast-2.amazonaws.com",
    "https://dynamodb.ap-southeast-2.amazonaws.com",
    ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature},
    "https://autoscaling.ap-southeast-2.amazonaws.com",
    ServiceInfo{"https://rds.ap-southeast-2.amazonaws.com", V2Signature},
    "https://kinesis.ap-southeast-2.amazonaws.com",
    "https://sts.amazonaws.com",
    "https://cloudformation.ap-southeast-2.amazonaws.com",
    "https://elasticache.ap-southeast-2.amazonaws.com",
}

var APNortheast = Region{
    "ap-northeast-1",
    ServiceInfo{"https://ec2.ap-northeast-1.amazonaws.com", V2Signature},
    "https://s3-ap-northeast-1.amazonaws.com",
    "",
    true,
    true,
    "https://sdb.ap-northeast-1.amazonaws.com",
    "https://sns.ap-northeast-1.amazonaws.com",
    "https://sqs.ap-northeast-1.amazonaws.com",
    "",
    "https://iam.amazonaws.com",
    "https://elasticloadbalancing.ap-northeast-1.amazonaws.com",
    "https://kms.ap-northeast-1.amazonaws.com",
    "https://dynamodb.ap-northeast-1.amazonaws.com",
    ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature},
    "https://autoscaling.ap-northeast-1.amazonaws.com",
    ServiceInfo{"https://rds.ap-northeast-1.amazonaws.com", V2Signature},
    "https://kinesis.ap-northeast-1.amazonaws.com",
    "https://sts.amazonaws.com",
    "https://cloudformation.ap-northeast-1.amazonaws.com",
    "https://elasticache.ap-northeast-1.amazonaws.com",
}

var SAEast = Region{
    "sa-east-1",
    ServiceInfo{"https://ec2.sa-east-1.amazonaws.com", V2Signature},
    "https://s3-sa-east-1.amazonaws.com",
    "",
    true,
    true,
    "https://sdb.sa-east-1.amazonaws.com",
    "https://sns.sa-east-1.amazonaws.com",
    "https://sqs.sa-east-1.amazonaws.com",
    "",
    "https://iam.amazonaws.com",
    "https://elasticloadbalancing.sa-east-1.amazonaws.com",
    "https://kms.sa-east-1.amazonaws.com",
    "https://dynamodb.sa-east-1.amazonaws.com",
    ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature},
    "https://autoscaling.sa-east-1.amazonaws.com",
    ServiceInfo{"https://rds.sa-east-1.amazonaws.com", V2Signature},
    "",
    "https://sts.amazonaws.com",
    "https://cloudformation.sa-east-1.amazonaws.com",
    "https://elasticache.sa-east-1.amazonaws.com",
}

var CNNorth1 = Region{
    "cn-north-1",
    ServiceInfo{"https://ec2.cn-north-1.amazonaws.com.cn", V2Signature},
    "https://s3.cn-north-1.amazonaws.com.cn",
    "",
    true,
    true,
    "",
    "https://sns.cn-north-1.amazonaws.com.cn",
    "https://sqs.cn-north-1.amazonaws.com.cn",
    "",
    "https://iam.cn-north-1.amazonaws.com.cn",
    "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn",
    "",
    "https://dynamodb.cn-north-1.amazonaws.com.cn",
    ServiceInfo{"https://monitoring.cn-north-1.amazonaws.com.cn", V4Signature},
    "https://autoscaling.cn-north-1.amazonaws.com.cn",
    ServiceInfo{"https://rds.cn-north-1.amazonaws.com.cn", V4Signature},
    "",
    "https://sts.cn-north-1.amazonaws.com.cn",
    "",
    "",
}
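Each Region value above is a plain struct literal whose fields are positional against the Region type in aws.go, so endpoint lookup is just a map access plus a field read. A small illustrative sketch (not part of the diff; the import path and the region names chosen are assumptions for the example):

package main

import (
    "fmt"

    "github.com/AdRoll/goamz/aws"
)

func main() {
    for _, name := range []string{"eu-central-1", "cn-north-1"} {
        r, ok := aws.Regions[name]
        if !ok {
            fmt.Println(name, "is not defined in this vendored copy")
            continue
        }
        // These two regions are the ones wired for V4 signing in the table above.
        fmt.Println(r.Name, "EC2 endpoint:", r.EC2Endpoint.Endpoint, "signer:", r.EC2Endpoint.Signer)
    }
}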
136 Godeps/_workspace/src/github.com/AdRoll/goamz/aws/retry.go generated vendored Normal file
@@ -0,0 +1,136 @@
package aws

import (
    "math/rand"
    "net"
    "net/http"
    "time"
)

const (
    maxDelay             = 20 * time.Second
    defaultScale         = 300 * time.Millisecond
    throttlingScale      = 500 * time.Millisecond
    throttlingScaleRange = throttlingScale / 4
    defaultMaxRetries    = 3
    dynamoDBScale        = 25 * time.Millisecond
    dynamoDBMaxRetries   = 10
)

// A RetryPolicy encapsulates a strategy for implementing client retries.
//
// Default implementations are provided which match the AWS SDKs.
type RetryPolicy interface {
    // ShouldRetry returns whether a client should retry a failed request.
    ShouldRetry(target string, r *http.Response, err error, numRetries int) bool

    // Delay returns the time a client should wait before issuing a retry.
    Delay(target string, r *http.Response, err error, numRetries int) time.Duration
}

// DefaultRetryPolicy implements the AWS SDK default retry policy.
//
// It will retry up to 3 times, and uses an exponential backoff with a scale
// factor of 300ms (300ms, 600ms, 1200ms). If the retry is because of
// throttling, the delay will also include some randomness.
//
// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L90.
type DefaultRetryPolicy struct {
}

// ShouldRetry implements the RetryPolicy ShouldRetry method.
func (policy DefaultRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
    return shouldRetry(r, err, numRetries, defaultMaxRetries)
}

// Delay implements the RetryPolicy Delay method.
func (policy DefaultRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
    scale := defaultScale
    if err, ok := err.(*Error); ok && isThrottlingException(err) {
        scale = throttlingScale + time.Duration(rand.Int63n(int64(throttlingScaleRange)))
    }
    return exponentialBackoff(numRetries, scale)
}

// DynamoDBRetryPolicy implements the AWS SDK DynamoDB retry policy.
//
// It will retry up to 10 times, and uses an exponential backoff with a scale
// factor of 25ms (25ms, 50ms, 100ms, ...).
//
// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L103.
type DynamoDBRetryPolicy struct {
}

// ShouldRetry implements the RetryPolicy ShouldRetry method.
func (policy DynamoDBRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
    return shouldRetry(r, err, numRetries, dynamoDBMaxRetries)
}

// Delay implements the RetryPolicy Delay method.
func (policy DynamoDBRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
    return exponentialBackoff(numRetries, dynamoDBScale)
}

// NeverRetryPolicy never retries requests and returns immediately on failure.
type NeverRetryPolicy struct {
}

// ShouldRetry implements the RetryPolicy ShouldRetry method.
func (policy NeverRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
    return false
}

// Delay implements the RetryPolicy Delay method.
func (policy NeverRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
    return time.Duration(0)
}

// shouldRetry determines if we should retry the request.
//
// See http://docs.aws.amazon.com/general/latest/gr/api-retries.html.
func shouldRetry(r *http.Response, err error, numRetries int, maxRetries int) bool {
    // Once we've exceeded the max retry attempts, game over.
    if numRetries >= maxRetries {
        return false
    }

    // Always retry temporary network errors.
    if err, ok := err.(net.Error); ok && err.Temporary() {
        return true
    }

    // Always retry 5xx responses.
    if r != nil && r.StatusCode >= 500 {
        return true
    }

    // Always retry throttling exceptions.
    if err, ok := err.(ServiceError); ok && isThrottlingException(err) {
        return true
    }

    // Other classes of failures indicate a problem with the request. Retrying
    // won't help.
    return false
}

func exponentialBackoff(numRetries int, scale time.Duration) time.Duration {
    if numRetries < 0 {
        return time.Duration(0)
    }

    delay := (1 << uint(numRetries)) * scale
    if delay > maxDelay {
        return maxDelay
    }
    return delay
}

func isThrottlingException(err ServiceError) bool {
    switch err.ErrorCode() {
    case "Throttling", "ThrottlingException", "ProvisionedThroughputExceededException":
        return true
    default:
        return false
    }
}
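The policies above are stateless value types, so a caller can probe the backoff schedule directly. The sketch below prints what DefaultRetryPolicy and DynamoDBRetryPolicy would decide for a plain 503 response across several retry counts; with a nil error the throttling branch is skipped, so the delays are deterministic (300ms doubling, capped at 20s, versus 25ms doubling). Illustrative only, not part of the diff; the import path is an assumption.

package main

import (
    "fmt"
    "net/http"

    "github.com/AdRoll/goamz/aws"
)

func main() {
    res := &http.Response{StatusCode: http.StatusServiceUnavailable}
    def := aws.DefaultRetryPolicy{}
    ddb := aws.DynamoDBRetryPolicy{}

    for n := 0; n < 5; n++ {
        fmt.Printf("retry %d: default retry=%v delay=%v | dynamodb retry=%v delay=%v\n",
            n,
            def.ShouldRetry("", res, nil, n), def.Delay("", res, nil, n),
            ddb.ShouldRetry("", res, nil, n), ddb.Delay("", res, nil, n))
    }
}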
303
Godeps/_workspace/src/github.com/AdRoll/goamz/aws/retry_test.go
generated
vendored
Normal file
303
Godeps/_workspace/src/github.com/AdRoll/goamz/aws/retry_test.go
generated
vendored
Normal file
|
@ -0,0 +1,303 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type testInput struct {
|
||||
res *http.Response
|
||||
err error
|
||||
numRetries int
|
||||
}
|
||||
|
||||
type testResult struct {
|
||||
shouldRetry bool
|
||||
delay time.Duration
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
input testInput
|
||||
defaultResult testResult
|
||||
dynamoDBResult testResult
|
||||
}
|
||||
|
||||
var testCases = []testCase{
|
||||
// Test nil fields
|
||||
testCase{
|
||||
input: testInput{
|
||||
err: nil,
|
||||
res: nil,
|
||||
numRetries: 0,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: 300 * time.Millisecond,
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: 25 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
// Test 3 different throttling exceptions
|
||||
testCase{
|
||||
input: testInput{
|
||||
err: &Error{
|
||||
Code: "Throttling",
|
||||
},
|
||||
numRetries: 0,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 617165505 * time.Nanosecond, // account for randomness with known seed
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 25 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
testCase{
|
||||
input: testInput{
|
||||
err: &Error{
|
||||
Code: "ThrottlingException",
|
||||
},
|
||||
numRetries: 0,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 579393152 * time.Nanosecond, // account for randomness with known seed
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 25 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
testCase{
|
||||
input: testInput{
|
||||
err: &Error{
|
||||
Code: "ProvisionedThroughputExceededException",
|
||||
},
|
||||
numRetries: 1,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 1105991654 * time.Nanosecond, // account for randomness with known seed
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 50 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
// Test a fake throttling exception
|
||||
testCase{
|
||||
input: testInput{
|
||||
err: &Error{
|
||||
Code: "MyMadeUpThrottlingCode",
|
||||
},
|
||||
numRetries: 0,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: 300 * time.Millisecond,
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: 25 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
// Test 5xx errors
|
||||
testCase{
|
||||
input: testInput{
|
||||
res: &http.Response{
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
},
|
||||
numRetries: 1,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 600 * time.Millisecond,
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 50 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
testCase{
|
||||
input: testInput{
|
||||
res: &http.Response{
|
||||
StatusCode: http.StatusServiceUnavailable,
|
||||
},
|
||||
numRetries: 1,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 600 * time.Millisecond,
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 50 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
// Test a random 400 error
|
||||
testCase{
|
||||
input: testInput{
|
||||
res: &http.Response{
|
||||
StatusCode: http.StatusNotFound,
|
||||
},
|
||||
numRetries: 1,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: 600 * time.Millisecond,
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: 50 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
// Test a temporary net.Error
|
||||
testCase{
|
||||
input: testInput{
|
||||
res: &http.Response{},
|
||||
err: &net.DNSError{
|
||||
IsTimeout: true,
|
||||
},
|
||||
numRetries: 2,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 1200 * time.Millisecond,
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 100 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
// Test a non-temporary net.Error
|
||||
testCase{
|
||||
input: testInput{
|
||||
res: &http.Response{},
|
||||
err: &net.DNSError{
|
||||
IsTimeout: false,
|
||||
},
|
||||
numRetries: 3,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: 2400 * time.Millisecond,
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: 200 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
// Assert failure after hitting max default retries
|
||||
testCase{
|
||||
input: testInput{
|
||||
err: &Error{
|
||||
Code: "ProvisionedThroughputExceededException",
|
||||
},
|
||||
numRetries: defaultMaxRetries,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: 4313582352 * time.Nanosecond, // account for randomness with known seed
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: true,
|
||||
delay: 200 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
// Assert failure after hitting max DynamoDB retries
|
||||
testCase{
|
||||
input: testInput{
|
||||
err: &Error{
|
||||
Code: "ProvisionedThroughputExceededException",
|
||||
},
|
||||
numRetries: dynamoDBMaxRetries,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: maxDelay,
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: maxDelay,
|
||||
},
|
||||
},
|
||||
// Assert we never go over the maxDelay value
|
||||
testCase{
|
||||
input: testInput{
|
||||
numRetries: 25,
|
||||
},
|
||||
defaultResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: maxDelay,
|
||||
},
|
||||
dynamoDBResult: testResult{
|
||||
shouldRetry: false,
|
||||
delay: maxDelay,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestDefaultRetryPolicy(t *testing.T) {
|
||||
rand.Seed(0)
|
||||
var policy RetryPolicy
|
||||
policy = &DefaultRetryPolicy{}
|
||||
for _, test := range testCases {
|
||||
res := test.input.res
|
||||
err := test.input.err
|
||||
numRetries := test.input.numRetries
|
||||
|
||||
shouldRetry := policy.ShouldRetry("", res, err, numRetries)
|
||||
if shouldRetry != test.defaultResult.shouldRetry {
|
||||
t.Errorf("ShouldRetry returned %v, expected %v res=%#v err=%#v numRetries=%d", shouldRetry, test.defaultResult.shouldRetry, res, err, numRetries)
|
||||
}
|
||||
delay := policy.Delay("", res, err, numRetries)
|
||||
if delay != test.defaultResult.delay {
|
||||
t.Errorf("Delay returned %v, expected %v res=%#v err=%#v numRetries=%d", delay, test.defaultResult.delay, res, err, numRetries)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDynamoDBRetryPolicy(t *testing.T) {
|
||||
var policy RetryPolicy
|
||||
policy = &DynamoDBRetryPolicy{}
|
||||
for _, test := range testCases {
|
||||
res := test.input.res
|
||||
err := test.input.err
|
||||
numRetries := test.input.numRetries
|
||||
|
||||
shouldRetry := policy.ShouldRetry("", res, err, numRetries)
|
||||
if shouldRetry != test.dynamoDBResult.shouldRetry {
|
||||
t.Errorf("ShouldRetry returned %v, expected %v res=%#v err=%#v numRetries=%d", shouldRetry, test.dynamoDBResult.shouldRetry, res, err, numRetries)
|
||||
}
|
||||
delay := policy.Delay("", res, err, numRetries)
|
||||
if delay != test.dynamoDBResult.delay {
|
||||
t.Errorf("Delay returned %v, expected %v res=%#v err=%#v numRetries=%d", delay, test.dynamoDBResult.delay, res, err, numRetries)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNeverRetryPolicy(t *testing.T) {
|
||||
var policy RetryPolicy
|
||||
policy = &NeverRetryPolicy{}
|
||||
for _, test := range testCases {
|
||||
res := test.input.res
|
||||
err := test.input.err
|
||||
numRetries := test.input.numRetries
|
||||
|
||||
shouldRetry := policy.ShouldRetry("", res, err, numRetries)
|
||||
if shouldRetry {
|
||||
t.Errorf("ShouldRetry returned %v, expected %v res=%#v err=%#v numRetries=%d", shouldRetry, false, res, err, numRetries)
|
||||
}
|
||||
delay := policy.Delay("", res, err, numRetries)
|
||||
if delay != time.Duration(0) {
|
||||
t.Errorf("Delay returned %v, expected %v res=%#v err=%#v numRetries=%d", delay, time.Duration(0), res, err, numRetries)
|
||||
}
|
||||
}
|
||||
}
|
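The test table above pins down the expected backoff curve: the default policy starts at 300ms and doubles with each retry, the DynamoDB policy starts at 25ms, and both stop retrying after their respective max-retry counts, with delays capped at maxDelay. A minimal standalone sketch of that doubling calculation (not the package's actual implementation; the cap value below is a placeholder, and the default policy additionally applies jitter near the cap, which this sketch omits):

package main

import (
	"fmt"
	"time"
)

// backoff doubles a base delay per retry and caps it, mirroring the delay
// progression the fixtures above expect (300ms, 600ms, 1200ms, ... for the
// default policy; 25ms, 50ms, 100ms, ... for DynamoDB).
func backoff(baseDelay, maxDelay time.Duration, numRetries int) time.Duration {
	d := baseDelay
	for i := 0; i < numRetries; i++ {
		d *= 2
	}
	if d > maxDelay {
		d = maxDelay
	}
	return d
}

func main() {
	for n := 0; n <= 3; n++ {
		fmt.Println(backoff(300*time.Millisecond, 5*time.Minute, n),
			backoff(25*time.Millisecond, 5*time.Minute, n))
	}
}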
450
Godeps/_workspace/src/github.com/AdRoll/goamz/aws/sign.go
generated
vendored
Normal file
@ -0,0 +1,450 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AWS specifies that the parameters in a signed request must
|
||||
// be provided in the natural order of the keys. This is distinct
|
||||
// from the natural order of the encoded value of key=value.
|
||||
// Percent and equals signs affect the sorting order.
|
||||
func EncodeSorted(values url.Values) string {
|
||||
// preallocate the arrays for performance
|
||||
keys := make([]string, 0, len(values))
|
||||
sarray := make([]string, 0, len(values))
|
||||
for k := range values {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
for _, v := range values[k] {
|
||||
sarray = append(sarray, Encode(k)+"="+Encode(v))
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(sarray, "&")
|
||||
}
|
||||
|
||||
type V2Signer struct {
|
||||
auth Auth
|
||||
service ServiceInfo
|
||||
host string
|
||||
}
|
||||
|
||||
var b64 = base64.StdEncoding
|
||||
|
||||
func NewV2Signer(auth Auth, service ServiceInfo) (*V2Signer, error) {
|
||||
u, err := url.Parse(service.Endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &V2Signer{auth: auth, service: service, host: u.Host}, nil
|
||||
}
|
||||
|
||||
func (s *V2Signer) Sign(method, path string, params map[string]string) {
|
||||
params["AWSAccessKeyId"] = s.auth.AccessKey
|
||||
params["SignatureVersion"] = "2"
|
||||
params["SignatureMethod"] = "HmacSHA256"
|
||||
if s.auth.Token() != "" {
|
||||
params["SecurityToken"] = s.auth.Token()
|
||||
}
|
||||
// AWS specifies that the parameters in a signed request must
|
||||
// be provided in the natural order of the keys. This is distinct
|
||||
// from the natural order of the encoded value of key=value.
|
||||
// Percent and equals signs affect the sorting order.
|
||||
var keys, sarray []string
|
||||
for k := range params {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
sarray = append(sarray, Encode(k)+"="+Encode(params[k]))
|
||||
}
|
||||
joined := strings.Join(sarray, "&")
|
||||
payload := method + "\n" + s.host + "\n" + path + "\n" + joined
|
||||
hash := hmac.New(sha256.New, []byte(s.auth.SecretKey))
|
||||
hash.Write([]byte(payload))
|
||||
signature := make([]byte, b64.EncodedLen(hash.Size()))
|
||||
b64.Encode(signature, hash.Sum(nil))
|
||||
|
||||
params["Signature"] = string(signature)
|
||||
}
|
||||
|
||||
func (s *V2Signer) SignRequest(req *http.Request) error {
|
||||
req.ParseForm()
|
||||
req.Form.Set("AWSAccessKeyId", s.auth.AccessKey)
|
||||
req.Form.Set("SignatureVersion", "2")
|
||||
req.Form.Set("SignatureMethod", "HmacSHA256")
|
||||
if s.auth.Token() != "" {
|
||||
req.Form.Set("SecurityToken", s.auth.Token())
|
||||
}
|
||||
|
||||
payload := req.Method + "\n" + req.URL.Host + "\n" + req.URL.Path + "\n" + EncodeSorted(req.Form)
|
||||
hash := hmac.New(sha256.New, []byte(s.auth.SecretKey))
|
||||
hash.Write([]byte(payload))
|
||||
signature := make([]byte, b64.EncodedLen(hash.Size()))
|
||||
b64.Encode(signature, hash.Sum(nil))
|
||||
|
||||
req.Form.Set("Signature", string(signature))
|
||||
|
||||
req.URL.RawQuery = req.Form.Encode()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Common date formats for signing requests
|
||||
const (
|
||||
ISO8601BasicFormat = "20060102T150405Z"
|
||||
ISO8601BasicFormatShort = "20060102"
|
||||
)
|
||||
|
||||
type Route53Signer struct {
|
||||
auth Auth
|
||||
}
|
||||
|
||||
func NewRoute53Signer(auth Auth) *Route53Signer {
|
||||
return &Route53Signer{auth: auth}
|
||||
}
|
||||
|
||||
// Creates the authorize signature based on the date stamp and secret key
|
||||
func (s *Route53Signer) getHeaderAuthorize(message string) string {
|
||||
hmacSha256 := hmac.New(sha256.New, []byte(s.auth.SecretKey))
|
||||
hmacSha256.Write([]byte(message))
|
||||
cryptedString := hmacSha256.Sum(nil)
|
||||
|
||||
return base64.StdEncoding.EncodeToString(cryptedString)
|
||||
}
|
||||
|
||||
// Adds all the required headers for AWS Route53 API to the request
|
||||
// including the authorization
|
||||
func (s *Route53Signer) Sign(req *http.Request) {
|
||||
date := time.Now().UTC().Format(time.RFC1123)
|
||||
delete(req.Header, "Date")
|
||||
req.Header.Set("Date", date)
|
||||
|
||||
authHeader := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s",
|
||||
s.auth.AccessKey, "HmacSHA256", s.getHeaderAuthorize(date))
|
||||
|
||||
req.Header.Set("Host", req.Host)
|
||||
req.Header.Set("X-Amzn-Authorization", authHeader)
|
||||
req.Header.Set("Content-Type", "application/xml")
|
||||
if s.auth.Token() != "" {
|
||||
req.Header.Set("X-Amz-Security-Token", s.auth.Token())
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
The V4Signer encapsulates all of the functionality to sign a request with the AWS
|
||||
Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)
|
||||
*/
|
||||
type V4Signer struct {
|
||||
auth Auth
|
||||
serviceName string
|
||||
region Region
|
||||
// Add the x-amz-content-sha256 header
|
||||
IncludeXAmzContentSha256 bool
|
||||
}
|
||||
|
||||
/*
|
||||
Return a new instance of a V4Signer capable of signing AWS requests.
|
||||
*/
|
||||
func NewV4Signer(auth Auth, serviceName string, region Region) *V4Signer {
|
||||
return &V4Signer{
|
||||
auth: auth,
|
||||
serviceName: serviceName,
|
||||
region: region,
|
||||
IncludeXAmzContentSha256: false,
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Sign a request according to the AWS Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)
|
||||
|
||||
The signed request will include an "x-amz-date" header with a current timestamp if a valid "x-amz-date"
|
||||
or "date" header was not available in the original request. In addition, AWS Signature Version 4 requires
|
||||
the "host" header to be a signed header, therefor the Sign method will manually set a "host" header from
|
||||
the request.Host.
|
||||
|
||||
The signed request will include a new "Authorization" header indicating that the request has been signed.
|
||||
|
||||
Any changes to the request after signing the request will invalidate the signature.
|
||||
*/
|
||||
func (s *V4Signer) Sign(req *http.Request) {
|
||||
req.Header.Set("host", req.Host) // host header must be included as a signed header
|
||||
t := s.requestTime(req) // Get request time
|
||||
|
||||
payloadHash := ""
|
||||
|
||||
if _, ok := req.Form["X-Amz-Expires"]; ok {
|
||||
// We are authenticating the request by using query params
|
||||
// (also known as pre-signing a url, http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
|
||||
payloadHash = "UNSIGNED-PAYLOAD"
|
||||
req.Header.Del("x-amz-date")
|
||||
|
||||
req.Form["X-Amz-SignedHeaders"] = []string{s.signedHeaders(req.Header)}
|
||||
req.Form["X-Amz-Algorithm"] = []string{"AWS4-HMAC-SHA256"}
|
||||
req.Form["X-Amz-Credential"] = []string{s.auth.AccessKey + "/" + s.credentialScope(t)}
|
||||
req.Form["X-Amz-Date"] = []string{t.Format(ISO8601BasicFormat)}
|
||||
req.URL.RawQuery = req.Form.Encode()
|
||||
} else {
|
||||
payloadHash = s.payloadHash(req)
|
||||
if s.IncludeXAmzContentSha256 {
|
||||
req.Header.Set("x-amz-content-sha256", payloadHash) // x-amz-content-sha256 contains the payload hash
|
||||
}
|
||||
}
|
||||
creq := s.canonicalRequest(req, payloadHash) // Build canonical request
|
||||
sts := s.stringToSign(t, creq) // Build string to sign
|
||||
signature := s.signature(t, sts) // Calculate the AWS Signature Version 4
|
||||
auth := s.authorization(req.Header, t, signature) // Create Authorization header value
|
||||
|
||||
if _, ok := req.Form["X-Amz-Expires"]; ok {
|
||||
req.Form["X-Amz-Signature"] = []string{signature}
|
||||
} else {
|
||||
req.Header.Set("Authorization", auth) // Add Authorization header to request
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *V4Signer) SignRequest(req *http.Request) error {
|
||||
s.Sign(req)
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
requestTime method will parse the time from the request "x-amz-date" or "date" headers.
|
||||
If the "x-amz-date" header is present, that will take priority over the "date" header.
|
||||
If neither header is defined or we are unable to parse either header as a valid date
|
||||
then we will create a new "x-amz-date" header with the current time.
|
||||
*/
|
||||
func (s *V4Signer) requestTime(req *http.Request) time.Time {
|
||||
|
||||
// Get "x-amz-date" header
|
||||
date := req.Header.Get("x-amz-date")
|
||||
|
||||
// Attempt to parse as ISO8601BasicFormat
|
||||
t, err := time.Parse(ISO8601BasicFormat, date)
|
||||
if err == nil {
|
||||
return t
|
||||
}
|
||||
|
||||
// Attempt to parse as http.TimeFormat
|
||||
t, err = time.Parse(http.TimeFormat, date)
|
||||
if err == nil {
|
||||
req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat))
|
||||
return t
|
||||
}
|
||||
|
||||
// Get "date" header
|
||||
date = req.Header.Get("date")
|
||||
|
||||
// Attempt to parse as http.TimeFormat
|
||||
t, err = time.Parse(http.TimeFormat, date)
|
||||
if err == nil {
|
||||
return t
|
||||
}
|
||||
|
||||
// Create a current time header to be used
|
||||
t = time.Now().UTC()
|
||||
req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat))
|
||||
return t
|
||||
}
|
||||
|
||||
/*
|
||||
canonicalRequest method creates the canonical request according to Task 1 of the AWS Signature Version 4 Signing Process. (http://goo.gl/eUUZ3S)
|
||||
|
||||
CanonicalRequest =
|
||||
HTTPRequestMethod + '\n' +
|
||||
CanonicalURI + '\n' +
|
||||
CanonicalQueryString + '\n' +
|
||||
CanonicalHeaders + '\n' +
|
||||
SignedHeaders + '\n' +
|
||||
HexEncode(Hash(Payload))
|
||||
|
||||
payloadHash is optional; use the empty string and it will be calculated from the request
|
||||
*/
|
||||
func (s *V4Signer) canonicalRequest(req *http.Request, payloadHash string) string {
|
||||
if payloadHash == "" {
|
||||
payloadHash = s.payloadHash(req)
|
||||
}
|
||||
c := new(bytes.Buffer)
|
||||
fmt.Fprintf(c, "%s\n", req.Method)
|
||||
fmt.Fprintf(c, "%s\n", s.canonicalURI(req.URL))
|
||||
fmt.Fprintf(c, "%s\n", s.canonicalQueryString(req.URL))
|
||||
fmt.Fprintf(c, "%s\n\n", s.canonicalHeaders(req.Header))
|
||||
fmt.Fprintf(c, "%s\n", s.signedHeaders(req.Header))
|
||||
fmt.Fprintf(c, "%s", payloadHash)
|
||||
return c.String()
|
||||
}
|
||||
|
||||
func (s *V4Signer) canonicalURI(u *url.URL) string {
|
||||
u = &url.URL{Path: u.Path}
|
||||
canonicalPath := u.String()
|
||||
|
||||
slash := strings.HasSuffix(canonicalPath, "/")
|
||||
canonicalPath = path.Clean(canonicalPath)
|
||||
|
||||
if canonicalPath == "" || canonicalPath == "." {
|
||||
canonicalPath = "/"
|
||||
}
|
||||
|
||||
if canonicalPath != "/" && slash {
|
||||
canonicalPath += "/"
|
||||
}
|
||||
|
||||
return canonicalPath
|
||||
}
|
||||
|
||||
func (s *V4Signer) canonicalQueryString(u *url.URL) string {
|
||||
var a []string
|
||||
for k, vs := range u.Query() {
|
||||
k = url.QueryEscape(k)
|
||||
for _, v := range vs {
|
||||
if v == "" {
|
||||
a = append(a, k+"=")
|
||||
} else {
|
||||
v = url.QueryEscape(v)
|
||||
a = append(a, k+"="+v)
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Strings(a)
|
||||
return strings.Join(a, "&")
|
||||
}
|
||||
|
||||
func (s *V4Signer) canonicalHeaders(h http.Header) string {
|
||||
i, a, lowerCase := 0, make([]string, len(h)), make(map[string][]string)
|
||||
|
||||
for k, v := range h {
|
||||
lowerCase[strings.ToLower(k)] = v
|
||||
}
|
||||
|
||||
var keys []string
|
||||
for k := range lowerCase {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
v := lowerCase[k]
|
||||
for j, w := range v {
|
||||
v[j] = strings.Trim(w, " ")
|
||||
}
|
||||
sort.Strings(v)
|
||||
a[i] = strings.ToLower(k) + ":" + strings.Join(v, ",")
|
||||
i++
|
||||
}
|
||||
return strings.Join(a, "\n")
|
||||
}
|
||||
|
||||
func (s *V4Signer) signedHeaders(h http.Header) string {
|
||||
i, a := 0, make([]string, len(h))
|
||||
for k := range h {
|
||||
a[i] = strings.ToLower(k)
|
||||
i++
|
||||
}
|
||||
sort.Strings(a)
|
||||
return strings.Join(a, ";")
|
||||
}
|
||||
|
||||
func (s *V4Signer) payloadHash(req *http.Request) string {
|
||||
var b []byte
|
||||
if req.Body == nil {
|
||||
b = []byte("")
|
||||
} else {
|
||||
var err error
|
||||
b, err = ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
// TODO: I REALLY DON'T LIKE THIS PANIC!!!!
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
|
||||
return s.hash(string(b))
|
||||
}
|
||||
|
||||
/*
|
||||
stringToSign method creates the string to sign according to Task 2 of the AWS Signature Version 4 Signing Process. (http://goo.gl/es1PAu)
|
||||
|
||||
StringToSign =
|
||||
Algorithm + '\n' +
|
||||
RequestDate + '\n' +
|
||||
CredentialScope + '\n' +
|
||||
HexEncode(Hash(CanonicalRequest))
|
||||
*/
|
||||
func (s *V4Signer) stringToSign(t time.Time, creq string) string {
|
||||
w := new(bytes.Buffer)
|
||||
fmt.Fprint(w, "AWS4-HMAC-SHA256\n")
|
||||
fmt.Fprintf(w, "%s\n", t.Format(ISO8601BasicFormat))
|
||||
fmt.Fprintf(w, "%s\n", s.credentialScope(t))
|
||||
fmt.Fprintf(w, "%s", s.hash(creq))
|
||||
return w.String()
|
||||
}
|
||||
|
||||
func (s *V4Signer) credentialScope(t time.Time) string {
|
||||
return fmt.Sprintf("%s/%s/%s/aws4_request", t.Format(ISO8601BasicFormatShort), s.region.Name, s.serviceName)
|
||||
}
|
||||
|
||||
/*
|
||||
signature method calculates the AWS Signature Version 4 according to Task 3 of the AWS Signature Version 4 Signing Process. (http://goo.gl/j0Yqe1)
|
||||
|
||||
signature = HexEncode(HMAC(derived-signing-key, string-to-sign))
|
||||
*/
|
||||
func (s *V4Signer) signature(t time.Time, sts string) string {
|
||||
h := s.hmac(s.derivedKey(t), []byte(sts))
|
||||
return fmt.Sprintf("%x", h)
|
||||
}
|
||||
|
||||
/*
|
||||
derivedKey method derives a signing key to be used for signing a request.
|
||||
|
||||
kSecret = Your AWS Secret Access Key
|
||||
kDate = HMAC("AWS4" + kSecret, Date)
|
||||
kRegion = HMAC(kDate, Region)
|
||||
kService = HMAC(kRegion, Service)
|
||||
kSigning = HMAC(kService, "aws4_request")
|
||||
*/
|
||||
func (s *V4Signer) derivedKey(t time.Time) []byte {
|
||||
h := s.hmac([]byte("AWS4"+s.auth.SecretKey), []byte(t.Format(ISO8601BasicFormatShort)))
|
||||
h = s.hmac(h, []byte(s.region.Name))
|
||||
h = s.hmac(h, []byte(s.serviceName))
|
||||
h = s.hmac(h, []byte("aws4_request"))
|
||||
return h
|
||||
}
|
||||
|
||||
/*
|
||||
authorization method generates the authorization header value.
|
||||
*/
|
||||
func (s *V4Signer) authorization(header http.Header, t time.Time, signature string) string {
|
||||
w := new(bytes.Buffer)
|
||||
fmt.Fprint(w, "AWS4-HMAC-SHA256 ")
|
||||
fmt.Fprintf(w, "Credential=%s/%s, ", s.auth.AccessKey, s.credentialScope(t))
|
||||
fmt.Fprintf(w, "SignedHeaders=%s, ", s.signedHeaders(header))
|
||||
fmt.Fprintf(w, "Signature=%s", signature)
|
||||
return w.String()
|
||||
}
|
||||
|
||||
// hash method calculates the sha256 hash for a given string
|
||||
func (s *V4Signer) hash(in string) string {
|
||||
h := sha256.New()
|
||||
fmt.Fprintf(h, "%s", in)
|
||||
return fmt.Sprintf("%x", h.Sum(nil))
|
||||
}
|
||||
|
||||
// hmac method calculates the sha256 hmac for a given slice of bytes
|
||||
func (s *V4Signer) hmac(key, data []byte) []byte {
|
||||
h := hmac.New(sha256.New, key)
|
||||
h.Write(data)
|
||||
return h.Sum(nil)
|
||||
}
|
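The X-Amz-Expires branch in Sign above implements query-string authentication (pre-signed URLs). A minimal usage sketch, assuming credentials come from the environment; the object URL and expiry value are placeholders, and the final re-encode is needed because Sign leaves the computed signature in req.Form:

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/AdRoll/goamz/aws"
)

func main() {
	auth, err := aws.EnvAuth()
	if err != nil {
		fmt.Println(err)
		return
	}

	// Placeholder object URL; any S3 object path would do.
	req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/test.txt", nil)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Setting X-Amz-Expires in the form selects the pre-signing branch of Sign,
	// which uses the UNSIGNED-PAYLOAD hash and signs the query parameters.
	req.Form = url.Values{"X-Amz-Expires": {"3600"}}

	signer := aws.NewV4Signer(auth, "s3", aws.USEast)
	signer.Sign(req)

	// Sign stores the computed signature in req.Form, so re-encode the query
	// to produce the final pre-signed URL.
	req.URL.RawQuery = req.Form.Encode()
	fmt.Println(req.URL.String())
}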
569
Godeps/_workspace/src/github.com/AdRoll/goamz/aws/sign_test.go
generated
vendored
Normal file
@ -0,0 +1,569 @@
|
|||
package aws_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/AdRoll/goamz/aws"
|
||||
"gopkg.in/check.v1"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var _ = check.Suite(&V4SignerSuite{})
|
||||
|
||||
type V4SignerSuite struct {
|
||||
auth aws.Auth
|
||||
region aws.Region
|
||||
cases []V4SignerSuiteCase
|
||||
}
|
||||
|
||||
type V4SignerSuiteCase struct {
|
||||
label string
|
||||
request V4SignerSuiteCaseRequest
|
||||
canonicalRequest string
|
||||
stringToSign string
|
||||
signature string
|
||||
authorization string
|
||||
}
|
||||
|
||||
type V4SignerSuiteCaseRequest struct {
|
||||
method string
|
||||
host string
|
||||
url string
|
||||
headers []string
|
||||
body string
|
||||
}
|
||||
|
||||
func (s *V4SignerSuite) SetUpSuite(c *check.C) {
|
||||
s.auth = aws.Auth{AccessKey: "AKIDEXAMPLE", SecretKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"}
|
||||
s.region = aws.USEast
|
||||
|
||||
// Test cases from the Signature Version 4 Test Suite (http://goo.gl/nguvs0)
|
||||
s.cases = append(s.cases,
|
||||
|
||||
// get-header-key-duplicate
|
||||
V4SignerSuiteCase{
|
||||
label: "get-header-key-duplicate",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "POST",
|
||||
host: "host.foo.com",
|
||||
url: "/",
|
||||
headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "ZOO:zoobar", "zoo:foobar", "zoo:zoobar"},
|
||||
},
|
||||
canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:foobar,zoobar,zoobar\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n3c52f0eaae2b61329c0a332e3fa15842a37bc5812cf4d80eb64784308850e313",
|
||||
signature: "54afcaaf45b331f81cd2edb974f7b824ff4dd594cbbaa945ed636b48477368ed",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=54afcaaf45b331f81cd2edb974f7b824ff4dd594cbbaa945ed636b48477368ed",
|
||||
},
|
||||
|
||||
// get-header-value-order
|
||||
V4SignerSuiteCase{
|
||||
label: "get-header-value-order",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "POST",
|
||||
host: "host.foo.com",
|
||||
url: "/",
|
||||
headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "p:z", "p:a", "p:p", "p:a"},
|
||||
},
|
||||
canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\np:a,a,p,z\n\ndate;host;p\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n94c0389fefe0988cbbedc8606f0ca0b485b48da010d09fc844b45b697c8924fe",
|
||||
signature: "d2973954263943b11624a11d1c963ca81fb274169c7868b2858c04f083199e3d",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;p, Signature=d2973954263943b11624a11d1c963ca81fb274169c7868b2858c04f083199e3d",
|
||||
},
|
||||
|
||||
// get-header-value-trim
|
||||
V4SignerSuiteCase{
|
||||
label: "get-header-value-trim",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "POST",
|
||||
host: "host.foo.com",
|
||||
url: "/",
|
||||
headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "p: phfft "},
|
||||
},
|
||||
canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\np:phfft\n\ndate;host;p\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ndddd1902add08da1ac94782b05f9278c08dc7468db178a84f8950d93b30b1f35",
|
||||
signature: "debf546796015d6f6ded8626f5ce98597c33b47b9164cf6b17b4642036fcb592",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;p, Signature=debf546796015d6f6ded8626f5ce98597c33b47b9164cf6b17b4642036fcb592",
|
||||
},
|
||||
|
||||
// get-empty
|
||||
V4SignerSuiteCase{
|
||||
label: "get-relative-relative",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
|
||||
signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
},
|
||||
|
||||
// get-single-relative
|
||||
V4SignerSuiteCase{
|
||||
label: "get-relative-relative",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/.",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
|
||||
signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
},
|
||||
|
||||
// get-multiple-relative
|
||||
V4SignerSuiteCase{
|
||||
label: "get-relative-relative",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/./././",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
|
||||
signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
},
|
||||
|
||||
// get-relative-relative
|
||||
V4SignerSuiteCase{
|
||||
label: "get-relative-relative",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/foo/bar/../..",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
|
||||
signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
},
|
||||
|
||||
// get-relative
|
||||
V4SignerSuiteCase{
|
||||
label: "get-relative",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/foo/..",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
|
||||
signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
},
|
||||
|
||||
// get-slash-dot-slash
|
||||
V4SignerSuiteCase{
|
||||
label: "get-slash-dot-slash",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/./",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
|
||||
signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
},
|
||||
|
||||
// get-slash-pointless-dot
|
||||
V4SignerSuiteCase{
|
||||
label: "get-slash-pointless-dot",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/./foo",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/foo\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n8021a97572ee460f87ca67f4e8c0db763216d84715f5424a843a5312a3321e2d",
|
||||
signature: "910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a",
|
||||
},
|
||||
|
||||
// get-slash
|
||||
V4SignerSuiteCase{
|
||||
label: "get-slash",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "//",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
|
||||
signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
},
|
||||
|
||||
// get-slashes
|
||||
V4SignerSuiteCase{
|
||||
label: "get-slashes",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "//foo//",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/foo/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n6bb4476ee8745730c9cb79f33a0c70baa6d8af29c0077fa12e4e8f1dd17e7098",
|
||||
signature: "b00392262853cfe3201e47ccf945601079e9b8a7f51ee4c3d9ee4f187aa9bf19",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b00392262853cfe3201e47ccf945601079e9b8a7f51ee4c3d9ee4f187aa9bf19",
|
||||
},
|
||||
|
||||
// get-space
|
||||
V4SignerSuiteCase{
|
||||
label: "get-space",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/%20/foo",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/%20/foo\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n69c45fb9fe3fd76442b5086e50b2e9fec8298358da957b293ef26e506fdfb54b",
|
||||
signature: "f309cfbd10197a230c42dd17dbf5cca8a0722564cb40a872d25623cfa758e374",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=f309cfbd10197a230c42dd17dbf5cca8a0722564cb40a872d25623cfa758e374",
|
||||
},
|
||||
|
||||
// get-unreserved
|
||||
V4SignerSuiteCase{
|
||||
label: "get-unreserved",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ndf63ee3247c0356c696a3b21f8d8490b01fa9cd5bc6550ef5ef5f4636b7b8901",
|
||||
signature: "830cc36d03f0f84e6ee4953fbe701c1c8b71a0372c63af9255aa364dd183281e",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=830cc36d03f0f84e6ee4953fbe701c1c8b71a0372c63af9255aa364dd183281e",
|
||||
},
|
||||
|
||||
// get-utf8
|
||||
V4SignerSuiteCase{
|
||||
label: "get-utf8",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/%E1%88%B4",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/%E1%88%B4\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n27ba31df5dbc6e063d8f87d62eb07143f7f271c5330a917840586ac1c85b6f6b",
|
||||
signature: "8d6634c189aa8c75c2e51e106b6b5121bed103fdb351f7d7d4381c738823af74",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=8d6634c189aa8c75c2e51e106b6b5121bed103fdb351f7d7d4381c738823af74",
|
||||
},
|
||||
|
||||
// get-vanilla-empty-query-key
|
||||
V4SignerSuiteCase{
|
||||
label: "get-vanilla-empty-query-key",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/?foo=bar",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n0846c2945b0832deb7a463c66af5c4f8bd54ec28c438e67a214445b157c9ddf8",
|
||||
signature: "56c054473fd260c13e4e7393eb203662195f5d4a1fada5314b8b52b23f985e9f",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=56c054473fd260c13e4e7393eb203662195f5d4a1fada5314b8b52b23f985e9f",
|
||||
},
|
||||
|
||||
// get-vanilla-query-order-key-case
|
||||
V4SignerSuiteCase{
|
||||
label: "get-vanilla-query-order-key-case",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/?foo=Zoo&foo=aha",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\nfoo=Zoo&foo=aha\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ne25f777ba161a0f1baf778a87faf057187cf5987f17953320e3ca399feb5f00d",
|
||||
signature: "be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09",
|
||||
},
|
||||
|
||||
// get-vanilla-query-order-key
|
||||
V4SignerSuiteCase{
|
||||
label: "get-vanilla-query-order-key",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/?a=foo&b=foo",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\na=foo&b=foo\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n2f23d14fe13caebf6dfda346285c6d9c14f49eaca8f5ec55c627dd7404f7a727",
|
||||
signature: "0dc122f3b28b831ab48ba65cb47300de53fbe91b577fe113edac383730254a3b",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=0dc122f3b28b831ab48ba65cb47300de53fbe91b577fe113edac383730254a3b",
|
||||
},
|
||||
|
||||
// get-vanilla-query-order-value
|
||||
V4SignerSuiteCase{
|
||||
label: "get-vanilla-query-order-value",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/?foo=b&foo=a",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\nfoo=a&foo=b\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n33dffc220e89131f8f6157a35c40903daa658608d9129ff9489e5cf5bbd9b11b",
|
||||
signature: "feb926e49e382bec75c9d7dcb2a1b6dc8aa50ca43c25d2bc51143768c0875acc",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=feb926e49e382bec75c9d7dcb2a1b6dc8aa50ca43c25d2bc51143768c0875acc",
|
||||
},
|
||||
|
||||
// get-vanilla-query-unreserved
|
||||
V4SignerSuiteCase{
|
||||
label: "get-vanilla-query-unreserved",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/?-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\n-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nd2578f3156d4c9d180713d1ff20601d8a3eed0dd35447d24603d7d67414bd6b5",
|
||||
signature: "f1498ddb4d6dae767d97c466fb92f1b59a2c71ca29ac954692663f9db03426fb",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=f1498ddb4d6dae767d97c466fb92f1b59a2c71ca29ac954692663f9db03426fb",
|
||||
},
|
||||
|
||||
// get-vanilla-query
|
||||
V4SignerSuiteCase{
|
||||
label: "get-vanilla-query",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
|
||||
signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
},
|
||||
|
||||
// get-vanilla-ut8-query
|
||||
V4SignerSuiteCase{
|
||||
label: "get-vanilla-ut8-query",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/?ሴ=bar",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\n%E1%88%B4=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nde5065ff39c131e6c2e2bd19cd9345a794bf3b561eab20b8d97b2093fc2a979e",
|
||||
signature: "6fb359e9a05394cc7074e0feb42573a2601abc0c869a953e8c5c12e4e01f1a8c",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=6fb359e9a05394cc7074e0feb42573a2601abc0c869a953e8c5c12e4e01f1a8c",
|
||||
},
|
||||
|
||||
// get-vanilla
|
||||
V4SignerSuiteCase{
|
||||
label: "get-vanilla",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "GET",
|
||||
host: "host.foo.com",
|
||||
url: "/",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
|
||||
signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
|
||||
},
|
||||
|
||||
// post-header-key-case
|
||||
V4SignerSuiteCase{
|
||||
label: "post-header-key-case",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "POST",
|
||||
host: "host.foo.com",
|
||||
url: "/",
|
||||
headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n05da62cee468d24ae84faff3c39f1b85540de60243c1bcaace39c0a2acc7b2c4",
|
||||
signature: "22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726",
|
||||
},
|
||||
|
||||
// post-header-key-sort
|
||||
V4SignerSuiteCase{
|
||||
label: "post-header-key-sort",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "POST",
|
||||
host: "host.foo.com",
|
||||
url: "/",
|
||||
headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "ZOO:zoobar"},
|
||||
},
|
||||
canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:zoobar\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n34e1bddeb99e76ee01d63b5e28656111e210529efeec6cdfd46a48e4c734545d",
|
||||
signature: "b7a95a52518abbca0964a999a880429ab734f35ebbf1235bd79a5de87756dc4a",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=b7a95a52518abbca0964a999a880429ab734f35ebbf1235bd79a5de87756dc4a",
|
||||
},
|
||||
|
||||
// post-header-value-case
|
||||
V4SignerSuiteCase{
|
||||
label: "post-header-value-case",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "POST",
|
||||
host: "host.foo.com",
|
||||
url: "/",
|
||||
headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "zoo:ZOOBAR"},
|
||||
},
|
||||
canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:ZOOBAR\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n3aae6d8274b8c03e2cc96fc7d6bda4b9bd7a0a184309344470b2c96953e124aa",
|
||||
signature: "273313af9d0c265c531e11db70bbd653f3ba074c1009239e8559d3987039cad7",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=273313af9d0c265c531e11db70bbd653f3ba074c1009239e8559d3987039cad7",
|
||||
},
|
||||
|
||||
// post-vanilla-empty-query-value
|
||||
V4SignerSuiteCase{
|
||||
label: "post-vanilla-empty-query-value",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "POST",
|
||||
host: "host.foo.com",
|
||||
url: "/?foo=bar",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "POST\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ncd4f39132d8e60bb388831d734230460872b564871c47f5de62e62d1a68dbe1e",
|
||||
signature: "b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
|
||||
},
|
||||
|
||||
// post-vanilla-query
|
||||
V4SignerSuiteCase{
|
||||
label: "post-vanilla-query",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "POST",
|
||||
host: "host.foo.com",
|
||||
url: "/?foo=bar",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "POST\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ncd4f39132d8e60bb388831d734230460872b564871c47f5de62e62d1a68dbe1e",
|
||||
signature: "b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
|
||||
},
|
||||
|
||||
// post-vanilla
|
||||
V4SignerSuiteCase{
|
||||
label: "post-vanilla",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "POST",
|
||||
host: "host.foo.com",
|
||||
url: "/",
|
||||
headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
},
|
||||
canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n05da62cee468d24ae84faff3c39f1b85540de60243c1bcaace39c0a2acc7b2c4",
|
||||
signature: "22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726",
|
||||
},
|
||||
|
||||
// post-x-www-form-urlencoded-parameters
|
||||
V4SignerSuiteCase{
|
||||
label: "post-x-www-form-urlencoded-parameters",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "POST",
|
||||
host: "host.foo.com",
|
||||
url: "/",
|
||||
headers: []string{"Content-Type:application/x-www-form-urlencoded; charset=utf8", "Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
body: "foo=bar",
|
||||
},
|
||||
canonicalRequest: "POST\n/\n\ncontent-type:application/x-www-form-urlencoded; charset=utf8\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ncontent-type;date;host\n3ba8907e7a252327488df390ed517c45b96dead033600219bdca7107d1d3f88a",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nc4115f9e54b5cecf192b1eaa23b8e88ed8dc5391bd4fde7b3fff3d9c9fe0af1f",
|
||||
signature: "b105eb10c6d318d2294de9d49dd8b031b55e3c3fe139f2e637da70511e9e7b71",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=b105eb10c6d318d2294de9d49dd8b031b55e3c3fe139f2e637da70511e9e7b71",
|
||||
},
|
||||
|
||||
// post-x-www-form-urlencoded
|
||||
V4SignerSuiteCase{
|
||||
label: "post-x-www-form-urlencoded",
|
||||
request: V4SignerSuiteCaseRequest{
|
||||
method: "POST",
|
||||
host: "host.foo.com",
|
||||
url: "/",
|
||||
headers: []string{"Content-Type:application/x-www-form-urlencoded", "Date:Mon, 09 Sep 2011 23:36:00 GMT"},
|
||||
body: "foo=bar",
|
||||
},
|
||||
canonicalRequest: "POST\n/\n\ncontent-type:application/x-www-form-urlencoded\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ncontent-type;date;host\n3ba8907e7a252327488df390ed517c45b96dead033600219bdca7107d1d3f88a",
|
||||
stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n4c5c6e4b52fb5fb947a8733982a8a5a61b14f04345cbfe6e739236c76dd48f74",
|
||||
signature: "5a15b22cf462f047318703b92e6f4f38884e4a7ab7b1d6426ca46a8bd1c26cbc",
|
||||
authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=5a15b22cf462f047318703b92e6f4f38884e4a7ab7b1d6426ca46a8bd1c26cbc",
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (s *V4SignerSuite) TestCases(c *check.C) {
|
||||
signer := aws.NewV4Signer(s.auth, "host", s.region)
|
||||
|
||||
for _, testCase := range s.cases {
|
||||
|
||||
req, err := http.NewRequest(testCase.request.method, "http://"+testCase.request.host+testCase.request.url, strings.NewReader(testCase.request.body))
|
||||
c.Assert(err, check.IsNil, check.Commentf("Testcase: %s", testCase.label))
|
||||
for _, v := range testCase.request.headers {
|
||||
h := strings.SplitN(v, ":", 2)
|
||||
req.Header.Add(h[0], h[1])
|
||||
}
|
||||
req.Header.Set("host", req.Host)
|
||||
|
||||
t := signer.RequestTime(req)
|
||||
|
||||
canonicalRequest := signer.CanonicalRequest(req)
|
||||
c.Check(canonicalRequest, check.Equals, testCase.canonicalRequest, check.Commentf("Testcase: %s", testCase.label))
|
||||
|
||||
stringToSign := signer.StringToSign(t, canonicalRequest)
|
||||
c.Check(stringToSign, check.Equals, testCase.stringToSign, check.Commentf("Testcase: %s", testCase.label))
|
||||
|
||||
signature := signer.Signature(t, stringToSign)
|
||||
c.Check(signature, check.Equals, testCase.signature, check.Commentf("Testcase: %s", testCase.label))
|
||||
|
||||
authorization := signer.Authorization(req.Header, t, signature)
|
||||
c.Check(authorization, check.Equals, testCase.authorization, check.Commentf("Testcase: %s", testCase.label))
|
||||
|
||||
signer.Sign(req)
|
||||
c.Check(req.Header.Get("Authorization"), check.Equals, testCase.authorization, check.Commentf("Testcase: %s", testCase.label))
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleV4Signer() {
|
||||
// Get auth from env vars
|
||||
auth, err := aws.EnvAuth()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
// Create a signer with the auth, name of the service, and aws region
|
||||
signer := aws.NewV4Signer(auth, "dynamodb", aws.USEast)
|
||||
|
||||
// Create a request
|
||||
req, err := http.NewRequest("POST", aws.USEast.DynamoDBEndpoint, strings.NewReader("sample_request"))
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
// Date or x-amz-date header is required to sign a request
|
||||
req.Header.Add("Date", time.Now().UTC().Format(http.TimeFormat))
|
||||
|
||||
// Sign the request
|
||||
signer.Sign(req)
|
||||
|
||||
// Issue signed request
|
||||
http.DefaultClient.Do(req)
|
||||
}
|
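Every canonical request in these fixtures ends with the SHA-256 of the request payload; for the bodiless GET cases that trailing value is the hash of the empty string, which is easy to check independently:

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// SHA-256 of an empty payload, as used in the GET test cases above.
	sum := sha256.Sum256(nil)
	fmt.Printf("%x\n", sum)
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}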
143
Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/cloudfront.go
generated
vendored
Normal file
@ -0,0 +1,143 @@
|
|||
package cloudfront
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/AdRoll/goamz/aws"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type CloudFront struct {
|
||||
BaseURL string
|
||||
keyPairId string
|
||||
key *rsa.PrivateKey
|
||||
}
|
||||
|
||||
var base64Replacer = strings.NewReplacer("=", "_", "+", "-", "/", "~")
|
||||
|
||||
func NewKeyLess(auth aws.Auth, baseurl string) *CloudFront {
|
||||
return &CloudFront{keyPairId: auth.AccessKey, BaseURL: baseurl}
|
||||
}
|
||||
|
||||
func New(baseurl string, key *rsa.PrivateKey, keyPairId string) *CloudFront {
|
||||
return &CloudFront{
|
||||
BaseURL: baseurl,
|
||||
keyPairId: keyPairId,
|
||||
key: key,
|
||||
}
|
||||
}
|
||||
|
||||
type epochTime struct {
|
||||
EpochTime int64 `json:"AWS:EpochTime"`
|
||||
}
|
||||
|
||||
type condition struct {
|
||||
DateLessThan epochTime
|
||||
}
|
||||
|
||||
type statement struct {
|
||||
Resource string
|
||||
Condition condition
|
||||
}
|
||||
|
||||
type policy struct {
|
||||
Statement []statement
|
||||
}
|
||||
|
||||
func buildPolicy(resource string, expireTime time.Time) ([]byte, error) {
|
||||
p := &policy{
|
||||
Statement: []statement{
|
||||
statement{
|
||||
Resource: resource,
|
||||
Condition: condition{
|
||||
DateLessThan: epochTime{
|
||||
EpochTime: expireTime.Truncate(time.Millisecond).Unix(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return json.Marshal(p)
|
||||
}
|
||||
|
||||
func (cf *CloudFront) generateSignature(policy []byte) (string, error) {
|
||||
hash := sha1.New()
|
||||
_, err := hash.Write(policy)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
hashed := hash.Sum(nil)
|
||||
var signed []byte
|
||||
if cf.key.Validate() == nil {
|
||||
signed, err = rsa.SignPKCS1v15(nil, cf.key, crypto.SHA1, hashed)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
signed = hashed
|
||||
}
|
||||
encoded := base64Replacer.Replace(base64.StdEncoding.EncodeToString(signed))
|
||||
|
||||
return encoded, nil
|
||||
}
|
||||
|
||||
// Creates a signed url using RSAwithSHA1 as specified by
|
||||
// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html#private-content-canned-policy-creating-signature
|
||||
func (cf *CloudFront) CannedSignedURL(path, queryString string, expires time.Time) (string, error) {
|
||||
resource := cf.BaseURL + path
|
||||
if queryString != "" {
|
||||
resource = path + "?" + queryString
|
||||
}
|
||||
|
||||
policy, err := buildPolicy(resource, expires)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
signature, err := cf.generateSignature(policy)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// TODO: Do this once
|
||||
uri, err := url.Parse(cf.BaseURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
uri.RawQuery = queryString
|
||||
if queryString != "" {
|
||||
uri.RawQuery += "&"
|
||||
}
|
||||
|
||||
expireTime := expires.Truncate(time.Millisecond).Unix()
|
||||
|
||||
uri.Path = path
|
||||
uri.RawQuery += fmt.Sprintf("Expires=%d&Signature=%s&Key-Pair-Id=%s", expireTime, signature, cf.keyPairId)
|
||||
|
||||
return uri.String(), nil
|
||||
}
|
||||
|
||||
func (cloudfront *CloudFront) SignedURL(path, querystrings string, expires time.Time) string {
|
||||
policy := `{"Statement":[{"Resource":"` + path + "?" + querystrings + `,"Condition":{"DateLessThan":{"AWS:EpochTime":` + strconv.FormatInt(expires.Truncate(time.Millisecond).Unix(), 10) + `}}}]}`
|
||||
|
||||
hash := sha1.New()
|
||||
hash.Write([]byte(policy))
|
||||
b := hash.Sum(nil)
|
||||
he := base64.StdEncoding.EncodeToString(b)
|
||||
|
||||
policySha1 := he
|
||||
|
||||
url := cloudfront.BaseURL + path + "?" + querystrings + "&Expires=" + strconv.FormatInt(expires.Unix(), 10) + "&Signature=" + policySha1 + "&Key-Pair-Id=" + cloudfront.keyPairId
|
||||
|
||||
return url
|
||||
}
|
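A minimal usage sketch for CannedSignedURL from the file above, mirroring the key-loading steps in the test that follows; the key path, key-pair ID, distribution URL, and object path are placeholders:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"time"

	"github.com/AdRoll/goamz/cloudfront"
)

func main() {
	// Load the CloudFront key pair's RSA private key (placeholder path).
	raw, err := ioutil.ReadFile("cloudfront-key.pem")
	if err != nil {
		fmt.Println(err)
		return
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		fmt.Println("no PEM data found")
		return
	}
	key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		fmt.Println(err)
		return
	}

	cf := cloudfront.New("https://d111111abcdef8.cloudfront.net", key, "APKAEXAMPLEKEYID")

	// Sign a URL that expires in one hour.
	signed, err := cf.CannedSignedURL("/images/logo.png", "size=large", time.Now().Add(time.Hour))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(signed)
}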
52
Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/cloudfront_test.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
package cloudfront

import (
	"crypto/x509"
	"encoding/pem"
	"io/ioutil"
	"net/url"
	"testing"
	"time"
)

func TestSignedCannedURL(t *testing.T) {
	rawKey, err := ioutil.ReadFile("testdata/key.pem")
	if err != nil {
		t.Fatal(err)
	}

	pemKey, _ := pem.Decode(rawKey)
	privateKey, err := x509.ParsePKCS1PrivateKey(pemKey.Bytes)
	if err != nil {
		t.Fatal(err)
	}

	cf := &CloudFront{
		key:       privateKey,
		keyPairId: "test-key-pair-1231245",
		BaseURL:   "https://cloudfront.com",
	}

	expireTime, err := time.Parse(time.RFC3339, "2014-03-28T14:00:21Z")
	if err != nil {
		t.Fatal(err)
	}

	query := make(url.Values)
	query.Add("test", "value")

	uri, err := cf.CannedSignedURL("test", "test=value", expireTime)
	if err != nil {
		t.Fatal(err)
	}

	parsed, err := url.Parse(uri)
	if err != nil {
		t.Fatal(err)
	}

	signature := parsed.Query().Get("Signature")
	if signature == "" {
		t.Fatal("Encoded signature is empty")
	}
}
Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/testdata/key.pub (generated, vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
|
|||
-----BEGIN PUBLIC KEY-----
|
||||
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC0yMzp9DkPAE99DhsEaGkqougL
|
||||
vtmDKri4bZj0fFjmGmjyyjz9hlrsr87LHVWzH/7igK7040HG1UqypX3ijtJa9+6B
|
||||
KHwBBctboU3y4GfwFwVAOumY9UytFpyPlgUFrffZLQAywKkT24OgcfEj0G5kiQn7
|
||||
60wFnmSUtOuITo708QIDAQAB
|
||||
-----END PUBLIC KEY-----
|
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/export_test.go (generated, vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"github.com/AdRoll/goamz/aws"
|
||||
)
|
||||
|
||||
var originalStrategy = attempts
|
||||
|
||||
func SetAttemptStrategy(s *aws.AttemptStrategy) {
|
||||
if s == nil {
|
||||
attempts = originalStrategy
|
||||
} else {
|
||||
attempts = *s
|
||||
}
|
||||
}
|
||||
|
||||
func Sign(auth aws.Auth, method, path string, params, headers map[string][]string) {
|
||||
sign(auth, method, path, params, headers)
|
||||
}
|
||||
|
||||
func SetListPartsMax(n int) {
|
||||
listPartsMax = n
|
||||
}
|
||||
|
||||
func SetListMultiMax(n int) {
|
||||
listMultiMax = n
|
||||
}
|
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/lifecycle.go (generated, vendored, new file, 202 lines)
@@ -0,0 +1,202 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Implements an interface for s3 bucket lifecycle configuration
|
||||
// See goo.gl/d0bbDf for details.
|
||||
|
||||
const (
|
||||
LifecycleRuleStatusEnabled = "Enabled"
|
||||
LifecycleRuleStatusDisabled = "Disabled"
|
||||
LifecycleRuleDateFormat = "2006-01-02"
|
||||
StorageClassGlacier = "GLACIER"
|
||||
)
|
||||
|
||||
type Expiration struct {
|
||||
Days *uint `xml:"Days,omitempty"`
|
||||
Date string `xml:"Date,omitempty"`
|
||||
}
|
||||
|
||||
// Returns Date as a time.Time.
|
||||
func (r *Expiration) ParseDate() (time.Time, error) {
|
||||
return time.Parse(LifecycleRuleDateFormat, r.Date)
|
||||
}
|
||||
|
||||
type Transition struct {
|
||||
Days *uint `xml:"Days,omitempty"`
|
||||
Date string `xml:"Date,omitempty"`
|
||||
StorageClass string `xml:"StorageClass"`
|
||||
}
|
||||
|
||||
// Returns Date as a time.Time.
|
||||
func (r *Transition) ParseDate() (time.Time, error) {
|
||||
return time.Parse(LifecycleRuleDateFormat, r.Date)
|
||||
}
|
||||
|
||||
type NoncurrentVersionExpiration struct {
|
||||
Days *uint `xml:"NoncurrentDays,omitempty"`
|
||||
}
|
||||
|
||||
type NoncurrentVersionTransition struct {
|
||||
Days *uint `xml:"NoncurrentDays,omitempty"`
|
||||
StorageClass string `xml:"StorageClass"`
|
||||
}
|
||||
|
||||
type LifecycleRule struct {
|
||||
ID string `xml:"ID"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Status string `xml:"Status"`
|
||||
NoncurrentVersionTransition *NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"`
|
||||
NoncurrentVersionExpiration *NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
|
||||
Transition *Transition `xml:"Transition,omitempty"`
|
||||
Expiration *Expiration `xml:"Expiration,omitempty"`
|
||||
}
|
||||
|
||||
// Create a lifecycle rule with arbitrary identifier id and object name prefix
|
||||
// for which the rules should apply.
|
||||
func NewLifecycleRule(id, prefix string) *LifecycleRule {
|
||||
rule := &LifecycleRule{
|
||||
ID: id,
|
||||
Prefix: prefix,
|
||||
Status: LifecycleRuleStatusEnabled,
|
||||
}
|
||||
return rule
|
||||
}
|
||||
|
||||
// Adds a transition rule in days. Overwrites any previous transition rule.
|
||||
func (r *LifecycleRule) SetTransitionDays(days uint) {
|
||||
r.Transition = &Transition{
|
||||
Days: &days,
|
||||
StorageClass: StorageClassGlacier,
|
||||
}
|
||||
}
|
||||
|
||||
// Adds a transition rule as a date. Overwrites any previous transition rule.
|
||||
func (r *LifecycleRule) SetTransitionDate(date time.Time) {
|
||||
r.Transition = &Transition{
|
||||
Date: date.Format(LifecycleRuleDateFormat),
|
||||
StorageClass: StorageClassGlacier,
|
||||
}
|
||||
}
|
||||
|
||||
// Adds an expiration rule in days. Overwrites any previous expiration rule.
|
||||
// Days must be > 0.
|
||||
func (r *LifecycleRule) SetExpirationDays(days uint) {
|
||||
r.Expiration = &Expiration{
|
||||
Days: &days,
|
||||
}
|
||||
}
|
||||
|
||||
// Adds an expiration rule as a date. Overwrites any previous expiration rule.
|
||||
func (r *LifecycleRule) SetExpirationDate(date time.Time) {
|
||||
r.Expiration = &Expiration{
|
||||
Date: date.Format(LifecycleRuleDateFormat),
|
||||
}
|
||||
}
|
||||
|
||||
// Adds a noncurrent version transition rule. Overwrites any previous
|
||||
// noncurrent version transition rule.
|
||||
func (r *LifecycleRule) SetNoncurrentVersionTransitionDays(days uint) {
|
||||
r.NoncurrentVersionTransition = &NoncurrentVersionTransition{
|
||||
Days: &days,
|
||||
StorageClass: StorageClassGlacier,
|
||||
}
|
||||
}
|
||||
|
||||
// Adds a noncurrent version expiration rule. Days must be > 0. Overwrites
|
||||
// any previous noncurrent version expiration rule.
|
||||
func (r *LifecycleRule) SetNoncurrentVersionExpirationDays(days uint) {
|
||||
r.NoncurrentVersionExpiration = &NoncurrentVersionExpiration{
|
||||
Days: &days,
|
||||
}
|
||||
}
|
||||
|
||||
// Marks the rule as disabled.
|
||||
func (r *LifecycleRule) Disable() {
|
||||
r.Status = LifecycleRuleStatusDisabled
|
||||
}
|
||||
|
||||
// Marks the rule as enabled (default).
|
||||
func (r *LifecycleRule) Enable() {
|
||||
r.Status = LifecycleRuleStatusEnabled
|
||||
}
|
||||
|
||||
type LifecycleConfiguration struct {
|
||||
XMLName xml.Name `xml:"LifecycleConfiguration"`
|
||||
Rules *[]*LifecycleRule `xml:"Rule,omitempty"`
|
||||
}
|
||||
|
||||
// Adds a LifecycleRule to the configuration.
|
||||
func (c *LifecycleConfiguration) AddRule(r *LifecycleRule) {
|
||||
var rules []*LifecycleRule
|
||||
if c.Rules != nil {
|
||||
rules = *c.Rules
|
||||
}
|
||||
rules = append(rules, r)
|
||||
c.Rules = &rules
|
||||
}
|
||||
|
||||
// Sets the bucket's lifecycle configuration.
|
||||
func (b *Bucket) PutLifecycleConfiguration(c *LifecycleConfiguration) error {
|
||||
doc, err := xml.Marshal(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := makeXmlBuffer(doc)
|
||||
digest := md5.New()
|
||||
size, err := digest.Write(buf.Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headers := map[string][]string{
|
||||
"Content-Length": {strconv.FormatInt(int64(size), 10)},
|
||||
"Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))},
|
||||
}
|
||||
|
||||
req := &request{
|
||||
path: "/",
|
||||
method: "PUT",
|
||||
bucket: b.Name,
|
||||
headers: headers,
|
||||
payload: buf,
|
||||
params: url.Values{"lifecycle": {""}},
|
||||
}
|
||||
|
||||
return b.S3.queryV4Sign(req, nil)
|
||||
}
|
||||
|
||||
// Retrieves the lifecycle configuration for the bucket. AWS returns an error
|
||||
// if no lifecycle found.
|
||||
func (b *Bucket) GetLifecycleConfiguration() (*LifecycleConfiguration, error) {
|
||||
req := &request{
|
||||
method: "GET",
|
||||
bucket: b.Name,
|
||||
path: "/",
|
||||
params: url.Values{"lifecycle": {""}},
|
||||
}
|
||||
|
||||
conf := &LifecycleConfiguration{}
|
||||
err := b.S3.queryV4Sign(req, conf)
|
||||
return conf, err
|
||||
}
|
||||
|
||||
// Delete the bucket's lifecycle configuration.
|
||||
func (b *Bucket) DeleteLifecycleConfiguration() error {
|
||||
req := &request{
|
||||
method: "DELETE",
|
||||
bucket: b.Name,
|
||||
path: "/",
|
||||
params: url.Values{"lifecycle": {""}},
|
||||
}
|
||||
|
||||
return b.S3.queryV4Sign(req, nil)
|
||||
}
|
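
The lifecycle helpers above only build the rule set and ship the signed XML document to S3. A short sketch of typical client usage follows; the credentials, region, bucket name and rule values are placeholders chosen for illustration.

package main

import (
	"log"

	"github.com/AdRoll/goamz/aws"
	"github.com/AdRoll/goamz/s3"
)

func main() {
	// Placeholder credentials and bucket; substitute real values.
	auth := aws.Auth{AccessKey: "AKIDEXAMPLE", SecretKey: "secret"}
	bucket := s3.New(auth, aws.USEast).Bucket("my-log-bucket")

	conf := &s3.LifecycleConfiguration{}
	rule := s3.NewLifecycleRule("archive-logs", "logs/")
	rule.SetTransitionDays(30)  // move matching keys to Glacier after 30 days
	rule.SetExpirationDays(365) // delete them after a year
	conf.AddRule(rule)

	// PutLifecycleConfiguration marshals the rules and PUTs ?lifecycle with V4 auth.
	if err := bucket.PutLifecycleConfiguration(conf); err != nil {
		log.Fatal(err)
	}
}
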
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/lifecycle_test.go (generated, vendored, new file, 205 lines)
@@ -0,0 +1,205 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"github.com/AdRoll/goamz/s3"
|
||||
"gopkg.in/check.v1"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (s *S) TestLifecycleConfiguration(c *check.C) {
|
||||
date, err := time.Parse(s3.LifecycleRuleDateFormat, "2014-09-10")
|
||||
c.Check(err, check.IsNil)
|
||||
|
||||
conf := &s3.LifecycleConfiguration{}
|
||||
|
||||
rule := s3.NewLifecycleRule("transition-days", "/")
|
||||
rule.SetTransitionDays(7)
|
||||
conf.AddRule(rule)
|
||||
|
||||
rule = s3.NewLifecycleRule("transition-date", "/")
|
||||
rule.SetTransitionDate(date)
|
||||
conf.AddRule(rule)
|
||||
|
||||
rule = s3.NewLifecycleRule("expiration-days", "")
|
||||
rule.SetExpirationDays(1)
|
||||
conf.AddRule(rule)
|
||||
|
||||
rule = s3.NewLifecycleRule("expiration-date", "")
|
||||
rule.SetExpirationDate(date)
|
||||
conf.AddRule(rule)
|
||||
|
||||
rule = s3.NewLifecycleRule("noncurrent-transition", "")
|
||||
rule.SetNoncurrentVersionTransitionDays(11)
|
||||
conf.AddRule(rule)
|
||||
|
||||
rule = s3.NewLifecycleRule("noncurrent-expiration", "")
|
||||
rule.SetNoncurrentVersionExpirationDays(1011)
|
||||
|
||||
// Test Disable() and Enable() toggling
|
||||
c.Check(rule.Status, check.Equals, s3.LifecycleRuleStatusEnabled)
|
||||
rule.Disable()
|
||||
c.Check(rule.Status, check.Equals, s3.LifecycleRuleStatusDisabled)
|
||||
rule.Enable()
|
||||
c.Check(rule.Status, check.Equals, s3.LifecycleRuleStatusEnabled)
|
||||
rule.Disable()
|
||||
c.Check(rule.Status, check.Equals, s3.LifecycleRuleStatusDisabled)
|
||||
|
||||
conf.AddRule(rule)
|
||||
|
||||
doc, err := xml.MarshalIndent(conf, "", " ")
|
||||
c.Check(err, check.IsNil)
|
||||
|
||||
expectedDoc := `<LifecycleConfiguration>
|
||||
<Rule>
|
||||
<ID>transition-days</ID>
|
||||
<Prefix>/</Prefix>
|
||||
<Status>Enabled</Status>
|
||||
<Transition>
|
||||
<Days>7</Days>
|
||||
<StorageClass>GLACIER</StorageClass>
|
||||
</Transition>
|
||||
</Rule>
|
||||
<Rule>
|
||||
<ID>transition-date</ID>
|
||||
<Prefix>/</Prefix>
|
||||
<Status>Enabled</Status>
|
||||
<Transition>
|
||||
<Date>2014-09-10</Date>
|
||||
<StorageClass>GLACIER</StorageClass>
|
||||
</Transition>
|
||||
</Rule>
|
||||
<Rule>
|
||||
<ID>expiration-days</ID>
|
||||
<Prefix></Prefix>
|
||||
<Status>Enabled</Status>
|
||||
<Expiration>
|
||||
<Days>1</Days>
|
||||
</Expiration>
|
||||
</Rule>
|
||||
<Rule>
|
||||
<ID>expiration-date</ID>
|
||||
<Prefix></Prefix>
|
||||
<Status>Enabled</Status>
|
||||
<Expiration>
|
||||
<Date>2014-09-10</Date>
|
||||
</Expiration>
|
||||
</Rule>
|
||||
<Rule>
|
||||
<ID>noncurrent-transition</ID>
|
||||
<Prefix></Prefix>
|
||||
<Status>Enabled</Status>
|
||||
<NoncurrentVersionTransition>
|
||||
<NoncurrentDays>11</NoncurrentDays>
|
||||
<StorageClass>GLACIER</StorageClass>
|
||||
</NoncurrentVersionTransition>
|
||||
</Rule>
|
||||
<Rule>
|
||||
<ID>noncurrent-expiration</ID>
|
||||
<Prefix></Prefix>
|
||||
<Status>Disabled</Status>
|
||||
<NoncurrentVersionExpiration>
|
||||
<NoncurrentDays>1011</NoncurrentDays>
|
||||
</NoncurrentVersionExpiration>
|
||||
</Rule>
|
||||
</LifecycleConfiguration>`
|
||||
|
||||
c.Check(string(doc), check.Equals, expectedDoc)
|
||||
|
||||
// Unmarshalling test
|
||||
conf2 := &s3.LifecycleConfiguration{}
|
||||
err = xml.Unmarshal(doc, conf2)
|
||||
c.Check(err, check.IsNil)
|
||||
s.checkLifecycleConfigurationEqual(c, conf, conf2)
|
||||
}
|
||||
|
||||
func (s *S) checkLifecycleConfigurationEqual(c *check.C, conf, conf2 *s3.LifecycleConfiguration) {
|
||||
c.Check(len(*conf2.Rules), check.Equals, len(*conf.Rules))
|
||||
for i, rule := range *conf2.Rules {
|
||||
confRules := *conf.Rules
|
||||
c.Check(rule, check.DeepEquals, confRules[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) checkLifecycleRequest(c *check.C, req *http.Request) {
|
||||
// ?lifecycle= is the only query param
|
||||
v, ok := req.Form["lifecycle"]
|
||||
c.Assert(ok, check.Equals, true)
|
||||
c.Assert(v, check.HasLen, 1)
|
||||
c.Assert(v[0], check.Equals, "")
|
||||
|
||||
c.Assert(req.Header["X-Amz-Date"], check.HasLen, 1)
|
||||
c.Assert(req.Header["X-Amz-Date"][0], check.Not(check.Equals), "")
|
||||
|
||||
// Lifecycle methods require V4 auth
|
||||
usesV4 := strings.HasPrefix(req.Header["Authorization"][0], "AWS4-HMAC-SHA256")
|
||||
c.Assert(usesV4, check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *S) TestPutLifecycleConfiguration(c *check.C) {
|
||||
testServer.Response(200, nil, "")
|
||||
|
||||
conf := &s3.LifecycleConfiguration{}
|
||||
rule := s3.NewLifecycleRule("id", "")
|
||||
rule.SetTransitionDays(7)
|
||||
conf.AddRule(rule)
|
||||
|
||||
doc, err := xml.Marshal(conf)
|
||||
c.Check(err, check.IsNil)
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
err = b.PutLifecycleConfiguration(conf)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/")
|
||||
c.Assert(req.Header["Content-Md5"], check.HasLen, 1)
|
||||
c.Assert(req.Header["Content-Md5"][0], check.Not(check.Equals), "")
|
||||
s.checkLifecycleRequest(c, req)
|
||||
|
||||
// Check we sent the correct xml serialization
|
||||
data, err := ioutil.ReadAll(req.Body)
|
||||
req.Body.Close()
|
||||
c.Assert(err, check.IsNil)
|
||||
header := "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
|
||||
c.Assert(string(data), check.Equals, header+string(doc))
|
||||
}
|
||||
|
||||
func (s *S) TestGetLifecycleConfiguration(c *check.C) {
|
||||
conf := &s3.LifecycleConfiguration{}
|
||||
rule := s3.NewLifecycleRule("id", "")
|
||||
rule.SetTransitionDays(7)
|
||||
conf.AddRule(rule)
|
||||
|
||||
doc, err := xml.Marshal(conf)
|
||||
c.Check(err, check.IsNil)
|
||||
|
||||
testServer.Response(200, nil, string(doc))
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
conf2, err := b.GetLifecycleConfiguration()
|
||||
c.Check(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/")
|
||||
s.checkLifecycleRequest(c, req)
|
||||
s.checkLifecycleConfigurationEqual(c, conf, conf2)
|
||||
}
|
||||
|
||||
func (s *S) TestDeleteLifecycleConfiguration(c *check.C) {
|
||||
testServer.Response(200, nil, "")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
err := b.DeleteLifecycleConfiguration()
|
||||
c.Check(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "DELETE")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/")
|
||||
s.checkLifecycleRequest(c, req)
|
||||
}
|
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/multi.go (generated, vendored, new file, 508 lines)
@@ -0,0 +1,508 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Multi represents an unfinished multipart upload.
|
||||
//
|
||||
// Multipart uploads allow sending big objects in smaller chunks.
|
||||
// After all parts have been sent, the upload must be explicitly
|
||||
// completed by calling Complete with the list of parts.
|
||||
//
|
||||
// See http://goo.gl/vJfTG for an overview of multipart uploads.
|
||||
type Multi struct {
|
||||
Bucket *Bucket
|
||||
Key string
|
||||
UploadId string
|
||||
}
|
||||
|
||||
// That's the default. Here just for testing.
|
||||
var listMultiMax = 1000
|
||||
|
||||
type listMultiResp struct {
|
||||
NextKeyMarker string
|
||||
NextUploadIdMarker string
|
||||
IsTruncated bool
|
||||
Upload []Multi
|
||||
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
|
||||
}
|
||||
|
||||
// ListMulti returns the list of unfinished multipart uploads in b.
|
||||
//
|
||||
// The prefix parameter limits the response to keys that begin with the
|
||||
// specified prefix. You can use prefixes to separate a bucket into different
|
||||
// groupings of keys (to get the feeling of folders, for example).
|
||||
//
|
||||
// The delim parameter causes the response to group all of the keys that
|
||||
// share a common prefix up to the next delimiter in a single entry within
|
||||
// the CommonPrefixes field. You can use delimiters to separate a bucket
|
||||
// into different groupings of keys, similar to how folders would work.
|
||||
//
|
||||
// See http://goo.gl/ePioY for details.
|
||||
func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
|
||||
params := map[string][]string{
|
||||
"uploads": {""},
|
||||
"max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)},
|
||||
"prefix": {prefix},
|
||||
"delimiter": {delim},
|
||||
}
|
||||
for attempt := attempts.Start(); attempt.Next(); {
|
||||
req := &request{
|
||||
method: "GET",
|
||||
bucket: b.Name,
|
||||
params: params,
|
||||
}
|
||||
var resp listMultiResp
|
||||
err := b.S3.query(req, &resp)
|
||||
if shouldRetry(err) && attempt.HasNext() {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for i := range resp.Upload {
|
||||
multi := &resp.Upload[i]
|
||||
multi.Bucket = b
|
||||
multis = append(multis, multi)
|
||||
}
|
||||
prefixes = append(prefixes, resp.CommonPrefixes...)
|
||||
if !resp.IsTruncated {
|
||||
return multis, prefixes, nil
|
||||
}
|
||||
params["key-marker"] = []string{resp.NextKeyMarker}
|
||||
params["upload-id-marker"] = []string{resp.NextUploadIdMarker}
|
||||
attempt = attempts.Start() // Last request worked.
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Multi returns a multipart upload handler for the provided key
|
||||
// inside b. If a multipart upload exists for key, it is returned,
|
||||
// otherwise a new multipart upload is initiated with contType and perm.
|
||||
func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) {
|
||||
multis, _, err := b.ListMulti(key, "")
|
||||
if err != nil && !hasCode(err, "NoSuchUpload") {
|
||||
return nil, err
|
||||
}
|
||||
for _, m := range multis {
|
||||
if m.Key == key {
|
||||
return m, nil
|
||||
}
|
||||
}
|
||||
return b.InitMulti(key, contType, perm, options)
|
||||
}
|
||||
|
||||
// InitMulti initializes a new multipart upload at the provided
|
||||
// key inside b and returns a value for manipulating it.
|
||||
//
|
||||
// See http://goo.gl/XP8kL for details.
|
||||
func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) {
|
||||
headers := map[string][]string{
|
||||
"Content-Type": {contType},
|
||||
"Content-Length": {"0"},
|
||||
"x-amz-acl": {string(perm)},
|
||||
}
|
||||
options.addHeaders(headers)
|
||||
params := map[string][]string{
|
||||
"uploads": {""},
|
||||
}
|
||||
req := &request{
|
||||
method: "POST",
|
||||
bucket: b.Name,
|
||||
path: key,
|
||||
headers: headers,
|
||||
params: params,
|
||||
}
|
||||
var err error
|
||||
var resp struct {
|
||||
UploadId string `xml:"UploadId"`
|
||||
}
|
||||
for attempt := attempts.Start(); attempt.Next(); {
|
||||
err = b.S3.query(req, &resp)
|
||||
if !shouldRetry(err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
|
||||
}
|
||||
|
||||
func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) {
|
||||
headers := map[string][]string{
|
||||
"x-amz-copy-source": {url.QueryEscape(source)},
|
||||
}
|
||||
options.addHeaders(headers)
|
||||
params := map[string][]string{
|
||||
"uploadId": {m.UploadId},
|
||||
"partNumber": {strconv.FormatInt(int64(n), 10)},
|
||||
}
|
||||
|
||||
sourceBucket := m.Bucket.S3.Bucket(strings.TrimRight(strings.SplitAfterN(source, "/", 2)[0], "/"))
|
||||
sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 2)[1], nil)
|
||||
if err != nil {
|
||||
return nil, Part{}, err
|
||||
}
|
||||
|
||||
for attempt := attempts.Start(); attempt.Next(); {
|
||||
req := &request{
|
||||
method: "PUT",
|
||||
bucket: m.Bucket.Name,
|
||||
path: m.Key,
|
||||
headers: headers,
|
||||
params: params,
|
||||
}
|
||||
resp := &CopyObjectResult{}
|
||||
err = m.Bucket.S3.query(req, resp)
|
||||
if shouldRetry(err) && attempt.HasNext() {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, Part{}, err
|
||||
}
|
||||
if resp.ETag == "" {
|
||||
return nil, Part{}, errors.New("part upload succeeded with no ETag")
|
||||
}
|
||||
return resp, Part{n, resp.ETag, sourceMeta.ContentLength}, nil
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// PutPart sends part n of the multipart upload, reading all the content from r.
|
||||
// Each part, except for the last one, must be at least 5MB in size.
|
||||
//
|
||||
// See http://goo.gl/pqZer for details.
|
||||
func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
|
||||
partSize, _, md5b64, err := seekerInfo(r)
|
||||
if err != nil {
|
||||
return Part{}, err
|
||||
}
|
||||
return m.putPart(n, r, partSize, md5b64)
|
||||
}
|
||||
|
||||
func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {
|
||||
headers := map[string][]string{
|
||||
"Content-Length": {strconv.FormatInt(partSize, 10)},
|
||||
"Content-MD5": {md5b64},
|
||||
}
|
||||
params := map[string][]string{
|
||||
"uploadId": {m.UploadId},
|
||||
"partNumber": {strconv.FormatInt(int64(n), 10)},
|
||||
}
|
||||
for attempt := attempts.Start(); attempt.Next(); {
|
||||
_, err := r.Seek(0, 0)
|
||||
if err != nil {
|
||||
return Part{}, err
|
||||
}
|
||||
req := &request{
|
||||
method: "PUT",
|
||||
bucket: m.Bucket.Name,
|
||||
path: m.Key,
|
||||
headers: headers,
|
||||
params: params,
|
||||
payload: r,
|
||||
}
|
||||
err = m.Bucket.S3.prepare(req)
|
||||
if err != nil {
|
||||
return Part{}, err
|
||||
}
|
||||
resp, err := m.Bucket.S3.run(req, nil)
|
||||
if shouldRetry(err) && attempt.HasNext() {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return Part{}, err
|
||||
}
|
||||
etag := resp.Header.Get("ETag")
|
||||
if etag == "" {
|
||||
return Part{}, errors.New("part upload succeeded with no ETag")
|
||||
}
|
||||
return Part{n, etag, partSize}, nil
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
|
||||
_, err = r.Seek(0, 0)
|
||||
if err != nil {
|
||||
return 0, "", "", err
|
||||
}
|
||||
digest := md5.New()
|
||||
size, err = io.Copy(digest, r)
|
||||
if err != nil {
|
||||
return 0, "", "", err
|
||||
}
|
||||
sum := digest.Sum(nil)
|
||||
md5hex = hex.EncodeToString(sum)
|
||||
md5b64 = base64.StdEncoding.EncodeToString(sum)
|
||||
return size, md5hex, md5b64, nil
|
||||
}
|
||||
|
||||
type Part struct {
|
||||
N int `xml:"PartNumber"`
|
||||
ETag string
|
||||
Size int64
|
||||
}
|
||||
|
||||
type partSlice []Part
|
||||
|
||||
func (s partSlice) Len() int { return len(s) }
|
||||
func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }
|
||||
func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
type listPartsResp struct {
|
||||
NextPartNumberMarker string
|
||||
IsTruncated bool
|
||||
Part []Part
|
||||
}
|
||||
|
||||
// That's the default. Here just for testing.
|
||||
var listPartsMax = 1000
|
||||
|
||||
// Kept for backwards compatibility. See the documentation for ListPartsFull.
|
||||
func (m *Multi) ListParts() ([]Part, error) {
|
||||
return m.ListPartsFull(0, listPartsMax)
|
||||
}
|
||||
|
||||
// ListPartsFull returns the list of previously uploaded parts in m,
|
||||
// ordered by part number (Only parts with higher part numbers than
|
||||
// partNumberMarker will be listed). Only up to maxParts parts will be
|
||||
// returned.
|
||||
//
|
||||
// See http://goo.gl/ePioY for details.
|
||||
func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) {
|
||||
if maxParts > listPartsMax {
|
||||
maxParts = listPartsMax
|
||||
}
|
||||
|
||||
params := map[string][]string{
|
||||
"uploadId": {m.UploadId},
|
||||
"max-parts": {strconv.FormatInt(int64(maxParts), 10)},
|
||||
"part-number-marker": {strconv.FormatInt(int64(partNumberMarker), 10)},
|
||||
}
|
||||
var parts partSlice
|
||||
for attempt := attempts.Start(); attempt.Next(); {
|
||||
req := &request{
|
||||
method: "GET",
|
||||
bucket: m.Bucket.Name,
|
||||
path: m.Key,
|
||||
params: params,
|
||||
}
|
||||
var resp listPartsResp
|
||||
err := m.Bucket.S3.query(req, &resp)
|
||||
if shouldRetry(err) && attempt.HasNext() {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parts = append(parts, resp.Part...)
|
||||
if !resp.IsTruncated {
|
||||
sort.Sort(parts)
|
||||
return parts, nil
|
||||
}
|
||||
params["part-number-marker"] = []string{resp.NextPartNumberMarker}
|
||||
attempt = attempts.Start() // Last request worked.
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
type ReaderAtSeeker interface {
|
||||
io.ReaderAt
|
||||
io.ReadSeeker
|
||||
}
|
||||
|
||||
// PutAll sends all of r via a multipart upload with parts no larger
|
||||
// than partSize bytes, which must be set to at least 5MB.
|
||||
// Parts previously uploaded are either reused if their checksum
|
||||
// and size match the new part, or otherwise overwritten with the
|
||||
// new content.
|
||||
// PutAll returns all the parts of m (reused or not).
|
||||
func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
|
||||
old, err := m.ListParts()
|
||||
if err != nil && !hasCode(err, "NoSuchUpload") {
|
||||
return nil, err
|
||||
}
|
||||
reuse := 0 // Index of next old part to consider reusing.
|
||||
current := 1 // Part number of latest good part handled.
|
||||
totalSize, err := r.Seek(0, 2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
first := true // Must send at least one empty part if the file is empty.
|
||||
var result []Part
|
||||
NextSection:
|
||||
for offset := int64(0); offset < totalSize || first; offset += partSize {
|
||||
first = false
|
||||
if offset+partSize > totalSize {
|
||||
partSize = totalSize - offset
|
||||
}
|
||||
section := io.NewSectionReader(r, offset, partSize)
|
||||
_, md5hex, md5b64, err := seekerInfo(section)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for reuse < len(old) && old[reuse].N <= current {
|
||||
// Looks like this part was already sent.
|
||||
part := &old[reuse]
|
||||
etag := `"` + md5hex + `"`
|
||||
if part.N == current && part.Size == partSize && part.ETag == etag {
|
||||
// Checksum matches. Reuse the old part.
|
||||
result = append(result, *part)
|
||||
current++
|
||||
continue NextSection
|
||||
}
|
||||
reuse++
|
||||
}
|
||||
|
||||
// Part wasn't found or doesn't match. Send it.
|
||||
part, err := m.putPart(current, section, partSize, md5b64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result = append(result, part)
|
||||
current++
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
type completeUpload struct {
|
||||
XMLName xml.Name `xml:"CompleteMultipartUpload"`
|
||||
Parts completeParts `xml:"Part"`
|
||||
}
|
||||
|
||||
type completePart struct {
|
||||
PartNumber int
|
||||
ETag string
|
||||
}
|
||||
|
||||
type completeParts []completePart
|
||||
|
||||
func (p completeParts) Len() int { return len(p) }
|
||||
func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
|
||||
func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
// We can't know in advance whether we'll have an Error or a
|
||||
// CompleteMultipartUploadResult, so this structure is just a placeholder to
|
||||
// know the name of the XML object.
|
||||
type completeUploadResp struct {
|
||||
XMLName xml.Name
|
||||
InnerXML string `xml:",innerxml"`
|
||||
}
|
||||
|
||||
// Complete assembles the given previously uploaded parts into the
|
||||
// final object. This operation may take several minutes.
|
||||
//
|
||||
// See http://goo.gl/2Z7Tw for details.
|
||||
func (m *Multi) Complete(parts []Part) error {
|
||||
params := map[string][]string{
|
||||
"uploadId": {m.UploadId},
|
||||
}
|
||||
c := completeUpload{}
|
||||
for _, p := range parts {
|
||||
c.Parts = append(c.Parts, completePart{p.N, p.ETag})
|
||||
}
|
||||
sort.Sort(c.Parts)
|
||||
data, err := xml.Marshal(&c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for attempt := attempts.Start(); attempt.Next(); {
|
||||
req := &request{
|
||||
method: "POST",
|
||||
bucket: m.Bucket.Name,
|
||||
path: m.Key,
|
||||
params: params,
|
||||
payload: bytes.NewReader(data),
|
||||
}
|
||||
var resp completeUploadResp
|
||||
if m.Bucket.Region.Name == "generic" {
|
||||
headers := make(http.Header)
|
||||
headers.Add("Content-Length", strconv.FormatInt(int64(len(data)), 10))
|
||||
req.headers = headers
|
||||
}
|
||||
err := m.Bucket.S3.query(req, &resp)
|
||||
if shouldRetry(err) && attempt.HasNext() {
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// A 200 status code does not guarantee that there were no errors (see
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html ),
|
||||
// so first figure out what kind of XML "object" we are dealing with.
|
||||
|
||||
if resp.XMLName.Local == "Error" {
|
||||
// S3.query does the unmarshalling for us, so we can't unmarshal
|
||||
// again in a different struct... So we need to duct-tape the
|
||||
// original XML back together.
|
||||
fullErrorXml := "<Error>" + resp.InnerXML + "</Error>"
|
||||
s3err := &Error{}
|
||||
|
||||
if err := xml.Unmarshal([]byte(fullErrorXml), s3err); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s3err
|
||||
}
|
||||
|
||||
if resp.XMLName.Local == "CompleteMultipartUploadResult" {
|
||||
// FIXME: One could probably add a CompleteFull method returning the
|
||||
// actual contents of the CompleteMultipartUploadResult object.
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.New("Invalid XML struct returned: " + resp.XMLName.Local)
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Abort deletes an unfinished multipart upload and any previously
|
||||
// uploaded parts for it.
|
||||
//
|
||||
// After a multipart upload is aborted, no additional parts can be
|
||||
// uploaded using it. However, if any part uploads are currently in
|
||||
// progress, those part uploads might or might not succeed. As a result,
|
||||
// it might be necessary to abort a given multipart upload multiple
|
||||
// times in order to completely free all storage consumed by all parts.
|
||||
//
|
||||
// NOTE: If the described scenario happens to you, please report back to
|
||||
// the goamz authors with details. In the future such retrying should be
|
||||
// handled internally, but it's not clear what happens precisely (Is an
|
||||
// error returned? Is the issue completely undetectable?).
|
||||
//
|
||||
// See http://goo.gl/dnyJw for details.
|
||||
func (m *Multi) Abort() error {
|
||||
params := map[string][]string{
|
||||
"uploadId": {m.UploadId},
|
||||
}
|
||||
for attempt := attempts.Start(); attempt.Next(); {
|
||||
req := &request{
|
||||
method: "DELETE",
|
||||
bucket: m.Bucket.Name,
|
||||
path: m.Key,
|
||||
params: params,
|
||||
}
|
||||
err := m.Bucket.S3.query(req, nil)
|
||||
if shouldRetry(err) && attempt.HasNext() {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
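
Putting the pieces of multi.go together, the sketch below uploads a local file with Multi, PutAll and Complete, aborting on failure. The credentials, bucket, key and file name are placeholders; the 5MB part size is the documented minimum for every part except the last.

package main

import (
	"log"
	"os"

	"github.com/AdRoll/goamz/aws"
	"github.com/AdRoll/goamz/s3"
)

func main() {
	// Placeholder credentials and bucket; substitute real values.
	auth := aws.Auth{AccessKey: "AKIDEXAMPLE", SecretKey: "secret"}
	bucket := s3.New(auth, aws.USEast).Bucket("my-bucket")

	f, err := os.Open("backup.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Multi resumes a pending upload for the key if one exists, otherwise initiates a new one.
	multi, err := bucket.Multi("backups/backup.tar", "application/x-tar", s3.Private, s3.Options{})
	if err != nil {
		log.Fatal(err)
	}

	// PutAll splits the file into 5MB parts, reusing previously uploaded parts whose checksum matches.
	parts, err := multi.PutAll(f, 5*1024*1024)
	if err != nil {
		multi.Abort()
		log.Fatal(err)
	}
	if err := multi.Complete(parts); err != nil {
		log.Fatal(err)
	}
}
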
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/multi_test.go (generated, vendored, new file, 480 lines)
@@ -0,0 +1,480 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
"github.com/AdRoll/goamz/s3"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
func (s *S) TestInitMulti(c *check.C) {
|
||||
testServer.Response(200, nil, InitMultiResultDump)
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
metadata := make(map[string][]string)
|
||||
metadata["key1"] = []string{"value1"}
|
||||
metadata["key2"] = []string{"value2"}
|
||||
options := s3.Options{
|
||||
SSE: true,
|
||||
Meta: metadata,
|
||||
ContentEncoding: "text/utf8",
|
||||
CacheControl: "no-cache",
|
||||
RedirectLocation: "http://github.com/AdRoll/goamz",
|
||||
ContentMD5: "0000000000000000",
|
||||
}
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, options)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "POST")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Header["Content-Type"], check.DeepEquals, []string{"text/plain"})
|
||||
c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"})
|
||||
c.Assert(req.Form["uploads"], check.DeepEquals, []string{""})
|
||||
|
||||
c.Assert(req.Header["X-Amz-Server-Side-Encryption"], check.DeepEquals, []string{"AES256"})
|
||||
c.Assert(req.Header["Content-Encoding"], check.DeepEquals, []string{"text/utf8"})
|
||||
c.Assert(req.Header["Cache-Control"], check.DeepEquals, []string{"no-cache"})
|
||||
c.Assert(req.Header["Content-Md5"], check.DeepEquals, []string{"0000000000000000"})
|
||||
c.Assert(req.Header["X-Amz-Website-Redirect-Location"], check.DeepEquals, []string{"http://github.com/AdRoll/goamz"})
|
||||
c.Assert(req.Header["X-Amz-Meta-Key1"], check.DeepEquals, []string{"value1"})
|
||||
c.Assert(req.Header["X-Amz-Meta-Key2"], check.DeepEquals, []string{"value2"})
|
||||
|
||||
c.Assert(multi.UploadId, check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
|
||||
}
|
||||
|
||||
func (s *S) TestMultiNoPreviousUpload(c *check.C) {
|
||||
// Don't retry the NoSuchUpload error.
|
||||
s.DisableRetries()
|
||||
|
||||
testServer.Response(404, nil, NoSuchUploadErrorDump)
|
||||
testServer.Response(200, nil, InitMultiResultDump)
|
||||
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multi, err := b.Multi("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/")
|
||||
c.Assert(req.Form["uploads"], check.DeepEquals, []string{""})
|
||||
c.Assert(req.Form["prefix"], check.DeepEquals, []string{"multi"})
|
||||
|
||||
req = testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "POST")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form["uploads"], check.DeepEquals, []string{""})
|
||||
|
||||
c.Assert(multi.UploadId, check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
|
||||
}
|
||||
|
||||
func (s *S) TestMultiReturnOld(c *check.C) {
|
||||
testServer.Response(200, nil, ListMultiResultDump)
|
||||
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multi, err := b.Multi("multi1", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(multi.Key, check.Equals, "multi1")
|
||||
c.Assert(multi.UploadId, check.Equals, "iUVug89pPvSswrikD")
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/")
|
||||
c.Assert(req.Form["uploads"], check.DeepEquals, []string{""})
|
||||
c.Assert(req.Form["prefix"], check.DeepEquals, []string{"multi1"})
|
||||
}
|
||||
|
||||
func (s *S) TestListParts(c *check.C) {
|
||||
testServer.Response(200, nil, InitMultiResultDump)
|
||||
testServer.Response(200, nil, ListPartsResultDump1)
|
||||
testServer.Response(404, nil, NoSuchUploadErrorDump) // :-(
|
||||
testServer.Response(200, nil, ListPartsResultDump2)
|
||||
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
parts, err := multi.ListParts()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(parts, check.HasLen, 3)
|
||||
c.Assert(parts[0].N, check.Equals, 1)
|
||||
c.Assert(parts[0].Size, check.Equals, int64(5))
|
||||
c.Assert(parts[0].ETag, check.Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
|
||||
c.Assert(parts[1].N, check.Equals, 2)
|
||||
c.Assert(parts[1].Size, check.Equals, int64(5))
|
||||
c.Assert(parts[1].ETag, check.Equals, `"d067a0fa9dc61a6e7195ca99696b5a89"`)
|
||||
c.Assert(parts[2].N, check.Equals, 3)
|
||||
c.Assert(parts[2].Size, check.Equals, int64(5))
|
||||
c.Assert(parts[2].ETag, check.Equals, `"49dcd91231f801159e893fb5c6674985"`)
|
||||
testServer.WaitRequest()
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
|
||||
c.Assert(req.Form["max-parts"], check.DeepEquals, []string{"1000"})
|
||||
|
||||
testServer.WaitRequest() // The internal error.
|
||||
req = testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
|
||||
c.Assert(req.Form["max-parts"], check.DeepEquals, []string{"1000"})
|
||||
c.Assert(req.Form["part-number-marker"], check.DeepEquals, []string{"2"})
|
||||
}
|
||||
|
||||
func (s *S) TestPutPart(c *check.C) {
|
||||
headers := map[string]string{
|
||||
"ETag": `"26f90efd10d614f100252ff56d88dad8"`,
|
||||
}
|
||||
testServer.Response(200, nil, InitMultiResultDump)
|
||||
testServer.Response(200, headers, "")
|
||||
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
part, err := multi.PutPart(1, strings.NewReader("<part 1>"))
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(part.N, check.Equals, 1)
|
||||
c.Assert(part.Size, check.Equals, int64(8))
|
||||
c.Assert(part.ETag, check.Equals, headers["ETag"])
|
||||
|
||||
testServer.WaitRequest()
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
|
||||
c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"1"})
|
||||
c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"8"})
|
||||
c.Assert(req.Header["Content-Md5"], check.DeepEquals, []string{"JvkO/RDWFPEAJS/1bYja2A=="})
|
||||
}
|
||||
|
||||
func (s *S) TestPutPartCopy(c *check.C) {
|
||||
testServer.Response(200, nil, InitMultiResultDump)
|
||||
// PutPartCopy makes a Head request internally to verify access to the source object
|
||||
// and obtain its size
|
||||
testServer.Response(200, nil, "content")
|
||||
testServer.Response(200, nil, PutCopyResultDump)
|
||||
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
res, part, err := multi.PutPartCopy(1, s3.CopyOptions{}, "source-bucket/\u00FCber-fil\u00E9.jpg")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(part.N, check.Equals, 1)
|
||||
c.Assert(part.Size, check.Equals, int64(7))
|
||||
c.Assert(res, check.DeepEquals, &s3.CopyObjectResult{
|
||||
ETag: `"9b2cf535f27731c974343645a3985328"`,
|
||||
LastModified: `2009-10-28T22:32:00`})
|
||||
|
||||
// Verify the Head request
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "POST")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Header["Date"], check.Not(check.Equals), "")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
testServer.WaitRequest()
|
||||
req = testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
|
||||
c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"1"})
|
||||
c.Assert(req.Header["X-Amz-Copy-Source"], check.DeepEquals, []string{`source-bucket%2F%C3%BCber-fil%C3%A9.jpg`})
|
||||
}
|
||||
|
||||
func readAll(r io.Reader) string {
|
||||
data, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return string(data)
|
||||
}
|
||||
|
||||
func (s *S) TestPutAllNoPreviousUpload(c *check.C) {
|
||||
// Don't retry the NoSuchUpload error.
|
||||
s.DisableRetries()
|
||||
|
||||
etag1 := map[string]string{"ETag": `"etag1"`}
|
||||
etag2 := map[string]string{"ETag": `"etag2"`}
|
||||
etag3 := map[string]string{"ETag": `"etag3"`}
|
||||
testServer.Response(200, nil, InitMultiResultDump)
|
||||
testServer.Response(404, nil, NoSuchUploadErrorDump)
|
||||
testServer.Response(200, etag1, "")
|
||||
testServer.Response(200, etag2, "")
|
||||
testServer.Response(200, etag3, "")
|
||||
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5)
|
||||
c.Assert(parts, check.HasLen, 3)
|
||||
c.Assert(parts[0].ETag, check.Equals, `"etag1"`)
|
||||
c.Assert(parts[1].ETag, check.Equals, `"etag2"`)
|
||||
c.Assert(parts[2].ETag, check.Equals, `"etag3"`)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Init
|
||||
testServer.WaitRequest()
|
||||
|
||||
// List old parts. Won't find anything.
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
|
||||
// Send part 1.
|
||||
req = testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"1"})
|
||||
c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"5"})
|
||||
c.Assert(readAll(req.Body), check.Equals, "part1")
|
||||
|
||||
// Send part 2.
|
||||
req = testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"2"})
|
||||
c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"5"})
|
||||
c.Assert(readAll(req.Body), check.Equals, "part2")
|
||||
|
||||
// Send part 3 with shorter body.
|
||||
req = testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"3"})
|
||||
c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"4"})
|
||||
c.Assert(readAll(req.Body), check.Equals, "last")
|
||||
}
|
||||
|
||||
func (s *S) TestPutAllZeroSizeFile(c *check.C) {
|
||||
// Don't retry the NoSuchUpload error.
|
||||
s.DisableRetries()
|
||||
|
||||
etag1 := map[string]string{"ETag": `"etag1"`}
|
||||
testServer.Response(200, nil, InitMultiResultDump)
|
||||
testServer.Response(404, nil, NoSuchUploadErrorDump)
|
||||
testServer.Response(200, etag1, "")
|
||||
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Must send at least one part, so that completing it will work.
|
||||
parts, err := multi.PutAll(strings.NewReader(""), 5)
|
||||
c.Assert(parts, check.HasLen, 1)
|
||||
c.Assert(parts[0].ETag, check.Equals, `"etag1"`)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Init
|
||||
testServer.WaitRequest()
|
||||
|
||||
// List old parts. Won't find anything.
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
|
||||
// Send empty part.
|
||||
req = testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"1"})
|
||||
c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"0"})
|
||||
c.Assert(readAll(req.Body), check.Equals, "")
|
||||
}
|
||||
|
||||
func (s *S) TestPutAllResume(c *check.C) {
|
||||
etag2 := map[string]string{"ETag": `"etag2"`}
|
||||
testServer.Response(200, nil, InitMultiResultDump)
|
||||
testServer.Response(200, nil, ListPartsResultDump1)
|
||||
testServer.Response(200, nil, ListPartsResultDump2)
|
||||
testServer.Response(200, etag2, "")
|
||||
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// "part1" and "part3" match the checksums in ResultDump1.
|
||||
// The middle one is a mismatch (it refers to "part2").
|
||||
parts, err := multi.PutAll(strings.NewReader("part1partXpart3"), 5)
|
||||
c.Assert(parts, check.HasLen, 3)
|
||||
c.Assert(parts[0].N, check.Equals, 1)
|
||||
c.Assert(parts[0].Size, check.Equals, int64(5))
|
||||
c.Assert(parts[0].ETag, check.Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
|
||||
c.Assert(parts[1].N, check.Equals, 2)
|
||||
c.Assert(parts[1].Size, check.Equals, int64(5))
|
||||
c.Assert(parts[1].ETag, check.Equals, `"etag2"`)
|
||||
c.Assert(parts[2].N, check.Equals, 3)
|
||||
c.Assert(parts[2].Size, check.Equals, int64(5))
|
||||
c.Assert(parts[2].ETag, check.Equals, `"49dcd91231f801159e893fb5c6674985"`)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Init
|
||||
testServer.WaitRequest()
|
||||
|
||||
// List old parts, broken in two requests.
|
||||
for i := 0; i < 2; i++ {
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
}
|
||||
|
||||
// Send part 2, as it didn't match the checksum.
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"2"})
|
||||
c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"5"})
|
||||
c.Assert(readAll(req.Body), check.Equals, "partX")
|
||||
}
|
||||
|
||||
func (s *S) TestMultiComplete(c *check.C) {
|
||||
testServer.Response(200, nil, InitMultiResultDump)
|
||||
testServer.Response(200, nil, MultiCompleteDump)
|
||||
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
testServer.WaitRequest()
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "POST")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
|
||||
|
||||
var payload struct {
|
||||
XMLName xml.Name
|
||||
Part []struct {
|
||||
PartNumber int
|
||||
ETag string
|
||||
}
|
||||
}
|
||||
|
||||
dec := xml.NewDecoder(req.Body)
|
||||
err = dec.Decode(&payload)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
c.Assert(payload.XMLName.Local, check.Equals, "CompleteMultipartUpload")
|
||||
c.Assert(len(payload.Part), check.Equals, 2)
|
||||
c.Assert(payload.Part[0].PartNumber, check.Equals, 1)
|
||||
c.Assert(payload.Part[0].ETag, check.Equals, `"ETag1"`)
|
||||
c.Assert(payload.Part[1].PartNumber, check.Equals, 2)
|
||||
c.Assert(payload.Part[1].ETag, check.Equals, `"ETag2"`)
|
||||
}
|
||||
|
||||
func (s *S) TestMultiCompleteError(c *check.C) {
|
||||
testServer.Response(200, nil, InitMultiResultDump)
|
||||
// Note the 200 response. Completing will hold the connection on some
|
||||
// kind of long poll, and may return a late error even after a 200.
|
||||
testServer.Response(200, nil, InternalErrorDump)
|
||||
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
testServer.WaitRequest()
|
||||
testServer.WaitRequest()
|
||||
}
|
||||
|
||||
func (s *S) TestMultiAbort(c *check.C) {
|
||||
testServer.Response(200, nil, InitMultiResultDump)
|
||||
testServer.Response(200, nil, "")
|
||||
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = multi.Abort()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
testServer.WaitRequest()
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "DELETE")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
|
||||
}
|
||||
|
||||
func (s *S) TestListMulti(c *check.C) {
|
||||
testServer.Response(200, nil, ListMultiResultDump)
|
||||
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multis, prefixes, err := b.ListMulti("", "/")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(prefixes, check.DeepEquals, []string{"a/", "b/"})
|
||||
c.Assert(multis, check.HasLen, 2)
|
||||
c.Assert(multis[0].Key, check.Equals, "multi1")
|
||||
c.Assert(multis[0].UploadId, check.Equals, "iUVug89pPvSswrikD")
|
||||
c.Assert(multis[1].Key, check.Equals, "multi2")
|
||||
c.Assert(multis[1].UploadId, check.Equals, "DkirwsSvPp98guVUi")
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/")
|
||||
c.Assert(req.Form["uploads"], check.DeepEquals, []string{""})
|
||||
c.Assert(req.Form["prefix"], check.DeepEquals, []string{""})
|
||||
c.Assert(req.Form["delimiter"], check.DeepEquals, []string{"/"})
|
||||
c.Assert(req.Form["max-uploads"], check.DeepEquals, []string{"1000"})
|
||||
}
|
||||
|
||||
func (s *S) TestMultiCompleteSupportRadosGW(c *check.C) {
|
||||
testServer.Response(200, nil, InitMultiResultDump)
|
||||
testServer.Response(200, nil, MultiCompleteDump)
|
||||
s.s3.Region.Name = "generic"
|
||||
b := s.s3.Bucket("sample")
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
testServer.WaitRequest()
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "POST")
|
||||
c.Assert(req.URL.Path, check.Equals, "/sample/multi")
|
||||
c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
|
||||
c.Assert(req.Header["Content-Length"], check.NotNil)
|
||||
|
||||
var payload struct {
|
||||
XMLName xml.Name
|
||||
Part []struct {
|
||||
PartNumber int
|
||||
ETag string
|
||||
}
|
||||
}
|
||||
|
||||
dec := xml.NewDecoder(req.Body)
|
||||
err = dec.Decode(&payload)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
c.Assert(payload.XMLName.Local, check.Equals, "CompleteMultipartUpload")
|
||||
c.Assert(len(payload.Part), check.Equals, 2)
|
||||
c.Assert(payload.Part[0].PartNumber, check.Equals, 1)
|
||||
c.Assert(payload.Part[0].ETag, check.Equals, `"ETag1"`)
|
||||
c.Assert(payload.Part[1].PartNumber, check.Equals, 2)
|
||||
c.Assert(payload.Part[1].ETag, check.Equals, `"ETag2"`)
|
||||
}
|
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/responses_test.go (generated, vendored, new file, 248 lines)
@@ -0,0 +1,248 @@
|
|||
package s3_test
|
||||
|
||||
var PutCopyResultDump = `
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<CopyObjectResult>
|
||||
<LastModified>2009-10-28T22:32:00</LastModified>
|
||||
<ETag>"9b2cf535f27731c974343645a3985328"</ETag>
|
||||
</CopyObjectResult>
|
||||
`
|
||||
|
||||
var GetObjectErrorDump = `
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<Error><Code>NoSuchBucket</Code><Message>The specified bucket does not exist</Message>
|
||||
<BucketName>non-existent-bucket</BucketName><RequestId>3F1B667FAD71C3D8</RequestId>
|
||||
<HostId>L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D</HostId></Error>
|
||||
`
|
||||
|
||||
var GetListResultDump1 = `
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
|
||||
<Name>quotes</Name>
|
||||
<Prefix>N</Prefix>
|
||||
<IsTruncated>false</IsTruncated>
|
||||
<Contents>
|
||||
<Key>Nelson</Key>
|
||||
<LastModified>2006-01-01T12:00:00.000Z</LastModified>
|
||||
<ETag>"828ef3fdfa96f00ad9f27c383fc9ac7f"</ETag>
|
||||
<Size>5</Size>
|
||||
<StorageClass>STANDARD</StorageClass>
|
||||
<Owner>
|
||||
<ID>bcaf161ca5fb16fd081034f</ID>
|
||||
<DisplayName>webfile</DisplayName>
|
||||
</Owner>
|
||||
</Contents>
|
||||
<Contents>
|
||||
<Key>Neo</Key>
|
||||
<LastModified>2006-01-01T12:00:00.000Z</LastModified>
|
||||
<ETag>"828ef3fdfa96f00ad9f27c383fc9ac7f"</ETag>
|
||||
<Size>4</Size>
|
||||
<StorageClass>STANDARD</StorageClass>
|
||||
<Owner>
|
||||
<ID>bcaf1ffd86a5fb16fd081034f</ID>
|
||||
<DisplayName>webfile</DisplayName>
|
||||
</Owner>
|
||||
</Contents>
|
||||
</ListBucketResult>
|
||||
`
|
||||
|
||||
var GetListResultDump2 = `
|
||||
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||
<Name>example-bucket</Name>
|
||||
<Prefix>photos/2006/</Prefix>
|
||||
<Marker>some-marker</Marker>
|
||||
<MaxKeys>1000</MaxKeys>
|
||||
<Delimiter>/</Delimiter>
|
||||
<IsTruncated>false</IsTruncated>
|
||||
|
||||
<CommonPrefixes>
|
||||
<Prefix>photos/2006/feb/</Prefix>
|
||||
</CommonPrefixes>
|
||||
<CommonPrefixes>
|
||||
<Prefix>photos/2006/jan/</Prefix>
|
||||
</CommonPrefixes>
|
||||
</ListBucketResult>
|
||||
`
|
||||
|
||||
var InitMultiResultDump = `
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||
<Bucket>sample</Bucket>
|
||||
<Key>multi</Key>
|
||||
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
|
||||
</InitiateMultipartUploadResult>
|
||||
`
|
||||
|
||||
var ListPartsResultDump1 = `
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||
<Bucket>sample</Bucket>
|
||||
<Key>multi</Key>
|
||||
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
|
||||
<Initiator>
|
||||
<ID>bb5c0f63b0b25f2d099c</ID>
|
||||
<DisplayName>joe</DisplayName>
|
||||
</Initiator>
|
||||
<Owner>
|
||||
<ID>bb5c0f63b0b25f2d099c</ID>
|
||||
<DisplayName>joe</DisplayName>
|
||||
</Owner>
|
||||
<StorageClass>STANDARD</StorageClass>
|
||||
<PartNumberMarker>0</PartNumberMarker>
|
||||
<NextPartNumberMarker>2</NextPartNumberMarker>
|
||||
<MaxParts>2</MaxParts>
|
||||
<IsTruncated>true</IsTruncated>
|
||||
<Part>
|
||||
<PartNumber>1</PartNumber>
|
||||
<LastModified>2013-01-30T13:45:51.000Z</LastModified>
|
||||
<ETag>"ffc88b4ca90a355f8ddba6b2c3b2af5c"</ETag>
|
||||
<Size>5</Size>
|
||||
</Part>
|
||||
<Part>
|
||||
<PartNumber>2</PartNumber>
|
||||
<LastModified>2013-01-30T13:45:52.000Z</LastModified>
|
||||
<ETag>"d067a0fa9dc61a6e7195ca99696b5a89"</ETag>
|
||||
<Size>5</Size>
|
||||
</Part>
|
||||
</ListPartsResult>
|
||||
`
|
||||
|
||||
var ListPartsResultDump2 = `
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||
<Bucket>sample</Bucket>
|
||||
<Key>multi</Key>
|
||||
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
|
||||
<Initiator>
|
||||
<ID>bb5c0f63b0b25f2d099c</ID>
|
||||
<DisplayName>joe</DisplayName>
|
||||
</Initiator>
|
||||
<Owner>
|
||||
<ID>bb5c0f63b0b25f2d099c</ID>
|
||||
<DisplayName>joe</DisplayName>
|
||||
</Owner>
|
||||
<StorageClass>STANDARD</StorageClass>
|
||||
<PartNumberMarker>2</PartNumberMarker>
|
||||
<NextPartNumberMarker>3</NextPartNumberMarker>
|
||||
<MaxParts>2</MaxParts>
|
||||
<IsTruncated>false</IsTruncated>
|
||||
<Part>
|
||||
<PartNumber>3</PartNumber>
|
||||
<LastModified>2013-01-30T13:46:50.000Z</LastModified>
|
||||
<ETag>"49dcd91231f801159e893fb5c6674985"</ETag>
|
||||
<Size>5</Size>
|
||||
</Part>
|
||||
</ListPartsResult>
|
||||
`
|
||||
|
||||
var ListMultiResultDump = `
|
||||
<?xml version="1.0"?>
|
||||
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||
<Bucket>goamz-test-bucket-us-east-1-akiajk3wyewhctyqbf7a</Bucket>
|
||||
<KeyMarker/>
|
||||
<UploadIdMarker/>
|
||||
<NextKeyMarker>multi1</NextKeyMarker>
|
||||
<NextUploadIdMarker>iUVug89pPvSswrikD72p8uO62EzhNtpDxRmwC5WSiWDdK9SfzmDqe3xpP1kMWimyimSnz4uzFc3waVM5ufrKYQ--</NextUploadIdMarker>
|
||||
<Delimiter>/</Delimiter>
|
||||
<MaxUploads>1000</MaxUploads>
|
||||
<IsTruncated>false</IsTruncated>
|
||||
<Upload>
|
||||
<Key>multi1</Key>
|
||||
<UploadId>iUVug89pPvSswrikD</UploadId>
|
||||
<Initiator>
|
||||
<ID>bb5c0f63b0b25f2d0</ID>
|
||||
<DisplayName>gustavoniemeyer</DisplayName>
|
||||
</Initiator>
|
||||
<Owner>
|
||||
<ID>bb5c0f63b0b25f2d0</ID>
|
||||
<DisplayName>gustavoniemeyer</DisplayName>
|
||||
</Owner>
|
||||
<StorageClass>STANDARD</StorageClass>
|
||||
<Initiated>2013-01-30T18:15:47.000Z</Initiated>
|
||||
</Upload>
|
||||
<Upload>
|
||||
<Key>multi2</Key>
|
||||
<UploadId>DkirwsSvPp98guVUi</UploadId>
|
||||
<Initiator>
|
||||
<ID>bb5c0f63b0b25f2d0</ID>
|
||||
<DisplayName>joe</DisplayName>
|
||||
</Initiator>
|
||||
<Owner>
|
||||
<ID>bb5c0f63b0b25f2d0</ID>
|
||||
<DisplayName>joe</DisplayName>
|
||||
</Owner>
|
||||
<StorageClass>STANDARD</StorageClass>
|
||||
<Initiated>2013-01-30T18:15:47.000Z</Initiated>
|
||||
</Upload>
|
||||
<CommonPrefixes>
|
||||
<Prefix>a/</Prefix>
|
||||
</CommonPrefixes>
|
||||
<CommonPrefixes>
|
||||
<Prefix>b/</Prefix>
|
||||
</CommonPrefixes>
|
||||
</ListMultipartUploadsResult>
|
||||
`
|
||||
|
||||
var NoSuchUploadErrorDump = `
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<Error>
|
||||
<Code>NoSuchUpload</Code>
|
||||
<Message>Not relevant</Message>
|
||||
<BucketName>sample</BucketName>
|
||||
<RequestId>3F1B667FAD71C3D8</RequestId>
|
||||
<HostId>kjhwqk</HostId>
|
||||
</Error>
|
||||
`
|
||||
|
||||
var MultiCompleteDump = `
|
||||
<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||
<Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>
|
||||
<Bucket>Example-Bucket</Bucket>
|
||||
<Key>Example-Object</Key>
|
||||
<ETag>"3858f62230ac3c915f300c664312c11f-9"</ETag>
|
||||
</CompleteMultipartUploadResult>
|
||||
`
|
||||
|
||||
var InternalErrorDump = `
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<Error>
|
||||
<Code>InternalError</Code>
|
||||
<Message>Not relevant</Message>
|
||||
<BucketName>sample</BucketName>
|
||||
<RequestId>3F1B667FAD71C3D8</RequestId>
|
||||
<HostId>kjhwqk</HostId>
|
||||
</Error>
|
||||
`
|
||||
|
||||
var GetServiceDump = `
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
|
||||
<Owner>
|
||||
<ID>bcaf1ffd86f461ca5fb16fd081034f</ID>
|
||||
<DisplayName>webfile</DisplayName>
|
||||
</Owner>
|
||||
<Buckets>
|
||||
<Bucket>
|
||||
<Name>quotes</Name>
|
||||
<CreationDate>2006-02-03T16:45:09.000Z</CreationDate>
|
||||
</Bucket>
|
||||
<Bucket>
|
||||
<Name>samples</Name>
|
||||
<CreationDate>2006-02-03T16:41:58.000Z</CreationDate>
|
||||
</Bucket>
|
||||
</Buckets>
|
||||
</ListAllMyBucketsResult>
|
||||
`
|
||||
|
||||
var GetLocationUsStandard = `
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
|
||||
`
|
||||
|
||||
var GetLocationUsWest1 = `
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">us-west-1</LocationConstraint>
|
||||
`
|
||||
|
||||
var BucketWebsiteConfigurationDump = `<?xml version="1.0" encoding="UTF-8"?>
|
||||
<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><RedirectAllRequestsTo><HostName>example.com</HostName></RedirectAllRequestsTo></WebsiteConfiguration>`
|
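The canned XML dumps above mirror real S3 responses; the unit tests in s3_test.go further down replay them through a local fake HTTP server and assert on the parsed result. A minimal sketch of that replay pattern, reusing the testServer and GetListResultDump1 identifiers from this package (the surrounding gocheck suite plumbing, s.s3 and c, is assumed):

// Sketch: queue a canned response, call the client, then inspect the recorded request.
testServer.Response(200, nil, GetListResultDump1) // the next HTTP request gets this XML body

b := s.s3.Bucket("quotes")
data, err := b.List("N", "", "", 0) // the client parses the fixture like a live response
c.Assert(err, check.IsNil)
c.Assert(data.Name, check.Equals, "quotes")

req := testServer.WaitRequest() // drain and examine what the client actually sent
c.Assert(req.URL.Path, check.Equals, "/quotes/")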
1298
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3.go
generated
vendored
Normal file
File diff suppressed because it is too large
513
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,513 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/AdRoll/goamz/aws"
|
||||
"github.com/AdRoll/goamz/s3"
|
||||
"github.com/AdRoll/goamz/testutil"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
func Test(t *testing.T) {
|
||||
check.TestingT(t)
|
||||
}
|
||||
|
||||
type S struct {
|
||||
s3 *s3.S3
|
||||
}
|
||||
|
||||
var _ = check.Suite(&S{})
|
||||
|
||||
var testServer = testutil.NewHTTPServer()
|
||||
|
||||
func (s *S) SetUpSuite(c *check.C) {
|
||||
testServer.Start()
|
||||
auth := aws.Auth{AccessKey: "abc", SecretKey: "123"}
|
||||
s.s3 = s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL})
|
||||
}
|
||||
|
||||
func (s *S) TearDownSuite(c *check.C) {
|
||||
s3.SetAttemptStrategy(nil)
|
||||
}
|
||||
|
||||
func (s *S) SetUpTest(c *check.C) {
|
||||
attempts := aws.AttemptStrategy{
|
||||
Total: 300 * time.Millisecond,
|
||||
Delay: 100 * time.Millisecond,
|
||||
}
|
||||
s3.SetAttemptStrategy(&attempts)
|
||||
}
|
||||
|
||||
func (s *S) TearDownTest(c *check.C) {
|
||||
testServer.Flush()
|
||||
}
|
||||
|
||||
func (s *S) DisableRetries() {
|
||||
s3.SetAttemptStrategy(&aws.AttemptStrategy{})
|
||||
}
|
||||
|
||||
// PutBucket docs: http://goo.gl/kBTCu
|
||||
|
||||
func (s *S) TestPutBucket(c *check.C) {
|
||||
testServer.Response(200, nil, "")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
err := b.PutBucket(s3.Private)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/")
|
||||
c.Assert(req.Header["Date"], check.Not(check.Equals), "")
|
||||
}
|
||||
|
||||
// PutBucketWebsite docs: http://goo.gl/TpRlUy
|
||||
|
||||
func (s *S) TestPutBucketWebsite(c *check.C) {
|
||||
testServer.Response(200, nil, "")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
config := s3.WebsiteConfiguration{
|
||||
RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{HostName: "example.com"},
|
||||
}
|
||||
err := b.PutBucketWebsite(config)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
body, err := ioutil.ReadAll(req.Body)
|
||||
req.Body.Close()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(string(body), check.Equals, BucketWebsiteConfigurationDump)
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/")
|
||||
c.Assert(req.URL.RawQuery, check.Equals, "website=")
|
||||
c.Assert(req.Header["Date"], check.Not(check.Equals), "")
|
||||
}
|
||||
|
||||
// Head docs: http://bit.ly/17K1ylI
|
||||
|
||||
func (s *S) TestHead(c *check.C) {
|
||||
testServer.Response(200, nil, "content")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
resp, err := b.Head("name", nil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "HEAD")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/name")
|
||||
c.Assert(req.Header["Date"], check.Not(check.Equals), "")
|
||||
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(resp.ContentLength, check.FitsTypeOf, int64(0))
|
||||
c.Assert(resp, check.FitsTypeOf, &http.Response{})
|
||||
}
|
||||
|
||||
// DeleteBucket docs: http://goo.gl/GoBrY
|
||||
|
||||
func (s *S) TestDelBucket(c *check.C) {
|
||||
testServer.Response(204, nil, "")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
err := b.DelBucket()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "DELETE")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/")
|
||||
c.Assert(req.Header["Date"], check.Not(check.Equals), "")
|
||||
}
|
||||
|
||||
// GetObject docs: http://goo.gl/isCO7
|
||||
|
||||
func (s *S) TestGet(c *check.C) {
|
||||
testServer.Response(200, nil, "content")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
data, err := b.Get("name")
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/name")
|
||||
c.Assert(req.Header["Date"], check.Not(check.Equals), "")
|
||||
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(string(data), check.Equals, "content")
|
||||
}
|
||||
|
||||
func (s *S) TestGetWithPlus(c *check.C) {
|
||||
testServer.Response(200, nil, "content")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
_, err := b.Get("has+plus")
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(req.RequestURI, check.Equals, "http://localhost:4444/bucket/has%2Bplus")
|
||||
}
|
||||
|
||||
func (s *S) TestURL(c *check.C) {
|
||||
testServer.Response(200, nil, "content")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
url := b.URL("name")
|
||||
r, err := http.Get(url)
|
||||
c.Assert(err, check.IsNil)
|
||||
data, err := ioutil.ReadAll(r.Body)
|
||||
r.Body.Close()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(string(data), check.Equals, "content")
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/name")
|
||||
}
|
||||
|
||||
func (s *S) TestGetReader(c *check.C) {
|
||||
testServer.Response(200, nil, "content")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
rc, err := b.GetReader("name")
|
||||
c.Assert(err, check.IsNil)
|
||||
data, err := ioutil.ReadAll(rc)
|
||||
rc.Close()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(string(data), check.Equals, "content")
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/name")
|
||||
c.Assert(req.Header["Date"], check.Not(check.Equals), "")
|
||||
}
|
||||
|
||||
func (s *S) TestGetNotFound(c *check.C) {
|
||||
for i := 0; i < 10; i++ {
|
||||
testServer.Response(404, nil, GetObjectErrorDump)
|
||||
}
|
||||
|
||||
b := s.s3.Bucket("non-existent-bucket")
|
||||
data, err := b.Get("non-existent")
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/non-existent-bucket/non-existent")
|
||||
c.Assert(req.Header["Date"], check.Not(check.Equals), "")
|
||||
|
||||
s3err, _ := err.(*s3.Error)
|
||||
c.Assert(s3err, check.NotNil)
|
||||
c.Assert(s3err.StatusCode, check.Equals, 404)
|
||||
c.Assert(s3err.BucketName, check.Equals, "non-existent-bucket")
|
||||
c.Assert(s3err.RequestId, check.Equals, "3F1B667FAD71C3D8")
|
||||
c.Assert(s3err.HostId, check.Equals, "L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D")
|
||||
c.Assert(s3err.Code, check.Equals, "NoSuchBucket")
|
||||
c.Assert(s3err.Message, check.Equals, "The specified bucket does not exist")
|
||||
c.Assert(s3err.Error(), check.Equals, "The specified bucket does not exist")
|
||||
c.Assert(data, check.IsNil)
|
||||
}
|
||||
|
||||
// PutObject docs: http://goo.gl/FEBPD
|
||||
|
||||
func (s *S) TestPutObject(c *check.C) {
|
||||
testServer.Response(200, nil, "")
|
||||
const DISPOSITION = "attachment; filename=\"0x1a2b3c.jpg\""
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{ContentDisposition: DISPOSITION})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/name")
|
||||
c.Assert(req.Header["Date"], check.Not(check.DeepEquals), []string{""})
|
||||
c.Assert(req.Header["Content-Type"], check.DeepEquals, []string{"content-type"})
|
||||
c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"7"})
|
||||
c.Assert(req.Header["Content-Disposition"], check.DeepEquals, []string{DISPOSITION})
|
||||
//c.Assert(req.Header["Content-MD5"], gocheck.DeepEquals, "...")
|
||||
c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"})
|
||||
}
|
||||
|
||||
func (s *S) TestPutObjectReducedRedundancy(c *check.C) {
|
||||
testServer.Response(200, nil, "")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{StorageClass: s3.ReducedRedundancy})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/name")
|
||||
c.Assert(req.Header["Date"], check.Not(check.DeepEquals), []string{""})
|
||||
c.Assert(req.Header["Content-Type"], check.DeepEquals, []string{"content-type"})
|
||||
c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"7"})
|
||||
c.Assert(req.Header["X-Amz-Storage-Class"], check.DeepEquals, []string{"REDUCED_REDUNDANCY"})
|
||||
}
|
||||
|
||||
// PutCopy docs: http://goo.gl/mhEHtA
|
||||
func (s *S) TestPutCopy(c *check.C) {
|
||||
testServer.Response(200, nil, PutCopyResultDump)
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
res, err := b.PutCopy("name", s3.Private, s3.CopyOptions{},
|
||||
// 0xFC is ü - 0xE9 is é
|
||||
"source-bucket/\u00FCber-fil\u00E9.jpg")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(res, check.DeepEquals, &s3.CopyObjectResult{
|
||||
ETag: `"9b2cf535f27731c974343645a3985328"`,
|
||||
LastModified: `2009-10-28T22:32:00`})
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/name")
|
||||
c.Assert(req.Header["Date"], check.Not(check.DeepEquals), []string{""})
|
||||
c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"0"})
|
||||
c.Assert(req.Header["X-Amz-Copy-Source"], check.DeepEquals, []string{`source-bucket%2F%C3%BCber-fil%C3%A9.jpg`})
|
||||
c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"})
|
||||
}
|
||||
|
||||
func (s *S) TestPutObjectReadTimeout(c *check.C) {
|
||||
s.s3.ReadTimeout = 50 * time.Millisecond
|
||||
defer func() {
|
||||
s.s3.ReadTimeout = 0
|
||||
}()
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{})
|
||||
|
||||
// Make sure that we get a timeout error.
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
// Set the response after the request times out so that the next request will work.
|
||||
testServer.Response(200, nil, "")
|
||||
|
||||
// This time set the response within our timeout period so that we expect the call
|
||||
// to return successfully.
|
||||
go func() {
|
||||
time.Sleep(25 * time.Millisecond)
|
||||
testServer.Response(200, nil, "")
|
||||
}()
|
||||
err = b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func (s *S) TestPutReader(c *check.C) {
|
||||
testServer.Response(200, nil, "")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
buf := bytes.NewBufferString("content")
|
||||
err := b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "PUT")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/name")
|
||||
c.Assert(req.Header["Date"], check.Not(check.DeepEquals), []string{""})
|
||||
c.Assert(req.Header["Content-Type"], check.DeepEquals, []string{"content-type"})
|
||||
c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"7"})
|
||||
//c.Assert(req.Header["Content-MD5"], gocheck.Equals, "...")
|
||||
c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"})
|
||||
}
|
||||
|
||||
// DelObject docs: http://goo.gl/APeTt
|
||||
|
||||
func (s *S) TestDelObject(c *check.C) {
|
||||
testServer.Response(200, nil, "")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
err := b.Del("name")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "DELETE")
|
||||
c.Assert(req.URL.Path, check.Equals, "/bucket/name")
|
||||
c.Assert(req.Header["Date"], check.Not(check.Equals), "")
|
||||
}
|
||||
|
||||
func (s *S) TestDelMultiObjects(c *check.C) {
|
||||
testServer.Response(200, nil, "")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
objects := []s3.Object{s3.Object{Key: "test"}}
|
||||
err := b.DelMulti(s3.Delete{
|
||||
Quiet: false,
|
||||
Objects: objects,
|
||||
})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "POST")
|
||||
c.Assert(req.URL.RawQuery, check.Equals, "delete=")
|
||||
c.Assert(req.Header["Date"], check.Not(check.Equals), "")
|
||||
c.Assert(req.Header["Content-MD5"], check.Not(check.Equals), "")
|
||||
c.Assert(req.Header["Content-Type"], check.Not(check.Equals), "")
|
||||
c.Assert(req.ContentLength, check.Not(check.Equals), "")
|
||||
}
|
||||
|
||||
// Bucket List Objects docs: http://goo.gl/YjQTc
|
||||
|
||||
func (s *S) TestList(c *check.C) {
|
||||
testServer.Response(200, nil, GetListResultDump1)
|
||||
|
||||
b := s.s3.Bucket("quotes")
|
||||
|
||||
data, err := b.List("N", "", "", 0)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/quotes/")
|
||||
c.Assert(req.Header["Date"], check.Not(check.Equals), "")
|
||||
c.Assert(req.Form["prefix"], check.DeepEquals, []string{"N"})
|
||||
c.Assert(req.Form["delimiter"], check.DeepEquals, []string{""})
|
||||
c.Assert(req.Form["marker"], check.DeepEquals, []string{""})
|
||||
c.Assert(req.Form["max-keys"], check.DeepEquals, []string(nil))
|
||||
|
||||
c.Assert(data.Name, check.Equals, "quotes")
|
||||
c.Assert(data.Prefix, check.Equals, "N")
|
||||
c.Assert(data.IsTruncated, check.Equals, false)
|
||||
c.Assert(len(data.Contents), check.Equals, 2)
|
||||
|
||||
c.Assert(data.Contents[0].Key, check.Equals, "Nelson")
|
||||
c.Assert(data.Contents[0].LastModified, check.Equals, "2006-01-01T12:00:00.000Z")
|
||||
c.Assert(data.Contents[0].ETag, check.Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
|
||||
c.Assert(data.Contents[0].Size, check.Equals, int64(5))
|
||||
c.Assert(data.Contents[0].StorageClass, check.Equals, "STANDARD")
|
||||
c.Assert(data.Contents[0].Owner.ID, check.Equals, "bcaf161ca5fb16fd081034f")
|
||||
c.Assert(data.Contents[0].Owner.DisplayName, check.Equals, "webfile")
|
||||
|
||||
c.Assert(data.Contents[1].Key, check.Equals, "Neo")
|
||||
c.Assert(data.Contents[1].LastModified, check.Equals, "2006-01-01T12:00:00.000Z")
|
||||
c.Assert(data.Contents[1].ETag, check.Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
|
||||
c.Assert(data.Contents[1].Size, check.Equals, int64(4))
|
||||
c.Assert(data.Contents[1].StorageClass, check.Equals, "STANDARD")
|
||||
c.Assert(data.Contents[1].Owner.ID, check.Equals, "bcaf1ffd86a5fb16fd081034f")
|
||||
c.Assert(data.Contents[1].Owner.DisplayName, check.Equals, "webfile")
|
||||
}
|
||||
|
||||
func (s *S) TestListWithDelimiter(c *check.C) {
|
||||
testServer.Response(200, nil, GetListResultDump2)
|
||||
|
||||
b := s.s3.Bucket("quotes")
|
||||
|
||||
data, err := b.List("photos/2006/", "/", "some-marker", 1000)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(req.Method, check.Equals, "GET")
|
||||
c.Assert(req.URL.Path, check.Equals, "/quotes/")
|
||||
c.Assert(req.Header["Date"], check.Not(check.Equals), "")
|
||||
c.Assert(req.Form["prefix"], check.DeepEquals, []string{"photos/2006/"})
|
||||
c.Assert(req.Form["delimiter"], check.DeepEquals, []string{"/"})
|
||||
c.Assert(req.Form["marker"], check.DeepEquals, []string{"some-marker"})
|
||||
c.Assert(req.Form["max-keys"], check.DeepEquals, []string{"1000"})
|
||||
|
||||
c.Assert(data.Name, check.Equals, "example-bucket")
|
||||
c.Assert(data.Prefix, check.Equals, "photos/2006/")
|
||||
c.Assert(data.Delimiter, check.Equals, "/")
|
||||
c.Assert(data.Marker, check.Equals, "some-marker")
|
||||
c.Assert(data.IsTruncated, check.Equals, false)
|
||||
c.Assert(len(data.Contents), check.Equals, 0)
|
||||
c.Assert(data.CommonPrefixes, check.DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"})
|
||||
}
|
||||
|
||||
func (s *S) TestExists(c *check.C) {
|
||||
testServer.Response(200, nil, "")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
result, err := b.Exists("name")
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
|
||||
c.Assert(req.Method, check.Equals, "HEAD")
|
||||
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(result, check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *S) TestExistsNotFound404(c *check.C) {
|
||||
testServer.Response(404, nil, "")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
result, err := b.Exists("name")
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
|
||||
c.Assert(req.Method, check.Equals, "HEAD")
|
||||
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(result, check.Equals, false)
|
||||
}
|
||||
|
||||
func (s *S) TestExistsNotFound403(c *check.C) {
|
||||
testServer.Response(403, nil, "")
|
||||
|
||||
b := s.s3.Bucket("bucket")
|
||||
result, err := b.Exists("name")
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
|
||||
c.Assert(req.Method, check.Equals, "HEAD")
|
||||
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(result, check.Equals, false)
|
||||
}
|
||||
|
||||
func (s *S) TestGetService(c *check.C) {
|
||||
testServer.Response(200, nil, GetServiceDump)
|
||||
|
||||
expected := s3.GetServiceResp{
|
||||
Owner: s3.Owner{
|
||||
ID: "bcaf1ffd86f461ca5fb16fd081034f",
|
||||
DisplayName: "webfile",
|
||||
},
|
||||
Buckets: []s3.BucketInfo{
|
||||
s3.BucketInfo{
|
||||
Name: "quotes",
|
||||
CreationDate: "2006-02-03T16:45:09.000Z",
|
||||
},
|
||||
s3.BucketInfo{
|
||||
Name: "samples",
|
||||
CreationDate: "2006-02-03T16:41:58.000Z",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
received, err := s.s3.GetService()
|
||||
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(*received, check.DeepEquals, expected)
|
||||
}
|
||||
|
||||
func (s *S) TestLocation(c *check.C) {
|
||||
testServer.Response(200, nil, GetLocationUsStandard)
|
||||
expectedUsStandard := "us-east-1"
|
||||
|
||||
bucketUsStandard := s.s3.Bucket("us-east-1")
|
||||
resultUsStandard, err := bucketUsStandard.Location()
|
||||
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(resultUsStandard, check.Equals, expectedUsStandard)
|
||||
|
||||
testServer.Response(200, nil, GetLocationUsWest1)
|
||||
expectedUsWest1 := "us-west-1"
|
||||
|
||||
bucketUsWest1 := s.s3.Bucket("us-west-1")
|
||||
resultUsWest1, err := bucketUsWest1.Location()
|
||||
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(resultUsWest1, check.Equals, expectedUsWest1)
|
||||
}
|
||||
|
||||
func (s *S) TestSupportRadosGW(c *check.C) {
|
||||
testServer.Response(200, nil, "content")
|
||||
s.s3.Region.Name = "generic"
|
||||
b := s.s3.Bucket("bucket")
|
||||
_, err := b.Get("rgw")
|
||||
|
||||
req := testServer.WaitRequest()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(req.RequestURI, check.Equals, "/bucket/rgw")
|
||||
}
|
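The suite above pins retry behaviour in SetUpTest through s3.SetAttemptStrategy; a minimal sketch of the same knob on its own, assuming the vendored aws and s3 packages:

// Sketch: shorten retries for a fast test run, then restore the package default.
attempts := aws.AttemptStrategy{
	Total: 300 * time.Millisecond, // give up after 300ms overall
	Delay: 100 * time.Millisecond, // wait 100ms between attempts
}
s3.SetAttemptStrategy(&attempts)
defer s3.SetAttemptStrategy(nil) // nil reverts to the default strategy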
603
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3i_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,603 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"github.com/AdRoll/goamz/aws"
|
||||
"github.com/AdRoll/goamz/s3"
|
||||
"github.com/AdRoll/goamz/testutil"
|
||||
"gopkg.in/check.v1"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AmazonServer represents an Amazon S3 server.
|
||||
type AmazonServer struct {
|
||||
auth aws.Auth
|
||||
}
|
||||
|
||||
func (s *AmazonServer) SetUp(c *check.C) {
|
||||
auth, err := aws.EnvAuth()
|
||||
if err != nil {
|
||||
c.Fatal(err.Error())
|
||||
}
|
||||
s.auth = auth
|
||||
}
|
||||
|
||||
var _ = check.Suite(&AmazonClientSuite{Region: aws.USEast})
|
||||
var _ = check.Suite(&AmazonClientSuite{Region: aws.EUWest})
|
||||
var _ = check.Suite(&AmazonDomainClientSuite{Region: aws.USEast})
|
||||
|
||||
// AmazonClientSuite tests the client against a live S3 server.
|
||||
type AmazonClientSuite struct {
|
||||
aws.Region
|
||||
srv AmazonServer
|
||||
ClientTests
|
||||
}
|
||||
|
||||
func (s *AmazonClientSuite) SetUpSuite(c *check.C) {
|
||||
if !testutil.Amazon {
|
||||
c.Skip("live tests against AWS disabled (no -amazon)")
|
||||
}
|
||||
s.srv.SetUp(c)
|
||||
s.s3 = s3.New(s.srv.auth, s.Region)
|
||||
// In case tests were interrupted in the middle before.
|
||||
s.ClientTests.Cleanup()
|
||||
}
|
||||
|
||||
func (s *AmazonClientSuite) TearDownTest(c *check.C) {
|
||||
s.ClientTests.Cleanup()
|
||||
}
|
||||
|
||||
// AmazonDomainClientSuite tests the client against a live S3
|
||||
// server using bucket names in the endpoint domain name rather
|
||||
// than the request path.
|
||||
type AmazonDomainClientSuite struct {
|
||||
aws.Region
|
||||
srv AmazonServer
|
||||
ClientTests
|
||||
}
|
||||
|
||||
func (s *AmazonDomainClientSuite) SetUpSuite(c *check.C) {
|
||||
if !testutil.Amazon {
|
||||
c.Skip("live tests against AWS disabled (no -amazon)")
|
||||
}
|
||||
s.srv.SetUp(c)
|
||||
region := s.Region
|
||||
region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
|
||||
s.s3 = s3.New(s.srv.auth, region)
|
||||
s.ClientTests.Cleanup()
|
||||
}
|
||||
|
||||
func (s *AmazonDomainClientSuite) TearDownTest(c *check.C) {
|
||||
s.ClientTests.Cleanup()
|
||||
}
|
||||
|
||||
// ClientTests defines integration tests designed to test the client.
|
||||
// It is not used as a test suite in itself, but embedded within
|
||||
// another type.
|
||||
type ClientTests struct {
|
||||
s3 *s3.S3
|
||||
authIsBroken bool
|
||||
}
|
||||
|
||||
func (s *ClientTests) Cleanup() {
|
||||
killBucket(testBucket(s.s3))
|
||||
}
|
||||
|
||||
func testBucket(s *s3.S3) *s3.Bucket {
|
||||
// Watch out! If this function is corrupted and made to match with something
|
||||
// people own, killBucket will happily remove *everything* inside the bucket.
|
||||
key := s.Auth.AccessKey
|
||||
if len(key) >= 8 {
|
||||
key = s.Auth.AccessKey[:8]
|
||||
}
|
||||
return s.Bucket(fmt.Sprintf("goamz-%s-%s", s.Region.Name, key))
|
||||
}
|
||||
|
||||
var attempts = aws.AttemptStrategy{
|
||||
Min: 5,
|
||||
Total: 20 * time.Second,
|
||||
Delay: 100 * time.Millisecond,
|
||||
}
|
||||
|
||||
func killBucket(b *s3.Bucket) {
|
||||
var err error
|
||||
for attempt := attempts.Start(); attempt.Next(); {
|
||||
err = b.DelBucket()
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
if _, ok := err.(*net.DNSError); ok {
|
||||
return
|
||||
}
|
||||
e, ok := err.(*s3.Error)
|
||||
if ok && e.Code == "NoSuchBucket" {
|
||||
return
|
||||
}
|
||||
if ok && e.Code == "BucketNotEmpty" {
|
||||
// Errors are ignored here. Just retry.
|
||||
resp, err := b.List("", "", "", 1000)
|
||||
if err == nil {
|
||||
for _, key := range resp.Contents {
|
||||
_ = b.Del(key.Key)
|
||||
}
|
||||
}
|
||||
multis, _, _ := b.ListMulti("", "")
|
||||
for _, m := range multis {
|
||||
_ = m.Abort()
|
||||
}
|
||||
}
|
||||
}
|
||||
message := "cannot delete test bucket"
|
||||
if err != nil {
|
||||
message += ": " + err.Error()
|
||||
}
|
||||
panic(message)
|
||||
}
|
||||
|
||||
func get(url string) ([]byte, error) {
|
||||
for attempt := attempts.Start(); attempt.Next(); {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
if attempt.HasNext() {
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
if attempt.HasNext() {
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return data, err
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (s *ClientTests) TestBasicFunctionality(c *check.C) {
|
||||
b := testBucket(s.s3)
|
||||
err := b.PutBucket(s3.PublicRead)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = b.Put("name", []byte("yo!"), "text/plain", s3.PublicRead, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
defer b.Del("name")
|
||||
|
||||
data, err := b.Get("name")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(string(data), check.Equals, "yo!")
|
||||
|
||||
data, err = get(b.URL("name"))
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(string(data), check.Equals, "yo!")
|
||||
|
||||
buf := bytes.NewBufferString("hey!")
|
||||
err = b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
defer b.Del("name2")
|
||||
|
||||
rc, err := b.GetReader("name2")
|
||||
c.Assert(err, check.IsNil)
|
||||
data, err = ioutil.ReadAll(rc)
|
||||
c.Check(err, check.IsNil)
|
||||
c.Check(string(data), check.Equals, "hey!")
|
||||
rc.Close()
|
||||
|
||||
data, err = get(b.SignedURL("name2", time.Now().Add(time.Hour)))
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(string(data), check.Equals, "hey!")
|
||||
|
||||
if !s.authIsBroken {
|
||||
data, err = get(b.SignedURL("name2", time.Now().Add(-time.Hour)))
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(string(data), check.Matches, "(?s).*AccessDenied.*")
|
||||
}
|
||||
|
||||
err = b.DelBucket()
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
s3err, ok := err.(*s3.Error)
|
||||
c.Assert(ok, check.Equals, true)
|
||||
c.Assert(s3err.Code, check.Equals, "BucketNotEmpty")
|
||||
c.Assert(s3err.BucketName, check.Equals, b.Name)
|
||||
c.Assert(s3err.Message, check.Equals, "The bucket you tried to delete is not empty")
|
||||
|
||||
err = b.Del("name")
|
||||
c.Assert(err, check.IsNil)
|
||||
err = b.Del("name2")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = b.DelBucket()
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func (s *ClientTests) TestGetNotFound(c *check.C) {
|
||||
b := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey)
|
||||
data, err := b.Get("non-existent")
|
||||
|
||||
s3err, _ := err.(*s3.Error)
|
||||
c.Assert(s3err, check.NotNil)
|
||||
c.Assert(s3err.StatusCode, check.Equals, 404)
|
||||
c.Assert(s3err.Code, check.Equals, "NoSuchBucket")
|
||||
c.Assert(s3err.Message, check.Equals, "The specified bucket does not exist")
|
||||
c.Assert(data, check.IsNil)
|
||||
}
|
||||
|
||||
// Communicate with all endpoints to see if they are alive.
|
||||
func (s *ClientTests) TestRegions(c *check.C) {
|
||||
errs := make(chan error, len(aws.Regions))
|
||||
for _, region := range aws.Regions {
|
||||
go func(r aws.Region) {
|
||||
s := s3.New(s.s3.Auth, r)
|
||||
b := s.Bucket("goamz-" + s.Auth.AccessKey)
|
||||
_, err := b.Get("non-existent")
|
||||
errs <- err
|
||||
}(region)
|
||||
}
|
||||
for _ = range aws.Regions {
|
||||
err := <-errs
|
||||
if err != nil {
|
||||
s3_err, ok := err.(*s3.Error)
|
||||
if ok {
|
||||
c.Check(s3_err.Code, check.Matches, "NoSuchBucket")
|
||||
} else if _, ok = err.(*net.DNSError); ok {
|
||||
// Okay as well.
|
||||
} else {
|
||||
c.Errorf("Non-S3 error: %s", err)
|
||||
}
|
||||
} else {
|
||||
c.Errorf("Test should have errored but it seems to have succeeded")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var objectNames = []string{
|
||||
"index.html",
|
||||
"index2.html",
|
||||
"photos/2006/February/sample2.jpg",
|
||||
"photos/2006/February/sample3.jpg",
|
||||
"photos/2006/February/sample4.jpg",
|
||||
"photos/2006/January/sample.jpg",
|
||||
"test/bar",
|
||||
"test/foo",
|
||||
}
|
||||
|
||||
func keys(names ...string) []s3.Key {
|
||||
ks := make([]s3.Key, len(names))
|
||||
for i, name := range names {
|
||||
ks[i].Key = name
|
||||
}
|
||||
return ks
|
||||
}
|
||||
|
||||
// As the ListResp specifies all the parameters to the
|
||||
// request too, we use it to specify request parameters
|
||||
// and expected results. The Contents field is
|
||||
// used only for the key names inside it.
|
||||
var listTests = []s3.ListResp{
|
||||
// normal list.
|
||||
{
|
||||
Contents: keys(objectNames...),
|
||||
}, {
|
||||
Marker: objectNames[0],
|
||||
Contents: keys(objectNames[1:]...),
|
||||
}, {
|
||||
Marker: objectNames[0] + "a",
|
||||
Contents: keys(objectNames[1:]...),
|
||||
}, {
|
||||
Marker: "z",
|
||||
},
|
||||
|
||||
// limited results.
|
||||
{
|
||||
MaxKeys: 2,
|
||||
Contents: keys(objectNames[0:2]...),
|
||||
IsTruncated: true,
|
||||
}, {
|
||||
MaxKeys: 2,
|
||||
Marker: objectNames[0],
|
||||
Contents: keys(objectNames[1:3]...),
|
||||
IsTruncated: true,
|
||||
}, {
|
||||
MaxKeys: 2,
|
||||
Marker: objectNames[len(objectNames)-2],
|
||||
Contents: keys(objectNames[len(objectNames)-1:]...),
|
||||
},
|
||||
|
||||
// with delimiter
|
||||
{
|
||||
Delimiter: "/",
|
||||
CommonPrefixes: []string{"photos/", "test/"},
|
||||
Contents: keys("index.html", "index2.html"),
|
||||
}, {
|
||||
Delimiter: "/",
|
||||
Prefix: "photos/2006/",
|
||||
CommonPrefixes: []string{"photos/2006/February/", "photos/2006/January/"},
|
||||
}, {
|
||||
Delimiter: "/",
|
||||
Prefix: "t",
|
||||
CommonPrefixes: []string{"test/"},
|
||||
}, {
|
||||
Delimiter: "/",
|
||||
MaxKeys: 1,
|
||||
Contents: keys("index.html"),
|
||||
IsTruncated: true,
|
||||
}, {
|
||||
Delimiter: "/",
|
||||
MaxKeys: 1,
|
||||
Marker: "index2.html",
|
||||
CommonPrefixes: []string{"photos/"},
|
||||
IsTruncated: true,
|
||||
}, {
|
||||
Delimiter: "/",
|
||||
MaxKeys: 1,
|
||||
Marker: "photos/",
|
||||
CommonPrefixes: []string{"test/"},
|
||||
IsTruncated: false,
|
||||
}, {
|
||||
Delimiter: "Feb",
|
||||
CommonPrefixes: []string{"photos/2006/Feb"},
|
||||
Contents: keys("index.html", "index2.html", "photos/2006/January/sample.jpg", "test/bar", "test/foo"),
|
||||
},
|
||||
}
|
||||
|
||||
func (s *ClientTests) TestDoublePutBucket(c *check.C) {
|
||||
b := testBucket(s.s3)
|
||||
err := b.PutBucket(s3.PublicRead)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = b.PutBucket(s3.PublicRead)
|
||||
if err != nil {
|
||||
c.Assert(err, check.FitsTypeOf, new(s3.Error))
|
||||
c.Assert(err.(*s3.Error).Code, check.Equals, "BucketAlreadyOwnedByYou")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ClientTests) TestBucketList(c *check.C) {
|
||||
b := testBucket(s.s3)
|
||||
err := b.PutBucket(s3.Private)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
objData := make(map[string][]byte)
|
||||
for i, path := range objectNames {
|
||||
data := []byte(strings.Repeat("a", i))
|
||||
err := b.Put(path, data, "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
defer b.Del(path)
|
||||
objData[path] = data
|
||||
}
|
||||
|
||||
for i, t := range listTests {
|
||||
c.Logf("test %d", i)
|
||||
resp, err := b.List(t.Prefix, t.Delimiter, t.Marker, t.MaxKeys)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Check(resp.Name, check.Equals, b.Name)
|
||||
c.Check(resp.Delimiter, check.Equals, t.Delimiter)
|
||||
c.Check(resp.IsTruncated, check.Equals, t.IsTruncated)
|
||||
c.Check(resp.CommonPrefixes, check.DeepEquals, t.CommonPrefixes)
|
||||
checkContents(c, resp.Contents, objData, t.Contents)
|
||||
}
|
||||
}
|
||||
|
||||
func etag(data []byte) string {
|
||||
sum := md5.New()
|
||||
sum.Write(data)
|
||||
return fmt.Sprintf(`"%x"`, sum.Sum(nil))
|
||||
}
|
||||
|
||||
func checkContents(c *check.C, contents []s3.Key, data map[string][]byte, expected []s3.Key) {
|
||||
c.Assert(contents, check.HasLen, len(expected))
|
||||
for i, k := range contents {
|
||||
c.Check(k.Key, check.Equals, expected[i].Key)
|
||||
// TODO mtime
|
||||
c.Check(k.Size, check.Equals, int64(len(data[k.Key])))
|
||||
c.Check(k.ETag, check.Equals, etag(data[k.Key]))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ClientTests) TestMultiInitPutList(c *check.C) {
|
||||
b := testBucket(s.s3)
|
||||
err := b.PutBucket(s3.Private)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(multi.UploadId, check.Matches, ".+")
|
||||
defer multi.Abort()
|
||||
|
||||
var sent []s3.Part
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
p, err := multi.PutPart(i+1, strings.NewReader(fmt.Sprintf("<part %d>", i+1)))
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(p.N, check.Equals, i+1)
|
||||
c.Assert(p.Size, check.Equals, int64(8))
|
||||
c.Assert(p.ETag, check.Matches, ".+")
|
||||
sent = append(sent, p)
|
||||
}
|
||||
|
||||
s3.SetListPartsMax(2)
|
||||
|
||||
parts, err := multi.ListParts()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(parts, check.HasLen, len(sent))
|
||||
for i := range parts {
|
||||
c.Assert(parts[i].N, check.Equals, sent[i].N)
|
||||
c.Assert(parts[i].Size, check.Equals, sent[i].Size)
|
||||
c.Assert(parts[i].ETag, check.Equals, sent[i].ETag)
|
||||
}
|
||||
|
||||
err = multi.Complete(parts)
|
||||
s3err, failed := err.(*s3.Error)
|
||||
c.Assert(failed, check.Equals, true)
|
||||
c.Assert(s3err.Code, check.Equals, "EntityTooSmall")
|
||||
|
||||
err = multi.Abort()
|
||||
c.Assert(err, check.IsNil)
|
||||
_, err = multi.ListParts()
|
||||
s3err, ok := err.(*s3.Error)
|
||||
c.Assert(ok, check.Equals, true)
|
||||
c.Assert(s3err.Code, check.Equals, "NoSuchUpload")
|
||||
}
|
||||
|
||||
// This may take a minute or more due to the minimum size accepted by S3
|
||||
// on multipart upload parts.
|
||||
func (s *ClientTests) TestMultiComplete(c *check.C) {
|
||||
b := testBucket(s.s3)
|
||||
err := b.PutBucket(s3.Private)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
contentType := "text/plain"
|
||||
meta := make(map[string][]string)
|
||||
meta["X-Amz-Meta-TestField"] = []string{"testValue"}
|
||||
options := s3.Options{ContentEncoding: "identity", ContentDisposition: "inline", Meta: meta}
|
||||
multi, err := b.InitMulti("multi", contentType, s3.Private, options)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(multi.UploadId, check.Matches, ".+")
|
||||
defer multi.Abort()
|
||||
|
||||
// Minimum size S3 accepts for all but the last part is 5MB.
|
||||
data1 := make([]byte, 5*1024*1024)
|
||||
data2 := []byte("<part 2>")
|
||||
|
||||
part1, err := multi.PutPart(1, bytes.NewReader(data1))
|
||||
c.Assert(err, check.IsNil)
|
||||
part2, err := multi.PutPart(2, bytes.NewReader(data2))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Purposefully reversed. The order requirement must be handled.
|
||||
err = multi.Complete([]s3.Part{part2, part1})
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
data, err := b.Get("multi")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
c.Assert(len(data), check.Equals, len(data1)+len(data2))
|
||||
for i := range data1 {
|
||||
if data[i] != data1[i] {
|
||||
c.Fatalf("uploaded object at byte %d: want %d, got %d", i, data1[i], data[i])
|
||||
}
|
||||
}
|
||||
c.Assert(string(data[len(data1):]), check.Equals, string(data2))
|
||||
|
||||
resp, err := b.GetResponse("multi")
|
||||
c.Assert(resp.Header.Get("Content-Type"), check.Equals, contentType)
|
||||
c.Assert(resp.Header.Get("x-amz-acl"), check.Equals, s3.Private)
|
||||
c.Assert(resp.Header.Get("Content-MD5"), check.Equals, options.ContentMD5)
|
||||
c.Assert(resp.Header.Get("Content-Encoding"), check.Equals, options.ContentEncoding)
|
||||
c.Assert(resp.Header.Get("Content-Disposition"), check.Equals, options.ContentDisposition)
|
||||
for k, values := range meta {
|
||||
c.Assert(resp.Header.Get(k), check.Equals, strings.Join(values, ","))
|
||||
}
|
||||
}
|
||||
|
||||
type multiList []*s3.Multi
|
||||
|
||||
func (l multiList) Len() int { return len(l) }
|
||||
func (l multiList) Less(i, j int) bool { return l[i].Key < l[j].Key }
|
||||
func (l multiList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||
|
||||
func (s *ClientTests) TestListMulti(c *check.C) {
|
||||
b := testBucket(s.s3)
|
||||
err := b.PutBucket(s3.Private)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Ensure an empty state before testing its behavior.
|
||||
multis, _, err := b.ListMulti("", "")
|
||||
for _, m := range multis {
|
||||
err := m.Abort()
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
keys := []string{
|
||||
"a/multi2",
|
||||
"a/multi3",
|
||||
"b/multi4",
|
||||
"multi1",
|
||||
}
|
||||
for _, key := range keys {
|
||||
m, err := b.InitMulti(key, "", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
defer m.Abort()
|
||||
}
|
||||
|
||||
// Amazon's implementation of the multiple-request listing for
|
||||
// multipart uploads in progress seems broken in multiple ways.
|
||||
// (next tokens are not provided, etc).
|
||||
//s3.SetListMultiMax(2)
|
||||
|
||||
multis, prefixes, err := b.ListMulti("", "")
|
||||
c.Assert(err, check.IsNil)
|
||||
for attempt := attempts.Start(); attempt.Next() && len(multis) < len(keys); {
|
||||
multis, prefixes, err = b.ListMulti("", "")
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
sort.Sort(multiList(multis))
|
||||
c.Assert(prefixes, check.IsNil)
|
||||
var gotKeys []string
|
||||
for _, m := range multis {
|
||||
gotKeys = append(gotKeys, m.Key)
|
||||
}
|
||||
c.Assert(gotKeys, check.DeepEquals, keys)
|
||||
for _, m := range multis {
|
||||
c.Assert(m.Bucket, check.Equals, b)
|
||||
c.Assert(m.UploadId, check.Matches, ".+")
|
||||
}
|
||||
|
||||
multis, prefixes, err = b.ListMulti("", "/")
|
||||
for attempt := attempts.Start(); attempt.Next() && len(prefixes) < 2; {
|
||||
multis, prefixes, err = b.ListMulti("", "/")
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(prefixes, check.DeepEquals, []string{"a/", "b/"})
|
||||
c.Assert(multis, check.HasLen, 1)
|
||||
c.Assert(multis[0].Bucket, check.Equals, b)
|
||||
c.Assert(multis[0].Key, check.Equals, "multi1")
|
||||
c.Assert(multis[0].UploadId, check.Matches, ".+")
|
||||
|
||||
for attempt := attempts.Start(); attempt.Next() && len(multis) < 2; {
|
||||
multis, prefixes, err = b.ListMulti("", "")
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
multis, prefixes, err = b.ListMulti("a/", "/")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(prefixes, check.IsNil)
|
||||
c.Assert(multis, check.HasLen, 2)
|
||||
c.Assert(multis[0].Bucket, check.Equals, b)
|
||||
c.Assert(multis[0].Key, check.Equals, "a/multi2")
|
||||
c.Assert(multis[0].UploadId, check.Matches, ".+")
|
||||
c.Assert(multis[1].Bucket, check.Equals, b)
|
||||
c.Assert(multis[1].Key, check.Equals, "a/multi3")
|
||||
c.Assert(multis[1].UploadId, check.Matches, ".+")
|
||||
}
|
||||
|
||||
func (s *ClientTests) TestMultiPutAllZeroLength(c *check.C) {
|
||||
b := testBucket(s.s3)
|
||||
err := b.PutBucket(s3.Private)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
|
||||
c.Assert(err, check.IsNil)
|
||||
defer multi.Abort()
|
||||
|
||||
// This tests an edge case. Amazon requires at least one
|
||||
// part for multipart uploads to work, even if the part is empty.
|
||||
parts, err := multi.PutAll(strings.NewReader(""), 5*1024*1024)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(parts, check.HasLen, 1)
|
||||
c.Assert(parts[0].Size, check.Equals, int64(0))
|
||||
c.Assert(parts[0].ETag, check.Equals, `"d41d8cd98f00b204e9800998ecf8427e"`)
|
||||
|
||||
err = multi.Complete(parts)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
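killBucket and the get helper above both drive retries through the aws.AttemptStrategy iterator; a minimal sketch of that loop, where doSomething is a hypothetical stand-in for DelBucket or http.Get:

// Sketch of the attempts.Start()/Next() retry idiom used by killBucket and get above.
var err error
for attempt := attempts.Start(); attempt.Next(); {
	err = doSomething() // hypothetical fallible operation
	if err == nil {
		break // success: stop retrying
	}
	// otherwise loop again until the strategy's budget is spent
}
if err != nil {
	panic("operation did not succeed within the attempt strategy: " + err.Error())
}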
87
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3t_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,87 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"github.com/AdRoll/goamz/aws"
|
||||
"github.com/AdRoll/goamz/s3"
|
||||
"github.com/AdRoll/goamz/s3/s3test"
|
||||
"github.com/AdRoll/goamz/testutil"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
type LocalServer struct {
|
||||
auth aws.Auth
|
||||
region aws.Region
|
||||
srv *s3test.Server
|
||||
config *s3test.Config
|
||||
}
|
||||
|
||||
func (s *LocalServer) SetUp(c *check.C) {
|
||||
srv, err := s3test.NewServer(s.config)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(srv, check.NotNil)
|
||||
|
||||
s.srv = srv
|
||||
s.region = aws.Region{
|
||||
Name: "faux-region-1",
|
||||
S3Endpoint: srv.URL(),
|
||||
S3LocationConstraint: true, // s3test server requires a LocationConstraint
|
||||
}
|
||||
}
|
||||
|
||||
// LocalServerSuite defines tests that will run
|
||||
// against the local s3test server. It includes
|
||||
// selected tests from ClientTests;
|
||||
// when the s3test functionality is sufficient, it should
|
||||
// include all of them, and ClientTests can be simply embedded.
|
||||
type LocalServerSuite struct {
|
||||
srv LocalServer
|
||||
clientTests ClientTests
|
||||
}
|
||||
|
||||
var (
|
||||
// run tests twice, once in us-east-1 mode, once not.
|
||||
_ = check.Suite(&LocalServerSuite{})
|
||||
_ = check.Suite(&LocalServerSuite{
|
||||
srv: LocalServer{
|
||||
config: &s3test.Config{
|
||||
Send409Conflict: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
)
|
||||
|
||||
func (s *LocalServerSuite) SetUpSuite(c *check.C) {
|
||||
s.srv.SetUp(c)
|
||||
s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)
|
||||
|
||||
// TODO Sadly the fake server ignores auth completely right now. :-(
|
||||
s.clientTests.authIsBroken = true
|
||||
s.clientTests.Cleanup()
|
||||
}
|
||||
|
||||
func (s *LocalServerSuite) TearDownTest(c *check.C) {
|
||||
s.clientTests.Cleanup()
|
||||
}
|
||||
|
||||
func (s *LocalServerSuite) TestBasicFunctionality(c *check.C) {
|
||||
s.clientTests.TestBasicFunctionality(c)
|
||||
}
|
||||
|
||||
func (s *LocalServerSuite) TestGetNotFound(c *check.C) {
|
||||
s.clientTests.TestGetNotFound(c)
|
||||
}
|
||||
|
||||
func (s *LocalServerSuite) TestBucketList(c *check.C) {
|
||||
s.clientTests.TestBucketList(c)
|
||||
}
|
||||
|
||||
func (s *LocalServerSuite) TestDoublePutBucket(c *check.C) {
|
||||
s.clientTests.TestDoublePutBucket(c)
|
||||
}
|
||||
|
||||
func (s *LocalServerSuite) TestMultiComplete(c *check.C) {
|
||||
if !testutil.Amazon {
|
||||
c.Skip("live tests against AWS disabled (no -amazon)")
|
||||
}
|
||||
s.clientTests.TestMultiComplete(c)
|
||||
}
|
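LocalServer.SetUp above shows how the fake server is wired into the client through a faux region; a condensed sketch of that wiring outside the suite, using only identifiers that appear in s3t_test.go and in server.go below:

// Sketch: start the in-memory S3 server and point a goamz client at it.
srv, err := s3test.NewServer(nil) // nil Config: default (us-east-1 style) PUT semantics
if err != nil {
	panic(err)
}
defer srv.Quit()

region := aws.Region{
	Name:                 "faux-region-1",
	S3Endpoint:           srv.URL(),
	S3LocationConstraint: true, // the s3test server requires a LocationConstraint
}
client := s3.New(aws.Auth{AccessKey: "abc", SecretKey: "123"}, region)
_ = client.Bucket("bucket") // ready for PutBucket, Put, Get, ...

Passing &s3test.Config{Send409Conflict: true} instead of nil reproduces the non-us-east-1 behaviour exercised by the second LocalServerSuite above.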
966
Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3test/server.go
generated
vendored
Normal file
|
@@ -0,0 +1,966 @@
|
|||
package s3test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"github.com/AdRoll/goamz/s3"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const debug = false
|
||||
|
||||
var rangePattern = regexp.MustCompile(`^bytes=([\d]*)-([\d]*)$`)
|
||||
|
||||
type s3Error struct {
|
||||
statusCode int
|
||||
XMLName struct{} `xml:"Error"`
|
||||
Code string
|
||||
Message string
|
||||
BucketName string
|
||||
RequestId string
|
||||
HostId string
|
||||
}
|
||||
|
||||
type action struct {
|
||||
srv *Server
|
||||
w http.ResponseWriter
|
||||
req *http.Request
|
||||
reqId string
|
||||
}
|
||||
|
||||
// Config controls the internal behaviour of the Server. A nil config is the default
|
||||
// and behaves as if all configurations assume their default behaviour. Once passed
|
||||
// to NewServer, the configuration must not be modified.
|
||||
type Config struct {
|
||||
// Send409Conflict controls how the Server will respond to calls to PUT on a
|
||||
// previously existing bucket. The default is false, and corresponds to the
|
||||
// us-east-1 s3 endpoint. Setting this value to true emulates the behaviour of
|
||||
// all other regions.
|
||||
// http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html
|
||||
Send409Conflict bool
|
||||
|
||||
// Address on which to listen. By default, a random port is assigned by the
|
||||
// operating system and the server listens on localhost.
|
||||
ListenAddress string
|
||||
}
|
||||
|
||||
func (c *Config) send409Conflict() bool {
|
||||
if c != nil {
|
||||
return c.Send409Conflict
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Server is a fake S3 server for testing purposes.
|
||||
// All of the data for the server is kept in memory.
|
||||
type Server struct {
|
||||
url string
|
||||
reqId int
|
||||
listener net.Listener
|
||||
mu sync.Mutex
|
||||
buckets map[string]*bucket
|
||||
config *Config
|
||||
}
|
||||
|
||||
type bucket struct {
|
||||
name string
|
||||
acl s3.ACL
|
||||
ctime time.Time
|
||||
objects map[string]*object
|
||||
multipartUploads map[string][]*multipartUploadPart
|
||||
multipartMeta map[string]http.Header
|
||||
}
|
||||
|
||||
type object struct {
|
||||
name string
|
||||
mtime time.Time
|
||||
meta http.Header // metadata to return with requests.
|
||||
checksum []byte // also held as Content-MD5 in meta.
|
||||
data []byte
|
||||
}
|
||||
|
||||
type multipartUploadPart struct {
|
||||
index uint
|
||||
data []byte
|
||||
etag string
|
||||
lastModified time.Time
|
||||
}
|
||||
|
||||
type multipartUploadPartByIndex []*multipartUploadPart
|
||||
|
||||
func (x multipartUploadPartByIndex) Len() int {
|
||||
return len(x)
|
||||
}
|
||||
|
||||
func (x multipartUploadPartByIndex) Swap(i, j int) {
|
||||
x[i], x[j] = x[j], x[i]
|
||||
}
|
||||
|
||||
func (x multipartUploadPartByIndex) Less(i, j int) bool {
|
||||
return x[i].index < x[j].index
|
||||
}
|
||||
|
||||
// A resource encapsulates the subject of an HTTP request.
|
||||
// The resource referred to may or may not exist
|
||||
// when the request is made.
|
||||
type resource interface {
|
||||
put(a *action) interface{}
|
||||
get(a *action) interface{}
|
||||
post(a *action) interface{}
|
||||
delete(a *action) interface{}
|
||||
}
|
||||
|
||||
func NewServer(config *Config) (*Server, error) {
|
||||
listenAddress := "localhost:0"
|
||||
|
||||
if config != nil && config.ListenAddress != "" {
|
||||
listenAddress = config.ListenAddress
|
||||
}
|
||||
|
||||
l, err := net.Listen("tcp", listenAddress)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot listen on localhost: %v", err)
|
||||
}
|
||||
srv := &Server{
|
||||
listener: l,
|
||||
url: "http://" + l.Addr().String(),
|
||||
buckets: make(map[string]*bucket),
|
||||
config: config,
|
||||
}
|
||||
go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
srv.serveHTTP(w, req)
|
||||
}))
|
||||
return srv, nil
|
||||
}
|
||||
|
||||
// Quit closes down the server.
|
||||
func (srv *Server) Quit() {
|
||||
srv.listener.Close()
|
||||
}
|
||||
|
||||
// URL returns a URL for the server.
|
||||
func (srv *Server) URL() string {
|
||||
return srv.url
|
||||
}
|
||||
|
||||
func fatalf(code int, codeStr string, errf string, a ...interface{}) {
|
||||
panic(&s3Error{
|
||||
statusCode: code,
|
||||
Code: codeStr,
|
||||
Message: fmt.Sprintf(errf, a...),
|
||||
})
|
||||
}
|
||||
|
||||
// serveHTTP serves the S3 protocol.
|
||||
func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
// ignore error from ParseForm as it's usually spurious.
|
||||
req.ParseForm()
|
||||
|
||||
srv.mu.Lock()
|
||||
defer srv.mu.Unlock()
|
||||
|
||||
if debug {
|
||||
log.Printf("s3test %q %q", req.Method, req.URL)
|
||||
}
|
||||
a := &action{
|
||||
srv: srv,
|
||||
w: w,
|
||||
req: req,
|
||||
reqId: fmt.Sprintf("%09X", srv.reqId),
|
||||
}
|
||||
srv.reqId++
|
||||
|
||||
var r resource
|
||||
defer func() {
|
||||
switch err := recover().(type) {
|
||||
case *s3Error:
|
||||
switch r := r.(type) {
|
||||
case objectResource:
|
||||
err.BucketName = r.bucket.name
|
||||
case bucketResource:
|
||||
err.BucketName = r.name
|
||||
}
|
||||
err.RequestId = a.reqId
|
||||
// TODO HostId
|
||||
w.Header().Set("Content-Type", "application/xml")
|
||||
w.WriteHeader(err.statusCode)
|
||||
xmlMarshal(w, err)
|
||||
case nil:
|
||||
default:
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
r = srv.resourceForURL(req.URL)
|
||||
|
||||
var resp interface{}
|
||||
switch req.Method {
|
||||
case "PUT":
|
||||
resp = r.put(a)
|
||||
case "GET", "HEAD":
|
||||
resp = r.get(a)
|
||||
case "DELETE":
|
||||
resp = r.delete(a)
|
||||
case "POST":
|
||||
resp = r.post(a)
|
||||
default:
|
||||
fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method)
|
||||
}
|
||||
if resp != nil && req.Method != "HEAD" {
|
||||
xmlMarshal(w, resp)
|
||||
}
|
||||
}
|
||||
|
||||
// xmlMarshal is the same as xml.Marshal except that
|
||||
// it panics on error. The marshalling should not fail,
|
||||
// but we want to know if it does.
|
||||
func xmlMarshal(w io.Writer, x interface{}) {
|
||||
if err := xml.NewEncoder(w).Encode(x); err != nil {
|
||||
panic(fmt.Errorf("error marshalling %#v: %v", x, err))
|
||||
}
|
||||
}
|
||||
|
||||
// In a fully implemented test server, each of these would have
|
||||
// its own resource type.
|
||||
var unimplementedBucketResourceNames = map[string]bool{
|
||||
"acl": true,
|
||||
"lifecycle": true,
|
||||
"policy": true,
|
||||
"location": true,
|
||||
"logging": true,
|
||||
"notification": true,
|
||||
"versions": true,
|
||||
"requestPayment": true,
|
||||
"versioning": true,
|
||||
"website": true,
|
||||
"uploads": true,
|
||||
}
|
||||
|
||||
var unimplementedObjectResourceNames = map[string]bool{
|
||||
"acl": true,
|
||||
"torrent": true,
|
||||
}
|
||||
|
||||
var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?")
|
||||
|
||||
// resourceForURL returns a resource object for the given URL.
|
||||
func (srv *Server) resourceForURL(u *url.URL) (r resource) {
|
||||
m := pathRegexp.FindStringSubmatch(u.Path)
|
||||
if m == nil {
|
||||
fatalf(404, "InvalidURI", "Couldn't parse the specified URI")
|
||||
}
|
||||
bucketName := m[2]
|
||||
objectName := m[4]
|
||||
if bucketName == "" {
|
||||
return nullResource{} // root
|
||||
}
|
||||
b := bucketResource{
|
||||
name: bucketName,
|
||||
bucket: srv.buckets[bucketName],
|
||||
}
|
||||
q := u.Query()
|
||||
if objectName == "" {
|
||||
for name := range q {
|
||||
if unimplementedBucketResourceNames[name] {
|
||||
return nullResource{}
|
||||
}
|
||||
}
|
||||
return b
|
||||
|
||||
}
|
||||
if b.bucket == nil {
|
||||
fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
|
||||
}
|
||||
objr := objectResource{
|
||||
name: objectName,
|
||||
version: q.Get("versionId"),
|
||||
bucket: b.bucket,
|
||||
}
|
||||
for name := range q {
|
||||
if unimplementedObjectResourceNames[name] {
|
||||
return nullResource{}
|
||||
}
|
||||
}
|
||||
if obj := objr.bucket.objects[objr.name]; obj != nil {
|
||||
objr.object = obj
|
||||
}
|
||||
return objr
|
||||
}
|
||||
|
||||
// nullResource has error stubs for all resource methods.
|
||||
type nullResource struct{}
|
||||
|
||||
func notAllowed() interface{} {
|
||||
fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (nullResource) put(a *action) interface{} { return notAllowed() }
|
||||
func (nullResource) get(a *action) interface{} { return notAllowed() }
|
||||
func (nullResource) post(a *action) interface{} { return notAllowed() }
|
||||
func (nullResource) delete(a *action) interface{} { return notAllowed() }
|
||||
|
||||
const timeFormat = "2006-01-02T15:04:05.000Z"
|
||||
const lastModifiedTimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
|
||||
|
||||
type bucketResource struct {
|
||||
name string
|
||||
bucket *bucket // non-nil if the bucket already exists.
|
||||
}
|
||||
|
||||
// GET on a bucket lists the objects in the bucket.
|
||||
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html
|
||||
func (r bucketResource) get(a *action) interface{} {
|
||||
if r.bucket == nil {
|
||||
fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
|
||||
}
|
||||
delimiter := a.req.Form.Get("delimiter")
|
||||
marker := a.req.Form.Get("marker")
|
||||
maxKeys := -1
|
||||
if s := a.req.Form.Get("max-keys"); s != "" {
|
||||
i, err := strconv.Atoi(s)
|
||||
if err != nil || i < 0 {
|
||||
fatalf(400, "invalid value for max-keys: %q", s)
|
||||
}
|
||||
maxKeys = i
|
||||
}
|
||||
prefix := a.req.Form.Get("prefix")
|
||||
a.w.Header().Set("Content-Type", "application/xml")
|
||||
|
||||
if a.req.Method == "HEAD" {
|
||||
return nil
|
||||
}
|
||||
|
||||
var objs orderedObjects
|
||||
|
||||
// first get all matching objects and arrange them in alphabetical order.
|
||||
for name, obj := range r.bucket.objects {
|
||||
if strings.HasPrefix(name, prefix) {
|
||||
objs = append(objs, obj)
|
||||
}
|
||||
}
|
||||
sort.Sort(objs)
|
||||
|
||||
if maxKeys <= 0 {
|
||||
maxKeys = 1000
|
||||
}
|
||||
resp := &s3.ListResp{
|
||||
Name: r.bucket.name,
|
||||
Prefix: prefix,
|
||||
Delimiter: delimiter,
|
||||
Marker: marker,
|
||||
MaxKeys: maxKeys,
|
||||
}
|
||||
|
||||
var prefixes []string
|
||||
for _, obj := range objs {
|
||||
if !strings.HasPrefix(obj.name, prefix) {
|
||||
continue
|
||||
}
|
||||
name := obj.name
|
||||
isPrefix := false
|
||||
if delimiter != "" {
|
||||
if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 {
|
||||
name = obj.name[:len(prefix)+i+len(delimiter)]
|
||||
if prefixes != nil && prefixes[len(prefixes)-1] == name {
|
||||
continue
|
||||
}
|
||||
isPrefix = true
|
||||
}
|
||||
}
|
||||
if name <= marker {
|
||||
continue
|
||||
}
|
||||
if len(resp.Contents)+len(prefixes) >= maxKeys {
|
||||
resp.IsTruncated = true
|
||||
break
|
||||
}
|
||||
if isPrefix {
|
||||
prefixes = append(prefixes, name)
|
||||
} else {
|
||||
// Contents contains only keys not found in CommonPrefixes
|
||||
resp.Contents = append(resp.Contents, obj.s3Key())
|
||||
}
|
||||
}
|
||||
resp.CommonPrefixes = prefixes
|
||||
return resp
|
||||
}
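
The prefix/delimiter handling above is the subtle part of the listing. Below is a minimal standalone sketch that reproduces just that grouping (marker and max-keys handling omitted); it illustrates the loop above rather than replacing it:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// groupKeys mimics the grouping done by bucketResource.get: keys sharing
// everything up to (and including) the first delimiter after the prefix
// collapse into CommonPrefixes; the remaining keys are listed individually.
func groupKeys(keys []string, prefix, delimiter string) (contents, commonPrefixes []string) {
	sort.Strings(keys)
	for _, k := range keys {
		if !strings.HasPrefix(k, prefix) {
			continue
		}
		if delimiter != "" {
			if i := strings.Index(k[len(prefix):], delimiter); i >= 0 {
				p := k[:len(prefix)+i+len(delimiter)]
				if len(commonPrefixes) == 0 || commonPrefixes[len(commonPrefixes)-1] != p {
					commonPrefixes = append(commonPrefixes, p)
				}
				continue
			}
		}
		contents = append(contents, k)
	}
	return contents, commonPrefixes
}

func main() {
	keys := []string{"photos/2014/a.jpg", "photos/2015/b.jpg", "readme.txt"}
	c, p := groupKeys(keys, "", "/")
	fmt.Println(c, p) // [readme.txt] [photos/]
	c, p = groupKeys(keys, "photos/", "/")
	fmt.Println(c, p) // [] [photos/2014/ photos/2015/]
}
```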
|
||||
|
||||
// orderedObjects holds a slice of objects that can be sorted
|
||||
// by name.
|
||||
type orderedObjects []*object
|
||||
|
||||
func (s orderedObjects) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
func (s orderedObjects) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
func (s orderedObjects) Less(i, j int) bool {
|
||||
return s[i].name < s[j].name
|
||||
}
|
||||
|
||||
func (obj *object) s3Key() s3.Key {
|
||||
return s3.Key{
|
||||
Key: obj.name,
|
||||
LastModified: obj.mtime.UTC().Format(timeFormat),
|
||||
Size: int64(len(obj.data)),
|
||||
ETag: fmt.Sprintf(`"%x"`, obj.checksum),
|
||||
// TODO StorageClass
|
||||
// TODO Owner
|
||||
}
|
||||
}
|
||||
|
||||
// DELETE on a bucket deletes the bucket if it's not empty.
|
||||
func (r bucketResource) delete(a *action) interface{} {
|
||||
b := r.bucket
|
||||
if b == nil {
|
||||
fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
|
||||
}
|
||||
if len(b.objects) > 0 {
|
||||
fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty")
|
||||
}
|
||||
delete(a.srv.buckets, b.name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// PUT on a bucket creates the bucket.
|
||||
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
|
||||
func (r bucketResource) put(a *action) interface{} {
|
||||
var created bool
|
||||
if r.bucket == nil {
|
||||
if !validBucketName(r.name) {
|
||||
fatalf(400, "InvalidBucketName", "The specified bucket is not valid")
|
||||
}
|
||||
if loc := locationConstraint(a); loc == "" {
|
||||
fatalf(400, "InvalidRequets", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.")
|
||||
}
|
||||
// TODO validate acl
|
||||
r.bucket = &bucket{
|
||||
name: r.name,
|
||||
// TODO default acl
|
||||
objects: make(map[string]*object),
|
||||
multipartUploads: make(map[string][]*multipartUploadPart),
|
||||
multipartMeta: make(map[string]http.Header),
|
||||
}
|
||||
a.srv.buckets[r.name] = r.bucket
|
||||
created = true
|
||||
}
|
||||
if !created && a.srv.config.send409Conflict() {
|
||||
fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.")
|
||||
}
|
||||
r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl"))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r bucketResource) post(a *action) interface{} {
|
||||
if _, multiDel := a.req.URL.Query()["delete"]; multiDel {
|
||||
return r.multiDel(a)
|
||||
}
|
||||
|
||||
fatalf(400, "Method", "bucket operation not supported")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b bucketResource) multiDel(a *action) interface{} {
|
||||
type multiDelRequestObject struct {
|
||||
Key string
|
||||
VersionId string
|
||||
}
|
||||
|
||||
type multiDelRequest struct {
|
||||
Quiet bool
|
||||
Object []*multiDelRequestObject
|
||||
}
|
||||
|
||||
type multiDelDelete struct {
|
||||
XMLName struct{} `xml:"Deleted"`
|
||||
Key string
|
||||
}
|
||||
|
||||
type multiDelError struct {
|
||||
XMLName struct{} `xml:"Error"`
|
||||
Key string
|
||||
Code string
|
||||
Message string
|
||||
}
|
||||
|
||||
type multiDelResult struct {
|
||||
XMLName struct{} `xml:"DeleteResult"`
|
||||
Deleted []*multiDelDelete
|
||||
Error []*multiDelError
|
||||
}
|
||||
|
||||
req := &multiDelRequest{}
|
||||
|
||||
if err := xml.NewDecoder(a.req.Body).Decode(req); err != nil {
|
||||
fatalf(400, "InvalidRequest", err.Error())
|
||||
}
|
||||
|
||||
res := &multiDelResult{
|
||||
Deleted: []*multiDelDelete{},
|
||||
Error: []*multiDelError{},
|
||||
}
|
||||
|
||||
for _, o := range req.Object {
|
||||
if _, exists := b.bucket.objects[o.Key]; exists {
|
||||
delete(b.bucket.objects, o.Key)
|
||||
|
||||
res.Deleted = append(res.Deleted, &multiDelDelete{
|
||||
Key: o.Key,
|
||||
})
|
||||
} else {
|
||||
res.Error = append(res.Error, &multiDelError{
|
||||
Key: o.Key,
|
||||
Code: "AccessDenied",
|
||||
Message: "Access Denied",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// validBucketName returns whether name is a valid bucket name.
|
||||
// Here are the rules, from:
|
||||
// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
|
||||
//
|
||||
// Can contain lowercase letters, numbers, periods (.), underscores (_),
|
||||
// and dashes (-). You can use uppercase letters for buckets only in the
|
||||
// US Standard region.
|
||||
//
|
||||
// Must start with a number or letter
|
||||
//
|
||||
// Must be between 3 and 255 characters long
|
||||
//
|
||||
// There's one extra rule (must not be formatted as an IP address, e.g. 192.168.5.4),
|
||||
// but the real S3 server does not seem to check that rule, so we will not
|
||||
// check it either.
|
||||
//
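// Examples (illustrative): "my-bucket_1" and "logs.2015" are accepted;
// "ab" (too short), "My-Bucket" (uppercase) and "-leading-dash" are rejected.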
|
||||
func validBucketName(name string) bool {
|
||||
if len(name) < 3 || len(name) > 255 {
|
||||
return false
|
||||
}
|
||||
r := name[0]
|
||||
if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') {
|
||||
return false
|
||||
}
|
||||
for _, r := range name {
|
||||
switch {
|
||||
case r >= '0' && r <= '9':
|
||||
case r >= 'a' && r <= 'z':
|
||||
case r == '_' || r == '-':
|
||||
case r == '.':
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var responseParams = map[string]bool{
|
||||
"content-type": true,
|
||||
"content-language": true,
|
||||
"expires": true,
|
||||
"cache-control": true,
|
||||
"content-disposition": true,
|
||||
"content-encoding": true,
|
||||
}
|
||||
|
||||
type objectResource struct {
|
||||
name string
|
||||
version string
|
||||
bucket *bucket // always non-nil.
|
||||
object *object // may be nil.
|
||||
}
|
||||
|
||||
// GET on an object gets the contents of the object.
|
||||
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html
|
||||
func (objr objectResource) get(a *action) interface{} {
|
||||
obj := objr.object
|
||||
if obj == nil {
|
||||
fatalf(404, "NoSuchKey", "The specified key does not exist.")
|
||||
}
|
||||
h := a.w.Header()
|
||||
// add metadata
|
||||
for name, d := range obj.meta {
|
||||
h[name] = d
|
||||
}
|
||||
// override header values in response to request parameters.
|
||||
for name, vals := range a.req.Form {
|
||||
if strings.HasPrefix(name, "response-") {
|
||||
name = name[len("response-"):]
|
||||
if !responseParams[name] {
|
||||
continue
|
||||
}
|
||||
h.Set(name, vals[0])
|
||||
}
|
||||
}
|
||||
|
||||
data := obj.data
|
||||
status := http.StatusOK
|
||||
if r := a.req.Header.Get("Range"); r != "" {
|
||||
// s3 ignores invalid ranges
|
||||
if matches := rangePattern.FindStringSubmatch(r); len(matches) == 3 {
|
||||
var err error
|
||||
start := 0
|
||||
end := len(obj.data) - 1
|
||||
if matches[1] != "" {
|
||||
start, err = strconv.Atoi(matches[1])
|
||||
}
|
||||
if err == nil && matches[2] != "" {
|
||||
end, err = strconv.Atoi(matches[2])
|
||||
}
|
||||
if err == nil && start >= 0 && end >= start {
|
||||
if start >= len(obj.data) {
|
||||
fatalf(416, "InvalidRequest", "The requested range is not satisfiable")
|
||||
}
|
||||
if end > len(obj.data)-1 {
|
||||
end = len(obj.data) - 1
|
||||
}
|
||||
data = obj.data[start : end+1]
|
||||
status = http.StatusPartialContent
|
||||
h.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, len(obj.data)))
|
||||
}
|
||||
}
|
||||
}
|
||||
// TODO Last-Modified-Since
|
||||
// TODO If-Modified-Since
|
||||
// TODO If-Unmodified-Since
|
||||
// TODO If-Match
|
||||
// TODO If-None-Match
|
||||
// TODO Connection: close ??
|
||||
// TODO x-amz-request-id
|
||||
h.Set("Content-Length", fmt.Sprint(len(data)))
|
||||
h.Set("ETag", "\""+hex.EncodeToString(obj.checksum)+"\"")
|
||||
h.Set("Last-Modified", obj.mtime.Format(lastModifiedTimeFormat))
|
||||
|
||||
if status != http.StatusOK {
|
||||
a.w.WriteHeader(status)
|
||||
}
|
||||
|
||||
if a.req.Method == "HEAD" {
|
||||
return nil
|
||||
}
|
||||
// TODO avoid holding the lock when writing data.
|
||||
_, err := a.w.Write(data)
|
||||
if err != nil {
|
||||
// we can't do much except just log the fact.
|
||||
log.Printf("error writing data: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
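
A hedged sketch of exercising the Range handling above from a client. It assumes a running in-memory test server at `srvURL` holding an object of at least 100 bytes at the given key (both placeholders); the server does not verify request signatures, so a plain HTTP request is enough:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	srvURL := "http://127.0.0.1:4444" // address of a running test server (placeholder)

	req, err := http.NewRequest("GET", srvURL+"/mybucket/photos/puppy.jpg", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Range", "bytes=0-99") // ask for the first 100 bytes

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Expect 206 Partial Content and Content-Range "bytes 0-99/<size>"
	// when the object is at least 100 bytes long.
	fmt.Println(resp.StatusCode, resp.Header.Get("Content-Range"))
}
```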
|
||||
|
||||
var metaHeaders = map[string]bool{
|
||||
"Content-MD5": true,
|
||||
"x-amz-acl": true,
|
||||
"Content-Type": true,
|
||||
"Content-Encoding": true,
|
||||
"Content-Disposition": true,
|
||||
}
|
||||
|
||||
// PUT on an object creates the object.
|
||||
func (objr objectResource) put(a *action) interface{} {
|
||||
// TODO Cache-Control header
|
||||
// TODO Expires header
|
||||
// TODO x-amz-server-side-encryption
|
||||
// TODO x-amz-storage-class
|
||||
|
||||
uploadId := a.req.URL.Query().Get("uploadId")
|
||||
var partNumber uint
|
||||
|
||||
// Check that the upload ID is valid if this is a multipart upload
|
||||
if uploadId != "" {
|
||||
if _, ok := objr.bucket.multipartUploads[uploadId]; !ok {
|
||||
fatalf(404, "NoSuchUpload", "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.")
|
||||
}
|
||||
|
||||
partNumberStr := a.req.URL.Query().Get("partNumber")
|
||||
|
||||
if partNumberStr == "" {
|
||||
fatalf(400, "InvalidRequest", "Missing partNumber parameter")
|
||||
}
|
||||
|
||||
number, err := strconv.ParseUint(partNumberStr, 10, 32)
|
||||
|
||||
if err != nil {
|
||||
fatalf(400, "InvalidRequest", "partNumber is not a number")
|
||||
}
|
||||
|
||||
partNumber = uint(number)
|
||||
}
|
||||
|
||||
var expectHash []byte
|
||||
if c := a.req.Header.Get("Content-MD5"); c != "" {
|
||||
var err error
|
||||
expectHash, err = base64.StdEncoding.DecodeString(c)
|
||||
if err != nil || len(expectHash) != md5.Size {
|
||||
fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid")
|
||||
}
|
||||
}
|
||||
sum := md5.New()
|
||||
// TODO avoid holding lock while reading data.
|
||||
data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum))
|
||||
if err != nil {
|
||||
fatalf(400, "TODO", "read error")
|
||||
}
|
||||
gotHash := sum.Sum(nil)
|
||||
if expectHash != nil && !bytes.Equal(gotHash, expectHash) {
|
||||
fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received")
|
||||
}
|
||||
if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength {
|
||||
fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header")
|
||||
}
|
||||
|
||||
etag := fmt.Sprintf("\"%x\"", gotHash)
|
||||
|
||||
a.w.Header().Add("ETag", etag)
|
||||
|
||||
if uploadId == "" {
|
||||
// For traditional uploads
|
||||
|
||||
// TODO is this correct, or should we erase all previous metadata?
|
||||
obj := objr.object
|
||||
if obj == nil {
|
||||
obj = &object{
|
||||
name: objr.name,
|
||||
meta: make(http.Header),
|
||||
}
|
||||
}
|
||||
|
||||
// PUT request has been successful - save data and metadata
|
||||
for key, values := range a.req.Header {
|
||||
key = http.CanonicalHeaderKey(key)
|
||||
if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") {
|
||||
obj.meta[key] = values
|
||||
}
|
||||
}
|
||||
|
||||
if copySource := a.req.Header.Get("X-Amz-Copy-Source"); copySource != "" {
|
||||
idx := strings.IndexByte(copySource, '/')
|
||||
|
||||
if idx == -1 {
|
||||
fatalf(400, "InvalidRequest", "Wrongly formatted X-Amz-Copy-Source")
|
||||
}
|
||||
|
||||
sourceBucketName := copySource[0:idx]
|
||||
sourceKey := copySource[1+idx:]
|
||||
|
||||
sourceBucket := a.srv.buckets[sourceBucketName]
|
||||
|
||||
if sourceBucket == nil {
|
||||
fatalf(404, "NoSuchBucket", "The specified source bucket does not exist")
|
||||
}
|
||||
|
||||
sourceObject := sourceBucket.objects[sourceKey]
|
||||
|
||||
if sourceObject == nil {
|
||||
fatalf(404, "NoSuchKey", "The specified source key does not exist")
|
||||
}
|
||||
|
||||
obj.data = make([]byte, len(sourceObject.data))
|
||||
copy(obj.data, sourceObject.data)
|
||||
|
||||
obj.checksum = make([]byte, len(sourceObject.checksum))
|
||||
copy(obj.checksum, sourceObject.checksum)
|
||||
|
||||
obj.meta = make(http.Header, len(sourceObject.meta))
|
||||
|
||||
for k, v := range sourceObject.meta {
|
||||
obj.meta[k] = make([]string, len(v))
|
||||
copy(obj.meta[k], v)
|
||||
}
|
||||
} else {
|
||||
obj.data = data
|
||||
obj.checksum = gotHash
|
||||
}
|
||||
obj.mtime = time.Now()
|
||||
objr.bucket.objects[objr.name] = obj
|
||||
} else {
|
||||
// For multipart commit
|
||||
|
||||
parts := objr.bucket.multipartUploads[uploadId]
|
||||
part := &multipartUploadPart{
|
||||
index: partNumber,
|
||||
data: data,
|
||||
etag: etag,
|
||||
lastModified: time.Now(),
|
||||
}
|
||||
|
||||
objr.bucket.multipartUploads[uploadId] = append(parts, part)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
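
For completeness, a minimal sketch of how a client computes the `Content-MD5` header that the PUT handler above decodes and verifies; it is the base64-encoded (not hex-encoded) MD5 of the body:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	data := []byte("hello world")
	sum := md5.Sum(data)
	// base64.StdEncoding.DecodeString in the handler reverses exactly this encoding.
	fmt.Println("Content-MD5:", base64.StdEncoding.EncodeToString(sum[:]))
}
```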
|
||||
|
||||
func (objr objectResource) delete(a *action) interface{} {
|
||||
uploadId := a.req.URL.Query().Get("uploadId")
|
||||
|
||||
if uploadId == "" {
|
||||
// Traditional object delete
|
||||
delete(objr.bucket.objects, objr.name)
|
||||
} else {
|
||||
// Multipart commit abort
|
||||
_, ok := objr.bucket.multipartUploads[uploadId]
|
||||
|
||||
if !ok {
|
||||
fatalf(404, "NoSuchUpload", "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.")
|
||||
}
|
||||
|
||||
delete(objr.bucket.multipartUploads, uploadId)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (objr objectResource) post(a *action) interface{} {
|
||||
// Check if we're initializing a multipart upload
|
||||
if _, ok := a.req.URL.Query()["uploads"]; ok {
|
||||
type multipartInitResponse struct {
|
||||
XMLName struct{} `xml:"InitiateMultipartUploadResult"`
|
||||
Bucket string
|
||||
Key string
|
||||
UploadId string
|
||||
}
|
||||
|
||||
uploadId := strconv.FormatInt(rand.Int63(), 16)
|
||||
|
||||
objr.bucket.multipartUploads[uploadId] = []*multipartUploadPart{}
|
||||
objr.bucket.multipartMeta[uploadId] = make(http.Header)
|
||||
for key, values := range a.req.Header {
|
||||
key = http.CanonicalHeaderKey(key)
|
||||
if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") {
|
||||
objr.bucket.multipartMeta[uploadId][key] = values
|
||||
}
|
||||
}
|
||||
|
||||
return &multipartInitResponse{
|
||||
Bucket: objr.bucket.name,
|
||||
Key: objr.name,
|
||||
UploadId: uploadId,
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we're completing a multipart upload
|
||||
if uploadId := a.req.URL.Query().Get("uploadId"); uploadId != "" {
|
||||
type multipartCompleteRequestPart struct {
|
||||
XMLName struct{} `xml:"Part"`
|
||||
PartNumber uint
|
||||
ETag string
|
||||
}
|
||||
|
||||
type multipartCompleteRequest struct {
|
||||
XMLName struct{} `xml:"CompleteMultipartUpload"`
|
||||
Part []multipartCompleteRequestPart
|
||||
}
|
||||
|
||||
type multipartCompleteResponse struct {
|
||||
XMLName struct{} `xml:"CompleteMultipartUploadResult"`
|
||||
Location string
|
||||
Bucket string
|
||||
Key string
|
||||
ETag string
|
||||
}
|
||||
|
||||
parts, ok := objr.bucket.multipartUploads[uploadId]
|
||||
|
||||
if !ok {
|
||||
fatalf(404, "NoSuchUpload", "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.")
|
||||
}
|
||||
|
||||
req := &multipartCompleteRequest{}
|
||||
|
||||
if err := xml.NewDecoder(a.req.Body).Decode(req); err != nil {
|
||||
fatalf(400, "InvalidRequest", err.Error())
|
||||
}
|
||||
|
||||
if len(req.Part) != len(parts) {
|
||||
fatalf(400, "InvalidRequest", fmt.Sprintf("Number of parts does not match: expected %d, received %d", len(parts), len(req.Part)))
|
||||
}
|
||||
|
||||
sum := md5.New()
|
||||
data := &bytes.Buffer{}
|
||||
w := io.MultiWriter(sum, data)
|
||||
|
||||
sort.Sort(multipartUploadPartByIndex(parts))
|
||||
|
||||
for i, p := range parts {
|
||||
reqPart := req.Part[i]
|
||||
|
||||
if reqPart.PartNumber != p.index {
|
||||
fatalf(400, "InvalidRequest", "Bad part number")
|
||||
}
|
||||
|
||||
if reqPart.ETag != p.etag {
|
||||
fatalf(400, "InvalidRequest", fmt.Sprintf("Invalid etag for part %d", reqPart.PartNumber))
|
||||
}
|
||||
|
||||
w.Write(p.data)
|
||||
}
|
||||
|
||||
delete(objr.bucket.multipartUploads, uploadId)
|
||||
|
||||
obj := objr.object
|
||||
|
||||
if obj == nil {
|
||||
obj = &object{
|
||||
name: objr.name,
|
||||
meta: make(http.Header),
|
||||
}
|
||||
}
|
||||
|
||||
obj.data = data.Bytes()
|
||||
obj.checksum = sum.Sum(nil)
|
||||
obj.mtime = time.Now()
|
||||
objr.bucket.objects[objr.name] = obj
|
||||
obj.meta = objr.bucket.multipartMeta[uploadId]
|
||||
|
||||
objectLocation := fmt.Sprintf("http://%s/%s/%s", a.srv.listener.Addr().String(), objr.bucket.name, objr.name)
|
||||
|
||||
return &multipartCompleteResponse{
|
||||
Location: objectLocation,
|
||||
Bucket: objr.bucket.name,
|
||||
Key: objr.name,
|
||||
ETag: uploadId,
|
||||
}
|
||||
}
|
||||
|
||||
fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
|
||||
return nil
|
||||
}
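
Taken together, the handlers above implement the usual three-step multipart flow: `POST /bucket/key?uploads` returns an `UploadId`, each part is sent with `PUT /bucket/key?uploadId=...&partNumber=N` (keeping the returned ETags), and a final `POST /bucket/key?uploadId=...` with a `CompleteMultipartUpload` body stitches the parts together (`DELETE` with the same query aborts). A hedged sketch of that final request; the server address, upload ID and ETag values are placeholders:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Placeholders: the address of a running test server and the UploadId
	// returned by the earlier "POST /mybucket/big.bin?uploads" request.
	srvURL := "http://127.0.0.1:4444"
	uploadID := "abc123"

	// The ETags must be the quoted values returned by the per-part PUTs.
	body := `<CompleteMultipartUpload>
  <Part><PartNumber>1</PartNumber><ETag>"etag-of-part-1"</ETag></Part>
  <Part><PartNumber>2</PartNumber><ETag>"etag-of-part-2"</ETag></Part>
</CompleteMultipartUpload>`

	resp, err := http.Post(srvURL+"/mybucket/big.bin?uploadId="+uploadID, "text/xml", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```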
|
||||
|
||||
type CreateBucketConfiguration struct {
|
||||
LocationConstraint string
|
||||
}
|
||||
|
||||
// locationConstraint parses the <CreateBucketConfiguration /> request body (if present).
|
||||
// If there is no body, an empty string will be returned.
|
||||
func locationConstraint(a *action) string {
|
||||
var body bytes.Buffer
|
||||
if _, err := io.Copy(&body, a.req.Body); err != nil {
|
||||
fatalf(400, "InvalidRequest", err.Error())
|
||||
}
|
||||
if body.Len() == 0 {
|
||||
return ""
|
||||
}
|
||||
var loc CreateBucketConfiguration
|
||||
if err := xml.NewDecoder(&body).Decode(&loc); err != nil {
|
||||
fatalf(400, "InvalidRequest", err.Error())
|
||||
}
|
||||
return loc.LocationConstraint
|
||||
}
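
A minimal sketch of the request body that `locationConstraint` parses; the region value is only an example:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

type CreateBucketConfiguration struct {
	LocationConstraint string
}

func main() {
	body := `<CreateBucketConfiguration><LocationConstraint>us-west-2</LocationConstraint></CreateBucketConfiguration>`
	var loc CreateBucketConfiguration
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&loc); err != nil {
		panic(err)
	}
	fmt.Println(loc.LocationConstraint) // us-west-2
}
```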
|
120  Godeps/_workspace/src/github.com/AdRoll/goamz/s3/sign.go  generated vendored Normal file
@@ -0,0 +1,120 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"github.com/AdRoll/goamz/aws"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var b64 = base64.StdEncoding
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// S3 signing (http://goo.gl/G1LrK)
|
||||
|
||||
var s3ParamsToSign = map[string]bool{
|
||||
"acl": true,
|
||||
"location": true,
|
||||
"logging": true,
|
||||
"notification": true,
|
||||
"partNumber": true,
|
||||
"policy": true,
|
||||
"requestPayment": true,
|
||||
"torrent": true,
|
||||
"uploadId": true,
|
||||
"uploads": true,
|
||||
"versionId": true,
|
||||
"versioning": true,
|
||||
"versions": true,
|
||||
"response-content-type": true,
|
||||
"response-content-language": true,
|
||||
"response-expires": true,
|
||||
"response-cache-control": true,
|
||||
"response-content-disposition": true,
|
||||
"response-content-encoding": true,
|
||||
"website": true,
|
||||
"delete": true,
|
||||
}
|
||||
|
||||
func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) {
|
||||
var md5, ctype, date, xamz string
|
||||
var xamzDate bool
|
||||
var keys, sarray []string
|
||||
xheaders := make(map[string]string)
|
||||
for k, v := range headers {
|
||||
k = strings.ToLower(k)
|
||||
switch k {
|
||||
case "content-md5":
|
||||
md5 = v[0]
|
||||
case "content-type":
|
||||
ctype = v[0]
|
||||
case "date":
|
||||
if !xamzDate {
|
||||
date = v[0]
|
||||
}
|
||||
default:
|
||||
if strings.HasPrefix(k, "x-amz-") {
|
||||
keys = append(keys, k)
|
||||
xheaders[k] = strings.Join(v, ",")
|
||||
if k == "x-amz-date" {
|
||||
xamzDate = true
|
||||
date = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(keys) > 0 {
|
||||
sort.StringSlice(keys).Sort()
|
||||
for i := range keys {
|
||||
key := keys[i]
|
||||
value := xheaders[key]
|
||||
sarray = append(sarray, key+":"+value)
|
||||
}
|
||||
xamz = strings.Join(sarray, "\n") + "\n"
|
||||
}
|
||||
|
||||
expires := false
|
||||
if v, ok := params["Expires"]; ok {
|
||||
// Query string request authentication alternative.
|
||||
expires = true
|
||||
date = v[0]
|
||||
params["AWSAccessKeyId"] = []string{auth.AccessKey}
|
||||
}
|
||||
|
||||
sarray = sarray[0:0]
|
||||
for k, v := range params {
|
||||
if s3ParamsToSign[k] {
|
||||
for _, vi := range v {
|
||||
if vi == "" {
|
||||
sarray = append(sarray, k)
|
||||
} else {
|
||||
// "When signing you do not encode these values."
|
||||
sarray = append(sarray, k+"="+vi)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(sarray) > 0 {
|
||||
sort.StringSlice(sarray).Sort()
|
||||
canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&")
|
||||
}
|
||||
|
||||
payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath
|
||||
hash := hmac.New(sha1.New, []byte(auth.SecretKey))
|
||||
hash.Write([]byte(payload))
|
||||
signature := make([]byte, b64.EncodedLen(hash.Size()))
|
||||
b64.Encode(signature, hash.Sum(nil))
|
||||
|
||||
if expires {
|
||||
params["Signature"] = []string{string(signature)}
|
||||
} else {
|
||||
headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)}
|
||||
}
|
||||
if debug {
|
||||
log.Printf("Signature payload: %q", payload)
|
||||
log.Printf("Signature: %q", signature)
|
||||
}
|
||||
}
|
148  Godeps/_workspace/src/github.com/AdRoll/goamz/s3/sign_test.go  generated vendored Normal file
@@ -0,0 +1,148 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"github.com/AdRoll/goamz/aws"
|
||||
"github.com/AdRoll/goamz/s3"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
// S3 ReST authentication docs: http://goo.gl/G1LrK
|
||||
|
||||
var testAuth = aws.Auth{AccessKey: "0PN5J17HBGZHT7JJ3X82", SecretKey: "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o"}
|
||||
|
||||
func (s *S) TestSignExampleObjectGet(c *check.C) {
|
||||
method := "GET"
|
||||
path := "/johnsmith/photos/puppy.jpg"
|
||||
headers := map[string][]string{
|
||||
"Host": {"johnsmith.s3.amazonaws.com"},
|
||||
"Date": {"Tue, 27 Mar 2007 19:36:42 +0000"},
|
||||
}
|
||||
s3.Sign(testAuth, method, path, nil, headers)
|
||||
expected := "AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA="
|
||||
c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
|
||||
}
|
||||
|
||||
func (s *S) TestSignExampleObjectPut(c *check.C) {
|
||||
method := "PUT"
|
||||
path := "/johnsmith/photos/puppy.jpg"
|
||||
headers := map[string][]string{
|
||||
"Host": {"johnsmith.s3.amazonaws.com"},
|
||||
"Date": {"Tue, 27 Mar 2007 21:15:45 +0000"},
|
||||
"Content-Type": {"image/jpeg"},
|
||||
"Content-Length": {"94328"},
|
||||
}
|
||||
s3.Sign(testAuth, method, path, nil, headers)
|
||||
expected := "AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ="
|
||||
c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
|
||||
}
|
||||
|
||||
func (s *S) TestSignExampleList(c *check.C) {
|
||||
method := "GET"
|
||||
path := "/johnsmith/"
|
||||
params := map[string][]string{
|
||||
"prefix": {"photos"},
|
||||
"max-keys": {"50"},
|
||||
"marker": {"puppy"},
|
||||
}
|
||||
headers := map[string][]string{
|
||||
"Host": {"johnsmith.s3.amazonaws.com"},
|
||||
"Date": {"Tue, 27 Mar 2007 19:42:41 +0000"},
|
||||
"User-Agent": {"Mozilla/5.0"},
|
||||
}
|
||||
s3.Sign(testAuth, method, path, params, headers)
|
||||
expected := "AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4="
|
||||
c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
|
||||
}
|
||||
|
||||
func (s *S) TestSignExampleFetch(c *check.C) {
|
||||
method := "GET"
|
||||
path := "/johnsmith/"
|
||||
params := map[string][]string{
|
||||
"acl": {""},
|
||||
}
|
||||
headers := map[string][]string{
|
||||
"Host": {"johnsmith.s3.amazonaws.com"},
|
||||
"Date": {"Tue, 27 Mar 2007 19:44:46 +0000"},
|
||||
}
|
||||
s3.Sign(testAuth, method, path, params, headers)
|
||||
expected := "AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g="
|
||||
c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
|
||||
}
|
||||
|
||||
func (s *S) TestSignExampleDelete(c *check.C) {
|
||||
method := "DELETE"
|
||||
path := "/johnsmith/photos/puppy.jpg"
|
||||
params := map[string][]string{}
|
||||
headers := map[string][]string{
|
||||
"Host": {"s3.amazonaws.com"},
|
||||
"Date": {"Tue, 27 Mar 2007 21:20:27 +0000"},
|
||||
"User-Agent": {"dotnet"},
|
||||
"x-amz-date": {"Tue, 27 Mar 2007 21:20:26 +0000"},
|
||||
}
|
||||
s3.Sign(testAuth, method, path, params, headers)
|
||||
expected := "AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk="
|
||||
c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
|
||||
}
|
||||
|
||||
func (s *S) TestSignExampleUpload(c *check.C) {
|
||||
method := "PUT"
|
||||
path := "/static.johnsmith.net/db-backup.dat.gz"
|
||||
params := map[string][]string{}
|
||||
headers := map[string][]string{
|
||||
"Host": {"static.johnsmith.net:8080"},
|
||||
"Date": {"Tue, 27 Mar 2007 21:06:08 +0000"},
|
||||
"User-Agent": {"curl/7.15.5"},
|
||||
"x-amz-acl": {"public-read"},
|
||||
"content-type": {"application/x-download"},
|
||||
"Content-MD5": {"4gJE4saaMU4BqNR0kLY+lw=="},
|
||||
"X-Amz-Meta-ReviewedBy": {"joe@johnsmith.net,jane@johnsmith.net"},
|
||||
"X-Amz-Meta-FileChecksum": {"0x02661779"},
|
||||
"X-Amz-Meta-ChecksumAlgorithm": {"crc32"},
|
||||
"Content-Disposition": {"attachment; filename=database.dat"},
|
||||
"Content-Encoding": {"gzip"},
|
||||
"Content-Length": {"5913339"},
|
||||
}
|
||||
s3.Sign(testAuth, method, path, params, headers)
|
||||
expected := "AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI="
|
||||
c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
|
||||
}
|
||||
|
||||
func (s *S) TestSignExampleListAllMyBuckets(c *check.C) {
|
||||
method := "GET"
|
||||
path := "/"
|
||||
headers := map[string][]string{
|
||||
"Host": {"s3.amazonaws.com"},
|
||||
"Date": {"Wed, 28 Mar 2007 01:29:59 +0000"},
|
||||
}
|
||||
s3.Sign(testAuth, method, path, nil, headers)
|
||||
expected := "AWS 0PN5J17HBGZHT7JJ3X82:Db+gepJSUbZKwpx1FR0DLtEYoZA="
|
||||
c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
|
||||
}
|
||||
|
||||
func (s *S) TestSignExampleUnicodeKeys(c *check.C) {
|
||||
method := "GET"
|
||||
path := "/dictionary/fran%C3%A7ais/pr%c3%a9f%c3%a8re"
|
||||
headers := map[string][]string{
|
||||
"Host": {"s3.amazonaws.com"},
|
||||
"Date": {"Wed, 28 Mar 2007 01:49:49 +0000"},
|
||||
}
|
||||
s3.Sign(testAuth, method, path, nil, headers)
|
||||
expected := "AWS 0PN5J17HBGZHT7JJ3X82:dxhSBHoI6eVSPcXJqEghlUzZMnY="
|
||||
c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
|
||||
}
|
||||
|
||||
func (s *S) TestSignExampleCustomSSE(c *check.C) {
|
||||
method := "GET"
|
||||
path := "/secret/config"
|
||||
params := map[string][]string{}
|
||||
headers := map[string][]string{
|
||||
"Host": {"secret.johnsmith.net:8080"},
|
||||
"Date": {"Tue, 27 Mar 2007 21:06:08 +0000"},
|
||||
"x-amz-server-side-encryption-customer-key": {"MWJhakVna1dQT1B0SDFMeGtVVnRQRTFGaU1ldFJrU0I="},
|
||||
"x-amz-server-side-encryption-customer-key-MD5": {"glIqxpqQ4a9aoK/iLttKzQ=="},
|
||||
"x-amz-server-side-encryption-customer-algorithm": {"AES256"},
|
||||
}
|
||||
s3.Sign(testAuth, method, path, params, headers)
|
||||
expected := "AWS 0PN5J17HBGZHT7JJ3X82:Xq6PWmIo0aOWq+LDjCEiCGgbmHE="
|
||||
c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
|
||||
}
|
29  Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.gitignore  generated vendored Normal file
@@ -0,0 +1,29 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
# Editor swap files
|
||||
*.swp
|
||||
*~
|
||||
.DS_Store
|
19  Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.travis.yml  generated vendored Normal file
@@ -0,0 +1,19 @@
|
|||
sudo: false
|
||||
|
||||
language: go
|
||||
|
||||
before_script:
|
||||
- go get -u golang.org/x/tools/cmd/vet
|
||||
- go get -u github.com/golang/lint/golint
|
||||
|
||||
go: tip
|
||||
script:
|
||||
- test -z "$(gofmt -s -l -w management | tee /dev/stderr)"
|
||||
- test -z "$(gofmt -s -l -w storage | tee /dev/stderr)"
|
||||
- go build -v ./...
|
||||
- go test -v ./storage/... -check.v
|
||||
- test -z "$(golint ./storage/... | tee /dev/stderr)"
|
||||
- go vet ./storage/...
|
||||
- go test -v ./management/...
|
||||
- test -z "$(golint ./management/... | grep -v 'should have comment' | grep -v 'stutters' | tee /dev/stderr)"
|
||||
- go vet ./management/...
|
88  Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/README.md  generated vendored Normal file
@@ -0,0 +1,88 @@
|
|||
# Microsoft Azure SDK for Go
|
||||
|
||||
This project provides various Go packages to perform operations
|
||||
on Microsoft Azure REST APIs.
|
||||
|
||||
[](https://godoc.org/github.com/Azure/azure-sdk-for-go) [](https://travis-ci.org/Azure/azure-sdk-for-go)
|
||||
|
||||
See list of implemented API clients [here](http://godoc.org/github.com/Azure/azure-sdk-for-go).
|
||||
|
||||
> **NOTE:** This repository is under heavy ongoing development and
|
||||
is likely to break over time. We currently do not have any releases
|
||||
yet. If you are planning to use the repository, please consider vendoring
|
||||
the packages in your project and updating them when a stable tag is out.
|
||||
|
||||
# Installation
|
||||
|
||||
go get -d github.com/Azure/azure-sdk-for-go/management
|
||||
|
||||
# Usage
|
||||
|
||||
Read Godoc of the repository at: http://godoc.org/github.com/Azure/azure-sdk-for-go/
|
||||
|
||||
The client currently supports authentication to the Service Management
|
||||
API with certificates or an Azure `.publishSettings` file. You can
|
||||
download the `.publishSettings` file for your subscriptions
|
||||
[here](https://manage.windowsazure.com/publishsettings).
|
||||
|
||||
### Example: Creating a Linux Virtual Machine
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/management"
|
||||
"github.com/Azure/azure-sdk-for-go/management/hostedservice"
|
||||
"github.com/Azure/azure-sdk-for-go/management/virtualmachine"
|
||||
"github.com/Azure/azure-sdk-for-go/management/vmutils"
|
||||
)
|
||||
|
||||
func main() {
|
||||
dnsName := "test-vm-from-go"
|
||||
storageAccount := "mystorageaccount"
|
||||
location := "West US"
|
||||
vmSize := "Small"
|
||||
vmImage := "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB"
|
||||
userName := "testuser"
|
||||
userPassword := "Test123"
|
||||
|
||||
client, err := management.ClientFromPublishSettingsFile("path/to/downloaded.publishsettings", "")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// create hosted service
|
||||
if err := hostedservice.NewClient(client).CreateHostedService(hostedservice.CreateHostedServiceParameters{
|
||||
ServiceName: dnsName,
|
||||
Location: location,
|
||||
Label: base64.StdEncoding.EncodeToString([]byte(dnsName))}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// create virtual machine
|
||||
role := vmutils.NewVMConfiguration(dnsName, vmSize)
|
||||
vmutils.ConfigureDeploymentFromPlatformImage(
|
||||
&role,
|
||||
vmImage,
|
||||
fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", storageAccount, dnsName),
|
||||
"")
|
||||
vmutils.ConfigureForLinux(&role, dnsName, userName, userPassword)
|
||||
vmutils.ConfigureWithPublicSSH(&role)
|
||||
|
||||
operationID, err := virtualmachine.NewClient(client).
|
||||
CreateDeployment(role, dnsName, virtualmachine.CreateDeploymentOptions{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := client.WaitForOperation(operationID, nil); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
# License
|
||||
|
||||
This project is published under [Apache 2.0 License](LICENSE).
|
858  Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob.go  generated vendored Normal file
@@ -0,0 +1,858 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
|
||||
// Service.
|
||||
type BlobStorageClient struct {
|
||||
client Client
|
||||
}
|
||||
|
||||
// A Container is an entry in ContainerListResponse.
|
||||
type Container struct {
|
||||
Name string `xml:"Name"`
|
||||
Properties ContainerProperties `xml:"Properties"`
|
||||
// TODO (ahmetalpbalkan) Metadata
|
||||
}
|
||||
|
||||
// ContainerProperties contains various properties of a container returned from
|
||||
// various endpoints like ListContainers.
|
||||
type ContainerProperties struct {
|
||||
LastModified string `xml:"Last-Modified"`
|
||||
Etag string `xml:"Etag"`
|
||||
LeaseStatus string `xml:"LeaseStatus"`
|
||||
LeaseState string `xml:"LeaseState"`
|
||||
LeaseDuration string `xml:"LeaseDuration"`
|
||||
// TODO (ahmetalpbalkan) remaining fields
|
||||
}
|
||||
|
||||
// ContainerListResponse contains the response fields from
|
||||
// ListContainers call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
type ContainerListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Containers []Container `xml:"Containers>Container"`
|
||||
}
|
||||
|
||||
// A Blob is an entry in BlobListResponse.
|
||||
type Blob struct {
|
||||
Name string `xml:"Name"`
|
||||
Properties BlobProperties `xml:"Properties"`
|
||||
// TODO (ahmetalpbalkan) Metadata
|
||||
}
|
||||
|
||||
// BlobProperties contains various properties of a blob
|
||||
// returned in various endpoints like ListBlobs or GetBlobProperties.
|
||||
type BlobProperties struct {
|
||||
LastModified string `xml:"Last-Modified"`
|
||||
Etag string `xml:"Etag"`
|
||||
ContentMD5 string `xml:"Content-MD5"`
|
||||
ContentLength int64 `xml:"Content-Length"`
|
||||
ContentType string `xml:"Content-Type"`
|
||||
ContentEncoding string `xml:"Content-Encoding"`
|
||||
BlobType BlobType `xml:"x-ms-blob-blob-type"`
|
||||
SequenceNumber int64 `xml:"x-ms-blob-sequence-number"`
|
||||
CopyID string `xml:"CopyId"`
|
||||
CopyStatus string `xml:"CopyStatus"`
|
||||
CopySource string `xml:"CopySource"`
|
||||
CopyProgress string `xml:"CopyProgress"`
|
||||
CopyCompletionTime string `xml:"CopyCompletionTime"`
|
||||
CopyStatusDescription string `xml:"CopyStatusDescription"`
|
||||
}
|
||||
|
||||
// BlobListResponse contains the response fields from ListBlobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
type BlobListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Blobs []Blob `xml:"Blobs>Blob"`
|
||||
}
|
||||
|
||||
// ListContainersParameters defines the set of customizable parameters to make a
|
||||
// List Containers call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
type ListContainersParameters struct {
|
||||
Prefix string
|
||||
Marker string
|
||||
Include string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
func (p ListContainersParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Prefix != "" {
|
||||
out.Set("prefix", p.Prefix)
|
||||
}
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.Include != "" {
|
||||
out.Set("include", p.Include)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// ListBlobsParameters defines the set of customizable
|
||||
// parameters to make a List Blobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
type ListBlobsParameters struct {
|
||||
Prefix string
|
||||
Delimiter string
|
||||
Marker string
|
||||
Include string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
func (p ListBlobsParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Prefix != "" {
|
||||
out.Set("prefix", p.Prefix)
|
||||
}
|
||||
if p.Delimiter != "" {
|
||||
out.Set("delimiter", p.Delimiter)
|
||||
}
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.Include != "" {
|
||||
out.Set("include", p.Include)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
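
For illustration, a standalone sketch mirroring what `getParameters` builds for `ListBlobsParameters{Prefix: "photos/", MaxResults: 100}`; the `Encode` call is only used here to display the result (`url.Values.Encode` sorts keys):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	out := url.Values{}
	out.Set("prefix", "photos/")
	out.Set("maxresults", fmt.Sprintf("%v", uint(100)))
	fmt.Println(out.Encode()) // maxresults=100&prefix=photos%2F
}
```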
|
||||
|
||||
// BlobType defines the type of the Azure Blob.
|
||||
type BlobType string
|
||||
|
||||
// Types of page blobs
|
||||
const (
|
||||
BlobTypeBlock BlobType = "BlockBlob"
|
||||
BlobTypePage BlobType = "PageBlob"
|
||||
)
|
||||
|
||||
// PageWriteType defines the type updates that are going to be
|
||||
// done on the page blob.
|
||||
type PageWriteType string
|
||||
|
||||
// Types of operations on page blobs
|
||||
const (
|
||||
PageWriteTypeUpdate PageWriteType = "update"
|
||||
PageWriteTypeClear PageWriteType = "clear"
|
||||
)
|
||||
|
||||
const (
|
||||
blobCopyStatusPending = "pending"
|
||||
blobCopyStatusSuccess = "success"
|
||||
blobCopyStatusAborted = "aborted"
|
||||
blobCopyStatusFailed = "failed"
|
||||
)
|
||||
|
||||
// BlockListType is used to filter out types of blocks in a Get Blocks List call
|
||||
// for a block blob.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
|
||||
// block types.
|
||||
type BlockListType string
|
||||
|
||||
// Filters for listing blocks in block blobs
|
||||
const (
|
||||
BlockListTypeAll BlockListType = "all"
|
||||
BlockListTypeCommitted BlockListType = "committed"
|
||||
BlockListTypeUncommitted BlockListType = "uncommitted"
|
||||
)
|
||||
|
||||
// ContainerAccessType defines the access level to the container from a public
|
||||
// request.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
|
||||
// blob-public-access" header.
|
||||
type ContainerAccessType string
|
||||
|
||||
// Access options for containers
|
||||
const (
|
||||
ContainerAccessTypePrivate ContainerAccessType = ""
|
||||
ContainerAccessTypeBlob ContainerAccessType = "blob"
|
||||
ContainerAccessTypeContainer ContainerAccessType = "container"
|
||||
)
|
||||
|
||||
// Maximum sizes (per REST API) for various concepts
|
||||
const (
|
||||
MaxBlobBlockSize = 4 * 1024 * 1024
|
||||
MaxBlobPageSize = 4 * 1024 * 1024
|
||||
)
|
||||
|
||||
// BlockStatus defines states a block for a block blob can
|
||||
// be in.
|
||||
type BlockStatus string
|
||||
|
||||
// List of statuses that can be used to refer to a block in a block list
|
||||
const (
|
||||
BlockStatusUncommitted BlockStatus = "Uncommitted"
|
||||
BlockStatusCommitted BlockStatus = "Committed"
|
||||
BlockStatusLatest BlockStatus = "Latest"
|
||||
)
|
||||
|
||||
// Block is used to create Block entities for Put Block List
|
||||
// call.
|
||||
type Block struct {
|
||||
ID string
|
||||
Status BlockStatus
|
||||
}
|
||||
|
||||
// BlockListResponse contains the response fields from Get Block List call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
|
||||
type BlockListResponse struct {
|
||||
XMLName xml.Name `xml:"BlockList"`
|
||||
CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"`
|
||||
UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
|
||||
}
|
||||
|
||||
// BlockResponse contains the block information returned
|
||||
// in the GetBlockListCall.
|
||||
type BlockResponse struct {
|
||||
Name string `xml:"Name"`
|
||||
Size int64 `xml:"Size"`
|
||||
}
|
||||
|
||||
// GetPageRangesResponse contains the response fields from
|
||||
// Get Page Ranges call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
|
||||
type GetPageRangesResponse struct {
|
||||
XMLName xml.Name `xml:"PageList"`
|
||||
PageList []PageRange `xml:"PageRange"`
|
||||
}
|
||||
|
||||
// PageRange contains information about a page of a page blob from
|
||||
// Get Pages Range call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
|
||||
type PageRange struct {
|
||||
Start int64 `xml:"Start"`
|
||||
End int64 `xml:"End"`
|
||||
}
|
||||
|
||||
var (
|
||||
errBlobCopyAborted = errors.New("storage: blob copy is aborted")
|
||||
errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
|
||||
)
|
||||
|
||||
// ListContainers returns the list of containers in a storage account along with
|
||||
// pagination token and other response details.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
|
||||
uri := b.client.getEndpoint(blobServiceName, "", q)
|
||||
headers := b.client.getStandardHeaders()
|
||||
|
||||
var out ContainerListResponse
|
||||
resp, err := b.client.exec("GET", uri, headers, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// CreateContainer creates a blob container within the storage account
|
||||
// with the given name and access level. Returns an error if the container already exists.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
|
||||
func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error {
|
||||
resp, err := b.createContainer(name, access)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// CreateContainerIfNotExists creates a blob container if it does not exist. Returns
|
||||
// true if container is newly created or false if container already exists.
|
||||
func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) {
|
||||
resp, err := b.createContainer(name, access)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||
return resp.statusCode == http.StatusCreated, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) {
|
||||
verb := "PUT"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
|
||||
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["Content-Length"] = "0"
|
||||
if access != "" {
|
||||
headers["x-ms-blob-public-access"] = string(access)
|
||||
}
|
||||
return b.client.exec(verb, uri, headers, nil)
|
||||
}
|
||||
|
||||
// ContainerExists returns true if a container with given name exists
|
||||
// on the storage account, otherwise returns false.
|
||||
func (b BlobStorageClient) ContainerExists(name string) (bool, error) {
|
||||
verb := "HEAD"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
|
||||
headers := b.client.getStandardHeaders()
|
||||
|
||||
resp, err := b.client.exec(verb, uri, headers, nil)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// DeleteContainer deletes the container with given name on the storage
|
||||
// account. If the container does not exist, an error is returned.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
||||
func (b BlobStorageClient) DeleteContainer(name string) error {
|
||||
resp, err := b.deleteContainer(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
||||
|
||||
// DeleteContainerIfExists deletes the container with given name on the storage
|
||||
// account if it exists. Returns true if container is deleted with this call, or
|
||||
// false if the container did not exist at the time of the Delete Container
|
||||
// operation.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
||||
func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) {
|
||||
resp, err := b.deleteContainer(name)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) {
|
||||
verb := "DELETE"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
|
||||
|
||||
headers := b.client.getStandardHeaders()
|
||||
return b.client.exec(verb, uri, headers, nil)
|
||||
}
|
||||
|
||||
// ListBlobs returns an object that contains list of blobs in the container,
|
||||
// pagination token and other information in the response of List Blobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), url.Values{
|
||||
"restype": {"container"},
|
||||
"comp": {"list"}})
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q)
|
||||
headers := b.client.getStandardHeaders()
|
||||
|
||||
var out BlobListResponse
|
||||
resp, err := b.client.exec("GET", uri, headers, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// BlobExists returns true if a blob with given name exists on the specified
|
||||
// container of the storage account.
|
||||
func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
|
||||
verb := "HEAD"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
|
||||
headers := b.client.getStandardHeaders()
|
||||
resp, err := b.client.exec(verb, uri, headers, nil)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// GetBlobURL gets the canonical URL to the blob with the specified name in the
|
||||
// specified container. This method does not create a publicly accessible URL if
|
||||
// the blob or container is private, and it does not check whether the blob
|
||||
// exists.
|
||||
func (b BlobStorageClient) GetBlobURL(container, name string) string {
|
||||
if container == "" {
|
||||
container = "$root"
|
||||
}
|
||||
return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
}
|
||||
|
||||
// GetBlob returns a stream to read the blob. The caller must call Close() on
|
||||
// the reader to close the underlying connection.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
|
||||
func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) {
|
||||
resp, err := b.getBlobRange(container, name, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.body, nil
|
||||
}
|
||||
|
||||
// GetBlobRange reads the specified range of a blob to a stream. The bytesRange
|
||||
// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
|
||||
func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.ReadCloser, error) {
|
||||
resp, err := b.getBlobRange(container, name, bytesRange)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.body, nil
|
||||
}
|
||||
|
||||
func (b BlobStorageClient) getBlobRange(container, name, bytesRange string) (*storageResponse, error) {
|
||||
verb := "GET"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
|
||||
headers := b.client.getStandardHeaders()
|
||||
if bytesRange != "" {
|
||||
headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange)
|
||||
}
|
||||
resp, err := b.client.exec(verb, uri, headers, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// GetBlobProperties provides various information about the specified
|
||||
// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
|
||||
func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) {
|
||||
verb := "HEAD"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
|
||||
headers := b.client.getStandardHeaders()
|
||||
resp, err := b.client.exec(verb, uri, headers, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var contentLength int64
|
||||
contentLengthStr := resp.headers.Get("Content-Length")
|
||||
if contentLengthStr != "" {
|
||||
contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var sequenceNum int64
|
||||
sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number")
|
||||
if sequenceNumStr != "" {
|
||||
sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &BlobProperties{
|
||||
LastModified: resp.headers.Get("Last-Modified"),
|
||||
Etag: resp.headers.Get("Etag"),
|
||||
ContentMD5: resp.headers.Get("Content-MD5"),
|
||||
ContentLength: contentLength,
|
||||
ContentEncoding: resp.headers.Get("Content-Encoding"),
|
||||
SequenceNumber: sequenceNum,
|
||||
CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"),
|
||||
CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"),
|
||||
CopyID: resp.headers.Get("x-ms-copy-id"),
|
||||
CopyProgress: resp.headers.Get("x-ms-copy-progress"),
|
||||
CopySource: resp.headers.Get("x-ms-copy-source"),
|
||||
CopyStatus: resp.headers.Get("x-ms-copy-status"),
|
||||
BlobType: BlobType(resp.headers.Get("x-ms-blob-type")),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreateBlockBlob initializes an empty block blob with no blocks.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx
|
||||
func (b BlobStorageClient) CreateBlockBlob(container, name string) error {
|
||||
path := fmt.Sprintf("%s/%s", container, name)
|
||||
uri := b.client.getEndpoint(blobServiceName, path, url.Values{})
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["x-ms-blob-type"] = string(BlobTypeBlock)
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", 0)
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// PutBlock saves the given data chunk to the specified block blob with
|
||||
// given ID.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx
|
||||
func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error {
|
||||
return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk))
|
||||
}
|
||||
|
||||
// PutBlockWithLength saves the given data stream of exactly specified size to
|
||||
// the block blob with the given ID. It is an alternative to PutBlock where the data
|
||||
// comes as a stream but the length is known in advance.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx
|
||||
func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader) error {
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}})
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["x-ms-blob-type"] = string(BlobTypeBlock)
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", size)
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, blob)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// PutBlockList saves list of blocks to the specified block blob.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx
|
||||
func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error {
|
||||
blockListXML := prepareBlockListRequest(blocks)
|
||||
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}})
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
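A sketch of the block-blob upload flow these helpers support: stage each chunk with PutBlock under a base64-encoded block ID, then commit the IDs with PutBlockList. The names and the single small chunk are illustrative, and blobCli is assumed to be a BlobStorageClient obtained via GetBlobService:

	chunk := []byte("hello, block blob")
	blockID := base64.StdEncoding.EncodeToString([]byte("block-00000"))
	if err := blobCli.PutBlock("mycontainer", "myblob", blockID, chunk); err != nil {
		// handle error
	}
	if err := blobCli.PutBlockList("mycontainer", "myblob", []Block{{blockID, BlockStatusUncommitted}}); err != nil {
		// handle error
	}
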
// GetBlockList retrieves the list of blocks in the specified block blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) {
	params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}}
	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
	headers := b.client.getStandardHeaders()

	var out BlockListResponse
	resp, err := b.client.exec("GET", uri, headers, nil)
	if err != nil {
		return out, err
	}
	defer resp.body.Close()

	err = xmlUnmarshal(resp.body, &out)
	return out, err
}

// PutPageBlob initializes an empty page blob with the specified name and maximum
// size in bytes (size must be aligned to a 512-byte boundary). A page blob must
// be created using this method before writing pages.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx
func (b BlobStorageClient) PutPageBlob(container, name string, size int64) error {
	path := fmt.Sprintf("%s/%s", container, name)
	uri := b.client.getEndpoint(blobServiceName, path, url.Values{})
	headers := b.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size)
	headers["Content-Length"] = fmt.Sprintf("%v", 0)

	resp, err := b.client.exec("PUT", uri, headers, nil)
	if err != nil {
		return err
	}
	defer resp.body.Close()

	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}

// PutPage writes a range of pages to a page blob or clears the given range.
// In case of 'clear' writes, the given chunk is discarded. Ranges must be aligned
// to 512-byte boundaries and the chunk size must be a multiple of 512.
//
// See https://msdn.microsoft.com/en-us/library/ee691975.aspx
func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte) error {
	path := fmt.Sprintf("%s/%s", container, name)
	uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}})
	headers := b.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	headers["x-ms-page-write"] = string(writeType)
	headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte)

	var contentLength int64
	var data io.Reader
	if writeType == PageWriteTypeClear {
		contentLength = 0
		data = bytes.NewReader([]byte{})
	} else {
		contentLength = int64(len(chunk))
		data = bytes.NewReader(chunk)
	}
	headers["Content-Length"] = fmt.Sprintf("%v", contentLength)

	resp, err := b.client.exec("PUT", uri, headers, data)
	if err != nil {
		return err
	}
	defer resp.body.Close()

	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}

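A sketch of the page-blob flow: create the blob with PutPageBlob at a 512-byte-aligned size, then write and clear 512-byte-aligned ranges with PutPage. Sizes and names are illustrative, and blobCli is assumed to be a BlobStorageClient:

	size := int64(1024 * 1024) // must be a multiple of 512
	if err := blobCli.PutPageBlob("mycontainer", "mypages", size); err != nil {
		// handle error
	}
	chunk := make([]byte, 2048)
	if err := blobCli.PutPage("mycontainer", "mypages", 0, 2047, PageWriteTypeUpdate, chunk); err != nil {
		// handle error
	}
	// clearing a range ignores the chunk argument
	if err := blobCli.PutPage("mycontainer", "mypages", 512, 1023, PageWriteTypeClear, nil); err != nil {
		// handle error
	}
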
// GetPageRanges returns the list of valid page ranges for a page blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) {
	path := fmt.Sprintf("%s/%s", container, name)
	uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}})
	headers := b.client.getStandardHeaders()

	var out GetPageRangesResponse
	resp, err := b.client.exec("GET", uri, headers, nil)
	if err != nil {
		return out, err
	}
	defer resp.body.Close()

	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return out, err
	}
	err = xmlUnmarshal(resp.body, &out)
	return out, err
}

// CopyBlob starts a blob copy operation and waits for the operation to
// complete. The sourceBlob parameter must be a canonical URL to the blob (it
// can be obtained using the GetBlobURL method). There is no SLA on blob copy,
// so this helper method works faster on smaller files.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx
func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error {
	copyID, err := b.startBlobCopy(container, name, sourceBlob)
	if err != nil {
		return err
	}

	return b.waitForBlobCopy(container, name, copyID)
}

func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (string, error) {
	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})

	headers := b.client.getStandardHeaders()
	headers["Content-Length"] = "0"
	headers["x-ms-copy-source"] = sourceBlob

	resp, err := b.client.exec("PUT", uri, headers, nil)
	if err != nil {
		return "", err
	}
	defer resp.body.Close()

	if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
		return "", err
	}

	copyID := resp.headers.Get("x-ms-copy-id")
	if copyID == "" {
		return "", errors.New("Got empty copy id header")
	}
	return copyID, nil
}

func (b BlobStorageClient) waitForBlobCopy(container, name, copyID string) error {
	for {
		props, err := b.GetBlobProperties(container, name)
		if err != nil {
			return err
		}

		if props.CopyID != copyID {
			return errBlobCopyIDMismatch
		}

		switch props.CopyStatus {
		case blobCopyStatusSuccess:
			return nil
		case blobCopyStatusPending:
			continue
		case blobCopyStatusAborted:
			return errBlobCopyAborted
		case blobCopyStatusFailed:
			return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription)
		default:
			return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus)
		}
	}
}

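A sketch of copying a blob with the helper above; note that waitForBlobCopy polls GetBlobProperties in a tight loop until the copy leaves the pending state. Container and blob names are illustrative, and blobCli is assumed to be a BlobStorageClient:

	srcURL := blobCli.GetBlobURL("mycontainer", "source-blob")
	if err := blobCli.CopyBlob("mycontainer", "dest-blob", srcURL); err != nil {
		// handle error
	}
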
// DeleteBlob deletes the given blob from the specified container.
// If the blob does not exist at the time of the Delete Blob operation, it
// returns an error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
func (b BlobStorageClient) DeleteBlob(container, name string) error {
	resp, err := b.deleteBlob(container, name)
	if err != nil {
		return err
	}
	defer resp.body.Close()
	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}

// DeleteBlobIfExists deletes the given blob from the specified container. If the
// blob is deleted with this call, it returns true; otherwise it returns false.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
func (b BlobStorageClient) DeleteBlobIfExists(container, name string) (bool, error) {
	resp, err := b.deleteBlob(container, name)
	if resp != nil && (resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound) {
		return resp.statusCode == http.StatusAccepted, nil
	}
	if resp != nil {
		defer resp.body.Close()
	}
	return false, err
}

func (b BlobStorageClient) deleteBlob(container, name string) (*storageResponse, error) {
	verb := "DELETE"
	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
	headers := b.client.getStandardHeaders()

	return b.client.exec(verb, uri, headers, nil)
}

// helper method to construct the path to a container given its name
func pathForContainer(name string) string {
	return fmt.Sprintf("/%s", name)
}

// helper method to construct the path to a blob given its container and blob
// name
func pathForBlob(container, name string) string {
	return fmt.Sprintf("/%s/%s", container, name)
}

// GetBlobSASURI creates a URL to the specified blob which contains the Shared
// Access Signature with the specified permissions and expiration time.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) {
	var (
		signedPermissions = permissions
		blobURL           = b.GetBlobURL(container, name)
	)
	canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL)
	if err != nil {
		return "", err
	}
	signedExpiry := expiry.Format(time.RFC3339)
	signedResource := "b"

	stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions)
	if err != nil {
		return "", err
	}

	sig := b.client.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv":  {b.client.apiVersion},
		"se":  {signedExpiry},
		"sr":  {signedResource},
		"sp":  {signedPermissions},
		"sig": {sig},
	}

	sasURL, err := url.Parse(blobURL)
	if err != nil {
		return "", err
	}
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}

func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) {
	var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string

	// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	if signedVersion >= "2013-08-15" {
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
	}
	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}

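A sketch of issuing a read-only Shared Access Signature with GetBlobSASURI and fetching the blob anonymously; the one-hour expiry and the names are illustrative, and blobCli is assumed to be a BlobStorageClient:

	sasURI, err := blobCli.GetBlobSASURI("mycontainer", "myblob", time.Now().UTC().Add(time.Hour), "r")
	if err != nil {
		// handle error
	}
	resp, err := http.Get(sasURI)
	if err != nil {
		// handle error
	}
	defer resp.Body.Close()
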
625	Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go (generated, vendored, new file)
@ -0,0 +1,625 @@
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
chk "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
type StorageBlobSuite struct{}
|
||||
|
||||
var _ = chk.Suite(&StorageBlobSuite{})
|
||||
|
||||
const testContainerPrefix = "zzzztest-"
|
||||
|
||||
func getBlobClient(c *chk.C) BlobStorageClient {
|
||||
return getBasicClient(c).GetBlobService()
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) Test_pathForContainer(c *chk.C) {
|
||||
c.Assert(pathForContainer("foo"), chk.Equals, "/foo")
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) Test_pathForBlob(c *chk.C) {
|
||||
c.Assert(pathForBlob("foo", "blob"), chk.Equals, "/foo/blob")
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) Test_blobSASStringToSign(c *chk.C) {
|
||||
_, err := blobSASStringToSign("2012-02-12", "CS", "SE", "SP")
|
||||
c.Assert(err, chk.NotNil) // not implemented SAS for versions earlier than 2013-08-15
|
||||
|
||||
out, err := blobSASStringToSign("2013-08-15", "CS", "SE", "SP")
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(out, chk.Equals, "SP\n\nSE\nCS\n\n2013-08-15\n\n\n\n\n")
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetBlobSASURI(c *chk.C) {
|
||||
api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2013-08-15", true)
|
||||
c.Assert(err, chk.IsNil)
|
||||
cli := api.GetBlobService()
|
||||
expiry := time.Time{}
|
||||
|
||||
expectedParts := url.URL{
|
||||
Scheme: "https",
|
||||
Host: "foo.blob.core.windows.net",
|
||||
Path: "container/name",
|
||||
RawQuery: url.Values{
|
||||
"sv": {"2013-08-15"},
|
||||
"sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="},
|
||||
"sr": {"b"},
|
||||
"sp": {"r"},
|
||||
"se": {"0001-01-01T00:00:00Z"},
|
||||
}.Encode()}
|
||||
|
||||
u, err := cli.GetBlobSASURI("container", "name", expiry, "r")
|
||||
c.Assert(err, chk.IsNil)
|
||||
sasParts, err := url.Parse(u)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(expectedParts.String(), chk.Equals, sasParts.String())
|
||||
c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query())
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestBlobSASURICorrectness(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
blob := randString(20)
|
||||
body := []byte(randString(100))
|
||||
expiry := time.Now().UTC().Add(time.Hour)
|
||||
permissions := "r"
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, blob, body), chk.IsNil)
|
||||
|
||||
sasURI, err := cli.GetBlobSASURI(cnt, blob, expiry, permissions)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
resp, err := http.Get(sasURI)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
blobResp, err := ioutil.ReadAll(resp.Body)
|
||||
defer resp.Body.Close()
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
c.Assert(resp.StatusCode, chk.Equals, http.StatusOK)
|
||||
c.Assert(len(blobResp), chk.Equals, len(body))
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestListContainersPagination(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
c.Assert(deleteTestContainers(cli), chk.IsNil)
|
||||
|
||||
const n = 5
|
||||
const pageSize = 2
|
||||
|
||||
// Create test containers
|
||||
created := []string{}
|
||||
for i := 0; i < n; i++ {
|
||||
name := randContainer()
|
||||
c.Assert(cli.CreateContainer(name, ContainerAccessTypePrivate), chk.IsNil)
|
||||
created = append(created, name)
|
||||
}
|
||||
sort.Strings(created)
|
||||
|
||||
// Defer test container deletions
|
||||
defer func() {
|
||||
var wg sync.WaitGroup
|
||||
for _, cnt := range created {
|
||||
wg.Add(1)
|
||||
go func(name string) {
|
||||
c.Assert(cli.DeleteContainer(name), chk.IsNil)
|
||||
wg.Done()
|
||||
}(cnt)
|
||||
}
|
||||
wg.Wait()
|
||||
}()
|
||||
|
||||
// Paginate results
|
||||
seen := []string{}
|
||||
marker := ""
|
||||
for {
|
||||
resp, err := cli.ListContainers(ListContainersParameters{
|
||||
Prefix: testContainerPrefix,
|
||||
MaxResults: pageSize,
|
||||
Marker: marker})
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
containers := resp.Containers
|
||||
if len(containers) > pageSize {
|
||||
c.Fatalf("Got a bigger page. Expected: %d, got: %d", pageSize, len(containers))
|
||||
}
|
||||
|
||||
for _, c := range containers {
|
||||
seen = append(seen, c.Name)
|
||||
}
|
||||
|
||||
marker = resp.NextMarker
|
||||
if marker == "" || len(containers) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
c.Assert(seen, chk.DeepEquals, created)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestContainerExists(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
cli := getBlobClient(c)
|
||||
ok, err := cli.ContainerExists(cnt)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
|
||||
ok, err = cli.ContainerExists(cnt)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, true)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestCreateDeleteContainer(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
cli := getBlobClient(c)
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
c.Assert(cli.DeleteContainer(cnt), chk.IsNil)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestCreateContainerIfNotExists(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
cli := getBlobClient(c)
|
||||
|
||||
// First create
|
||||
ok, err := cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, true)
|
||||
|
||||
// Second create, should not give errors
|
||||
ok, err = cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate)
|
||||
c.Assert(err, chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestDeleteContainerIfExists(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
cli := getBlobClient(c)
|
||||
|
||||
// Nonexisting container
|
||||
c.Assert(cli.DeleteContainer(cnt), chk.NotNil)
|
||||
|
||||
ok, err := cli.DeleteContainerIfExists(cnt)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
|
||||
// Existing container
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
ok, err = cli.DeleteContainerIfExists(cnt)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, true)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestBlobExists(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
blob := randString(20)
|
||||
cli := getBlobClient(c)
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte("Hello!")), chk.IsNil)
|
||||
defer cli.DeleteBlob(cnt, blob)
|
||||
|
||||
ok, err := cli.BlobExists(cnt, blob+".foo")
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
|
||||
ok, err = cli.BlobExists(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, true)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetBlobURL(c *chk.C) {
|
||||
api, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
cli := api.GetBlobService()
|
||||
|
||||
c.Assert(cli.GetBlobURL("c", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/c/nested/blob")
|
||||
c.Assert(cli.GetBlobURL("", "blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/blob")
|
||||
c.Assert(cli.GetBlobURL("", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/nested/blob")
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestBlobCopy(c *chk.C) {
|
||||
if testing.Short() {
|
||||
c.Skip("skipping blob copy in short mode, no SLA on async operation")
|
||||
}
|
||||
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
src := randString(20)
|
||||
dst := randString(20)
|
||||
body := []byte(randString(1024))
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil)
|
||||
defer cli.DeleteBlob(cnt, src)
|
||||
|
||||
c.Assert(cli.CopyBlob(cnt, dst, cli.GetBlobURL(cnt, src)), chk.IsNil)
|
||||
defer cli.DeleteBlob(cnt, dst)
|
||||
|
||||
blobBody, err := cli.GetBlob(cnt, dst)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
b, err := ioutil.ReadAll(blobBody)
|
||||
defer blobBody.Close()
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(b, chk.DeepEquals, body)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestDeleteBlobIfExists(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
blob := randString(20)
|
||||
|
||||
cli := getBlobClient(c)
|
||||
c.Assert(cli.DeleteBlob(cnt, blob), chk.NotNil)
|
||||
|
||||
ok, err := cli.DeleteBlobIfExists(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetBlobProperties(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
blob := randString(20)
|
||||
contents := randString(64)
|
||||
|
||||
cli := getBlobClient(c)
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
|
||||
// Nonexisting blob
|
||||
_, err := cli.GetBlobProperties(cnt, blob)
|
||||
c.Assert(err, chk.NotNil)
|
||||
|
||||
// Put the blob
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(contents)), chk.IsNil)
|
||||
|
||||
// Get blob properties
|
||||
props, err := cli.GetBlobProperties(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
c.Assert(props.ContentLength, chk.Equals, int64(len(contents)))
|
||||
c.Assert(props.BlobType, chk.Equals, BlobTypeBlock)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestListBlobsPagination(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
|
||||
blobs := []string{}
|
||||
const n = 5
|
||||
const pageSize = 2
|
||||
for i := 0; i < n; i++ {
|
||||
name := randString(20)
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil)
|
||||
blobs = append(blobs, name)
|
||||
}
|
||||
sort.Strings(blobs)
|
||||
|
||||
// Paginate
|
||||
seen := []string{}
|
||||
marker := ""
|
||||
for {
|
||||
resp, err := cli.ListBlobs(cnt, ListBlobsParameters{
|
||||
MaxResults: pageSize,
|
||||
Marker: marker})
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
for _, v := range resp.Blobs {
|
||||
seen = append(seen, v.Name)
|
||||
}
|
||||
|
||||
marker = resp.NextMarker
|
||||
if marker == "" || len(resp.Blobs) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Compare
|
||||
c.Assert(seen, chk.DeepEquals, blobs)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestPutEmptyBlockBlob(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil)
|
||||
|
||||
props, err := cli.GetBlobProperties(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(props.ContentLength, chk.Not(chk.Equals), 0)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetBlobRange(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
blob := randString(20)
|
||||
body := "0123456789"
|
||||
|
||||
cli := getBlobClient(c)
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(body)), chk.IsNil)
|
||||
defer cli.DeleteBlob(cnt, blob)
|
||||
|
||||
// Read 1-3
|
||||
for _, r := range []struct {
|
||||
rangeStr string
|
||||
expected string
|
||||
}{
|
||||
{"0-", body},
|
||||
{"1-3", body[1 : 3+1]},
|
||||
{"3-", body[3:]},
|
||||
} {
|
||||
resp, err := cli.GetBlobRange(cnt, blob, r.rangeStr)
|
||||
c.Assert(err, chk.IsNil)
|
||||
blobBody, err := ioutil.ReadAll(resp)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
str := string(blobBody)
|
||||
c.Assert(str, chk.Equals, r.expected)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestPutBlock(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
chunk := []byte(randString(1024))
|
||||
blockID := base64.StdEncoding.EncodeToString([]byte("foo"))
|
||||
c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetBlockList_PutBlockList(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
chunk := []byte(randString(1024))
|
||||
blockID := base64.StdEncoding.EncodeToString([]byte("foo"))
|
||||
|
||||
// Put one block
|
||||
c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil)
|
||||
defer cli.deleteBlob(cnt, blob)
|
||||
|
||||
// Get committed blocks
|
||||
committed, err := cli.GetBlockList(cnt, blob, BlockListTypeCommitted)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
if len(committed.CommittedBlocks) > 0 {
|
||||
c.Fatal("There are committed blocks")
|
||||
}
|
||||
|
||||
// Get uncommitted blocks
|
||||
uncommitted, err := cli.GetBlockList(cnt, blob, BlockListTypeUncommitted)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
c.Assert(len(uncommitted.UncommittedBlocks), chk.Equals, 1)
|
||||
// Commit block list
|
||||
c.Assert(cli.PutBlockList(cnt, blob, []Block{{blockID, BlockStatusUncommitted}}), chk.IsNil)
|
||||
|
||||
// Get all blocks
|
||||
all, err := cli.GetBlockList(cnt, blob, BlockListTypeAll)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(all.CommittedBlocks), chk.Equals, 1)
|
||||
c.Assert(len(all.UncommittedBlocks), chk.Equals, 0)
|
||||
|
||||
// Verify the block
|
||||
thatBlock := all.CommittedBlocks[0]
|
||||
c.Assert(thatBlock.Name, chk.Equals, blockID)
|
||||
c.Assert(thatBlock.Size, chk.Equals, int64(len(chunk)))
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestCreateBlockBlob(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil)
|
||||
|
||||
// Verify
|
||||
blocks, err := cli.GetBlockList(cnt, blob, BlockListTypeAll)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(blocks.CommittedBlocks), chk.Equals, 0)
|
||||
c.Assert(len(blocks.UncommittedBlocks), chk.Equals, 0)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestPutPageBlob(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
size := int64(10 * 1024 * 1024)
|
||||
c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil)
|
||||
|
||||
// Verify
|
||||
props, err := cli.GetBlobProperties(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(props.ContentLength, chk.Equals, size)
|
||||
c.Assert(props.BlobType, chk.Equals, BlobTypePage)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestPutPagesUpdate(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
size := int64(10 * 1024 * 1024) // larger than we'll use
|
||||
c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil)
|
||||
|
||||
chunk1 := []byte(randString(1024))
|
||||
chunk2 := []byte(randString(512))
|
||||
|
||||
// Append chunks
|
||||
c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk1)-1), PageWriteTypeUpdate, chunk1), chk.IsNil)
|
||||
c.Assert(cli.PutPage(cnt, blob, int64(len(chunk1)), int64(len(chunk1)+len(chunk2)-1), PageWriteTypeUpdate, chunk2), chk.IsNil)
|
||||
|
||||
// Verify contents
|
||||
out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1))
|
||||
c.Assert(err, chk.IsNil)
|
||||
defer out.Close()
|
||||
blobContents, err := ioutil.ReadAll(out)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...))
|
||||
out.Close()
|
||||
|
||||
// Overwrite first half of chunk1
|
||||
chunk0 := []byte(randString(512))
|
||||
c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk0)-1), PageWriteTypeUpdate, chunk0), chk.IsNil)
|
||||
|
||||
// Verify contents
|
||||
out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1))
|
||||
c.Assert(err, chk.IsNil)
|
||||
defer out.Close()
|
||||
blobContents, err = ioutil.ReadAll(out)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(blobContents, chk.DeepEquals, append(append(chunk0, chunk1[512:]...), chunk2...))
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestPutPagesClear(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
size := int64(10 * 1024 * 1024) // larger than we'll use
|
||||
c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil)
|
||||
|
||||
// Put 0-2047
|
||||
chunk := []byte(randString(2048))
|
||||
c.Assert(cli.PutPage(cnt, blob, 0, 2047, PageWriteTypeUpdate, chunk), chk.IsNil)
|
||||
|
||||
// Clear 512-1023
|
||||
c.Assert(cli.PutPage(cnt, blob, 512, 1023, PageWriteTypeClear, nil), chk.IsNil)
|
||||
|
||||
// Verify contents
|
||||
out, err := cli.GetBlobRange(cnt, blob, "0-2047")
|
||||
c.Assert(err, chk.IsNil)
|
||||
contents, err := ioutil.ReadAll(out)
|
||||
c.Assert(err, chk.IsNil)
|
||||
defer out.Close()
|
||||
c.Assert(contents, chk.DeepEquals, append(append(chunk[:512], make([]byte, 512)...), chunk[1024:]...))
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetPageRanges(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
size := int64(10 * 1024 * 1024) // larger than we'll use
|
||||
c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil)
|
||||
|
||||
// Get page ranges on empty blob
|
||||
out, err := cli.GetPageRanges(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(out.PageList), chk.Equals, 0)
|
||||
|
||||
// Add 0-512 page
|
||||
c.Assert(cli.PutPage(cnt, blob, 0, 511, PageWriteTypeUpdate, []byte(randString(512))), chk.IsNil)
|
||||
|
||||
out, err = cli.GetPageRanges(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(out.PageList), chk.Equals, 1)
|
||||
|
||||
// Add 1024-2048
|
||||
c.Assert(cli.PutPage(cnt, blob, 1024, 2047, PageWriteTypeUpdate, []byte(randString(1024))), chk.IsNil)
|
||||
|
||||
out, err = cli.GetPageRanges(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(out.PageList), chk.Equals, 2)
|
||||
}
|
||||
|
||||
func deleteTestContainers(cli BlobStorageClient) error {
|
||||
for {
|
||||
resp, err := cli.ListContainers(ListContainersParameters{Prefix: testContainerPrefix})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(resp.Containers) == 0 {
|
||||
break
|
||||
}
|
||||
for _, c := range resp.Containers {
|
||||
err = cli.DeleteContainer(c.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b BlobStorageClient) putSingleBlockBlob(container, name string, chunk []byte) error {
|
||||
if len(chunk) > MaxBlobBlockSize {
|
||||
return fmt.Errorf("storage: provided chunk (%d bytes) cannot fit into single-block blob (max %d bytes)", len(chunk), MaxBlobBlockSize)
|
||||
}
|
||||
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["x-ms-blob-type"] = string(BlobTypeBlock)
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
func randContainer() string {
|
||||
return testContainerPrefix + randString(32-len(testContainerPrefix))
|
||||
}
|
||||
|
||||
func randString(n int) string {
|
||||
if n <= 0 {
|
||||
panic("negative number")
|
||||
}
|
||||
const alphanum = "0123456789abcdefghijklmnopqrstuvwxyz"
|
||||
var bytes = make([]byte, n)
|
||||
rand.Read(bytes)
|
||||
for i, b := range bytes {
|
||||
bytes[i] = alphanum[b%byte(len(alphanum))]
|
||||
}
|
||||
return string(bytes)
|
||||
}
|
359	Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client.go (generated, vendored, new file)
@ -0,0 +1,359 @@
// Package storage provides clients for Microsoft Azure Storage Services.
|
||||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultBaseURL is the domain name used for storage requests when a
|
||||
// default client is created.
|
||||
DefaultBaseURL = "core.windows.net"
|
||||
|
||||
// DefaultAPIVersion is the Azure Storage API version string used when a
|
||||
// basic client is created.
|
||||
DefaultAPIVersion = "2014-02-14"
|
||||
|
||||
defaultUseHTTPS = true
|
||||
|
||||
blobServiceName = "blob"
|
||||
tableServiceName = "table"
|
||||
queueServiceName = "queue"
|
||||
)
|
||||
|
||||
// Client is the object that needs to be constructed to perform
|
||||
// operations on the storage account.
|
||||
type Client struct {
|
||||
accountName string
|
||||
accountKey []byte
|
||||
useHTTPS bool
|
||||
baseURL string
|
||||
apiVersion string
|
||||
}
|
||||
|
||||
type storageResponse struct {
|
||||
statusCode int
|
||||
headers http.Header
|
||||
body io.ReadCloser
|
||||
}
|
||||
|
||||
// AzureStorageServiceError contains fields of the error response from
|
||||
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
|
||||
// Some fields might be specific to certain calls.
|
||||
type AzureStorageServiceError struct {
|
||||
Code string `xml:"Code"`
|
||||
Message string `xml:"Message"`
|
||||
AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
|
||||
QueryParameterName string `xml:"QueryParameterName"`
|
||||
QueryParameterValue string `xml:"QueryParameterValue"`
|
||||
Reason string `xml:"Reason"`
|
||||
StatusCode int
|
||||
RequestID string
|
||||
}
|
||||
|
||||
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
|
||||
// nor with an HTTP status code indicating success.
|
||||
type UnexpectedStatusCodeError struct {
|
||||
allowed []int
|
||||
got int
|
||||
}
|
||||
|
||||
func (e UnexpectedStatusCodeError) Error() string {
|
||||
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
|
||||
|
||||
got := s(e.got)
|
||||
expected := []string{}
|
||||
for _, v := range e.allowed {
|
||||
expected = append(expected, s(v))
|
||||
}
|
||||
return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or "))
|
||||
}
|
||||
|
||||
// NewBasicClient constructs a Client with given storage service name and
|
||||
// key.
|
||||
func NewBasicClient(accountName, accountKey string) (Client, error) {
|
||||
return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
|
||||
}
|
||||
|
||||
// NewClient constructs a Client. This should be used if the caller wants
// to specify whether to use HTTPS, a specific REST API version, or a custom
// storage endpoint other than the Azure Public Cloud.
|
||||
func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
|
||||
var c Client
|
||||
if accountName == "" {
|
||||
return c, fmt.Errorf("azure: account name required")
|
||||
} else if accountKey == "" {
|
||||
return c, fmt.Errorf("azure: account key required")
|
||||
} else if blobServiceBaseURL == "" {
|
||||
return c, fmt.Errorf("azure: base storage service url required")
|
||||
}
|
||||
|
||||
key, err := base64.StdEncoding.DecodeString(accountKey)
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
|
||||
return Client{
|
||||
accountName: accountName,
|
||||
accountKey: key,
|
||||
useHTTPS: useHTTPS,
|
||||
baseURL: blobServiceBaseURL,
|
||||
apiVersion: apiVersion,
|
||||
}, nil
|
||||
}
|
||||
|
||||
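A sketch of constructing a client against a non-default endpoint with NewClient (the China cloud domain below also appears in the tests); the account name and key are illustrative:

	cli, err := NewClient("myaccount", "bXlrZXk=", "core.chinacloudapi.cn", DefaultAPIVersion, true)
	if err != nil {
		// handle error
	}
	// cli.getBaseURL("blob") is then "https://myaccount.blob.core.chinacloudapi.cn"
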
func (c Client) getBaseURL(service string) string {
|
||||
scheme := "http"
|
||||
if c.useHTTPS {
|
||||
scheme = "https"
|
||||
}
|
||||
|
||||
host := fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
|
||||
|
||||
u := &url.URL{
|
||||
Scheme: scheme,
|
||||
Host: host}
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (c Client) getEndpoint(service, path string, params url.Values) string {
|
||||
u, err := url.Parse(c.getBaseURL(service))
|
||||
if err != nil {
|
||||
// really should not be happening
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if path == "" {
|
||||
path = "/" // API doesn't accept path segments not starting with '/'
|
||||
}
|
||||
|
||||
u.Path = path
|
||||
u.RawQuery = params.Encode()
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// GetBlobService returns a BlobStorageClient which can operate on the blob
|
||||
// service of the storage account.
|
||||
func (c Client) GetBlobService() BlobStorageClient {
|
||||
return BlobStorageClient{c}
|
||||
}
|
||||
|
||||
// GetQueueService returns a QueueServiceClient which can operate on the queue
|
||||
// service of the storage account.
|
||||
func (c Client) GetQueueService() QueueServiceClient {
|
||||
return QueueServiceClient{c}
|
||||
}
|
||||
|
||||
func (c Client) createAuthorizationHeader(canonicalizedString string) string {
|
||||
signature := c.computeHmac256(canonicalizedString)
|
||||
return fmt.Sprintf("%s %s:%s", "SharedKey", c.accountName, signature)
|
||||
}
|
||||
|
||||
func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) {
|
||||
canonicalizedResource, err := c.buildCanonicalizedResource(url)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource)
|
||||
return c.createAuthorizationHeader(canonicalizedString), nil
|
||||
}
|
||||
|
||||
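A sketch of how the SharedKey pieces above fit together for a single request: build the canonicalized resource and string, sign them, and set the result as the Authorization header. The URL is illustrative, and cli is assumed to be a Client:

	headers := cli.getStandardHeaders()
	canonicalizedResource, err := cli.buildCanonicalizedResource("https://myaccount.blob.core.windows.net/mycontainer?comp=list")
	if err != nil {
		// handle error
	}
	canonicalizedString := cli.buildCanonicalizedString("GET", headers, canonicalizedResource)
	headers["Authorization"] = cli.createAuthorizationHeader(canonicalizedString)
	// headers["Authorization"] now looks like "SharedKey myaccount:<base64 HMAC-SHA256 signature>"
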
func (c Client) getStandardHeaders() map[string]string {
|
||||
return map[string]string{
|
||||
"x-ms-version": c.apiVersion,
|
||||
"x-ms-date": currentTimeRfc1123Formatted(),
|
||||
}
|
||||
}
|
||||
|
||||
func (c Client) buildCanonicalizedHeader(headers map[string]string) string {
|
||||
cm := make(map[string]string)
|
||||
|
||||
for k, v := range headers {
|
||||
headerName := strings.TrimSpace(strings.ToLower(k))
|
||||
match, _ := regexp.MatchString("x-ms-", headerName)
|
||||
if match {
|
||||
cm[headerName] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(cm) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
keys := make([]string, 0, len(cm))
|
||||
for key := range cm {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
ch := ""
|
||||
|
||||
for i, key := range keys {
|
||||
if i == len(keys)-1 {
|
||||
ch += fmt.Sprintf("%s:%s", key, cm[key])
|
||||
} else {
|
||||
ch += fmt.Sprintf("%s:%s\n", key, cm[key])
|
||||
}
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
func (c Client) buildCanonicalizedResource(uri string) (string, error) {
|
||||
errMsg := "buildCanonicalizedResource error: %s"
|
||||
u, err := url.Parse(uri)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(errMsg, err.Error())
|
||||
}
|
||||
|
||||
cr := "/" + c.accountName
|
||||
if len(u.Path) > 0 {
|
||||
cr += u.Path
|
||||
}
|
||||
|
||||
params, err := url.ParseQuery(u.RawQuery)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(errMsg, err.Error())
|
||||
}
|
||||
|
||||
if len(params) > 0 {
|
||||
cr += "\n"
|
||||
keys := make([]string, 0, len(params))
|
||||
for key := range params {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
for i, key := range keys {
|
||||
if len(params[key]) > 1 {
|
||||
sort.Strings(params[key])
|
||||
}
|
||||
|
||||
if i == len(keys)-1 {
|
||||
cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))
|
||||
} else {
|
||||
cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ","))
|
||||
}
|
||||
}
|
||||
}
|
||||
return cr, nil
|
||||
}
|
||||
|
||||
func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string {
	canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
		verb,
		headers["Content-Encoding"],
		headers["Content-Language"],
		headers["Content-Length"],
		headers["Content-MD5"],
		headers["Content-Type"],
		headers["Date"],
		headers["If-Modified-Since"],
		headers["If-Match"],
		headers["If-None-Match"],
		headers["If-Unmodified-Since"],
		headers["Range"],
		c.buildCanonicalizedHeader(headers),
		canonicalizedResource)

	return canonicalizedString
}
|
||||
|
||||
func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) {
	authHeader, err := c.getAuthorizationHeader(verb, url, headers)
	if err != nil {
		return nil, err
	}
	headers["Authorization"] = authHeader

	req, err := http.NewRequest(verb, url, body)
	if err != nil {
		return nil, err
	}
	for k, v := range headers {
		req.Header.Add(k, v)
	}
	httpClient := http.Client{}
	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, err
	}

	statusCode := resp.StatusCode
	if statusCode >= 400 && statusCode <= 505 {
		var respBody []byte
		respBody, err = readResponseBody(resp)
		if err != nil {
			return nil, err
		}

		if len(respBody) == 0 {
			// no error in response body
			err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status)
		} else {
			// response contains storage service error object, unmarshal
			storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id"))
			if errIn != nil { // error unmarshaling the error response
				err = errIn
			} else {
				err = storageErr
			}
		}
		return &storageResponse{
			statusCode: resp.StatusCode,
			headers:    resp.Header,
			body:       ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */
		}, err
	}

	return &storageResponse{
		statusCode: resp.StatusCode,
		headers:    resp.Header,
		body:       resp.Body}, nil
}
|
||||
|
||||
func readResponseBody(resp *http.Response) ([]byte, error) {
|
||||
defer resp.Body.Close()
|
||||
out, err := ioutil.ReadAll(resp.Body)
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
return out, err
|
||||
}
|
||||
|
||||
func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) {
|
||||
var storageErr AzureStorageServiceError
|
||||
if err := xml.Unmarshal(body, &storageErr); err != nil {
|
||||
return storageErr, err
|
||||
}
|
||||
storageErr.StatusCode = statusCode
|
||||
storageErr.RequestID = requestID
|
||||
return storageErr, nil
|
||||
}
|
||||
|
||||
func (e AzureStorageServiceError) Error() string {
|
||||
return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s", e.StatusCode, e.Code, e.Message, e.RequestID)
|
||||
}
|
||||
|
||||
// checkRespCode returns UnexpectedStatusCodeError if the given response code is not
// one of the allowed status codes; otherwise nil.
|
||||
func checkRespCode(respCode int, allowed []int) error {
|
||||
for _, v := range allowed {
|
||||
if respCode == v {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return UnexpectedStatusCodeError{allowed, respCode}
|
||||
}
|
156	Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client_test.go (generated, vendored, new file)
@ -0,0 +1,156 @@
package storage
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
chk "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
// Hook up gocheck to testing
|
||||
func Test(t *testing.T) { chk.TestingT(t) }
|
||||
|
||||
type StorageClientSuite struct{}
|
||||
|
||||
var _ = chk.Suite(&StorageClientSuite{})
|
||||
|
||||
// getBasicClient returns a test client from storage credentials in the env
|
||||
func getBasicClient(c *chk.C) Client {
|
||||
name := os.Getenv("ACCOUNT_NAME")
|
||||
if name == "" {
|
||||
c.Fatal("ACCOUNT_NAME not set, need an empty storage account to test")
|
||||
}
|
||||
key := os.Getenv("ACCOUNT_KEY")
|
||||
if key == "" {
|
||||
c.Fatal("ACCOUNT_KEY not set")
|
||||
}
|
||||
cli, err := NewBasicClient(name, key)
|
||||
c.Assert(err, chk.IsNil)
|
||||
return cli
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestGetBaseURL_Basic_Https(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(cli.apiVersion, chk.Equals, DefaultAPIVersion)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(cli.getBaseURL("table"), chk.Equals, "https://foo.table.core.windows.net")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestGetBaseURL_Custom_NoHttps(c *chk.C) {
|
||||
apiVersion := "2015-01-01" // a non existing one
|
||||
cli, err := NewClient("foo", "YmFy", "core.chinacloudapi.cn", apiVersion, false)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(cli.apiVersion, chk.Equals, apiVersion)
|
||||
c.Assert(cli.getBaseURL("table"), chk.Equals, "http://foo.table.core.chinacloudapi.cn")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestGetEndpoint_None(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
output := cli.getEndpoint(blobServiceName, "", url.Values{})
|
||||
c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestGetEndpoint_PathOnly(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
output := cli.getEndpoint(blobServiceName, "path", url.Values{})
|
||||
c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestGetEndpoint_ParamsOnly(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
params := url.Values{}
|
||||
params.Set("a", "b")
|
||||
params.Set("c", "d")
|
||||
output := cli.getEndpoint(blobServiceName, "", params)
|
||||
c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/?a=b&c=d")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestGetEndpoint_Mixed(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
params := url.Values{}
|
||||
params.Set("a", "b")
|
||||
params.Set("c", "d")
|
||||
output := cli.getEndpoint(blobServiceName, "path", params)
|
||||
c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path?a=b&c=d")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_getStandardHeaders(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
headers := cli.getStandardHeaders()
|
||||
c.Assert(len(headers), chk.Equals, 2)
|
||||
c.Assert(headers["x-ms-version"], chk.Equals, cli.apiVersion)
|
||||
if _, ok := headers["x-ms-date"]; !ok {
|
||||
c.Fatal("Missing date header")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_buildCanonicalizedResource(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
type test struct{ url, expected string }
|
||||
tests := []test{
|
||||
{"https://foo.blob.core.windows.net/path?a=b&c=d", "/foo/path\na:b\nc:d"},
|
||||
{"https://foo.blob.core.windows.net/?comp=list", "/foo/\ncomp:list"},
|
||||
{"https://foo.blob.core.windows.net/cnt/blob", "/foo/cnt/blob"},
|
||||
}
|
||||
|
||||
for _, i := range tests {
|
||||
out, err := cli.buildCanonicalizedResource(i.url)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(out, chk.Equals, i.expected)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_buildCanonicalizedHeader(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
type test struct {
|
||||
headers map[string]string
|
||||
expected string
|
||||
}
|
||||
tests := []test{
|
||||
{map[string]string{}, ""},
|
||||
{map[string]string{"x-ms-foo": "bar"}, "x-ms-foo:bar"},
|
||||
{map[string]string{"foo:": "bar"}, ""},
|
||||
{map[string]string{"foo:": "bar", "x-ms-foo": "bar"}, "x-ms-foo:bar"},
|
||||
{map[string]string{
|
||||
"x-ms-version": "9999-99-99",
|
||||
"x-ms-blob-type": "BlockBlob"}, "x-ms-blob-type:BlockBlob\nx-ms-version:9999-99-99"}}
|
||||
|
||||
for _, i := range tests {
|
||||
c.Assert(cli.buildCanonicalizedHeader(i.headers), chk.Equals, i.expected)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestReturnsStorageServiceError(c *chk.C) {
|
||||
// attempt to delete a nonexisting container
|
||||
_, err := getBlobClient(c).deleteContainer(randContainer())
|
||||
c.Assert(err, chk.NotNil)
|
||||
|
||||
v, ok := err.(AzureStorageServiceError)
|
||||
c.Check(ok, chk.Equals, true)
|
||||
c.Assert(v.StatusCode, chk.Equals, 404)
|
||||
c.Assert(v.Code, chk.Equals, "ContainerNotFound")
|
||||
c.Assert(v.Code, chk.Not(chk.Equals), "")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_createAuthorizationHeader(c *chk.C) {
|
||||
key := base64.StdEncoding.EncodeToString([]byte("bar"))
|
||||
cli, err := NewBasicClient("foo", key)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
canonicalizedString := `foobarzoo`
|
||||
expected := `SharedKey foo:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=`
|
||||
c.Assert(cli.createAuthorizationHeader(canonicalizedString), chk.Equals, expected)
|
||||
}
|
230	Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue.go (generated, vendored, new file)
@ -0,0 +1,230 @@
package storage
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
|
||||
// Service.
|
||||
type QueueServiceClient struct {
|
||||
client Client
|
||||
}
|
||||
|
||||
func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) }
|
||||
func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) }
|
||||
func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) }
|
||||
|
||||
type putMessageRequest struct {
|
||||
XMLName xml.Name `xml:"QueueMessage"`
|
||||
MessageText string `xml:"MessageText"`
|
||||
}
|
||||
|
||||
// PutMessageParameters is the set of options that can be specified for the Put
// Message operation. A zero struct does not use any preferences for the request.
|
||||
type PutMessageParameters struct {
|
||||
VisibilityTimeout int
|
||||
MessageTTL int
|
||||
}
|
||||
|
||||
func (p PutMessageParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
if p.VisibilityTimeout != 0 {
|
||||
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
|
||||
}
|
||||
if p.MessageTTL != 0 {
|
||||
out.Set("messagettl", strconv.Itoa(p.MessageTTL))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// GetMessagesParameters is the set of options that can be specified for the Get
// Messages operation. A zero struct does not use any preferences for the
// request.
|
||||
type GetMessagesParameters struct {
|
||||
NumOfMessages int
|
||||
VisibilityTimeout int
|
||||
}
|
||||
|
||||
func (p GetMessagesParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
if p.NumOfMessages != 0 {
|
||||
out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
|
||||
}
|
||||
if p.VisibilityTimeout != 0 {
|
||||
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// PeekMessagesParameters is the set of options that can be specified for the Peek
// Messages operation. A zero struct does not use any preferences for the
// request.
|
||||
type PeekMessagesParameters struct {
|
||||
NumOfMessages int
|
||||
}
|
||||
|
||||
func (p PeekMessagesParameters) getParameters() url.Values {
|
||||
out := url.Values{"peekonly": {"true"}} // Required for peek operation
|
||||
if p.NumOfMessages != 0 {
|
||||
out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// GetMessagesResponse represents a response returned from Get Messages
|
||||
// operation.
|
||||
type GetMessagesResponse struct {
|
||||
XMLName xml.Name `xml:"QueueMessagesList"`
|
||||
QueueMessagesList []GetMessageResponse `xml:"QueueMessage"`
|
||||
}
|
||||
|
||||
// GetMessageResponse represents a QueueMessage object returned from Get
|
||||
// Messages operation response.
|
||||
type GetMessageResponse struct {
|
||||
MessageID string `xml:"MessageId"`
|
||||
InsertionTime string `xml:"InsertionTime"`
|
||||
ExpirationTime string `xml:"ExpirationTime"`
|
||||
PopReceipt string `xml:"PopReceipt"`
|
||||
TimeNextVisible string `xml:"TimeNextVisible"`
|
||||
DequeueCount int `xml:"DequeueCount"`
|
||||
MessageText string `xml:"MessageText"`
|
||||
}
|
||||
|
||||
// PeekMessagesResponse represents a response returned from Get Messages
|
||||
// operation.
|
||||
type PeekMessagesResponse struct {
|
||||
XMLName xml.Name `xml:"QueueMessagesList"`
|
||||
QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"`
|
||||
}
|
||||
|
||||
// PeekMessageResponse represents a QueueMessage object returned from Peek
|
||||
// Messages operation response.
|
||||
type PeekMessageResponse struct {
|
||||
MessageID string `xml:"MessageId"`
|
||||
InsertionTime string `xml:"InsertionTime"`
|
||||
ExpirationTime string `xml:"ExpirationTime"`
|
||||
DequeueCount int `xml:"DequeueCount"`
|
||||
MessageText string `xml:"MessageText"`
|
||||
}
|
||||
|
||||
// CreateQueue operation creates a queue under the given account.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx
|
||||
func (c QueueServiceClient) CreateQueue(name string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
|
||||
headers := c.client.getStandardHeaders()
|
||||
headers["Content-Length"] = "0"
|
||||
resp, err := c.client.exec("PUT", uri, headers, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// DeleteQueue operation permanently deletes the specified queue.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx
|
||||
func (c QueueServiceClient) DeleteQueue(name string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
|
||||
resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// QueueExists returns true if a queue with given name exists.
|
||||
func (c QueueServiceClient) QueueExists(name string) (bool, error) {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}})
|
||||
resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
|
||||
if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
// PutMessage operation adds a new message to the back of the message queue.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx
|
||||
func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
||||
req := putMessageRequest{MessageText: message}
|
||||
body, nn, err := xmlMarshal(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headers := c.client.getStandardHeaders()
|
||||
headers["Content-Length"] = strconv.Itoa(nn)
|
||||
resp, err := c.client.exec("POST", uri, headers, body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// ClearMessages operation deletes all messages from the specified queue.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx
|
||||
func (c QueueServiceClient) ClearMessages(queue string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{})
|
||||
resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// GetMessages operation retrieves one or more messages from the front of the
|
||||
// queue.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx
|
||||
func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) {
|
||||
var r GetMessagesResponse
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
||||
resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
err = xmlUnmarshal(resp.body, &r)
|
||||
return r, err
|
||||
}
|
||||
|
||||
// PeekMessages retrieves one or more messages from the front of the queue, but
|
||||
// does not alter the visibility of the message.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx
|
||||
func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) {
|
||||
var r PeekMessagesResponse
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
||||
resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
err = xmlUnmarshal(resp.body, &r)
|
||||
return r, err
|
||||
}
|
||||
|
||||
// DeleteMessage operation deletes the specified message.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
|
||||
func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{
|
||||
"popreceipt": {popReceipt}})
|
||||
resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
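Read together, the queue methods above cover the full message lifecycle: create the queue, put a message, get it (which hides it from other consumers for `VisibilityTimeout` seconds and hands back a pop receipt), then delete it with that receipt. A minimal sketch of that flow under stated assumptions — the account name, key, and the `NewBasicClient` constructor are placeholders for however this vintage of the package builds a `Client` in your setup:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Assumption: NewBasicClient (account name + base64 key) is the client
	// constructor; substitute whatever this package version actually exposes.
	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	q := cli.GetQueueService()

	if err := q.CreateQueue("tasks"); err != nil {
		log.Fatal(err)
	}
	if err := q.PutMessage("tasks", "hello", storage.PutMessageParameters{}); err != nil {
		log.Fatal(err)
	}

	// Hide the dequeued message from other consumers for 30 seconds.
	r, err := q.GetMessages("tasks", storage.GetMessagesParameters{NumOfMessages: 1, VisibilityTimeout: 30})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range r.QueueMessagesList {
		fmt.Println("got:", m.MessageText)
		// Deleting needs both the message ID and the pop receipt returned by GetMessages.
		if err := q.DeleteMessage("tasks", m.MessageID, m.PopReceipt); err != nil {
			log.Fatal(err)
		}
	}
}
```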
Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go (generated, vendored, normal file, 91 lines)
@@ -0,0 +1,91 @@
package storage
|
||||
|
||||
import (
|
||||
chk "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
type StorageQueueSuite struct{}
|
||||
|
||||
var _ = chk.Suite(&StorageQueueSuite{})
|
||||
|
||||
func getQueueClient(c *chk.C) QueueServiceClient {
|
||||
return getBasicClient(c).GetQueueService()
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) Test_pathForQueue(c *chk.C) {
|
||||
c.Assert(pathForQueue("q"), chk.Equals, "/q")
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) Test_pathForQueueMessages(c *chk.C) {
|
||||
c.Assert(pathForQueueMessages("q"), chk.Equals, "/q/messages")
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) Test_pathForMessage(c *chk.C) {
|
||||
c.Assert(pathForMessage("q", "m"), chk.Equals, "/q/messages/m")
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) TestCreateQueue_DeleteQueue(c *chk.C) {
|
||||
cli := getQueueClient(c)
|
||||
name := randString(20)
|
||||
c.Assert(cli.CreateQueue(name), chk.IsNil)
|
||||
c.Assert(cli.DeleteQueue(name), chk.IsNil)
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) TestQueueExists(c *chk.C) {
|
||||
cli := getQueueClient(c)
|
||||
ok, err := cli.QueueExists("nonexistent-queue")
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
|
||||
name := randString(20)
|
||||
c.Assert(cli.CreateQueue(name), chk.IsNil)
|
||||
defer cli.DeleteQueue(name)
|
||||
|
||||
ok, err = cli.QueueExists(name)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, true)
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) TestPostMessage_PeekMessage_DeleteMessage(c *chk.C) {
|
||||
q := randString(20)
|
||||
cli := getQueueClient(c)
|
||||
c.Assert(cli.CreateQueue(q), chk.IsNil)
|
||||
defer cli.DeleteQueue(q)
|
||||
|
||||
msg := randString(64 * 1024) // exercise max length
|
||||
c.Assert(cli.PutMessage(q, msg, PutMessageParameters{}), chk.IsNil)
|
||||
r, err := cli.PeekMessages(q, PeekMessagesParameters{})
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(r.QueueMessagesList), chk.Equals, 1)
|
||||
c.Assert(r.QueueMessagesList[0].MessageText, chk.Equals, msg)
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) TestGetMessages(c *chk.C) {
|
||||
q := randString(20)
|
||||
cli := getQueueClient(c)
|
||||
c.Assert(cli.CreateQueue(q), chk.IsNil)
|
||||
defer cli.DeleteQueue(q)
|
||||
|
||||
n := 4
|
||||
for i := 0; i < n; i++ {
|
||||
c.Assert(cli.PutMessage(q, randString(10), PutMessageParameters{}), chk.IsNil)
|
||||
}
|
||||
|
||||
r, err := cli.GetMessages(q, GetMessagesParameters{NumOfMessages: n})
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(r.QueueMessagesList), chk.Equals, n)
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) TestDeleteMessages(c *chk.C) {
|
||||
q := randString(20)
|
||||
cli := getQueueClient(c)
|
||||
c.Assert(cli.CreateQueue(q), chk.IsNil)
|
||||
defer cli.DeleteQueue(q)
|
||||
|
||||
c.Assert(cli.PutMessage(q, "message", PutMessageParameters{}), chk.IsNil)
|
||||
r, err := cli.GetMessages(q, GetMessagesParameters{VisibilityTimeout: 1})
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(r.QueueMessagesList), chk.Equals, 1)
|
||||
m := r.QueueMessagesList[0]
|
||||
c.Assert(cli.DeleteMessage(q, m.MessageID, m.PopReceipt), chk.IsNil)
|
||||
}
|
Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util.go (generated, vendored, normal file, 71 lines)
@@ -0,0 +1,71 @@
package storage

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/xml"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"time"
)

func (c Client) computeHmac256(message string) string {
	h := hmac.New(sha256.New, c.accountKey)
	h.Write([]byte(message))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}

func currentTimeRfc1123Formatted() string {
	return timeRfc1123Formatted(time.Now().UTC())
}

func timeRfc1123Formatted(t time.Time) string {
	return t.Format(http.TimeFormat)
}

func mergeParams(v1, v2 url.Values) url.Values {
	out := url.Values{}
	for k, v := range v1 {
		out[k] = v
	}
	for k, v := range v2 {
		vals, ok := out[k]
		if ok {
			vals = append(vals, v...)
			out[k] = vals
		} else {
			out[k] = v
		}
	}
	return out
}

func prepareBlockListRequest(blocks []Block) string {
	s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
	for _, v := range blocks {
		s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
	}
	s += `</BlockList>`
	return s
}

func xmlUnmarshal(body io.Reader, v interface{}) error {
	data, err := ioutil.ReadAll(body)
	if err != nil {
		return err
	}
	return xml.Unmarshal(data, v)
}

func xmlMarshal(v interface{}) (io.Reader, int, error) {
	b, err := xml.Marshal(v)
	if err != nil {
		return nil, 0, err
	}
	return bytes.NewReader(b), len(b), nil
}
Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util_test.go (generated, vendored, normal file, 69 lines)
@@ -0,0 +1,69 @@
package storage
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
chk "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
func (s *StorageClientSuite) Test_timeRfc1123Formatted(c *chk.C) {
|
||||
now := time.Now().UTC()
|
||||
expectedLayout := "Mon, 02 Jan 2006 15:04:05 GMT"
|
||||
c.Assert(timeRfc1123Formatted(now), chk.Equals, now.Format(expectedLayout))
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_mergeParams(c *chk.C) {
|
||||
v1 := url.Values{
|
||||
"k1": {"v1"},
|
||||
"k2": {"v2"}}
|
||||
v2 := url.Values{
|
||||
"k1": {"v11"},
|
||||
"k3": {"v3"}}
|
||||
out := mergeParams(v1, v2)
|
||||
c.Assert(out.Get("k1"), chk.Equals, "v1")
|
||||
c.Assert(out.Get("k2"), chk.Equals, "v2")
|
||||
c.Assert(out.Get("k3"), chk.Equals, "v3")
|
||||
c.Assert(out["k1"], chk.DeepEquals, []string{"v1", "v11"})
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_prepareBlockListRequest(c *chk.C) {
|
||||
empty := []Block{}
|
||||
expected := `<?xml version="1.0" encoding="utf-8"?><BlockList></BlockList>`
|
||||
c.Assert(prepareBlockListRequest(empty), chk.DeepEquals, expected)
|
||||
|
||||
blocks := []Block{{"foo", BlockStatusLatest}, {"bar", BlockStatusUncommitted}}
|
||||
expected = `<?xml version="1.0" encoding="utf-8"?><BlockList><Latest>foo</Latest><Uncommitted>bar</Uncommitted></BlockList>`
|
||||
c.Assert(prepareBlockListRequest(blocks), chk.DeepEquals, expected)
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_xmlUnmarshal(c *chk.C) {
|
||||
xml := `<?xml version="1.0" encoding="utf-8"?>
|
||||
<Blob>
|
||||
<Name>myblob</Name>
|
||||
</Blob>`
|
||||
var blob Blob
|
||||
body := ioutil.NopCloser(strings.NewReader(xml))
|
||||
c.Assert(xmlUnmarshal(body, &blob), chk.IsNil)
|
||||
c.Assert(blob.Name, chk.Equals, "myblob")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_xmlMarshal(c *chk.C) {
|
||||
type t struct {
|
||||
XMLName xml.Name `xml:"S"`
|
||||
Name string `xml:"Name"`
|
||||
}
|
||||
|
||||
b := t{Name: "myblob"}
|
||||
expected := `<S><Name>myblob</Name></S>`
|
||||
r, i, err := xmlMarshal(b)
|
||||
c.Assert(err, chk.IsNil)
|
||||
o, err := ioutil.ReadAll(r)
|
||||
c.Assert(err, chk.IsNil)
|
||||
out := string(o)
|
||||
c.Assert(out, chk.Equals, expected)
|
||||
c.Assert(i, chk.Equals, len(expected))
|
||||
}
|
Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore (generated, vendored, normal file, 1 line)
@@ -0,0 +1 @@
logrus
Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml (generated, vendored, normal file, 8 lines)
@@ -0,0 +1,8 @@
language: go
go:
  - 1.2
  - 1.3
  - 1.4
  - tip
install:
  - go get -t ./...
Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md (generated, vendored, normal file, 7 lines)
@@ -0,0 +1,7 @@
# 0.7.3

formatter/\*: allow configuration of timestamp layout

# 0.7.2

formatter/text: Add configuration option for time format (#158)
Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md (generated, vendored, normal file, 349 lines)
@@ -0,0 +1,349 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/Sirupsen/logrus) [][godoc]
|
||||
|
||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
|
||||
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
|
||||
many large deployments. The core API is unlikely to change much but please
|
||||
version control your Logrus to make sure you aren't fetching latest `master` on
|
||||
every build.**
|
||||
|
||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||
plain text):
|
||||
|
||||

|
||||
|
||||
With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
|
||||
or Splunk:
|
||||
|
||||
```json
|
||||
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
||||
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||
|
||||
{"level":"warning","msg":"The group's number increased tremendously!",
|
||||
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
||||
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
||||
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
||||
|
||||
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
||||
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
||||
```
|
||||
|
||||
With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
|
||||
attached, the output is compatible with the
|
||||
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
||||
|
||||
```text
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
|
||||
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
|
||||
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
|
||||
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
|
||||
exit status 1
|
||||
```
|
||||
|
||||
#### Example
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
```
|
||||
|
||||
Note that it's completely api-compatible with the stdlib logger, so you can
|
||||
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
|
||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||
want:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus/hooks/airbrake"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Log as JSON instead of the default ASCII formatter.
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
|
||||
// Use the Airbrake hook to report errors that have Error severity or above to
|
||||
// an exception tracker. You can create custom hooks, see the Hooks section.
|
||||
log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
|
||||
|
||||
// Output to stderr instead of stdout, could also be a file.
|
||||
log.SetOutput(os.Stderr)
|
||||
|
||||
// Only log the warning severity or above.
|
||||
log.SetLevel(log.WarnLevel)
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
|
||||
// A common pattern is to re-use fields between logging statements by re-using
|
||||
// the logrus.Entry returned from WithFields()
|
||||
contextLogger := log.WithFields(log.Fields{
|
||||
"common": "this is a common field",
|
||||
"other": "I also should be logged always",
|
||||
})
|
||||
|
||||
contextLogger.Info("I'll be logged with common and other field")
|
||||
contextLogger.Info("Me too")
|
||||
}
|
||||
```
|
||||
|
||||
For more advanced usage such as logging to multiple locations from the same
|
||||
application, you can also create an instance of the `logrus` Logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Create a new instance of the logger. You can have any number of instances.
|
||||
var log = logrus.New()
|
||||
|
||||
func main() {
|
||||
// The API for setting attributes is a little different than the package level
|
||||
// exported logger. See Godoc.
|
||||
log.Out = os.Stderr
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
}
|
||||
```
|
||||
|
||||
#### Fields
|
||||
|
||||
Logrus encourages careful, structured logging through logging fields instead of
|
||||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||
to send event %s to topic %s with key %d")`, you should log the much more
|
||||
discoverable:
|
||||
|
||||
```go
|
||||
log.WithFields(log.Fields{
|
||||
"event": event,
|
||||
"topic": topic,
|
||||
"key": key,
|
||||
}).Fatal("Failed to send event")
|
||||
```
|
||||
|
||||
We've found this API forces you to think about logging in a way that produces
|
||||
much more useful logging messages. We've been in countless situations where just
|
||||
a single added field to a log statement that was already there would've saved us
|
||||
hours. The `WithFields` call is optional.
|
||||
|
||||
In general, with Logrus using any of the `printf`-family functions should be
|
||||
seen as a hint you should add a field, however, you can still use the
|
||||
`printf`-family functions with Logrus.
|
||||
|
||||
#### Hooks
|
||||
|
||||
You can add hooks for logging levels. For example to send errors to an exception
|
||||
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
|
||||
multiple places simultaneously, e.g. syslog.
|
||||
|
||||
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
||||
`init`:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus/hooks/airbrake"
|
||||
"github.com/Sirupsen/logrus/hooks/syslog"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
|
||||
|
||||
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||
if err != nil {
|
||||
log.Error("Unable to connect to local syslog daemon")
|
||||
} else {
|
||||
log.AddHook(hook)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
| Hook | Description |
|
||||
| ----- | ----------- |
|
||||
| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
|
||||
| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
|
||||
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
||||
| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
||||
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
||||
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
||||
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
||||
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
||||
| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
|
||||
|
||||
#### Level logging
|
||||
|
||||
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
|
||||
|
||||
```go
|
||||
log.Debug("Useful debugging information.")
|
||||
log.Info("Something noteworthy happened!")
|
||||
log.Warn("You should probably take a look at this.")
|
||||
log.Error("Something failed but I'm not quitting.")
|
||||
// Calls os.Exit(1) after logging
|
||||
log.Fatal("Bye.")
|
||||
// Calls panic() after logging
|
||||
log.Panic("I'm bailing.")
|
||||
```
|
||||
|
||||
You can set the logging level on a `Logger`, then it will only log entries with
|
||||
that severity or anything above it:
|
||||
|
||||
```go
|
||||
// Will log anything that is info or above (warn, error, fatal, panic). Default.
|
||||
log.SetLevel(log.InfoLevel)
|
||||
```
|
||||
|
||||
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
|
||||
environment if your application has that.
|
||||
|
||||
#### Entries
|
||||
|
||||
Besides the fields added with `WithField` or `WithFields` some fields are
|
||||
automatically added to all logging events:
|
||||
|
||||
1. `time`. The timestamp when the entry was created.
|
||||
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
|
||||
the `WithFields` call. E.g. `Failed to send event.`
|
||||
3. `level`. The logging level. E.g. `info`.
|
||||
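Those three fields come along automatically; a quick sketch of how they sit next to a user-supplied field, using the package-level logger with the JSON formatter (the exact timestamp format depends on the formatter configuration):

```go
log.SetFormatter(&log.JSONFormatter{})

log.WithFields(log.Fields{"event": "signup"}).Info("User created")
// Output is roughly:
// {"event":"signup","level":"info","msg":"User created","time":"..."}
```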
|
||||
#### Environments
|
||||
|
||||
Logrus has no notion of environment.
|
||||
|
||||
If you wish for hooks and formatters to only be used in specific environments,
|
||||
you should handle that yourself. For example, if your application has a global
|
||||
variable `Environment`, which is a string representation of the environment you
|
||||
could do:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// do something here to set environment depending on an environment variable
|
||||
// or command-line flag
|
||||
if Environment == "production" {
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
} else {
|
||||
// The TextFormatter is default, you don't actually have to do this.
|
||||
log.SetFormatter(&log.TextFormatter{})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This configuration is how `logrus` was intended to be used, but JSON in
|
||||
production is mostly only useful if you do log aggregation with tools like
|
||||
Splunk or Logstash.
|
||||
|
||||
#### Formatters
|
||||
|
||||
The built-in logging formatters are:
|
||||
|
||||
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
|
||||
without colors.
|
||||
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
||||
field to `true`. To force no colored output even if there is a TTY set the
|
||||
`DisableColors` field to `true`
|
||||
* `logrus.JSONFormatter`. Logs fields as JSON.
|
||||
* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
|
||||
|
||||
```go
|
||||
logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"})
|
||||
```
|
||||
|
||||
Third party logging formatters:
|
||||
|
||||
* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
||||
|
||||
You can define your formatter by implementing the `Formatter` interface,
|
||||
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
||||
`Fields` type (`map[string]interface{}`) with all your fields as well as the
|
||||
default ones (see Entries section above):
|
||||
|
||||
```go
|
||||
type MyJSONFormatter struct {
|
||||
}
|
||||
|
||||
log.SetFormatter(new(MyJSONFormatter))
|
||||
|
||||
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
// Note this doesn't include Time, Level and Message which are available on
|
||||
// the Entry. Consult `godoc` on information about those fields or read the
|
||||
// source of the official loggers.
|
||||
serialized, err := json.Marshal(entry.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
||||
```
|
||||
|
||||
#### Logger as an `io.Writer`
|
||||
|
||||
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
||||
|
||||
```go
|
||||
w := logger.Writer()
|
||||
defer w.Close()
|
||||
|
||||
srv := http.Server{
|
||||
// create a stdlib log.Logger that writes to
|
||||
// logrus.Logger.
|
||||
ErrorLog: log.New(w, "", 0),
|
||||
}
|
||||
```
|
||||
|
||||
Each line written to that writer will be printed the usual way, using formatters
|
||||
and hooks. The level for those entries is `info`.
|
||||
|
||||
#### Rotation
|
||||
|
||||
Log rotation is not provided with Logrus. Log rotation should be done by an
|
||||
external program (like `logrotate(8)`) that can compress and delete old log
|
||||
entries. It should not be a feature of the application-level logger.
|
||||
|
||||
|
||||
[godoc]: https://godoc.org/github.com/Sirupsen/logrus
|
Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go (generated, vendored, normal file, 252 lines)
@@ -0,0 +1,252 @@
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An entry is the final or intermediate Logrus logging entry. It contains all
|
||||
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
|
||||
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
|
||||
// passed around as much as you wish to avoid field duplication.
|
||||
type Entry struct {
|
||||
Logger *Logger
|
||||
|
||||
// Contains all the fields set by the user.
|
||||
Data Fields
|
||||
|
||||
// Time at which the log entry was created
|
||||
Time time.Time
|
||||
|
||||
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
|
||||
Level Level
|
||||
|
||||
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
|
||||
Message string
|
||||
}
|
||||
|
||||
func NewEntry(logger *Logger) *Entry {
|
||||
return &Entry{
|
||||
Logger: logger,
|
||||
// Default is three fields, give a little extra room
|
||||
Data: make(Fields, 5),
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a reader for the entry, which is a proxy to the formatter.
|
||||
func (entry *Entry) Reader() (*bytes.Buffer, error) {
|
||||
serialized, err := entry.Logger.Formatter.Format(entry)
|
||||
return bytes.NewBuffer(serialized), err
|
||||
}
|
||||
|
||||
// Returns the string representation from the reader and ultimately the
|
||||
// formatter.
|
||||
func (entry *Entry) String() (string, error) {
|
||||
reader, err := entry.Reader()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return reader.String(), err
|
||||
}
|
||||
|
||||
// Add a single field to the Entry.
|
||||
func (entry *Entry) WithField(key string, value interface{}) *Entry {
|
||||
return entry.WithFields(Fields{key: value})
|
||||
}
|
||||
|
||||
// Add a map of fields to the Entry.
|
||||
func (entry *Entry) WithFields(fields Fields) *Entry {
|
||||
data := Fields{}
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
for k, v := range fields {
|
||||
data[k] = v
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: data}
|
||||
}
|
||||
|
||||
func (entry *Entry) log(level Level, msg string) {
|
||||
entry.Time = time.Now()
|
||||
entry.Level = level
|
||||
entry.Message = msg
|
||||
|
||||
if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
|
||||
entry.Logger.mu.Lock()
|
||||
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
|
||||
entry.Logger.mu.Unlock()
|
||||
}
|
||||
|
||||
reader, err := entry.Reader()
|
||||
if err != nil {
|
||||
entry.Logger.mu.Lock()
|
||||
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
|
||||
entry.Logger.mu.Unlock()
|
||||
}
|
||||
|
||||
entry.Logger.mu.Lock()
|
||||
defer entry.Logger.mu.Unlock()
|
||||
|
||||
_, err = io.Copy(entry.Logger.Out, reader)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
|
||||
}
|
||||
|
||||
// To avoid Entry#log() returning a value that only would make sense for
|
||||
// panic() to use in Entry#Panic(), we avoid the allocation by checking
|
||||
// directly here.
|
||||
if level <= PanicLevel {
|
||||
panic(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Debug(args ...interface{}) {
|
||||
if entry.Logger.Level >= DebugLevel {
|
||||
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Print(args ...interface{}) {
|
||||
entry.Info(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Info(args ...interface{}) {
|
||||
if entry.Logger.Level >= InfoLevel {
|
||||
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warn(args ...interface{}) {
|
||||
if entry.Logger.Level >= WarnLevel {
|
||||
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warning(args ...interface{}) {
|
||||
entry.Warn(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Error(args ...interface{}) {
|
||||
if entry.Logger.Level >= ErrorLevel {
|
||||
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatal(args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panic(args ...interface{}) {
|
||||
if entry.Logger.Level >= PanicLevel {
|
||||
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||
}
|
||||
panic(fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// Entry Printf family functions
|
||||
|
||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= DebugLevel {
|
||||
entry.Debug(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= InfoLevel {
|
||||
entry.Info(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Printf(format string, args ...interface{}) {
|
||||
entry.Infof(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= WarnLevel {
|
||||
entry.Warn(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
||||
entry.Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= ErrorLevel {
|
||||
entry.Error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
entry.Fatal(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= PanicLevel {
|
||||
entry.Panic(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Entry Println family functions
|
||||
|
||||
func (entry *Entry) Debugln(args ...interface{}) {
|
||||
if entry.Logger.Level >= DebugLevel {
|
||||
entry.Debug(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infoln(args ...interface{}) {
|
||||
if entry.Logger.Level >= InfoLevel {
|
||||
entry.Info(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Println(args ...interface{}) {
|
||||
entry.Infoln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnln(args ...interface{}) {
|
||||
if entry.Logger.Level >= WarnLevel {
|
||||
entry.Warn(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningln(args ...interface{}) {
|
||||
entry.Warnln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorln(args ...interface{}) {
|
||||
if entry.Logger.Level >= ErrorLevel {
|
||||
entry.Error(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
entry.Fatal(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicln(args ...interface{}) {
|
||||
if entry.Logger.Level >= PanicLevel {
|
||||
entry.Panic(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
||||
// fmt.Sprintln where spaces are always added between operands, regardless of
|
||||
// their type. Instead of vendoring the Sprintln implementation to spare a
|
||||
// string allocation, we do the simplest thing.
|
||||
func (entry *Entry) sprintlnn(args ...interface{}) string {
|
||||
msg := fmt.Sprintln(args...)
|
||||
return msg[:len(msg)-1]
|
||||
}
|
Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go (generated, vendored, normal file, 53 lines)
@@ -0,0 +1,53 @@
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestEntryPanicln(t *testing.T) {
|
||||
errBoom := fmt.Errorf("boom time")
|
||||
|
||||
defer func() {
|
||||
p := recover()
|
||||
assert.NotNil(t, p)
|
||||
|
||||
switch pVal := p.(type) {
|
||||
case *Entry:
|
||||
assert.Equal(t, "kaboom", pVal.Message)
|
||||
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||
default:
|
||||
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||
}
|
||||
}()
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
entry.WithField("err", errBoom).Panicln("kaboom")
|
||||
}
|
||||
|
||||
func TestEntryPanicf(t *testing.T) {
|
||||
errBoom := fmt.Errorf("boom again")
|
||||
|
||||
defer func() {
|
||||
p := recover()
|
||||
assert.NotNil(t, p)
|
||||
|
||||
switch pVal := p.(type) {
|
||||
case *Entry:
|
||||
assert.Equal(t, "kaboom true", pVal.Message)
|
||||
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||
default:
|
||||
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||
}
|
||||
}()
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
|
||||
}
|
Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go (generated, vendored, normal file, 50 lines)
@@ -0,0 +1,50 @@
package main
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.New()
|
||||
|
||||
func init() {
|
||||
log.Formatter = new(logrus.JSONFormatter)
|
||||
log.Formatter = new(logrus.TextFormatter) // default
|
||||
log.Level = logrus.DebugLevel
|
||||
}
|
||||
|
||||
func main() {
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"err": err,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
}
|
||||
}()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"number": 8,
|
||||
}).Debug("Started observing beach")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"temperature": -4,
|
||||
}).Debug("Temperature changes")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "orca",
|
||||
"size": 9009,
|
||||
}).Panic("It's over 9000!")
|
||||
}
|
Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go (generated, vendored, normal file, 30 lines)
@@ -0,0 +1,30 @@
package main
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus/hooks/airbrake"
|
||||
)
|
||||
|
||||
var log = logrus.New()
|
||||
|
||||
func init() {
|
||||
log.Formatter = new(logrus.TextFormatter) // default
|
||||
log.Hooks.Add(airbrake.NewHook("https://example.com", "xyz", "development"))
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
}
|
Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go (generated, vendored, normal file, 188 lines)
@@ -0,0 +1,188 @@
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// std is the name of the standard logger in stdlib `log`
|
||||
std = New()
|
||||
)
|
||||
|
||||
func StandardLogger() *Logger {
|
||||
return std
|
||||
}
|
||||
|
||||
// SetOutput sets the standard logger output.
|
||||
func SetOutput(out io.Writer) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Out = out
|
||||
}
|
||||
|
||||
// SetFormatter sets the standard logger formatter.
|
||||
func SetFormatter(formatter Formatter) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Formatter = formatter
|
||||
}
|
||||
|
||||
// SetLevel sets the standard logger level.
|
||||
func SetLevel(level Level) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Level = level
|
||||
}
|
||||
|
||||
// GetLevel returns the standard logger level.
|
||||
func GetLevel() Level {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
return std.Level
|
||||
}
|
||||
|
||||
// AddHook adds a hook to the standard logger hooks.
|
||||
func AddHook(hook Hook) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Hooks.Add(hook)
|
||||
}
|
||||
|
||||
// WithField creates an entry from the standard logger and adds a field to
|
||||
// it. If you want multiple fields, use `WithFields`.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithField(key string, value interface{}) *Entry {
|
||||
return std.WithField(key, value)
|
||||
}
|
||||
|
||||
// WithFields creates an entry from the standard logger and adds multiple
|
||||
// fields to it. This is simply a helper for `WithField`, invoking it
|
||||
// once for each field.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithFields(fields Fields) *Entry {
|
||||
return std.WithFields(fields)
|
||||
}
|
||||
|
||||
// Debug logs a message at level Debug on the standard logger.
|
||||
func Debug(args ...interface{}) {
|
||||
std.Debug(args...)
|
||||
}
|
||||
|
||||
// Print logs a message at level Info on the standard logger.
|
||||
func Print(args ...interface{}) {
|
||||
std.Print(args...)
|
||||
}
|
||||
|
||||
// Info logs a message at level Info on the standard logger.
|
||||
func Info(args ...interface{}) {
|
||||
std.Info(args...)
|
||||
}
|
||||
|
||||
// Warn logs a message at level Warn on the standard logger.
|
||||
func Warn(args ...interface{}) {
|
||||
std.Warn(args...)
|
||||
}
|
||||
|
||||
// Warning logs a message at level Warn on the standard logger.
|
||||
func Warning(args ...interface{}) {
|
||||
std.Warning(args...)
|
||||
}
|
||||
|
||||
// Error logs a message at level Error on the standard logger.
|
||||
func Error(args ...interface{}) {
|
||||
std.Error(args...)
|
||||
}
|
||||
|
||||
// Panic logs a message at level Panic on the standard logger.
|
||||
func Panic(args ...interface{}) {
|
||||
std.Panic(args...)
|
||||
}
|
||||
|
||||
// Fatal logs a message at level Fatal on the standard logger.
|
||||
func Fatal(args ...interface{}) {
|
||||
std.Fatal(args...)
|
||||
}
|
||||
|
||||
// Debugf logs a message at level Debug on the standard logger.
|
||||
func Debugf(format string, args ...interface{}) {
|
||||
std.Debugf(format, args...)
|
||||
}
|
||||
|
||||
// Printf logs a message at level Info on the standard logger.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
std.Printf(format, args...)
|
||||
}
|
||||
|
||||
// Infof logs a message at level Info on the standard logger.
|
||||
func Infof(format string, args ...interface{}) {
|
||||
std.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Warnf logs a message at level Warn on the standard logger.
|
||||
func Warnf(format string, args ...interface{}) {
|
||||
std.Warnf(format, args...)
|
||||
}
|
||||
|
||||
// Warningf logs a message at level Warn on the standard logger.
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
std.Warningf(format, args...)
|
||||
}
|
||||
|
||||
// Errorf logs a message at level Error on the standard logger.
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
std.Errorf(format, args...)
|
||||
}
|
||||
|
||||
// Panicf logs a message at level Panic on the standard logger.
|
||||
func Panicf(format string, args ...interface{}) {
|
||||
std.Panicf(format, args...)
|
||||
}
|
||||
|
||||
// Fatalf logs a message at level Fatal on the standard logger.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
std.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
// Debugln logs a message at level Debug on the standard logger.
|
||||
func Debugln(args ...interface{}) {
|
||||
std.Debugln(args...)
|
||||
}
|
||||
|
||||
// Println logs a message at level Info on the standard logger.
|
||||
func Println(args ...interface{}) {
|
||||
std.Println(args...)
|
||||
}
|
||||
|
||||
// Infoln logs a message at level Info on the standard logger.
|
||||
func Infoln(args ...interface{}) {
|
||||
std.Infoln(args...)
|
||||
}
|
||||
|
||||
// Warnln logs a message at level Warn on the standard logger.
|
||||
func Warnln(args ...interface{}) {
|
||||
std.Warnln(args...)
|
||||
}
|
||||
|
||||
// Warningln logs a message at level Warn on the standard logger.
|
||||
func Warningln(args ...interface{}) {
|
||||
std.Warningln(args...)
|
||||
}
|
||||
|
||||
// Errorln logs a message at level Error on the standard logger.
|
||||
func Errorln(args ...interface{}) {
|
||||
std.Errorln(args...)
|
||||
}
|
||||
|
||||
// Panicln logs a message at level Panic on the standard logger.
|
||||
func Panicln(args ...interface{}) {
|
||||
std.Panicln(args...)
|
||||
}
|
||||
|
||||
// Fatalln logs a message at level Fatal on the standard logger.
|
||||
func Fatalln(args ...interface{}) {
|
||||
std.Fatalln(args...)
|
||||
}
|
Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go (generated, vendored, normal file, 48 lines)
@@ -0,0 +1,48 @@
package logrus
|
||||
|
||||
import "time"
|
||||
|
||||
const DefaultTimestampFormat = time.RFC3339
|
||||
|
||||
// The Formatter interface is used to implement a custom Formatter. It takes an
|
||||
// `Entry`. It exposes all the fields, including the default ones:
|
||||
//
|
||||
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
|
||||
// * `entry.Data["time"]`. The timestamp.
|
||||
// * `entry.Data["level"]`. The level the entry was logged at.
|
||||
//
|
||||
// Any additional fields added with `WithField` or `WithFields` are also in
|
||||
// `entry.Data`. Format is expected to return an array of bytes which are then
|
||||
// logged to `logger.Out`.
|
||||
type Formatter interface {
|
||||
Format(*Entry) ([]byte, error)
|
||||
}
|
||||
|
||||
// This is to not silently overwrite `time`, `msg` and `level` fields when
|
||||
// dumping it. If this code wasn't there doing:
|
||||
//
|
||||
// logrus.WithField("level", 1).Info("hello")
|
||||
//
|
||||
// Would just silently drop the user provided level. Instead with this code
|
||||
// it'll be logged as:
|
||||
//
|
||||
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
|
||||
//
|
||||
// It's not exported because it's still using Data in an opinionated way. It's to
|
||||
// avoid code duplication between the two default formatters.
|
||||
func prefixFieldClashes(data Fields) {
|
||||
_, ok := data["time"]
|
||||
if ok {
|
||||
data["fields.time"] = data["time"]
|
||||
}
|
||||
|
||||
_, ok = data["msg"]
|
||||
if ok {
|
||||
data["fields.msg"] = data["msg"]
|
||||
}
|
||||
|
||||
_, ok = data["level"]
|
||||
if ok {
|
||||
data["fields.level"] = data["level"]
|
||||
}
|
||||
}
|
Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go (generated, vendored, normal file, 88 lines)
@@ -0,0 +1,88 @@
package logrus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// smallFields is a small size data set for benchmarking
|
||||
var smallFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
}
|
||||
|
||||
// largeFields is a large size data set for benchmarking
|
||||
var largeFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
"five": "six",
|
||||
"seven": "eight",
|
||||
"nine": "ten",
|
||||
"eleven": "twelve",
|
||||
"thirteen": "fourteen",
|
||||
"fifteen": "sixteen",
|
||||
"seventeen": "eighteen",
|
||||
"nineteen": "twenty",
|
||||
"a": "b",
|
||||
"c": "d",
|
||||
"e": "f",
|
||||
"g": "h",
|
||||
"i": "j",
|
||||
"k": "l",
|
||||
"m": "n",
|
||||
"o": "p",
|
||||
"q": "r",
|
||||
"s": "t",
|
||||
"u": "v",
|
||||
"w": "x",
|
||||
"y": "z",
|
||||
"this": "will",
|
||||
"make": "thirty",
|
||||
"entries": "yeah",
|
||||
}
|
||||
|
||||
func BenchmarkSmallTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallJSONFormatter(b *testing.B) {
|
||||
doBenchmark(b, &JSONFormatter{}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeJSONFormatter(b *testing.B) {
|
||||
doBenchmark(b, &JSONFormatter{}, largeFields)
|
||||
}
|
||||
|
||||
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
|
||||
entry := &Entry{
|
||||
Time: time.Time{},
|
||||
Level: InfoLevel,
|
||||
Message: "message",
|
||||
Data: fields,
|
||||
}
|
||||
var d []byte
|
||||
var err error
|
||||
for i := 0; i < b.N; i++ {
|
||||
d, err = formatter.Format(entry)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.SetBytes(int64(len(d)))
|
||||
}
|
||||
}
|
Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go (generated, vendored, normal file, 56 lines)
@@ -0,0 +1,56 @@
package logstash
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Formatter generates json in logstash format.
|
||||
// Logstash site: http://logstash.net/
|
||||
type LogstashFormatter struct {
|
||||
Type string // if not empty use for logstash type field.
|
||||
|
||||
// TimestampFormat sets the format used for timestamps.
|
||||
TimestampFormat string
|
||||
}
|
||||
|
||||
func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {
|
||||
entry.Data["@version"] = 1
|
||||
|
||||
if f.TimestampFormat == "" {
|
||||
f.TimestampFormat = logrus.DefaultTimestampFormat
|
||||
}
|
||||
|
||||
entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat)
|
||||
|
||||
// set message field
|
||||
v, ok := entry.Data["message"]
|
||||
if ok {
|
||||
entry.Data["fields.message"] = v
|
||||
}
|
||||
entry.Data["message"] = entry.Message
|
||||
|
||||
// set level field
|
||||
v, ok = entry.Data["level"]
|
||||
if ok {
|
||||
entry.Data["fields.level"] = v
|
||||
}
|
||||
entry.Data["level"] = entry.Level.String()
|
||||
|
||||
// set type field
|
||||
if f.Type != "" {
|
||||
v, ok = entry.Data["type"]
|
||||
if ok {
|
||||
entry.Data["fields.type"] = v
|
||||
}
|
||||
entry.Data["type"] = f.Type
|
||||
}
|
||||
|
||||
serialized, err := json.Marshal(entry.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go (generated, vendored, normal file, 52 lines)
@@ -0,0 +1,52 @@
package logstash
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLogstashFormatter(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
lf := LogstashFormatter{Type: "abc"}
|
||||
|
||||
fields := logrus.Fields{
|
||||
"message": "def",
|
||||
"level": "ijk",
|
||||
"type": "lmn",
|
||||
"one": 1,
|
||||
"pi": 3.14,
|
||||
"bool": true,
|
||||
}
|
||||
|
||||
entry := logrus.WithFields(fields)
|
||||
entry.Message = "msg"
|
||||
entry.Level = logrus.InfoLevel
|
||||
|
||||
b, _ := lf.Format(entry)
|
||||
|
||||
var data map[string]interface{}
|
||||
dec := json.NewDecoder(bytes.NewReader(b))
|
||||
dec.UseNumber()
|
||||
dec.Decode(&data)
|
||||
|
||||
// base fields
|
||||
assert.Equal(json.Number("1"), data["@version"])
|
||||
assert.NotEmpty(data["@timestamp"])
|
||||
assert.Equal("abc", data["type"])
|
||||
assert.Equal("msg", data["message"])
|
||||
assert.Equal("info", data["level"])
|
||||
|
||||
// substituted fields
|
||||
assert.Equal("def", data["fields.message"])
|
||||
assert.Equal("ijk", data["fields.level"])
|
||||
assert.Equal("lmn", data["fields.type"])
|
||||
|
||||
// formats
|
||||
assert.Equal(json.Number("1"), data["one"])
|
||||
assert.Equal(json.Number("3.14"), data["pi"])
|
||||
assert.Equal(true, data["bool"])
|
||||
}
|
Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go (generated, vendored, normal file, 122 lines)
@@ -0,0 +1,122 @@
package logrus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type TestHook struct {
|
||||
Fired bool
|
||||
}
|
||||
|
||||
func (hook *TestHook) Fire(entry *Entry) error {
|
||||
hook.Fired = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *TestHook) Levels() []Level {
|
||||
return []Level{
|
||||
DebugLevel,
|
||||
InfoLevel,
|
||||
WarnLevel,
|
||||
ErrorLevel,
|
||||
FatalLevel,
|
||||
PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHookFires(t *testing.T) {
|
||||
hook := new(TestHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
assert.Equal(t, hook.Fired, false)
|
||||
|
||||
log.Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
type ModifyHook struct {
|
||||
}
|
||||
|
||||
func (hook *ModifyHook) Fire(entry *Entry) error {
|
||||
entry.Data["wow"] = "whale"
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *ModifyHook) Levels() []Level {
|
||||
return []Level{
|
||||
DebugLevel,
|
||||
InfoLevel,
|
||||
WarnLevel,
|
||||
ErrorLevel,
|
||||
FatalLevel,
|
||||
PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHookCanModifyEntry(t *testing.T) {
|
||||
hook := new(ModifyHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.WithField("wow", "elephant").Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["wow"], "whale")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCanFireMultipleHooks(t *testing.T) {
|
||||
hook1 := new(ModifyHook)
|
||||
hook2 := new(TestHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook1)
|
||||
log.Hooks.Add(hook2)
|
||||
|
||||
log.WithField("wow", "elephant").Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["wow"], "whale")
|
||||
assert.Equal(t, hook2.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
type ErrorHook struct {
|
||||
Fired bool
|
||||
}
|
||||
|
||||
func (hook *ErrorHook) Fire(entry *Entry) error {
|
||||
hook.Fired = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *ErrorHook) Levels() []Level {
|
||||
return []Level{
|
||||
ErrorLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
|
||||
hook := new(ErrorHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, false)
|
||||
})
|
||||
}
|
||||
|
||||
func TestErrorHookShouldFireOnError(t *testing.T) {
|
||||
hook := new(ErrorHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.Error("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, true)
|
||||
})
|
||||
}
|
|
@@ -11,11 +11,11 @@ type Hook interface {
|
|||
}
|
||||
|
||||
// Internal type for storing the hooks on a logger instance.
|
||||
type LevelHooks map[Level][]Hook
|
||||
type levelHooks map[Level][]Hook
|
||||
|
||||
// Add a hook to an instance of logger. This is called with
|
||||
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
|
||||
func (hooks LevelHooks) Add(hook Hook) {
|
||||
func (hooks levelHooks) Add(hook Hook) {
|
||||
for _, level := range hook.Levels() {
|
||||
hooks[level] = append(hooks[level], hook)
|
||||
}
|
||||
|
@ -23,7 +23,7 @@ func (hooks LevelHooks) Add(hook Hook) {
|
|||
|
||||
// Fire all the hooks for the passed level. Used by `entry.log` to fire
|
||||
// appropriate hooks for a log entry.
|
||||
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
|
||||
func (hooks levelHooks) Fire(level Level, entry *Entry) error {
|
||||
for _, hook := range hooks[level] {
|
||||
if err := hook.Fire(entry); err != nil {
|
||||
return err
|
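
For readers skimming this vendored diff, the `Hook` interface referenced in the hunk above is all a hook has to satisfy. The sketch below is a minimal, hypothetical example (the `StderrHook` type and the `main` wiring are not part of this repository) of how such a hook would be registered, assuming the vendored `github.com/Sirupsen/logrus` API shown here.

```go
package main

import (
	"fmt"
	"os"

	"github.com/Sirupsen/logrus"
)

// StderrHook mirrors error-and-above entries to stderr; purely illustrative.
type StderrHook struct{}

func (h *StderrHook) Fire(entry *logrus.Entry) error {
	line, err := entry.String()
	if err != nil {
		return err
	}
	fmt.Fprint(os.Stderr, line)
	return nil
}

func (h *StderrHook) Levels() []logrus.Level {
	return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

func main() {
	log := logrus.New()
	log.Hooks.Add(&StderrHook{})
	log.WithField("component", "registry").Error("something went wrong")
}
```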
54 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go generated vendored Normal file
@@ -0,0 +1,54 @@
package airbrake

import (
	"errors"
	"fmt"

	"github.com/Sirupsen/logrus"
	"github.com/tobi/airbrake-go"
)

// AirbrakeHook to send exceptions to an exception-tracking service compatible
// with the Airbrake API.
type airbrakeHook struct {
	APIKey      string
	Endpoint    string
	Environment string
}

func NewHook(endpoint, apiKey, env string) *airbrakeHook {
	return &airbrakeHook{
		APIKey:      apiKey,
		Endpoint:    endpoint,
		Environment: env,
	}
}

func (hook *airbrakeHook) Fire(entry *logrus.Entry) error {
	airbrake.ApiKey = hook.APIKey
	airbrake.Endpoint = hook.Endpoint
	airbrake.Environment = hook.Environment

	var notifyErr error
	err, ok := entry.Data["error"].(error)
	if ok {
		notifyErr = err
	} else {
		notifyErr = errors.New(entry.Message)
	}

	airErr := airbrake.Notify(notifyErr)
	if airErr != nil {
		return fmt.Errorf("Failed to send error to Airbrake: %s", airErr)
	}

	return nil
}

func (hook *airbrakeHook) Levels() []logrus.Level {
	return []logrus.Level{
		logrus.ErrorLevel,
		logrus.FatalLevel,
		logrus.PanicLevel,
	}
}
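
A rough usage sketch for the hook above; the endpoint, API key and environment values are placeholders, and the wiring assumes the vendored import path shown in this diff.

```go
package main

import (
	"errors"

	"github.com/Sirupsen/logrus"
	"github.com/Sirupsen/logrus/hooks/airbrake"
)

func main() {
	log := logrus.New()
	// Placeholder credentials; NewHook(endpoint, apiKey, env) as defined above.
	log.Hooks.Add(airbrake.NewHook("https://api.airbrake.io", "YOUR_API_KEY", "production"))

	// An "error" field of type error is what the hook forwards; otherwise it
	// falls back to the entry message.
	log.WithField("error", errors.New("connection refused")).Error("upstream unavailable")
}
```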
133 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go generated vendored Normal file
@@ -0,0 +1,133 @@
|
|||
package airbrake
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
type notice struct {
|
||||
Error NoticeError `xml:"error"`
|
||||
}
|
||||
type NoticeError struct {
|
||||
Class string `xml:"class"`
|
||||
Message string `xml:"message"`
|
||||
}
|
||||
|
||||
type customErr struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e *customErr) Error() string {
|
||||
return e.msg
|
||||
}
|
||||
|
||||
const (
|
||||
testAPIKey = "abcxyz"
|
||||
testEnv = "development"
|
||||
expectedClass = "*airbrake.customErr"
|
||||
expectedMsg = "foo"
|
||||
unintendedMsg = "Airbrake will not see this string"
|
||||
)
|
||||
|
||||
var (
|
||||
noticeError = make(chan NoticeError, 1)
|
||||
)
|
||||
|
||||
// TestLogEntryMessageReceived checks if invoking Logrus' log.Error
|
||||
// method causes an XML payload containing the log entry message is received
|
||||
// by a HTTP server emulating an Airbrake-compatible endpoint.
|
||||
func TestLogEntryMessageReceived(t *testing.T) {
|
||||
log := logrus.New()
|
||||
ts := startAirbrakeServer(t)
|
||||
defer ts.Close()
|
||||
|
||||
hook := NewHook(ts.URL, testAPIKey, "production")
|
||||
log.Hooks.Add(hook)
|
||||
|
||||
log.Error(expectedMsg)
|
||||
|
||||
select {
|
||||
case received := <-noticeError:
|
||||
if received.Message != expectedMsg {
|
||||
t.Errorf("Unexpected message received: %s", received.Message)
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
t.Error("Timed out; no notice received by Airbrake API")
|
||||
}
|
||||
}
|
||||
|
||||
// TestLogEntryMessageReceived confirms that, when passing an error type using
|
||||
// logrus.Fields, a HTTP server emulating an Airbrake endpoint receives the
|
||||
// error message returned by the Error() method on the error interface
|
||||
// rather than the logrus.Entry.Message string.
|
||||
func TestLogEntryWithErrorReceived(t *testing.T) {
|
||||
log := logrus.New()
|
||||
ts := startAirbrakeServer(t)
|
||||
defer ts.Close()
|
||||
|
||||
hook := NewHook(ts.URL, testAPIKey, "production")
|
||||
log.Hooks.Add(hook)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"error": &customErr{expectedMsg},
|
||||
}).Error(unintendedMsg)
|
||||
|
||||
select {
|
||||
case received := <-noticeError:
|
||||
if received.Message != expectedMsg {
|
||||
t.Errorf("Unexpected message received: %s", received.Message)
|
||||
}
|
||||
if received.Class != expectedClass {
|
||||
t.Errorf("Unexpected error class: %s", received.Class)
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
t.Error("Timed out; no notice received by Airbrake API")
|
||||
}
|
||||
}
|
||||
|
||||
// TestLogEntryWithNonErrorTypeNotReceived confirms that, when passing a
|
||||
// non-error type using logrus.Fields, a HTTP server emulating an Airbrake
|
||||
// endpoint receives the logrus.Entry.Message string.
|
||||
//
|
||||
// Only error types are supported when setting the 'error' field using
|
||||
// logrus.WithFields().
|
||||
func TestLogEntryWithNonErrorTypeNotReceived(t *testing.T) {
|
||||
log := logrus.New()
|
||||
ts := startAirbrakeServer(t)
|
||||
defer ts.Close()
|
||||
|
||||
hook := NewHook(ts.URL, testAPIKey, "production")
|
||||
log.Hooks.Add(hook)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"error": expectedMsg,
|
||||
}).Error(unintendedMsg)
|
||||
|
||||
select {
|
||||
case received := <-noticeError:
|
||||
if received.Message != unintendedMsg {
|
||||
t.Errorf("Unexpected message received: %s", received.Message)
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
t.Error("Timed out; no notice received by Airbrake API")
|
||||
}
|
||||
}
|
||||
|
||||
func startAirbrakeServer(t *testing.T) *httptest.Server {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
var notice notice
|
||||
if err := xml.NewDecoder(r.Body).Decode(¬ice); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
r.Body.Close()
|
||||
|
||||
noticeError <- notice.Error
|
||||
}))
|
||||
|
||||
return ts
|
||||
}
|
@@ -3,9 +3,8 @@ package logrus_bugsnag
 import (
 	"errors"
 
-	"github.com/sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/bugsnag/bugsnag-go"
-	bugsnag_errors "github.com/bugsnag/bugsnag-go/errors"
 )
 
 type bugsnagHook struct{}
@@ -39,9 +38,6 @@ func NewBugsnagHook() (*bugsnagHook, error) {
 	return &bugsnagHook{}, nil
 }
 
-// skipStackFrames skips logrus stack frames before logging to Bugsnag.
-const skipStackFrames = 4
-
 // Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the
 // "error" field (or the Message if the error isn't present) and sends it off.
 func (hook *bugsnagHook) Fire(entry *logrus.Entry) error {
@@ -53,16 +49,7 @@ func (hook *bugsnagHook) Fire(entry *logrus.Entry) error {
 		notifyErr = errors.New(entry.Message)
 	}
 
-	metadata := bugsnag.MetaData{}
-	metadata["metadata"] = make(map[string]interface{})
-	for key, val := range entry.Data {
-		if key != "error" {
-			metadata["metadata"][key] = val
-		}
-	}
-
-	errWithStack := bugsnag_errors.New(notifyErr, skipStackFrames)
-	bugsnagErr := bugsnag.Notify(errWithStack, metadata)
+	bugsnagErr := bugsnag.Notify(notifyErr)
 	if bugsnagErr != nil {
 		return ErrBugsnagSendFailed{bugsnagErr}
 	}
64 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go generated vendored Normal file
@@ -0,0 +1,64 @@
|
|||
package logrus_bugsnag
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/bugsnag/bugsnag-go"
|
||||
)
|
||||
|
||||
type notice struct {
|
||||
Events []struct {
|
||||
Exceptions []struct {
|
||||
Message string `json:"message"`
|
||||
} `json:"exceptions"`
|
||||
} `json:"events"`
|
||||
}
|
||||
|
||||
func TestNoticeReceived(t *testing.T) {
|
||||
msg := make(chan string, 1)
|
||||
expectedMsg := "foo"
|
||||
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
var notice notice
|
||||
data, _ := ioutil.ReadAll(r.Body)
|
||||
if err := json.Unmarshal(data, ¬ice); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
_ = r.Body.Close()
|
||||
|
||||
msg <- notice.Events[0].Exceptions[0].Message
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
hook := &bugsnagHook{}
|
||||
|
||||
bugsnag.Configure(bugsnag.Configuration{
|
||||
Endpoint: ts.URL,
|
||||
ReleaseStage: "production",
|
||||
APIKey: "12345678901234567890123456789012",
|
||||
Synchronous: true,
|
||||
})
|
||||
|
||||
log := logrus.New()
|
||||
log.Hooks.Add(hook)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"error": errors.New(expectedMsg),
|
||||
}).Error("Bugsnag will not see this string")
|
||||
|
||||
select {
|
||||
case received := <-msg:
|
||||
if received != expectedMsg {
|
||||
t.Errorf("Unexpected message received: %s", received)
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
t.Error("Timed out; no notice received by Bugsnag API")
|
||||
}
|
||||
}
|
28 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md generated vendored Normal file
@@ -0,0 +1,28 @@
# Papertrail Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />

[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts).

In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible.

## Usage

You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`.

For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs.

```go
import (
	"log/syslog"
	"github.com/Sirupsen/logrus"
	"github.com/Sirupsen/logrus/hooks/papertrail"
)

func main() {
	log := logrus.New()
	hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME)

	if err == nil {
		log.Hooks.Add(hook)
	}
}
```
55 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go generated vendored Normal file
@@ -0,0 +1,55 @@
package logrus_papertrail

import (
	"fmt"
	"net"
	"os"
	"time"

	"github.com/Sirupsen/logrus"
)

const (
	format = "Jan 2 15:04:05"
)

// PapertrailHook to send logs to a logging service compatible with the Papertrail API.
type PapertrailHook struct {
	Host    string
	Port    int
	AppName string
	UDPConn net.Conn
}

// NewPapertrailHook creates a hook to be added to an instance of logger.
func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) {
	conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port))
	return &PapertrailHook{host, port, appName, conn}, err
}

// Fire is called when a log event is fired.
func (hook *PapertrailHook) Fire(entry *logrus.Entry) error {
	date := time.Now().Format(format)
	msg, _ := entry.String()
	payload := fmt.Sprintf("<22> %s %s: %s", date, hook.AppName, msg)

	bytesWritten, err := hook.UDPConn.Write([]byte(payload))
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err)
		return err
	}

	return nil
}

// Levels returns the available logging levels.
func (hook *PapertrailHook) Levels() []logrus.Level {
	return []logrus.Level{
		logrus.PanicLevel,
		logrus.FatalLevel,
		logrus.ErrorLevel,
		logrus.WarnLevel,
		logrus.InfoLevel,
		logrus.DebugLevel,
	}
}
26 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go generated vendored Normal file
@@ -0,0 +1,26 @@
|
|||
package logrus_papertrail
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/stvp/go-udp-testing"
|
||||
)
|
||||
|
||||
func TestWritingToUDP(t *testing.T) {
|
||||
port := 16661
|
||||
udp.SetAddr(fmt.Sprintf(":%d", port))
|
||||
|
||||
hook, err := NewPapertrailHook("localhost", port, "test")
|
||||
if err != nil {
|
||||
t.Errorf("Unable to connect to local UDP server.")
|
||||
}
|
||||
|
||||
log := logrus.New()
|
||||
log.Hooks.Add(hook)
|
||||
|
||||
udp.ShouldReceive(t, "foo", func() {
|
||||
log.Info("foo")
|
||||
})
|
||||
}
|
61 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md generated vendored Normal file
@@ -0,0 +1,61 @@
# Sentry Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />

[Sentry](https://getsentry.com) provides both self-hosted and hosted
solutions for exception tracking.
Both client and server are
[open source](https://github.com/getsentry/sentry).

## Usage

Every sentry application defined on the server gets a different
[DSN](https://www.getsentry.com/docs/). In the example below replace
`YOUR_DSN` with the one created for your application.

```go
import (
	"github.com/Sirupsen/logrus"
	"github.com/Sirupsen/logrus/hooks/sentry"
)

func main() {
	log := logrus.New()
	hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{
		logrus.PanicLevel,
		logrus.FatalLevel,
		logrus.ErrorLevel,
	})

	if err == nil {
		log.Hooks.Add(hook)
	}
}
```

## Special fields

Some logrus fields have a special meaning in this hook,
these are server_name and logger.
When logs are sent to sentry these fields are treated differently.
- server_name (also known as hostname) is the name of the server which
  is logging the event (hostname.example.com)
- logger is the part of the application which is logging the event.
  In go this usually means setting it to the name of the package.

## Timeout

`Timeout` is the time the sentry hook will wait for a response
from the sentry server.

If this time elapses with no response from
the server an error will be returned.

If `Timeout` is set to 0 the SentryHook will not wait for a reply
and will assume a correct delivery.

The SentryHook has a default timeout of `100 milliseconds` when created
with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field:

```go
hook, _ := logrus_sentry.NewSentryHook(...)
hook.Timeout = 20*time.Second
```
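
To make the special-fields behaviour described above concrete, the following hypothetical snippet (the DSN string and field values are placeholders, not part of the repository) sets both fields on one entry; with the hook installed, sentry receives them as the packet's ServerName and Logger rather than as extra data.

```go
import (
	"github.com/Sirupsen/logrus"
	"github.com/Sirupsen/logrus/hooks/sentry"
)

func main() {
	log := logrus.New()
	hook, err := logrus_sentry.NewSentryHook("YOUR_DSN", []logrus.Level{logrus.ErrorLevel})
	if err == nil {
		log.Hooks.Add(hook)
	}

	// server_name and logger are lifted out of the fields and set on the
	// sentry packet itself, as described above.
	log.WithFields(logrus.Fields{
		"server_name": "hostname.example.com",
		"logger":      "registry.storage",
	}).Error("failed to write layer")
}
```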
100 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go generated vendored Normal file
@@ -0,0 +1,100 @@
package logrus_sentry

import (
	"fmt"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/getsentry/raven-go"
)

var (
	severityMap = map[logrus.Level]raven.Severity{
		logrus.DebugLevel: raven.DEBUG,
		logrus.InfoLevel:  raven.INFO,
		logrus.WarnLevel:  raven.WARNING,
		logrus.ErrorLevel: raven.ERROR,
		logrus.FatalLevel: raven.FATAL,
		logrus.PanicLevel: raven.FATAL,
	}
)

func getAndDel(d logrus.Fields, key string) (string, bool) {
	var (
		ok  bool
		v   interface{}
		val string
	)
	if v, ok = d[key]; !ok {
		return "", false
	}

	if val, ok = v.(string); !ok {
		return "", false
	}
	delete(d, key)
	return val, true
}

// SentryHook delivers logs to a sentry server.
type SentryHook struct {
	// Timeout sets the time to wait for a delivery error from the sentry server.
	// If this is set to zero the server will not wait for any response and will
	// consider the message correctly sent
	Timeout time.Duration

	client *raven.Client
	levels []logrus.Level
}

// NewSentryHook creates a hook to be added to an instance of logger
// and initializes the raven client.
// This method sets the timeout to 100 milliseconds.
func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) {
	client, err := raven.NewClient(DSN, nil)
	if err != nil {
		return nil, err
	}
	return &SentryHook{100 * time.Millisecond, client, levels}, nil
}

// Called when an event should be sent to sentry.
// Special fields that sentry uses to give more information to the server
// are extracted from entry.Data (if they are found).
// These fields are: logger and server_name.
func (hook *SentryHook) Fire(entry *logrus.Entry) error {
	packet := &raven.Packet{
		Message:   entry.Message,
		Timestamp: raven.Timestamp(entry.Time),
		Level:     severityMap[entry.Level],
		Platform:  "go",
	}

	d := entry.Data

	if logger, ok := getAndDel(d, "logger"); ok {
		packet.Logger = logger
	}
	if serverName, ok := getAndDel(d, "server_name"); ok {
		packet.ServerName = serverName
	}
	packet.Extra = map[string]interface{}(d)

	_, errCh := hook.client.Capture(packet, nil)
	timeout := hook.Timeout
	if timeout != 0 {
		timeoutCh := time.After(timeout)
		select {
		case err := <-errCh:
			return err
		case <-timeoutCh:
			return fmt.Errorf("no response from sentry server in %s", timeout)
		}
	}
	return nil
}

// Levels returns the available logging levels.
func (hook *SentryHook) Levels() []logrus.Level {
	return hook.levels
}
97 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go generated vendored Normal file
@@ -0,0 +1,97 @@
|
|||
package logrus_sentry
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/getsentry/raven-go"
|
||||
)
|
||||
|
||||
const (
|
||||
message = "error message"
|
||||
server_name = "testserver.internal"
|
||||
logger_name = "test.logger"
|
||||
)
|
||||
|
||||
func getTestLogger() *logrus.Logger {
|
||||
l := logrus.New()
|
||||
l.Out = ioutil.Discard
|
||||
return l
|
||||
}
|
||||
|
||||
func WithTestDSN(t *testing.T, tf func(string, <-chan *raven.Packet)) {
|
||||
pch := make(chan *raven.Packet, 1)
|
||||
s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
|
||||
defer req.Body.Close()
|
||||
d := json.NewDecoder(req.Body)
|
||||
p := &raven.Packet{}
|
||||
err := d.Decode(p)
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
|
||||
pch <- p
|
||||
}))
|
||||
defer s.Close()
|
||||
|
||||
fragments := strings.SplitN(s.URL, "://", 2)
|
||||
dsn := fmt.Sprintf(
|
||||
"%s://public:secret@%s/sentry/project-id",
|
||||
fragments[0],
|
||||
fragments[1],
|
||||
)
|
||||
tf(dsn, pch)
|
||||
}
|
||||
|
||||
func TestSpecialFields(t *testing.T) {
|
||||
WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
|
||||
logger := getTestLogger()
|
||||
|
||||
hook, err := NewSentryHook(dsn, []logrus.Level{
|
||||
logrus.ErrorLevel,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
logger.Hooks.Add(hook)
|
||||
logger.WithFields(logrus.Fields{
|
||||
"server_name": server_name,
|
||||
"logger": logger_name,
|
||||
}).Error(message)
|
||||
|
||||
packet := <-pch
|
||||
if packet.Logger != logger_name {
|
||||
t.Errorf("logger should have been %s, was %s", logger_name, packet.Logger)
|
||||
}
|
||||
|
||||
if packet.ServerName != server_name {
|
||||
t.Errorf("server_name should have been %s, was %s", server_name, packet.ServerName)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestSentryHandler(t *testing.T) {
|
||||
WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
|
||||
logger := getTestLogger()
|
||||
hook, err := NewSentryHook(dsn, []logrus.Level{
|
||||
logrus.ErrorLevel,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
logger.Hooks.Add(hook)
|
||||
|
||||
logger.Error(message)
|
||||
packet := <-pch
|
||||
if packet.Message != message {
|
||||
t.Errorf("message should have been %s, was %s", message, packet.Message)
|
||||
}
|
||||
})
|
||||
}
|
20 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md generated vendored Normal file
@@ -0,0 +1,20 @@
# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>

## Usage

```go
import (
	"log/syslog"
	"github.com/Sirupsen/logrus"
	logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
)

func main() {
	log := logrus.New()
	hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")

	if err == nil {
		log.Hooks.Add(hook)
	}
}
```
59 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go generated vendored Normal file
@@ -0,0 +1,59 @@
package logrus_syslog

import (
	"fmt"
	"github.com/Sirupsen/logrus"
	"log/syslog"
	"os"
)

// SyslogHook to send logs via syslog.
type SyslogHook struct {
	Writer        *syslog.Writer
	SyslogNetwork string
	SyslogRaddr   string
}

// Creates a hook to be added to an instance of logger. This is called with
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
// `if err == nil { log.Hooks.Add(hook) }`
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
	w, err := syslog.Dial(network, raddr, priority, tag)
	return &SyslogHook{w, network, raddr}, err
}

func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
	line, err := entry.String()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
		return err
	}

	switch entry.Level {
	case logrus.PanicLevel:
		return hook.Writer.Crit(line)
	case logrus.FatalLevel:
		return hook.Writer.Crit(line)
	case logrus.ErrorLevel:
		return hook.Writer.Err(line)
	case logrus.WarnLevel:
		return hook.Writer.Warning(line)
	case logrus.InfoLevel:
		return hook.Writer.Info(line)
	case logrus.DebugLevel:
		return hook.Writer.Debug(line)
	default:
		return nil
	}
}

func (hook *SyslogHook) Levels() []logrus.Level {
	return []logrus.Level{
		logrus.PanicLevel,
		logrus.FatalLevel,
		logrus.ErrorLevel,
		logrus.WarnLevel,
		logrus.InfoLevel,
		logrus.DebugLevel,
	}
}
26 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go generated vendored Normal file
@@ -0,0 +1,26 @@
|
|||
package logrus_syslog
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"log/syslog"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLocalhostAddAndPrint(t *testing.T) {
|
||||
log := logrus.New()
|
||||
hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("Unable to connect to local syslog.")
|
||||
}
|
||||
|
||||
log.Hooks.Add(hook)
|
||||
|
||||
for _, level := range hook.Levels() {
|
||||
if len(log.Hooks[level]) != 1 {
|
||||
t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Congratulations!")
|
||||
}
|
40 Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go generated vendored Normal file
@@ -0,0 +1,40 @@
package logrus

import (
	"encoding/json"
	"fmt"
)

type JSONFormatter struct {
	// TimestampFormat sets the format used for marshaling timestamps.
	TimestampFormat string
}

func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
	data := make(Fields, len(entry.Data)+3)
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			// Otherwise errors are ignored by `encoding/json`
			// https://github.com/Sirupsen/logrus/issues/137
			data[k] = v.Error()
		default:
			data[k] = v
		}
	}
	prefixFieldClashes(data)

	if f.TimestampFormat == "" {
		f.TimestampFormat = DefaultTimestampFormat
	}

	data["time"] = entry.Time.Format(f.TimestampFormat)
	data["msg"] = entry.Message
	data["level"] = entry.Level.String()

	serialized, err := json.Marshal(data)
	if err != nil {
		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
	}
	return append(serialized, '\n'), nil
}
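
A small sketch of how the formatter above would be configured; the RFC3339 choice is just an example value for `TimestampFormat`, not something this diff prescribes.

```go
import (
	"time"

	"github.com/Sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.Formatter = &logrus.JSONFormatter{TimestampFormat: time.RFC3339}

	// Emits one JSON object per line with "time", "msg", "level" and the field below.
	log.WithField("repository", "library/ubuntu").Info("pull complete")
}
```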
120 Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go generated vendored Normal file
@@ -0,0 +1,120 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestErrorNotLost(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["error"] != "wild walrus" {
|
||||
t.Fatal("Error field not set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["omg"] != "wild walrus" {
|
||||
t.Fatal("Error field not set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldClashWithTime(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("time", "right now!"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["fields.time"] != "right now!" {
|
||||
t.Fatal("fields.time not set to original time field")
|
||||
}
|
||||
|
||||
if entry["time"] != "0001-01-01T00:00:00Z" {
|
||||
t.Fatal("time field not set to current time, was: ", entry["time"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldClashWithMsg(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("msg", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["fields.msg"] != "something" {
|
||||
t.Fatal("fields.msg not set to original msg field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldClashWithLevel(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["fields.level"] != "something" {
|
||||
t.Fatal("fields.level not set to original level field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONEntryEndsWithNewline(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
if b[len(b)-1] != '\n' {
|
||||
t.Fatal("Expected JSON log entry to end with a newline")
|
||||
}
|
||||
}
|
203 Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go generated vendored Normal file
@@ -0,0 +1,203 @@
package logrus

import (
	"io"
	"os"
	"sync"
)

type Logger struct {
	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
	// file, or leave it default which is `os.Stdout`. You can also set this to
	// something more adventurous, such as logging to Kafka.
	Out io.Writer
	// Hooks for the logger instance. These allow firing events based on logging
	// levels and log entries. For example, to send errors to an error tracking
	// service, log to StatsD or dump the core on fatal errors.
	Hooks levelHooks
	// All log entries pass through the formatter before being logged to Out. The
	// included formatters are `TextFormatter` and `JSONFormatter`, for which
	// TextFormatter is the default. In development (when a TTY is attached) it
	// logs with colors, but to a file it wouldn't. You can easily implement your
	// own that implements the `Formatter` interface, see the `README` or included
	// formatters for examples.
	Formatter Formatter
	// The logging level the logger should log at. This is typically (and defaults
	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
	// logged. `logrus.Debug` is useful in development and debugging.
	Level Level
	// Used to sync writing to the log.
	mu sync.Mutex
}
|
||||
// Creates a new logger. Configuration should be set by changing `Formatter`,
|
||||
// `Out` and `Hooks` directly on the default logger instance. You can also just
|
||||
// instantiate your own:
|
||||
//
|
||||
// var log = &Logger{
|
||||
// Out: os.Stderr,
|
||||
// Formatter: new(JSONFormatter),
|
||||
// Hooks: make(levelHooks),
|
||||
// Level: logrus.DebugLevel,
|
||||
// }
|
||||
//
|
||||
// It's recommended to make this a global instance called `log`.
|
||||
func New() *Logger {
|
||||
return &Logger{
|
||||
Out: os.Stdout,
|
||||
Formatter: new(TextFormatter),
|
||||
Hooks: make(levelHooks),
|
||||
Level: InfoLevel,
|
||||
}
|
||||
}
|
||||
|
||||
// Adds a field to the log entry. Note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic; it only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
	return NewEntry(logger).WithField(key, value)
}
|
||||
|
||||
// Adds a struct of fields to the log entry. All it does is call `WithField` for
|
||||
// each `Field`.
|
||||
func (logger *Logger) WithFields(fields Fields) *Entry {
|
||||
return NewEntry(logger).WithFields(fields)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
||||
if logger.Level >= DebugLevel {
|
||||
NewEntry(logger).Debugf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Infof(format string, args ...interface{}) {
|
||||
if logger.Level >= InfoLevel {
|
||||
NewEntry(logger).Infof(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Printf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Printf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warnf(format string, args ...interface{}) {
|
||||
if logger.Level >= WarnLevel {
|
||||
NewEntry(logger).Warnf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningf(format string, args ...interface{}) {
|
||||
if logger.Level >= WarnLevel {
|
||||
NewEntry(logger).Warnf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorf(format string, args ...interface{}) {
|
||||
if logger.Level >= ErrorLevel {
|
||||
NewEntry(logger).Errorf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
||||
if logger.Level >= FatalLevel {
|
||||
NewEntry(logger).Fatalf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
||||
if logger.Level >= PanicLevel {
|
||||
NewEntry(logger).Panicf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Debug(args ...interface{}) {
|
||||
if logger.Level >= DebugLevel {
|
||||
NewEntry(logger).Debug(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Info(args ...interface{}) {
|
||||
if logger.Level >= InfoLevel {
|
||||
NewEntry(logger).Info(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Print(args ...interface{}) {
|
||||
NewEntry(logger).Info(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warn(args ...interface{}) {
|
||||
if logger.Level >= WarnLevel {
|
||||
NewEntry(logger).Warn(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Warning(args ...interface{}) {
|
||||
if logger.Level >= WarnLevel {
|
||||
NewEntry(logger).Warn(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Error(args ...interface{}) {
|
||||
if logger.Level >= ErrorLevel {
|
||||
NewEntry(logger).Error(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatal(args ...interface{}) {
|
||||
if logger.Level >= FatalLevel {
|
||||
NewEntry(logger).Fatal(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Panic(args ...interface{}) {
|
||||
if logger.Level >= PanicLevel {
|
||||
NewEntry(logger).Panic(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugln(args ...interface{}) {
|
||||
if logger.Level >= DebugLevel {
|
||||
NewEntry(logger).Debugln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Infoln(args ...interface{}) {
|
||||
if logger.Level >= InfoLevel {
|
||||
NewEntry(logger).Infoln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Println(args ...interface{}) {
|
||||
NewEntry(logger).Println(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warnln(args ...interface{}) {
|
||||
if logger.Level >= WarnLevel {
|
||||
NewEntry(logger).Warnln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningln(args ...interface{}) {
|
||||
if logger.Level >= WarnLevel {
|
||||
NewEntry(logger).Warnln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorln(args ...interface{}) {
|
||||
if logger.Level >= ErrorLevel {
|
||||
NewEntry(logger).Errorln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalln(args ...interface{}) {
|
||||
if logger.Level >= FatalLevel {
|
||||
NewEntry(logger).Fatalln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicln(args ...interface{}) {
|
||||
if logger.Level >= PanicLevel {
|
||||
NewEntry(logger).Panicln(args...)
|
||||
}
|
||||
}
|
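
As a practical aside to the `Logger` doc comment in this file (whose struct-literal example only works inside the package once `levelHooks` is unexported by this change), external callers would typically configure the logger after `New()`. A minimal sketch, assuming the vendored API in this diff; the helper name is illustrative only.

```go
import (
	"os"

	"github.com/Sirupsen/logrus"
)

// newRegistryLogger is a hypothetical constructor showing post-New() configuration.
func newRegistryLogger() *logrus.Logger {
	log := logrus.New()
	log.Out = os.Stderr
	log.Formatter = new(logrus.JSONFormatter)
	log.Level = logrus.DebugLevel
	return log
}
```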
94 Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go generated vendored Normal file
@@ -0,0 +1,94 @@
package logrus

import (
	"fmt"
	"log"
)

// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}

// Level type
type Level uint8

// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
	switch level {
	case DebugLevel:
		return "debug"
	case InfoLevel:
		return "info"
	case WarnLevel:
		return "warning"
	case ErrorLevel:
		return "error"
	case FatalLevel:
		return "fatal"
	case PanicLevel:
		return "panic"
	}

	return "unknown"
}

// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
	switch lvl {
	case "panic":
		return PanicLevel, nil
	case "fatal":
		return FatalLevel, nil
	case "error":
		return ErrorLevel, nil
	case "warn", "warning":
		return WarnLevel, nil
	case "info":
		return InfoLevel, nil
	case "debug":
		return DebugLevel, nil
	}

	var l Level
	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}

// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
	// PanicLevel level, highest level of severity. Logs and then calls panic with the
	// message passed to Debug, Info, ...
	PanicLevel Level = iota
	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
	// logging level is set to Panic.
	FatalLevel
	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
	// Commonly used for hooks to send errors to an error tracking service.
	ErrorLevel
	// WarnLevel level. Non-critical entries that deserve eyes.
	WarnLevel
	// InfoLevel level. General operational entries about what's going on inside the
	// application.
	InfoLevel
	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
	DebugLevel
)

// Won't compile if StdLogger can't be realized by a log.Logger
var _ StdLogger = &log.Logger{}

// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
	Print(...interface{})
	Printf(string, ...interface{})
	Println(...interface{})

	Fatal(...interface{})
	Fatalf(string, ...interface{})
	Fatalln(...interface{})

	Panic(...interface{})
	Panicf(string, ...interface{})
	Panicln(...interface{})
}
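
For completeness, a short hypothetical sketch of how `ParseLevel` is typically combined with a logger's `Level` field, e.g. to honour a `--log-level` style flag; the helper and flag names are illustrative only.

```go
import (
	"github.com/Sirupsen/logrus"
)

// configureLevel applies a user-supplied level string to a logger.
func configureLevel(log *logrus.Logger, raw string) error {
	lvl, err := logrus.ParseLevel(raw) // accepts "panic", "fatal", "error", "warn", "info", "debug"
	if err != nil {
		return err
	}
	log.Level = lvl
	return nil
}
```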
301 Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go generated vendored Normal file
@@ -0,0 +1,301 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
|
||||
var buffer bytes.Buffer
|
||||
var fields Fields
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = new(JSONFormatter)
|
||||
|
||||
log(logger)
|
||||
|
||||
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assertions(fields)
|
||||
}
|
||||
|
||||
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
|
||||
var buffer bytes.Buffer
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = &TextFormatter{
|
||||
DisableColors: true,
|
||||
}
|
||||
|
||||
log(logger)
|
||||
|
||||
fields := make(map[string]string)
|
||||
for _, kv := range strings.Split(buffer.String(), " ") {
|
||||
if !strings.Contains(kv, "=") {
|
||||
continue
|
||||
}
|
||||
kvArr := strings.Split(kv, "=")
|
||||
key := strings.TrimSpace(kvArr[0])
|
||||
val := kvArr[1]
|
||||
if kvArr[1][0] == '"' {
|
||||
var err error
|
||||
val, err = strconv.Unquote(val)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
fields[key] = val
|
||||
}
|
||||
assertions(fields)
|
||||
}
|
||||
|
||||
func TestPrint(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["level"], "info")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfo(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["level"], "info")
|
||||
})
|
||||
}
|
||||
|
||||
func TestWarn(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Warn("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["level"], "warning")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln("test", "test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test test")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln("test", 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test 10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln(10, 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "10 10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln(10, 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "10 10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Info("test", 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Info("test", "test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "testtest")
|
||||
})
|
||||
}
|
||||
|
||||
func TestWithFieldsShouldAllowAssignments(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
var fields Fields
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = new(JSONFormatter)
|
||||
|
||||
localLog := logger.WithFields(Fields{
|
||||
"key1": "value1",
|
||||
})
|
||||
|
||||
localLog.WithField("key2", "value2").Info("test")
|
||||
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, "value2", fields["key2"])
|
||||
assert.Equal(t, "value1", fields["key1"])
|
||||
|
||||
buffer = bytes.Buffer{}
|
||||
fields = Fields{}
|
||||
localLog.Info("test")
|
||||
err = json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.Nil(t, err)
|
||||
|
||||
_, ok := fields["key2"]
|
||||
assert.Equal(t, false, ok)
|
||||
assert.Equal(t, "value1", fields["key1"])
|
||||
}
|
||||
|
||||
func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("msg", "hello").Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("msg", "hello").Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["fields.msg"], "hello")
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("time", "hello").Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["fields.time"], "hello")
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("level", 1).Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["level"], "info")
|
||||
assert.Equal(t, fields["fields.level"], 1)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
|
||||
LogAndAssertText(t, func(log *Logger) {
|
||||
ll := log.WithField("herp", "derp")
|
||||
ll.Info("hello")
|
||||
ll.Info("bye")
|
||||
}, func(fields map[string]string) {
|
||||
for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
|
||||
if _, ok := fields[fieldName]; ok {
|
||||
t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
|
||||
|
||||
var buffer bytes.Buffer
|
||||
var fields Fields
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = new(JSONFormatter)
|
||||
|
||||
llog := logger.WithField("context", "eating raw fish")
|
||||
|
||||
llog.Info("looks delicious")
|
||||
|
||||
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.NoError(t, err, "should have decoded first message")
|
||||
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
|
||||
assert.Equal(t, fields["msg"], "looks delicious")
|
||||
assert.Equal(t, fields["context"], "eating raw fish")
|
||||
|
||||
buffer.Reset()
|
||||
|
||||
llog.Warn("omg it is!")
|
||||
|
||||
err = json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.NoError(t, err, "should have decoded second message")
|
||||
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
|
||||
assert.Equal(t, fields["msg"], "omg it is!")
|
||||
assert.Equal(t, fields["context"], "eating raw fish")
|
||||
assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
|
||||
|
||||
}
|
||||
|
||||
func TestConvertLevelToString(t *testing.T) {
|
||||
assert.Equal(t, "debug", DebugLevel.String())
|
||||
assert.Equal(t, "info", InfoLevel.String())
|
||||
assert.Equal(t, "warning", WarnLevel.String())
|
||||
assert.Equal(t, "error", ErrorLevel.String())
|
||||
assert.Equal(t, "fatal", FatalLevel.String())
|
||||
assert.Equal(t, "panic", PanicLevel.String())
|
||||
}
|
||||
|
||||
func TestParseLevel(t *testing.T) {
|
||||
l, err := ParseLevel("panic")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, PanicLevel, l)
|
||||
|
||||
l, err = ParseLevel("fatal")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, FatalLevel, l)
|
||||
|
||||
l, err = ParseLevel("error")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, ErrorLevel, l)
|
||||
|
||||
l, err = ParseLevel("warn")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, WarnLevel, l)
|
||||
|
||||
l, err = ParseLevel("warning")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, WarnLevel, l)
|
||||
|
||||
l, err = ParseLevel("info")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, InfoLevel, l)
|
||||
|
||||
l, err = ParseLevel("debug")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, DebugLevel, l)
|
||||
|
||||
l, err = ParseLevel("invalid")
|
||||
assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
|
||||
}
|
||||
|
||||
func TestGetSetLevelRace(t *testing.T) {
|
||||
wg := sync.WaitGroup{}
|
||||
for i := 0; i < 100; i++ {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
if i%2 == 0 {
|
||||
SetLevel(InfoLevel)
|
||||
} else {
|
||||
GetLevel()
|
||||
}
|
||||
}(i)
|
||||
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
12 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go generated vendored Normal file
@@ -0,0 +1,12 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package logrus

import "syscall"

const ioctlReadTermios = syscall.TIOCGETA

type Termios syscall.Termios
20 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go generated vendored Normal file
@@ -0,0 +1,20 @@
/*
  Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
*/
package logrus

import (
	"syscall"
)

const ioctlReadTermios = syscall.TIOCGETA

type Termios struct {
	Iflag  uint32
	Oflag  uint32
	Cflag  uint32
	Lflag  uint32
	Cc     [20]uint8
	Ispeed uint32
	Ospeed uint32
}
12 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go generated vendored Normal file
@@ -0,0 +1,12 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package logrus

import "syscall"

const ioctlReadTermios = syscall.TCGETS

type Termios syscall.Termios
21 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go generated vendored Normal file
@@ -0,0 +1,21 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux darwin freebsd openbsd

package logrus

import (
	"syscall"
	"unsafe"
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
	fd := syscall.Stdout
	var termios Termios
	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
	return err == 0
}
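
The build-tagged files above exist so that `IsTerminal` compiles on each platform; below is a hedged, in-package sketch (the `chooseColors` helper is hypothetical, not part of this diff) of the kind of colour decision a formatter makes with it.

```go
package logrus

// chooseColors is an illustrative, in-package helper: colour output is only
// worth emitting when stdout is an interactive terminal.
func chooseColors(forceColors, disableColors bool) bool {
	if disableColors {
		return false
	}
	return forceColors || IsTerminal()
}
```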
7 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_openbsd.go generated vendored Normal file
@@ -0,0 +1,7 @@
package logrus

import "syscall"

const ioctlReadTermios = syscall.TIOCGETA

type Termios syscall.Termios
Some files were not shown because too many files have changed in this diff.