forked from TrueCloudLab/s3-tests

Compare commits: wip-lumino ... master
504 commits
[Commit table omitted: the capture listed the 504 commits by abbreviated SHA only; the Author and Date columns were empty.]
44 changed files with 23966 additions and 13437 deletions

.gitignore (vendored): 1 change
@@ -10,5 +10,6 @@
 /*.egg-info
 /virtualenv
+/venv
 config.yaml

README.rst: 149 changes
@@ -2,96 +2,101 @@
 S3 compatibility tests
 ========================

-This is a set of completely unofficial Amazon AWS S3 compatibility
-tests, that will hopefully be useful to people implementing software
-that exposes an S3-like API.
+This is a set of unofficial Amazon AWS S3 compatibility
+tests, that can be useful to people implementing software
+that exposes an S3-like API. The tests use the Boto2 and Boto3 libraries.

-The tests only cover the REST interface.
-
-The tests use the Boto library, so any e.g. HTTP-level differences
-that Boto papers over, the tests will not be able to discover. Raw
-HTTP tests may be added later.
-
-The tests use the Nose test framework. To get started, ensure you have
-the ``virtualenv`` software installed; e.g. on Debian/Ubuntu::
-
-    sudo apt-get install python-virtualenv
-
-and then run::
-
-    ./bootstrap
+The tests use the Tox tool. To get started, ensure you have the ``tox``
+software installed; e.g. on Debian/Ubuntu::
+
+    sudo apt-get install tox

 You will need to create a configuration file with the location of the
-service and two different credentials, something like this::
-
-    [DEFAULT]
-    ## this section is just used as default for all the "s3 *"
-    ## sections, you can place these variables also directly there
-
-    ## replace with e.g. "localhost" to run against local software
-    host = s3.amazonaws.com
-
-    ## uncomment the port to use something other than 80
-    # port = 8080
-
-    ## say "no" to disable TLS
-    is_secure = yes
-
-    [fixtures]
-    ## all the buckets created will start with this prefix;
-    ## {random} will be filled with random characters to pad
-    ## the prefix to 30 characters long, and avoid collisions
-    bucket prefix = YOURNAMEHERE-{random}-
-
-    [s3 main]
-    ## the tests assume two accounts are defined, "main" and "alt".
-
-    ## user_id is a 64-character hexstring
-    user_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
-
-    ## display name typically looks more like a unix login, "jdoe" etc
-    display_name = youruseridhere
-
-    ## replace these with your access keys
-    access_key = ABCDEFGHIJKLMNOPQRST
-    secret_key = abcdefghijklmnopqrstuvwxyzabcdefghijklmn
-
-    ## replace with key id obtained when secret is created, or delete if KMS not tested
-    kms_keyid = 01234567-89ab-cdef-0123-456789abcdef
-
-    [s3 alt]
-    ## another user account, used for ACL-related tests
-    user_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234
-    display_name = john.doe
-    ## the "alt" user needs to have email set, too
-    email = john.doe@example.com
-    access_key = NOPQRSTUVWXYZABCDEFG
-    secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm
-
-Once you have that, you can run the tests with::
-
-    S3TEST_CONF=your.conf ./virtualenv/bin/nosetests
-
-To gather a list of tests being run, use the flags::
-
-    -v --collect-only
-
-You can specify what test(s) to run::
-
-    S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests.functional.test_s3:test_bucket_list_empty
+service and two different credentials. A sample configuration file named
+``s3tests.conf.SAMPLE`` has been provided in this repo. This file can be
+used to run the s3 tests on a Ceph cluster started with vstart.
+
+Once you have that file copied and edited, you can run the tests with::
+
+    S3TEST_CONF=your.conf tox
+
+You can specify which directory of tests to run::
+
+    S3TEST_CONF=your.conf tox -- s3tests_boto3/functional
+
+You can specify which file of tests to run::
+
+    S3TEST_CONF=your.conf tox s3tests_boto3/functional/test_s3.py
+
+You can specify which test to run::
+
+    S3TEST_CONF=your.conf tox s3tests_boto3/functional/test_s3.py::test_bucket_list_empty

 Some tests have attributes set based on their current reliability and
 things like AWS not enforcing their spec stricly. You can filter tests
 based on their attributes::

-    S3TEST_CONF=aws.conf ./virtualenv/bin/nosetests -a '!fails_on_aws'
-
-TODO
-====
-
-- We should assume read-after-write consistency, and make the tests
-  actually request such a location.
-  http://aws.amazon.com/s3/faqs/#What_data_consistency_model_does_Amazon_S3_employ
-
-- Test direct HTTP downloads, like a web browser would do.
+    S3TEST_CONF=aws.conf tox -- -m 'not fails_on_aws'
+
+Most of the tests have both Boto3 and Boto2 versions. Tests written in
+Boto2 are in the ``s3tests`` directory. Tests written in Boto3 are
+located in the ``s3test_boto3`` directory.
+
+You can run only the boto3 tests with::
+
+    S3TEST_CONF=your.conf tox -- s3tests_boto3/functional
+
+========================
+STS compatibility tests
+========================
+
+This section contains some basic tests for the AssumeRole, GetSessionToken and AssumeRoleWithWebIdentity API's. The test file is located under ``s3tests_boto3/functional``.
+
+To run the STS tests, the vstart cluster should be started with the following parameter (in addition to any parameters already used with it)::
+
+    vstart.sh -o rgw_sts_key=abcdefghijklmnop -o rgw_s3_auth_use_sts=true
+
+Note that the ``rgw_sts_key`` can be set to anything that is 128 bits in length.
+After the cluster is up the following command should be executed::
+
+    radosgw-admin caps add --tenant=testx --uid="9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef" --caps="roles=*"
+
+You can run only the sts tests (all the three API's) with::
+
+    S3TEST_CONF=your.conf tox s3tests_boto3/functional/test_sts.py
+
+You can filter tests based on the attributes. There is a attribute named ``test_of_sts`` to run AssumeRole and GetSessionToken tests and ``webidentity_test`` to run the AssumeRoleWithWebIdentity tests. If you want to execute only ``test_of_sts`` tests you can apply that filter as below::
+
+    S3TEST_CONF=your.conf tox -- -m test_of_sts s3tests_boto3/functional/test_sts.py
+
+For running ``webidentity_test`` you'll need have Keycloak running.
+
+In order to run any STS test you'll need to add "iam" section to the config file. For further reference on how your config file should look check ``s3tests.conf.SAMPLE``.
+
+========================
+IAM policy tests
+========================
+
+This is a set of IAM policy tests.
+This section covers tests for user policies such as Put, Get, List, Delete, user policies with s3 actions, conflicting user policies etc
+These tests uses Boto3 libraries. Tests are written in the ``s3test_boto3`` directory.
+
+These iam policy tests uses two users with profile name "iam" and "s3 alt" as mentioned in s3tests.conf.SAMPLE.
+If Ceph cluster is started with vstart, then above two users will get created as part of vstart with same access key, secrete key etc as mentioned in s3tests.conf.SAMPLE.
+Out of those two users, "iam" user is with capabilities --caps=user-policy=* and "s3 alt" user is without capabilities.
+Adding above capabilities to "iam" user is also taken care by vstart (If Ceph cluster is started with vstart).
+
+To run these tests, create configuration file with section "iam" and "s3 alt" refer s3tests.conf.SAMPLE.
+Once you have that configuration file copied and edited, you can run all the tests with::
+
+    S3TEST_CONF=your.conf tox s3tests_boto3/functional/test_iam.py
+
+You can also specify specific test to run::
+
+    S3TEST_CONF=your.conf tox s3tests_boto3/functional/test_iam.py::test_put_user_policy
+
+Some tests have attributes set such as "fails_on_rgw".
+You can filter tests based on their attributes::
+
+    S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_iam.py -m 'not fails_on_rgw'

bootstrap: 51 changes (file deleted)
@@ -1,51 +0,0 @@
-#!/bin/sh
-set -e
-
-if [ -f /etc/debian_version ]; then
-    for package in python-pip python-virtualenv python-dev libevent-dev libxml2-dev libxslt-dev zlib1g-dev; do
-        if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then
-            # add a space after old values
-            missing="${missing:+$missing }$package"
-        fi
-    done
-    if [ -n "$missing" ]; then
-        echo "$0: missing required DEB packages. Installing via sudo." 1>&2
-        sudo apt-get -y install $missing
-    fi
-elif [ -f /etc/fedora-release ]; then
-    for package in python-pip python2-virtualenv python-devel libevent-devel libxml2-devel libxslt-devel zlib-devel; do
-        if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then
-            missing="${missing:+$missing }$package"
-        fi
-    done
-    if [ -n "$missing" ]; then
-        echo "$0: missing required RPM packages. Installing via sudo." 1>&2
-        sudo yum -y install $missing
-    fi
-elif [ -f /etc/redhat-release ]; then
-    for package in python-virtualenv python-devel libevent-devel libxml2-devel libxslt-devel zlib-devel; do
-        if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then
-            missing="${missing:+$missing }$package"
-        fi
-    done
-    if [ -n "$missing" ]; then
-        echo "$0: missing required RPM packages. Installing via sudo." 1>&2
-        sudo yum -y install $missing
-    fi
-fi
-
-virtualenv --no-site-packages --distribute virtualenv
-
-# avoid pip bugs
-./virtualenv/bin/pip install --upgrade pip
-
-# slightly old version of setuptools; newer fails w/ requests 0.14.0
-./virtualenv/bin/pip install setuptools==32.3.1
-
-./virtualenv/bin/pip install -r requirements.txt
-
-# forbid setuptools from using the network because it'll try to use
-# easy_install, and we really wanted pip; next line will fail if pip
-# requirements.txt does not match setup.py requirements -- sucky but
-# good enough for now
-./virtualenv/bin/python setup.py develop

(deleted file: 85 lines; file name not captured in this view)
@@ -1,85 +0,0 @@
-fixtures:
-  ## All the buckets created will start with this prefix;
-  ## {random} will be filled with random characters to pad
-  ## the prefix to 30 characters long, and avoid collisions
-  bucket prefix: YOURNAMEHERE-{random}-
-
-file_generation:
-  groups:
-    ## File generation works by creating N groups of files. Each group of
-    ## files is defined by three elements: number of files, avg(filesize),
-    ## and stddev(filesize) -- in that order.
-    - [1, 2, 3]
-    - [4, 5, 6]
-
-## Config for the readwrite tool.
-## The readwrite tool concurrently reads and writes to files in a
-## single bucket for a set duration.
-## Note: the readwrite tool does not need the s3.alt connection info.
-## only s3.main is used.
-readwrite:
-  ## The number of reader and writer worker threads. This sets how many
-  ## files will be read and written concurrently.
-  readers: 2
-  writers: 2
-  ## The duration to run in seconds. Doesn't count setup/warmup time
-  duration: 15
-
-  files:
-    ## The number of files to use. This number of files is created during the
-    ## "warmup" phase. After the warmup, readers will randomly pick a file to
-    ## read, and writers will randomly pick a file to overwrite
-    num: 3
-    ## The file size to use, in KB
-    size: 1024
-    ## The stddev for the file size, in KB
-    stddev: 0
-
-s3:
-  ## This section contains all the connection information
-
-  defaults:
-    ## This section contains the defaults for all of the other connections
-    ## below. You can also place these variables directly there.
-
-    ## Replace with e.g. "localhost" to run against local software
-    host: s3.amazonaws.com
-
-    ## Uncomment the port to use soemthing other than 80
-    # port: 8080
-
-    ## Say "no" to disable TLS.
-    is_secure: yes
-
-  ## The tests assume two accounts are defined, "main" and "alt". You
-  ## may add other connections to be instantianted as well, however
-  ## any additional ones will not be used unless your tests use them.
-
-  main:
-
-    ## The User ID that the S3 provider gives you. For AWS, this is
-    ## typically a 64-char hexstring.
-    user_id: AWS_USER_ID
-
-    ## Display name typically looks more like a unix login, "jdoe" etc
-    display_name: AWS_DISPLAY_NAME
-
-    ## The email for this account.
-    email: AWS_EMAIL
-
-    ## Replace these with your access keys.
-    access_key: AWS_ACCESS_KEY
-    secret_key: AWS_SECRET_KEY
-
-    ## If KMS is tested, this if barbican key id. Optional.
-    kms_keyid: barbican_key_id
-
-  alt:
-    ## Another user accout, used for ACL-related tests.
-    user_id: AWS_USER_ID
-    display_name: AWS_DISPLAY_NAME
-    email: AWS_EMAIL
-    access_key: AWS_ACCESS_KEY
-    secret_key: AWS_SECRET_KEY
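
The ``file_generation`` groups in the deleted config are documented as ``[number of files, avg(filesize), stddev(filesize)]``. As a rough sketch of that scheme (illustrative only; the function name, the size units, and the clamping at zero are assumptions, not the repo's actual generator)::

    import random

    def expand_group(count, mean, stddev):
        """Draw `count` file sizes from a normal distribution."""
        # Clamp at zero so a large stddev cannot yield a negative size.
        return [max(0.0, random.gauss(mean, stddev)) for _ in range(count)]

    # The two groups from the config above: [1, 2, 3] and [4, 5, 6].
    groups = [[1, 2, 3], [4, 5, 6]]
    sizes = [s for g in groups for s in expand_group(*g)]
    print(sizes)  # e.g. [2.8, 5.1, 6.9, 4.2, 5.5]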

pytest.ini: new file, 51 lines
@@ -0,0 +1,51 @@
+[pytest]
+markers =
+  abac_test
+  appendobject
+  auth_aws2
+  auth_aws4
+  auth_common
+  bucket_policy
+  bucket_encryption
+  checksum
+  cloud_transition
+  encryption
+  fails_on_aws
+  fails_on_dbstore
+  fails_on_dho
+  fails_on_mod_proxy_fcgi
+  fails_on_rgw
+  fails_on_s3
+  fails_with_subdomain
+  group
+  group_policy
+  iam_account
+  iam_cross_account
+  iam_role
+  iam_tenant
+  iam_user
+  lifecycle
+  lifecycle_expiration
+  lifecycle_transition
+  list_objects_v2
+  object_lock
+  role_policy
+  session_policy
+  s3select
+  s3website
+  s3website_routing_rules
+  s3website_redirect_location
+  sns
+  sse_s3
+  storage_class
+  tagging
+  test_of_sts
+  token_claims_trust_policy_test
+  token_principal_tag_role_policy_test
+  token_request_tag_trust_policy_test
+  token_resource_tags_test
+  token_role_tags_test
+  token_tag_keys_test
+  user_policy
+  versioning
+  webidentity_test
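
These registered markers are what the ``-m`` filters in the README select on. A minimal sketch of how a test is typically tagged with them (the test name and body here are illustrative, not taken from the suite)::

    import pytest

    # A marker registered in pytest.ini can be attached to a test function;
    # `tox -- -m 'not fails_on_aws'` then excludes it at collection time.
    @pytest.mark.fails_on_aws
    @pytest.mark.bucket_policy
    def test_bucket_policy_example():
        assert True  # placeholder body; real tests exercise the S3 API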

(deleted file: 569 lines; file name not captured in this view)
@@ -1,569 +0,0 @@
-#
-# FUZZ testing uses a probabalistic grammar to generate
-# pseudo-random requests which will be sent to a server
-# over long periods of time, with the goal of turning up
-# garbage-input and buffer-overflow sensitivities.
-#
-# Each state ...
-#   generates/chooses contents for variables
-#   chooses a next state (from a weighted set of options)
-#
-# A terminal state is one from which there are no successors,
-# at which point a message is generated (from the variables)
-# and sent to the server.
-#
-# The test program doesn't actually know (or care) what
-# response should be returned ... since the goal is to
-# crash the server.
-#
-start:
-  set:
-    garbage:
-      - '{random 10-3000 printable}'
-      - '{random 10-1000 binary}'
-    garbage_no_whitespace:
-      - '{random 10-3000 printable_no_whitespace}'
-      - '{random 10-1000 binary_no_whitespace}'
-    acl_header:
-      - 'private'
-      - 'public-read'
-      - 'public-read-write'
-      - 'authenticated-read'
-      - 'bucket-owner-read'
-      - 'bucket-owner-full-control'
-      - '{random 3000 letters}'
-      - '{random 100-1000 binary_no_whitespace}'
-  choices:
-    - bucket
-    - object
-
-bucket:
-  set:
-    urlpath: '/{bucket}'
-  choices:
-    - 13 bucket_get
-    - 8 bucket_put
-    - 5 bucket_delete
-    - bucket_garbage_method
-
-bucket_garbage_method:
-  set:
-    method:
-      - '{random 1-100 printable}'
-      - '{random 10-100 binary}'
-    bucket:
-      - '{bucket_readable}'
-      - '{bucket_not_readable}'
-      - '{bucket_writable}'
-      - '{bucket_not_writable}'
-      - '2 {garbage_no_whitespace}'
-  choices:
-    - bucket_get_simple
-    - bucket_get_filtered
-    - bucket_get_uploads
-    - bucket_put_create
-    - bucket_put_versioning
-    - bucket_put_simple
-
-bucket_delete:
-  set:
-    method: DELETE
-    bucket:
-      - '{bucket_writable}'
-      - '{bucket_not_writable}'
-      - '2 {garbage_no_whitespace}'
-    query:
-      - null
-      - policy
-      - website
-      - '2 {garbage_no_whitespace}'
-  choices: []
-
-bucket_get:
-  set:
-    method: GET
-    bucket:
-      - '{bucket_readable}'
-      - '{bucket_not_readable}'
-      - '2 {garbage_no_whitespace}'
-  choices:
-    - 11 bucket_get_simple
-    - bucket_get_filtered
-    - bucket_get_uploads
-
-bucket_get_simple:
-  set:
-    query:
-      - acl
-      - policy
-      - location
-      - logging
-      - notification
-      - versions
-      - requestPayment
-      - versioning
-      - website
-      - '2 {garbage_no_whitespace}'
-  choices: []
-
-bucket_get_uploads:
-  set:
-    delimiter:
-      - null
-      - '3 delimiter={garbage_no_whitespace}'
-    prefix:
-      - null
-      - '3 prefix={garbage_no_whitespace}'
-    key_marker:
-      - null
-      - 'key-marker={object_readable}'
-      - 'key-marker={object_not_readable}'
-      - 'key-marker={invalid_key}'
-      - 'key-marker={random 100-1000 printable_no_whitespace}'
-    max_uploads:
-      - null
-      - 'max-uploads={random 1-5 binary_no_whitespace}'
-      - 'max-uploads={random 1-1000 digits}'
-    upload_id_marker:
-      - null
-      - '3 upload-id-marker={random 0-1000 printable_no_whitespace}'
-    query:
-      - 'uploads'
-      - 'uploads&{delimiter}&{prefix}'
-      - 'uploads&{max_uploads}&{key_marker}&{upload_id_marker}'
-      - '2 {garbage_no_whitespace}'
-  choices: []
-
-bucket_get_filtered:
-  set:
-    delimiter:
-      - 'delimiter={garbage_no_whitespace}'
-    prefix:
-      - 'prefix={garbage_no_whitespace}'
-    marker:
-      - 'marker={object_readable}'
-      - 'marker={object_not_readable}'
-      - 'marker={invalid_key}'
-      - 'marker={random 100-1000 printable_no_whitespace}'
-    max_keys:
-      - 'max-keys={random 1-5 binary_no_whitespace}'
-      - 'max-keys={random 1-1000 digits}'
-    query:
-      - null
-      - '{delimiter}&{prefix}'
-      - '{max-keys}&{marker}'
-      - '2 {garbage_no_whitespace}'
-  choices: []
-
-bucket_put:
-  set:
-    bucket:
-      - '{bucket_writable}'
-      - '{bucket_not_writable}'
-      - '2 {garbage_no_whitespace}'
-    method: PUT
-  choices:
-    - bucket_put_simple
-    - bucket_put_create
-    - bucket_put_versioning
-
-bucket_put_create:
-  set:
-    body:
-      - '2 {garbage}'
-      - '<CreateBucketConfiguration><LocationConstraint>{random 2-10 binary}</LocationConstraint></CreateBucketConfiguration>'
-  headers:
-    - ['0-5', 'x-amz-acl', '{acl_header}']
-  choices: []
-
-bucket_put_versioning:
-  set:
-    body:
-      - '{garbage}'
-      - '4 <VersioningConfiguration>{versioning_status}{mfa_delete_body}</VersioningConfiguration>'
-    mfa_delete_body:
-      - null
-      - '<Status>{random 2-10 binary}</Status>'
-      - '<Status>{random 2000-3000 printable}</Status>'
-    versioning_status:
-      - null
-      - '<MfaDelete>{random 2-10 binary}</MfaDelete>'
-      - '<MfaDelete>{random 2000-3000 printable}</MfaDelete>'
-    mfa_header:
-      - '{random 10-1000 printable_no_whitespace} {random 10-1000 printable_no_whitespace}'
-  headers:
-    - ['0-1', 'x-amz-mfa', '{mfa_header}']
-  choices: []
-
-bucket_put_simple:
-  set:
-    body:
-      - '{acl_body}'
-      - '{policy_body}'
-      - '{logging_body}'
-      - '{notification_body}'
-      - '{request_payment_body}'
-      - '{website_body}'
-    acl_body:
-      - null
-      - '<AccessControlPolicy>{owner}{acl}</AccessControlPolicy>'
-    owner:
-      - null
-      - '7 <Owner>{id}{display_name}</Owner>'
-    id:
-      - null
-      - '<ID>{random 10-200 binary}</ID>'
-      - '<ID>{random 1000-3000 printable}</ID>'
-    display_name:
-      - null
-      - '2 <DisplayName>{random 10-200 binary}</DisplayName>'
-      - '2 <DisplayName>{random 1000-3000 printable}</DisplayName>'
-      - '2 <DisplayName>{random 10-300 letters}@{random 10-300 letters}.{random 2-4 letters}</DisplayName>'
-    acl:
-      - null
-      - '10 <AccessControlList><Grant>{grantee}{permission}</Grant></AccessControlList>'
-    grantee:
-      - null
-      - '7 <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser">{id}{display_name}</Grantee>'
-    permission:
-      - null
-      - '7 <Permission>{permission_value}</Permission>'
-    permission_value:
-      - '2 {garbage}'
-      - FULL_CONTROL
-      - WRITE
-      - WRITE_ACP
-      - READ
-      - READ_ACP
-    policy_body:
-      - null
-      - '2 {garbage}'
-    logging_body:
-      - null
-      - '<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />'
-      - '<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01"><LoggingEnabled>{bucket}{target_prefix}{target_grants}</LoggingEnabled></BucketLoggingStatus>'
-    target_prefix:
-      - null
-      - '<TargetPrefix>{random 10-1000 printable}</TargetPrefix>'
-      - '<TargetPrefix>{random 10-1000 binary}</TargetPrefix>'
-    target_grants:
-      - null
-      - '10 <TargetGrants><Grant>{grantee}{permission}</Grant></TargetGrants>'
-    notification_body:
-      - null
-      - '<NotificationConfiguration />'
-      - '2 <NotificationConfiguration><TopicConfiguration>{topic}{event}</TopicConfiguration></NotificationConfiguration>'
-    topic:
-      - null
-      - '2 <Topic>{garbage}</Topic>'
-    event:
-      - null
-      - '<Event>s3:ReducedRedundancyLostObject</Event>'
-      - '2 <Event>{garbage}</Event>'
-    request_payment_body:
-      - null
-      - '<RequestPaymentConfiguration xlmns="http://s3.amazonaws.com/doc/2006-03-01/"><Payer>{payer}</Payer></RequestPaymentConfiguration>'
-    payer:
-      - Requester
-      - BucketOwner
-      - '2 {garbage}'
-    website_body:
-      - null
-      - '<WebsiteConfiguration>{index_doc}{error_doc}{routing_rules}</WebsiteConfiguration>'
-      - '<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{index_doc}{error_doc}{routing_rules}</WebsiteConfiguration>'
-    index_doc:
-      - null
-      - '<IndexDocument>{filename}</IndexDocument>'
-      - '<IndexDocument><Suffix>{filename}</Suffix></IndexDocument>'
-    filename:
-      - null
-      - '2 {garbage}'
-      - '{random 2-10 printable}.html'
-      - '{random 100-1000 printable}.html'
-      - '{random 100-1000 printable_no_whitespace}.html'
-    error_doc:
-      - null
-      - '<ErrorDocument>{filename}</ErrorDocument>'
-      - '<ErrorDocument><Key>{filename}</Key></ErrorDocument>'
-    routing_rules:
-      - null
-      - ['0-10', '<RoutingRules>{routing_rules_content}</RoutingRules>']
-    routing_rules_content:
-      - null
-      - ['0-1000', '<RoutingRule>{routing_rule}</RoutingRule>']
-    routing_rule:
-      - null
-      - ['0-2', '{routing_rule_condition}{routing_rule_redirect}']
-    routing_rule_condition:
-      - null
-      - ['0-10', '<Condition>{KeyPrefixEquals}{HttpErrorCodeReturnedEquals}</Condition>']
-    KeyPrefixEquals:
-      - null
-      - ['0-2', '<KeyPrefixEquals>{filename}</KeyPrefixEquals>']
-    HttpErrorCodeReturnedEquals:
-      - null
-      - ['0-2', '<HttpErrorCodeReturnedEquals>{HttpErrorCode}</HttpErrorCodeReturnedEquals>']
-    HttpErrorCode:
-      - null
-      - '2 {garbage}'
-      - '{random 1-10 digits}'
-      - '{random 1-100 printable}'
-    routing_rule_redirect:
-      - null
-      - '{protocol}{hostname}{ReplaceKeyPrefixWith}{ReplaceKeyWith}{HttpRedirectCode}'
-    protocol:
-      - null
-      - '<Protocol>http</Protocol>'
-      - '<Protocol>https</Protocol>'
-      - ['1-5', '<Protocol>{garbage}</Protocol>']
-      - ['1-5', '<Protocol>{filename}</Protocol>']
-    hostname:
-      - null
-      - ['1-5', '<HostHame>{hostname_val}</HostHame>']
-      - ['1-5', '<HostHame>{garbage}</HostHame>']
-    hostname_val:
-      - null
-      - '{random 1-255 printable_no_whitespace}'
-      - '{random 1-255 printable}'
-      - '{random 1-255 punctuation}'
-      - '{random 1-255 whitespace}'
-      - '{garbage}'
-    ReplaceKeyPrefixWith:
-      - null
-      - ['1-5', '<ReplaceKeyPrefixWith>{filename}</ReplaceKeyPrefixWith>']
-    HttpRedirectCode:
-      - null
-      - ['1-5', '<HttpRedirectCode>{random 1-10 digits}</HttpRedirectCode>']
-      - ['1-5', '<HttpRedirectCode>{random 1-100 printable}</HttpRedirectCode>']
-      - ['1-5', '<HttpRedirectCode>{filename}</HttpRedirectCode>']
-
-  choices: []
-
-object:
-  set:
-    urlpath: '/{bucket}/{object}'
-
-    range_header:
-      - null
-      - 'bytes={random 1-2 digits}-{random 1-4 digits}'
-      - 'bytes={random 1-1000 binary_no_whitespace}'
-    if_modified_since_header:
-      - null
-      - '2 {garbage_no_whitespace}'
-    if_match_header:
-      - null
-      - '2 {garbage_no_whitespace}'
-    if_none_match_header:
-      - null
-      - '2 {garbage_no_whitespace}'
-  choices:
-    - object_delete
-    - object_get
-    - object_put
-    - object_head
-    - object_garbage_method
-
-object_garbage_method:
-  set:
-    method:
-      - '{random 1-100 printable}'
-      - '{random 10-100 binary}'
-    bucket:
-      - '{bucket_readable}'
-      - '{bucket_not_readable}'
-      - '{bucket_writable}'
-      - '{bucket_not_writable}'
-      - '2 {garbage_no_whitespace}'
-    object:
-      - '{object_readable}'
-      - '{object_not_readable}'
-      - '{object_writable}'
-      - '{object_not_writable}'
-      - '2 {garbage_no_whitespace}'
-  choices:
-    - object_get_query
-    - object_get_head_simple
-
-object_delete:
-  set:
-    method: DELETE
-    bucket:
-      - '5 {bucket_writable}'
-      - '{bucket_not_writable}'
-      - '{garbage_no_whitespace}'
-    object:
-      - '{object_writable}'
-      - '{object_not_writable}'
-      - '2 {garbage_no_whitespace}'
-  choices: []
-
-object_get:
-  set:
-    method: GET
-    bucket:
-      - '5 {bucket_readable}'
-      - '{bucket_not_readable}'
-      - '{garbage_no_whitespace}'
-    object:
-      - '{object_readable}'
-      - '{object_not_readable}'
-      - '{garbage_no_whitespace}'
-  choices:
-    - 5 object_get_head_simple
-    - 2 object_get_query
-
-object_get_query:
-  set:
-    query:
-      - 'torrent'
-      - 'acl'
-  choices: []
-
-object_get_head_simple:
-  set: {}
-  headers:
-    - ['0-1', 'range', '{range_header}']
-    - ['0-1', 'if-modified-since', '{if_modified_since_header}']
-    - ['0-1', 'if-unmodified-since', '{if_modified_since_header}']
-    - ['0-1', 'if-match', '{if_match_header}']
-    - ['0-1', 'if-none-match', '{if_none_match_header}']
-  choices: []
-
-object_head:
-  set:
-    method: HEAD
-    bucket:
-      - '5 {bucket_readable}'
-      - '{bucket_not_readable}'
-      - '{garbage_no_whitespace}'
-    object:
-      - '{object_readable}'
-      - '{object_not_readable}'
-      - '{garbage_no_whitespace}'
-  choices:
-    - object_get_head_simple
-
-object_put:
-  set:
-    method: PUT
-    bucket:
-      - '5 {bucket_writable}'
-      - '{bucket_not_writable}'
-      - '{garbage_no_whitespace}'
-    object:
-      - '{object_writable}'
-      - '{object_not_writable}'
-      - '{garbage_no_whitespace}'
-    cache_control:
-      - null
-      - '{garbage_no_whitespace}'
-      - 'no-cache'
-    content_disposition:
-      - null
-      - '{garbage_no_whitespace}'
-    content_encoding:
-      - null
-      - '{garbage_no_whitespace}'
-    content_length:
-      - '{random 1-20 digits}'
-      - '{garbage_no_whitespace}'
-    content_md5:
-      - null
-      - '{garbage_no_whitespace}'
-    content_type:
-      - null
-      - 'binary/octet-stream'
-      - '{garbage_no_whitespace}'
-    expect:
-      - null
-      - '100-continue'
-      - '{garbage_no_whitespace}'
-    expires:
-      - null
-      - '{random 1-10000000 digits}'
-      - '{garbage_no_whitespace}'
-    meta_key:
-      - null
-      - 'foo'
-      - '{garbage_no_whitespace}'
-    meta_value:
-      - null
-      - '{garbage_no_whitespace}'
-  choices:
-    - object_put_simple
-    - object_put_acl
-    - object_put_copy
-
-object_put_simple:
-  set: {}
-  headers:
-    - ['0-1', 'cache-control', '{cache_control}']
-    - ['0-1', 'content-disposition', '{content_disposition}']
-    - ['0-1', 'content-encoding', '{content_encoding}']
-    - ['0-1', 'content-length', '{content_length}']
-    - ['0-1', 'content-md5', '{content_md5}']
-    - ['0-1', 'content-type', '{content_type}']
-    - ['0-1', 'expect', '{expect}']
-    - ['0-1', 'expires', '{expires}']
-    - ['0-1', 'x-amz-acl', '{acl_header}']
-    - ['0-6', 'x-amz-meta-{meta_key}', '{meta_value}']
-  choices: []
-
-object_put_acl:
-  set:
-    query: 'acl'
-    body:
-      - null
-      - '2 {garbage}'
-      - '<AccessControlPolicy>{owner}{acl}</AccessControlPolicy>'
-    owner:
-      - null
-      - '7 <Owner>{id}{display_name}</Owner>'
-    id:
-      - null
-      - '<ID>{random 10-200 binary}</ID>'
-      - '<ID>{random 1000-3000 printable}</ID>'
-    display_name:
-      - null
-      - '2 <DisplayName>{random 10-200 binary}</DisplayName>'
-      - '2 <DisplayName>{random 1000-3000 printable}</DisplayName>'
-      - '2 <DisplayName>{random 10-300 letters}@{random 10-300 letters}.{random 2-4 letters}</DisplayName>'
-    acl:
-      - null
-      - '10 <AccessControlList><Grant>{grantee}{permission}</Grant></AccessControlList>'
-    grantee:
-      - null
-      - '7 <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser">{id}{display_name}</Grantee>'
-    permission:
-      - null
-      - '7 <Permission>{permission_value}</Permission>'
-    permission_value:
-      - '2 {garbage}'
-      - FULL_CONTROL
-      - WRITE
-      - WRITE_ACP
-      - READ
-      - READ_ACP
-  headers:
-    - ['0-1', 'cache-control', '{cache_control}']
-    - ['0-1', 'content-disposition', '{content_disposition}']
-    - ['0-1', 'content-encoding', '{content_encoding}']
-    - ['0-1', 'content-length', '{content_length}']
-    - ['0-1', 'content-md5', '{content_md5}']
-    - ['0-1', 'content-type', '{content_type}']
-    - ['0-1', 'expect', '{expect}']
-    - ['0-1', 'expires', '{expires}']
-    - ['0-1', 'x-amz-acl', '{acl_header}']
-  choices: []
-
-object_put_copy:
-  set: {}
-  headers:
-    - ['1-1', 'x-amz-copy-source', '{source_object}']
-    - ['0-1', 'x-amz-acl', '{acl_header}']
-    - ['0-1', 'x-amz-metadata-directive', '{metadata_directive}']
-    - ['0-1', 'x-amz-copy-source-if-match', '{if_match_header}']
-    - ['0-1', 'x-amz-copy-source-if-none-match', '{if_none_match_header}']
-    - ['0-1', 'x-amz-copy-source-if-modified-since', '{if_modified_since_header}']
-    - ['0-1', 'x-amz-copy-source-if-unmodified-since', '{if_modified_since_header}']
-  choices: []
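
The header comments of the deleted grammar describe the mechanism: each state sets variables, then picks a successor from a weighted list (a leading integer such as ``13 bucket_get`` is a weight), and a state with ``choices: []`` is terminal, at which point a request is emitted. A small sketch of that traversal (illustrative only; the function names and the toy graph are assumptions, not the repo's fuzzer)::

    import random

    def weighted_choice(choices):
        """Pick one successor; a leading integer is its weight."""
        expanded = []
        for c in choices:
            parts = str(c).split(None, 1)
            if len(parts) == 2 and parts[0].isdigit():
                expanded.extend([parts[1]] * int(parts[0]))
            else:
                expanded.append(str(c))
        return random.choice(expanded)

    def walk(graph, state='start'):
        """Follow weighted successors until a terminal state is reached."""
        path = [state]
        while graph[state]['choices']:      # 'choices: []' means terminal
            state = weighted_choice(graph[state]['choices'])
            path.append(state)
        return path

    graph = {
        'start':         {'choices': ['bucket']},
        'bucket':        {'choices': ['13 bucket_get', '8 bucket_put', '5 bucket_delete']},
        'bucket_get':    {'choices': []},
        'bucket_put':    {'choices': []},
        'bucket_delete': {'choices': []},
    }
    print(walk(graph))  # e.g. ['start', 'bucket', 'bucket_get']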

requirements.txt
@@ -1,12 +1,15 @@
 PyYAML
-nose >=1.0.0
 boto >=2.6.0
-bunch >=1.0.0
+boto3 >=1.0.0
+# botocore-1.28 broke v2 signatures, see https://tracker.ceph.com/issues/58059
+botocore <1.28.0
+munch >=2.0.0
 # 0.14 switches to libev, that means bootstrap needs to change too
 gevent >=1.0
 isodate >=0.4.4
-requests ==0.14.0
-pytz >=2011k
-ordereddict
+requests >=2.23.0
+pytz
 httplib2
 lxml
+pytest
+tox
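
The move from Boto2-only to Boto3 (with ``botocore`` pinned below 1.28 to keep v2 signatures working) is the core of this dependency change. As a hedged sketch of the kind of client the Boto3 tests construct (the endpoint, credentials, and options below simply mirror ``s3tests.conf.SAMPLE``; this is not the suite's actual connection code)::

    import boto3

    client = boto3.client(
        's3',
        endpoint_url='http://localhost:8000',  # vstart RGW host and port
        aws_access_key_id='0555b35654ad1656d804',
        aws_secret_access_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==',
        verify=False,                          # ssl_verify = False
    )
    print(client.list_buckets()['Buckets'])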

s3tests.conf.SAMPLE: new file, 171 lines
@@ -0,0 +1,171 @@
+[DEFAULT]
+## this section is just used for host, port and bucket_prefix
+
+# host set for rgw in vstart.sh
+host = localhost
+
+# port set for rgw in vstart.sh
+port = 8000
+
+## say "False" to disable TLS
+is_secure = False
+
+## say "False" to disable SSL Verify
+ssl_verify = False
+
+[fixtures]
+## all the buckets created will start with this prefix;
+## {random} will be filled with random characters to pad
+## the prefix to 30 characters long, and avoid collisions
+bucket prefix = yournamehere-{random}-
+
+# all the iam account resources (users, roles, etc) created
+# will start with this name prefix
+iam name prefix = s3-tests-
+
+# all the iam account resources (users, roles, etc) created
+# will start with this path prefix
+iam path prefix = /s3-tests/
+
+[s3 main]
+# main display_name set in vstart.sh
+display_name = M. Tester
+
+# main user_idname set in vstart.sh
+user_id = testid
+
+# main email set in vstart.sh
+email = tester@ceph.com
+
+# zonegroup api_name for bucket location
+api_name = default
+
+## main AWS access key
+access_key = 0555b35654ad1656d804
+
+## main AWS secret key
+secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
+
+## replace with key id obtained when secret is created, or delete if KMS not tested
+#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef
+
+## Storage classes
+#storage_classes = "LUKEWARM, FROZEN"
+
+## Lifecycle debug interval (default: 10)
+#lc_debug_interval = 20
+
+[s3 alt]
+# alt display_name set in vstart.sh
+display_name = john.doe
+## alt email set in vstart.sh
+email = john.doe@example.com
+
+# alt user_id set in vstart.sh
+user_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234
+
+# alt AWS access key set in vstart.sh
+access_key = NOPQRSTUVWXYZABCDEFG
+
+# alt AWS secret key set in vstart.sh
+secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm
+
+#[s3 cloud]
+## to run the testcases with "cloud_transition" attribute.
+## Note: the waiting time may have to tweaked depending on
+## the I/O latency to the cloud endpoint.
+
+## host set for cloud endpoint
+# host = localhost
+
+## port set for cloud endpoint
+# port = 8001
+
+## say "False" to disable TLS
+# is_secure = False
+
+## cloud endpoint credentials
+# access_key = 0555b35654ad1656d804
+# secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
+
+## storage class configured as cloud tier on local rgw server
+# cloud_storage_class = CLOUDTIER
+
+## Below are optional -
+
+## Above configured cloud storage class config options
+# retain_head_object = false
+# target_storage_class = Target_SC
+# target_path = cloud-bucket
+
+## another regular storage class to test multiple transition rules,
+# storage_class = S1
+
+[s3 tenant]
+# tenant display_name set in vstart.sh
+display_name = testx$tenanteduser
+
+# tenant user_id set in vstart.sh
+user_id = 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef
+
+# tenant AWS secret key set in vstart.sh
+access_key = HIJKLMNOPQRSTUVWXYZA
+
+# tenant AWS secret key set in vstart.sh
+secret_key = opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab
+
+# tenant email set in vstart.sh
+email = tenanteduser@example.com
+
+# tenant name
+tenant = testx
+
+#following section needs to be added for all sts-tests
+[iam]
+#used for iam operations in sts-tests
+#email from vstart.sh
+email = s3@example.com
+
+#user_id from vstart.sh
+user_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
+
+#access_key from vstart.sh
+access_key = ABCDEFGHIJKLMNOPQRST
+
+#secret_key vstart.sh
+secret_key = abcdefghijklmnopqrstuvwxyzabcdefghijklmn
+
+#display_name from vstart.sh
+display_name = youruseridhere
+
+# iam account root user for iam_account tests
+[iam root]
+access_key = AAAAAAAAAAAAAAAAAAaa
+secret_key = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+user_id = RGW11111111111111111
+email = account1@ceph.com
+
+# iam account root user in a different account than [iam root]
+[iam alt root]
+access_key = BBBBBBBBBBBBBBBBBBbb
+secret_key = bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+user_id = RGW22222222222222222
+email = account2@ceph.com
+
+#following section needs to be added when you want to run Assume Role With Webidentity test
+[webidentity]
+#used for assume role with web identity test in sts-tests
+#all parameters will be obtained from ceph/qa/tasks/keycloak.py
+token=<access_token>
+
+aud=<obtained after introspecting token>
+
+sub=<obtained after introspecting token>
+
+azp=<obtained after introspecting token>
+
+user_token=<access token for a user, with attribute Department=[Engineering, Marketing>]
+
+thumbprint=<obtained from x509 certificate>
+
+KC_REALM=<name of the realm>
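
As an illustration of how a config in this format is consumed (a sketch using Python's standard ``configparser``, not the suite's actual loader; the file path is a placeholder)::

    import configparser

    cfg = configparser.ConfigParser()
    cfg.read('your.conf')  # an edited copy of s3tests.conf.SAMPLE

    main = cfg['s3 main']
    print(main['access_key'], main['secret_key'])
    print(cfg['DEFAULT']['host'], cfg['DEFAULT'].getint('port'))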

(deleted file: 142 lines; file name not captured in this view)
@@ -1,142 +0,0 @@
-#!/usr/bin/python
-import sys
-import os
-import yaml
-import optparse
-
-NANOSECONDS = int(1e9)
-
-# Output stats in a format similar to siege
-# see http://www.joedog.org/index/siege-home
-OUTPUT_FORMAT = """Stats for type: [{type}]
-Transactions:            {trans:>11} hits
-Availability:            {avail:>11.2f} %
-Elapsed time:            {elapsed:>11.2f} secs
-Data transferred:        {data:>11.2f} MB
-Response time:           {resp_time:>11.2f} secs
-Transaction rate:        {trans_rate:>11.2f} trans/sec
-Throughput:              {data_rate:>11.2f} MB/sec
-Concurrency:             {conc:>11.2f}
-Successful transactions: {trans_success:>11}
-Failed transactions:     {trans_fail:>11}
-Longest transaction:     {trans_long:>11.2f}
-Shortest transaction:    {trans_short:>11.2f}
-"""
-
-def parse_options():
-    usage = "usage: %prog [options]"
-    parser = optparse.OptionParser(usage=usage)
-    parser.add_option(
-        "-f", "--file", dest="input", metavar="FILE",
-        help="Name of input YAML file. Default uses sys.stdin")
-    parser.add_option(
-        "-v", "--verbose", dest="verbose", action="store_true",
-        help="Enable verbose output")
-
-    (options, args) = parser.parse_args()
-
-    if not options.input and os.isatty(sys.stdin.fileno()):
-        parser.error("option -f required if no data is provided "
-                     "in stdin")
-
-    return (options, args)
-
-def main():
-    (options, args) = parse_options()
-
-    total = {}
-    durations = {}
-    min_time = {}
-    max_time = {}
-    errors = {}
-    success = {}
-
-    calculate_stats(options, total, durations, min_time, max_time, errors,
-                    success)
-    print_results(total, durations, min_time, max_time, errors, success)
-
-def calculate_stats(options, total, durations, min_time, max_time, errors,
-                    success):
-    print 'Calculating statistics...'
-
-    f = sys.stdin
-    if options.input:
-        f = file(options.input, 'r')
-
-    for item in yaml.safe_load_all(f):
-        type_ = item.get('type')
-        if type_ not in ('r', 'w'):
-            continue # ignore any invalid items
-
-        if 'error' in item:
-            errors[type_] = errors.get(type_, 0) + 1
-            continue # skip rest of analysis for this item
-        else:
-            success[type_] = success.get(type_, 0) + 1
-
-        # parse the item
-        data_size = item['chunks'][-1][0]
-        duration = item['duration']
-        start = item['start']
-        end = start + duration / float(NANOSECONDS)
-
-        if options.verbose:
-            print "[{type}] POSIX time: {start:>18.2f} - {end:<18.2f} " \
-                "{data:>11.2f} KB".format(
-                    type=type_,
-                    start=start,
-                    end=end,
-                    data=data_size / 1024.0, # convert to KB
-                    )
-
-        # update time boundaries
-        prev = min_time.setdefault(type_, start)
-        if start < prev:
-            min_time[type_] = start
-        prev = max_time.setdefault(type_, end)
-        if end > prev:
-            max_time[type_] = end
-
-        # save the duration
-        if type_ not in durations:
-            durations[type_] = []
-        durations[type_].append(duration)
-
-        # add to running totals
-        total[type_] = total.get(type_, 0) + data_size
-
-def print_results(total, durations, min_time, max_time, errors, success):
-    for type_ in total.keys():
-        trans_success = success.get(type_, 0)
-        trans_fail = errors.get(type_, 0)
-        trans = trans_success + trans_fail
-        avail = trans_success * 100.0 / trans
-        elapsed = max_time[type_] - min_time[type_]
-        data = total[type_] / 1024.0 / 1024.0 # convert to MB
-        resp_time = sum(durations[type_]) / float(NANOSECONDS) / \
-            len(durations[type_])
-        trans_rate = trans / elapsed
-        data_rate = data / elapsed
-        conc = trans_rate * resp_time
-        trans_long = max(durations[type_]) / float(NANOSECONDS)
-        trans_short = min(durations[type_]) / float(NANOSECONDS)
-
-        print OUTPUT_FORMAT.format(
-            type=type_,
-            trans_success=trans_success,
-            trans_fail=trans_fail,
-            trans=trans,
-            avail=avail,
-            elapsed=elapsed,
-            data=data,
-            resp_time=resp_time,
-            trans_rate=trans_rate,
-            data_rate=data_rate,
-            conc=conc,
-            trans_long=trans_long,
-            trans_short=trans_short,
-            )
-
-if __name__ == '__main__':
-    main()
|
@@ -1,5 +1,5 @@
 import boto.s3.connection
-import bunch
+import munch
 import itertools
 import os
 import random

@@ -11,8 +11,8 @@ from lxml import etree
 from doctest import Example
 from lxml.doctestcompare import LXMLOutputChecker

-s3 = bunch.Bunch()
-config = bunch.Bunch()
+s3 = munch.Munch()
+config = munch.Munch()
 prefix = ''

 bucket_counter = itertools.count(1)

@@ -51,10 +51,10 @@ def nuke_bucket(bucket):
     while deleted_cnt:
         deleted_cnt = 0
         for key in bucket.list():
-            print 'Cleaning bucket {bucket} key {key}'.format(
+            print('Cleaning bucket {bucket} key {key}'.format(
                 bucket=bucket,
                 key=key,
-                )
+                ))
             key.set_canned_acl('private')
             key.delete()
             deleted_cnt += 1

@@ -67,26 +67,26 @@ def nuke_bucket(bucket):
                 and e.body == ''):
                 e.error_code = 'AccessDenied'
             if e.error_code != 'AccessDenied':
-                print 'GOT UNWANTED ERROR', e.error_code
+                print('GOT UNWANTED ERROR', e.error_code)
                 raise
             # seems like we're not the owner of the bucket; ignore
             pass

 def nuke_prefixed_buckets():
-    for name, conn in s3.items():
-        print 'Cleaning buckets from connection {name}'.format(name=name)
+    for name, conn in list(s3.items()):
+        print('Cleaning buckets from connection {name}'.format(name=name))
         for bucket in conn.get_all_buckets():
             if bucket.name.startswith(prefix):
-                print 'Cleaning bucket {bucket}'.format(bucket=bucket)
+                print('Cleaning bucket {bucket}'.format(bucket=bucket))
                 nuke_bucket(bucket)

-    print 'Done with cleanup of test buckets.'
+    print('Done with cleanup of test buckets.')

 def read_config(fp):
-    config = bunch.Bunch()
+    config = munch.Munch()
     g = yaml.safe_load_all(fp)
     for new in g:
-        config.update(bunch.bunchify(new))
+        config.update(munch.Munchify(new))
     return config

 def connect(conf):

@@ -97,7 +97,7 @@ def connect(conf):
         access_key='aws_access_key_id',
         secret_key='aws_secret_access_key',
         )
-    kwargs = dict((mapping[k],v) for (k,v) in conf.iteritems() if k in mapping)
+    kwargs = dict((mapping[k],v) for (k,v) in conf.items() if k in mapping)
     #process calling_format argument
     calling_formats = dict(
         ordinary=boto.s3.connection.OrdinaryCallingFormat(),

@@ -105,7 +105,7 @@ def connect(conf):
         vhost=boto.s3.connection.VHostCallingFormat(),
         )
     kwargs['calling_format'] = calling_formats['ordinary']
-    if conf.has_key('calling_format'):
+    if 'calling_format' in conf:
         raw_calling_format = conf['calling_format']
         try:
             kwargs['calling_format'] = calling_formats[raw_calling_format]

@@ -146,7 +146,7 @@ def setup():
         raise RuntimeError("Empty Prefix! Aborting!")

     defaults = config.s3.defaults
-    for section in config.s3.keys():
+    for section in list(config.s3.keys()):
         if section == 'defaults':
             continue

@@ -258,9 +258,10 @@ def with_setup_kwargs(setup, teardown=None):
 #         yield _test_gen

 def trim_xml(xml_str):
-    p = etree.XMLParser(remove_blank_text=True)
+    p = etree.XMLParser(encoding="utf-8", remove_blank_text=True)
+    xml_str = bytes(xml_str, "utf-8")
     elem = etree.XML(xml_str, parser=p)
-    return etree.tostring(elem)
+    return etree.tostring(elem, encoding="unicode")

 def normalize_xml(xml, pretty_print=True):
     if xml is None:

@@ -282,7 +283,7 @@ def normalize_xml(xml, pretty_print=True):
     for parent in root.xpath('//*[./*]'): # Search for parent elements
         parent[:] = sorted(parent,key=lambda x: x.tag)

-    xmlstr = etree.tostring(root, encoding="utf-8", xml_declaration=True, pretty_print=pretty_print)
+    xmlstr = etree.tostring(root, encoding="unicode", pretty_print=pretty_print)
     # there are two different DTD URIs
     xmlstr = re.sub(r'xmlns="[^"]+"', 'xmlns="s3"', xmlstr)
     xmlstr = re.sub(r'xmlns=\'[^\']+\'', 'xmlns="s3"', xmlstr)
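The hunks above swap the unmaintained `bunch` package for its maintained fork `munch`; both give a dict subclass with attribute-style access, so existing `config.foo.bar` lookups keep working. One hedged caveat: recent munch releases document the recursive converter as lowercase `munchify`, so the `munch.Munchify(new)` spelling in `read_config` above looks like a typo carried into the port. A minimal sketch of the intended usage (assumes only that the `munch` package is installed):

    import munch

    # munchify recursively converts nested dicts into Munch objects
    config = munch.munchify({'s3': {'defaults': {'host': 'localhost', 'port': 8000}}})
    assert config.s3.defaults.host == 'localhost'    # attribute-style access
    assert config['s3']['defaults']['port'] == 8000  # plain dict access still works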
@@ -1,5 +0,0 @@
-from boto.auth_handler import AuthHandler
-
-class AnonymousAuthHandler(AuthHandler):
-    def add_auth(self, http_request, **kwargs):
-        return # Nothing to do for anonymous access!
@@ -1,21 +1,21 @@
-from __future__ import print_function
 import sys
-import ConfigParser
+import configparser
 import boto.exception
 import boto.s3.connection
-import bunch
+import munch
 import itertools
 import os
 import random
 import string
-from httplib import HTTPConnection, HTTPSConnection
-from urlparse import urlparse
+import pytest
+from http.client import HTTPConnection, HTTPSConnection
+from urllib.parse import urlparse

 from .utils import region_sync_meta

-s3 = bunch.Bunch()
-config = bunch.Bunch()
-targets = bunch.Bunch()
+s3 = munch.Munch()
+config = munch.Munch()
+targets = munch.Munch()

 # this will be assigned by setup()
 prefix = None
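The import block above is the mechanical part of a Python 2-to-3 port; the standard-library names line up one-for-one. A quick reference sketch (stdlib only, nothing assumed beyond the modules shown in the diff):

    import configparser                                        # was: import ConfigParser
    from http.client import HTTPConnection, HTTPSConnection   # was: from httplib import ...
    from urllib.parse import urlparse                         # was: from urlparse import urlparse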
@@ -69,7 +69,7 @@ def nuke_prefixed_buckets_on_conn(prefix, name, conn):
         if bucket.name.startswith(prefix):
             print('Cleaning bucket {bucket}'.format(bucket=bucket))
             success = False
-            for i in xrange(2):
+            for i in range(2):
                 try:
                     try:
                         iterator = iter(bucket.list_versions())
@@ -91,7 +91,13 @@ def nuke_prefixed_buckets_on_conn(prefix, name, conn):
                             ))
                         # key.set_canned_acl('private')
                         bucket.delete_key(key.name, version_id = key.version_id)
+                    try:
                         bucket.delete()
+                    except boto.exception.S3ResponseError as e:
+                        # if DELETE times out, the retry may see NoSuchBucket
+                        if e.error_code != 'NoSuchBucket':
+                            raise e
+                        pass
                     success = True
                 except boto.exception.S3ResponseError as e:
                     if e.error_code != 'AccessDenied':
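The added try/except makes bucket deletion idempotent across the retry loop: if the first DELETE times out server-side but actually completes, the second attempt sees NoSuchBucket, which is now treated as success. A hedged sketch of the same pattern outside the test harness (assumes boto; `delete_bucket_idempotent` is an illustrative name, not part of the suite):

    import boto.exception

    def delete_bucket_idempotent(bucket):
        try:
            bucket.delete()
        except boto.exception.S3ResponseError as e:
            # a retry after a timed-out DELETE may observe "already gone"
            if e.error_code != 'NoSuchBucket':
                raise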
@@ -110,12 +116,12 @@ def nuke_prefixed_buckets_on_conn(prefix, name, conn):
 def nuke_prefixed_buckets(prefix):
     # If no regions are specified, use the simple method
     if targets.main.master == None:
-        for name, conn in s3.items():
+        for name, conn in list(s3.items()):
             print('Deleting buckets on {name}'.format(name=name))
             nuke_prefixed_buckets_on_conn(prefix, name, conn)
     else:
         # First, delete all buckets on the master connection
-        for name, conn in s3.items():
+        for name, conn in list(s3.items()):
             if conn == targets.main.master.connection:
                 print('Deleting buckets on {name} (master)'.format(name=name))
                 nuke_prefixed_buckets_on_conn(prefix, name, conn)

@@ -125,7 +131,7 @@ def nuke_prefixed_buckets(prefix):
     print('region-sync in nuke_prefixed_buckets')

     # Now delete remaining buckets on any other connection
-    for name, conn in s3.items():
+    for name, conn in list(s3.items()):
         if conn != targets.main.master.connection:
             print('Deleting buckets on {name} (non-master)'.format(name=name))
             nuke_prefixed_buckets_on_conn(prefix, name, conn)

@@ -143,46 +149,46 @@ class TargetConfig:
         self.sync_meta_wait = 0
         try:
             self.api_name = cfg.get(section, 'api_name')
-        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+        except (configparser.NoSectionError, configparser.NoOptionError):
             pass
         try:
             self.port = cfg.getint(section, 'port')
-        except ConfigParser.NoOptionError:
+        except configparser.NoOptionError:
             pass
         try:
             self.host=cfg.get(section, 'host')
-        except ConfigParser.NoOptionError:
+        except configparser.NoOptionError:
             raise RuntimeError(
                 'host not specified for section {s}'.format(s=section)
                 )
         try:
             self.is_master=cfg.getboolean(section, 'is_master')
-        except ConfigParser.NoOptionError:
+        except configparser.NoOptionError:
             pass

         try:
             self.is_secure=cfg.getboolean(section, 'is_secure')
-        except ConfigParser.NoOptionError:
+        except configparser.NoOptionError:
             pass

         try:
             raw_calling_format = cfg.get(section, 'calling_format')
-        except ConfigParser.NoOptionError:
+        except configparser.NoOptionError:
             raw_calling_format = 'ordinary'

         try:
             self.sync_agent_addr = cfg.get(section, 'sync_agent_addr')
-        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+        except (configparser.NoSectionError, configparser.NoOptionError):
             pass

         try:
             self.sync_agent_port = cfg.getint(section, 'sync_agent_port')
-        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+        except (configparser.NoSectionError, configparser.NoOptionError):
             pass

         try:
             self.sync_meta_wait = cfg.getint(section, 'sync_meta_wait')
-        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+        except (configparser.NoSectionError, configparser.NoOptionError):
             pass

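Each optional setting above is read inside its own try/except, and the port keeps that shape for a minimal diff. For reference, Python 3's configparser get-style methods also accept a `fallback=` keyword that collapses the pattern; a stdlib-only sketch (the section and option names are illustrative):

    import configparser

    cfg = configparser.RawConfigParser()
    cfg.read_string("[region main]\nhost = localhost\n")
    # fallback= avoids the try/except dance around NoOptionError
    port = cfg.getint('region main', 'port', fallback=None)
    host = cfg.get('region main', 'host', fallback=None)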
@@ -202,7 +208,7 @@ class TargetConnection:

 class RegionsInfo:
     def __init__(self):
-        self.m = bunch.Bunch()
+        self.m = munch.Munch()
         self.master = None
         self.secondaries = []

@@ -220,21 +226,21 @@ class RegionsInfo:
         return self.m[name]
     def get(self):
         return self.m
-    def iteritems(self):
-        return self.m.iteritems()
+    def items(self):
+        return self.m.items()

 regions = RegionsInfo()


 class RegionsConn:
     def __init__(self):
-        self.m = bunch.Bunch()
+        self.m = munch.Munch()
         self.default = None
         self.master = None
         self.secondaries = []

-    def iteritems(self):
-        return self.m.iteritems()
+    def items(self):
+        return self.m.items()

     def set_default(self, conn):
         self.default = conn
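Renaming `iteritems` to `items` matches the Python 3 dict protocol: `items()` now returns a live view, which is why callers elsewhere in this diff wrap it in `list(...)` whenever they mutate the mapping mid-loop. A minimal sketch of why the snapshot matters:

    d = {'a': 1, 'b': 2}
    for k in list(d.keys()):   # list() takes a snapshot; safe to mutate d inside the loop
        if k == 'a':
            del d[k]           # iterating the live view here would raise RuntimeError
    assert d == {'b': 2}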
@@ -254,7 +260,7 @@ _multiprocess_can_split_ = True

 def setup():

-    cfg = ConfigParser.RawConfigParser()
+    cfg = configparser.RawConfigParser()
     try:
         path = os.environ['S3TEST_CONF']
     except KeyError:

@@ -262,8 +268,7 @@ def setup():
             'To run tests, point environment '
             + 'variable S3TEST_CONF to a config file.',
             )
-    with file(path) as f:
-        cfg.readfp(f)
+    cfg.read(path)

     global prefix
     global targets
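`readfp` is deprecated in Python 3 (in favor of `read_file`), and `read()` takes a path directly, so the explicit `file(path)` open is dropped. One behavioral difference worth knowing: `read()` silently skips files it cannot open and returns the list of files it actually parsed, so a stricter variant could check that return value; a sketch under that assumption:

    import configparser

    path = '/tmp/s3tests.conf'       # illustrative path
    cfg = configparser.RawConfigParser()
    parsed = cfg.read(path)          # returns the list of files actually read
    if not parsed:
        raise RuntimeError('could not read config file: %r' % (path,))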
@@ -271,19 +276,19 @@ def setup():

     try:
         template = cfg.get('fixtures', 'bucket prefix')
-    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+    except (configparser.NoSectionError, configparser.NoOptionError):
         template = 'test-{random}-'
     prefix = choose_bucket_prefix(template=template)

     try:
         slow_backend = cfg.getboolean('fixtures', 'slow backend')
-    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+    except (configparser.NoSectionError, configparser.NoOptionError):
         slow_backend = False

     # pull the default_region out, if it exists
     try:
         default_region = cfg.get('fixtures', 'default_region')
-    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+    except (configparser.NoSectionError, configparser.NoOptionError):
         default_region = None

     s3.clear()

@@ -309,7 +314,7 @@ def setup():
         if len(regions.get()) == 0:
             regions.add("default", TargetConfig(cfg, section))

-        config[name] = bunch.Bunch()
+        config[name] = munch.Munch()
         for var in [
             'user_id',
             'display_name',

@@ -319,15 +324,16 @@ def setup():
             'port',
             'is_secure',
             'kms_keyid',
+            'storage_classes',
             ]:
             try:
                 config[name][var] = cfg.get(section, var)
-            except ConfigParser.NoOptionError:
+            except configparser.NoOptionError:
                 pass

         targets[name] = RegionsConn()

-        for (k, conf) in regions.iteritems():
+        for (k, conf) in regions.items():
             conn = boto.s3.connection.S3Connection(
                 aws_access_key_id=cfg.get(section, 'access_key'),
                 aws_secret_access_key=cfg.get(section, 'secret_key'),
@@ -365,6 +371,15 @@ def teardown():
     # remove our buckets here also, to avoid littering
     nuke_prefixed_buckets(prefix=prefix)

+@pytest.fixture(scope="package")
+def configfile():
+    setup()
+    yield config
+
+@pytest.fixture(autouse=True)
+def setup_teardown(configfile):
+    yield
+    teardown()

 bucket_counter = itertools.count(1)
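The two fixtures added above are how the suite replaces nose's module-level setup/teardown: `configfile` runs the legacy `setup()` once per package and exposes the parsed config, while the autouse `setup_teardown` fixture depends on it, so every test implicitly gets a configured connection plus a post-test bucket cleanup. A reduced sketch of the same composition (the names and body stand in for the real setup/teardown):

    import pytest

    @pytest.fixture(scope="package")
    def configfile():
        cfg = {'configured': True}   # stands in for the real setup()
        yield cfg                    # runs once per package

    @pytest.fixture(autouse=True)
    def setup_teardown(configfile):
        yield                        # the test body runs here
        # per-test cleanup runs after every test in the package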
@@ -468,7 +483,7 @@ def _make_raw_request(host, port, method, path, body=None, request_headers=None,
     if request_headers is None:
         request_headers = {}

-    c = class_(host, port, strict=True, timeout=timeout)
+    c = class_(host, port=port, timeout=timeout)

     # TODO: We might have to modify this in future if we need to interact with
     # how httplib.request handles Accept-Encoding and Host.
s3tests/functional/policy.py (new file, 46 lines)
@@ -0,0 +1,46 @@
+import json
+
+class Statement(object):
+    def __init__(self, action, resource, principal = {"AWS" : "*"}, effect= "Allow", condition = None):
+        self.principal = principal
+        self.action = action
+        self.resource = resource
+        self.condition = condition
+        self.effect = effect
+
+    def to_dict(self):
+        d = { "Action" : self.action,
+            "Principal" : self.principal,
+            "Effect" : self.effect,
+            "Resource" : self.resource
+        }
+
+        if self.condition is not None:
+            d["Condition"] = self.condition
+
+        return d
+
+class Policy(object):
+    def __init__(self):
+        self.statements = []
+
+    def add_statement(self, s):
+        self.statements.append(s)
+        return self
+
+    def to_json(self):
+        policy_dict = {
+            "Version" : "2012-10-17",
+            "Statement":
+            [s.to_dict() for s in self.statements]
+        }
+
+        return json.dumps(policy_dict)
+
+def make_json_policy(action, resource, principal={"AWS": "*"}, conditions=None):
+    """
+    Helper function to make single statement policies
+    """
+    s = Statement(action, resource, principal, condition=conditions)
+    p = Policy()
+    return p.add_statement(s).to_json()
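The new helper builds single-statement bucket policies as JSON. A usage sketch of `make_json_policy` as defined above (the bucket ARN is an illustrative value):

    policy = make_json_policy(
        "s3:GetObject",
        "arn:aws:s3:::my-test-bucket/*",   # illustrative resource ARN
    )
    # -> {"Version": "2012-10-17", "Statement": [{"Action": "s3:GetObject",
    #     "Principal": {"AWS": "*"}, "Effect": "Allow",
    #     "Resource": "arn:aws:s3:::my-test-bucket/*"}]}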
(File diff suppressed because it is too large.)
@@ -1,23 +1,20 @@
-from __future__ import print_function
 import sys
-import collections
-import nose
+from collections.abc import Container
+import pytest
 import string
 import random
 from pprint import pprint
 import time
 import boto.exception
+import socket

-from urlparse import urlparse
+from urllib.parse import urlparse

-from nose.tools import eq_ as eq, ok_ as ok
-from nose.plugins.attrib import attr
-from nose.tools import timed
-from nose.plugins.skip import SkipTest
-
 from .. import common

 from . import (
+    configfile,
+    setup_teardown,
     get_new_bucket,
     get_new_bucket_name,
     s3,
@@ -42,36 +39,27 @@ ERRORDOC_TEMPLATE = '<html><h1>ErrorDoc</h1><body>{random}</body></html>'

 CAN_WEBSITE = None

+@pytest.fixture(autouse=True, scope="module")
 def check_can_test_website():
-    global CAN_WEBSITE
-    # This is a bit expensive, so we cache this
-    if CAN_WEBSITE is None:
     bucket = get_new_bucket()
     try:
         wsconf = bucket.get_website_configuration()
-        CAN_WEBSITE = True
+        return True
     except boto.exception.S3ResponseError as e:
         if e.status == 404 and e.reason == 'Not Found' and e.error_code in ['NoSuchWebsiteConfiguration', 'NoSuchKey']:
-            CAN_WEBSITE = True
+            return True
         elif e.status == 405 and e.reason == 'Method Not Allowed' and e.error_code == 'MethodNotAllowed':
-            # rgw_enable_static_website is false
-            CAN_WEBSITE = False
+            pytest.skip('rgw_enable_static_website is false')
         elif e.status == 403 and e.reason == 'SignatureDoesNotMatch' and e.error_code == 'Forbidden':
             # This is older versions that do not support the website code
-            CAN_WEBSITE = False
+            pytest.skip('static website is not implemented')
+        elif e.status == 501 and e.error_code == 'NotImplemented':
+            pytest.skip('static website is not implemented')
         else:
             raise RuntimeError("Unknown response in checking if WebsiteConf is supported", e)
     finally:
         bucket.delete()

-    if CAN_WEBSITE is True:
-        return True
-    elif CAN_WEBSITE is False:
-        raise SkipTest
-    else:
-        raise RuntimeError("Unknown cached response in checking if WebsiteConf is supported")
-
-
 def make_website_config(xml_fragment):
     """
     Take the tedious stuff out of the config
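Rewriting the cached `CAN_WEBSITE` probe as an autouse, module-scoped fixture preserves the "probe once per module" behavior without the globals: `pytest.skip()` raised inside a module-scoped fixture skips every test that requests it, which replaces both the caching flag and `raise SkipTest`. A sketch of the mechanism (`probe_feature` is a hypothetical stand-in for the website probe):

    import pytest

    @pytest.fixture(autouse=True, scope="module")
    def requires_feature():
        if not probe_feature():          # runs once; result applies to the whole module
            pytest.skip('feature not available')

    def probe_feature():
        return True                      # illustrative: the real probe hits the server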
@@ -108,7 +96,7 @@ def get_website_url(**kwargs):

 def _test_website_populate_fragment(xml_fragment, fields):
     for k in ['RoutingRules']:
-        if k in fields.keys() and len(fields[k]) > 0:
+        if k in list(fields.keys()) and len(fields[k]) > 0:
             fields[k] = '<%s>%s</%s>' % (k, fields[k], k)
     f = {
         'IndexDocument_Suffix': choose_bucket_prefix(template='index-{random}.html', max_len=32),

@@ -166,24 +154,24 @@ def _test_website_prep(bucket, xml_template, hardcoded_fields = {}, expect_fail=
     # Cleanup for our validation
     common.assert_xml_equal(config_xmlcmp, config_xmlnew)
     #print("config_xmlcmp\n", config_xmlcmp)
-    #eq (config_xmlnew, config_xmlcmp)
+    #assert config_xmlnew == config_xmlcmp
     f['WebsiteConfiguration'] = config_xmlcmp
     return f

 def __website_expected_reponse_status(res, status, reason):
-    if not isinstance(status, collections.Container):
+    if not isinstance(status, Container):
         status = set([status])
-    if not isinstance(reason, collections.Container):
+    if not isinstance(reason, Container):
         reason = set([reason])

     if status is not IGNORE_FIELD:
-        ok(res.status in status, 'HTTP code was %s should be %s' % (res.status, status))
+        assert res.status in status, 'HTTP code was %s should be %s' % (res.status, status)
     if reason is not IGNORE_FIELD:
-        ok(res.reason in reason, 'HTTP reason was was %s should be %s' % (res.reason, reason))
+        assert res.reason in reason, 'HTTP reason was was %s should be %s' % (res.reason, reason)

 def _website_expected_default_html(**kwargs):
     fields = []
-    for k in kwargs.keys():
+    for k in list(kwargs.keys()):
         # AmazonS3 seems to be inconsistent, some HTML errors include BucketName, but others do not.
         if k is 'BucketName':
             continue

@@ -191,7 +179,7 @@ def _website_expected_default_html(**kwargs):
         v = kwargs[k]
         if isinstance(v, str):
             v = [v]
-        elif not isinstance(v, collections.Container):
+        elif not isinstance(v, Container):
             v = [v]
         for v2 in v:
             s = '<li>%s: %s</li>' % (k,v2)

@@ -209,21 +197,22 @@ def _website_expected_error_response(res, bucket_name, status, reason, code, con
     errorcode = res.getheader('x-amz-error-code', None)
     if errorcode is not None:
         if code is not IGNORE_FIELD:
-            eq(errorcode, code)
+            assert errorcode == code

-    if not isinstance(content, collections.Container):
+    if not isinstance(content, Container):
         content = set([content])
     for f in content:
         if f is not IGNORE_FIELD and f is not None:
-            ok(f in body, 'HTML should contain "%s"' % (f, ))
+            f = bytes(f, 'utf-8')
+            assert f in body, 'HTML should contain "%s"' % (f, )

 def _website_expected_redirect_response(res, status, reason, new_url):
     body = res.read()
     print(body)
     __website_expected_reponse_status(res, status, reason)
     loc = res.getheader('Location', None)
-    eq(loc, new_url, 'Location header should be set "%s" != "%s"' % (loc,new_url,))
+    assert loc == new_url, 'Location header should be set "%s" != "%s"' % (loc,new_url,)
-    ok(len(body) == 0, 'Body of a redirect should be empty')
+    assert len(body) == 0, 'Body of a redirect should be empty'

 def _website_request(bucket_name, path, connect_hostname=None, method='GET', timeout=None):
     url = get_website_url(proto='http', bucket=bucket_name, path=path)
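The `collections.Container` to `collections.abc.Container` moves are load-bearing: the ABC aliases in the `collections` namespace were removed in Python 3.10, so `isinstance(x, collections.Container)` raises AttributeError there. The check itself just normalizes a scalar expectation into a set of acceptable values; a minimal sketch:

    from collections.abc import Container

    def as_container(value):
        # scalars (an int status code) become a one-element set;
        # sets, lists, and other containers pass through unchanged
        if not isinstance(value, Container):
            return set([value])
        return value

    assert as_container(200) == {200}
    assert as_container({301, 302}) == {301, 302}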
@@ -235,33 +224,23 @@ def _website_request(bucket_name, path, connect_hostname=None, method='GET', timeout=None):
     request_headers={}
     request_headers['Host'] = o.hostname
     request_headers['Accept'] = '*/*'
-    print('Request: {method} {path}\n{headers}'.format(method=method, path=path, headers=''.join(map(lambda t: t[0]+':'+t[1]+"\n", request_headers.items()))))
+    print('Request: {method} {path}\n{headers}'.format(method=method, path=path, headers=''.join([t[0]+':'+t[1]+"\n" for t in list(request_headers.items())])))
     res = _make_raw_request(connect_hostname, config.main.port, method, path, request_headers=request_headers, secure=False, timeout=timeout)
     for (k,v) in res.getheaders():
         print(k,v)
     return res

 # ---------- Non-existant buckets via the website endpoint
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='non-existant bucket via website endpoint should give NoSuchBucket, exposing security risk')
-@attr('s3website')
-@attr('fails_on_rgw')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_rgw
 def test_website_nonexistant_bucket_s3():
     bucket_name = get_new_bucket_name()
     res = _website_request(bucket_name, '')
     _website_expected_error_response(res, bucket_name, 404, 'Not Found', 'NoSuchBucket', content=_website_expected_default_html(Code='NoSuchBucket'))

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-#@attr(assertion='non-existant bucket via website endpoint should give Forbidden, keeping bucket identity secure')
-@attr(assertion='non-existant bucket via website endpoint should give NoSuchBucket')
-@attr('s3website')
-@attr('fails_on_s3')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_s3
+@pytest.mark.fails_on_dbstore
 def test_website_nonexistant_bucket_rgw():
     bucket_name = get_new_bucket_name()
     res = _website_request(bucket_name, '')
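Each stack of nose `@attr(...)` decorators collapses to registered pytest markers, and `@nose.with_setup` disappears entirely because the autouse fixtures now handle setup and teardown. Markers then drive test selection on the command line; assuming they are declared under `markers =` in pytest.ini, runs look like the comments in this sketch:

    # selection by marker (shell commands shown as comments):
    #   pytest -m 's3website'                       # only website tests
    #   pytest -m 's3website and not fails_on_rgw'  # skip known-bad-on-rgw tests
    import pytest

    @pytest.mark.s3website
    @pytest.mark.fails_on_rgw
    def test_example():
        assert True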
@@ -269,13 +248,9 @@ def test_website_nonexistant_bucket_rgw():
     _website_expected_error_response(res, bucket_name, 404, 'Not Found', 'NoSuchBucket', content=_website_expected_default_html(Code='NoSuchBucket'))

 #------------- IndexDocument only, successes
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='non-empty public buckets via s3website return page for /, where page is public')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
-@timed(10)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
+@pytest.mark.timeout(10)
 def test_website_public_bucket_list_public_index():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])

@@ -291,17 +266,14 @@ def test_website_public_bucket_list_public_index():
     res = _website_request(bucket.name, '')
     body = res.read()
     print(body)
-    eq(body, indexstring) # default content should match index.html set content
+    indexstring = bytes(indexstring, 'utf-8')
+    assert body == indexstring # default content should match index.html set content
     __website_expected_reponse_status(res, 200, 'OK')
     indexhtml.delete()
     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='non-empty private buckets via s3website return page for /, where page is private')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_private_bucket_list_public_index():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])

@@ -319,18 +291,15 @@ def test_website_private_bucket_list_public_index():
     __website_expected_reponse_status(res, 200, 'OK')
     body = res.read()
     print(body)
-    eq(body, indexstring, 'default content should match index.html set content')
+    indexstring = bytes(indexstring, 'utf-8')
+    assert body == indexstring, 'default content should match index.html set content'
     indexhtml.delete()
     bucket.delete()


 # ---------- IndexDocument only, failures
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='empty private buckets via s3website return a 403 for /')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_private_bucket_list_empty():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])

@@ -341,12 +310,8 @@ def test_website_private_bucket_list_empty():
     _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'))
     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='empty public buckets via s3website return a 404 for /')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_public_bucket_list_empty():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])

@@ -356,12 +321,8 @@ def test_website_public_bucket_list_empty():
     _website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchKey', content=_website_expected_default_html(Code='NoSuchKey'))
     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='non-empty public buckets via s3website return page for /, where page is private')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_public_bucket_list_private_index():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])

@@ -381,12 +342,8 @@ def test_website_public_bucket_list_private_index():
     indexhtml.delete()
     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='non-empty private buckets via s3website return page for /, where page is private')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_private_bucket_list_private_index():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])

@@ -407,12 +364,8 @@ def test_website_private_bucket_list_private_index():
     bucket.delete()

 # ---------- IndexDocument & ErrorDocument, failures due to errordoc assigned but missing
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='empty private buckets via s3website return a 403 for /, missing errordoc')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_private_bucket_list_empty_missingerrordoc():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])

@@ -423,12 +376,8 @@ def test_website_private_bucket_list_empty_missingerrordoc():

     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='empty public buckets via s3website return a 404 for /, missing errordoc')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_public_bucket_list_empty_missingerrordoc():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])

@@ -438,12 +387,8 @@ def test_website_public_bucket_list_empty_missingerrordoc():
     _website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchKey')
     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='non-empty public buckets via s3website return page for /, where page is private, missing errordoc')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_public_bucket_list_private_index_missingerrordoc():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])

@@ -462,12 +407,8 @@ def test_website_public_bucket_list_private_index_missingerrordoc():
     indexhtml.delete()
     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='non-empty private buckets via s3website return page for /, where page is private, missing errordoc')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_private_bucket_list_private_index_missingerrordoc():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])

@@ -487,12 +428,8 @@ def test_website_private_bucket_list_private_index_missingerrordoc():
     bucket.delete()

 # ---------- IndexDocument & ErrorDocument, failures due to errordoc assigned but not accessible
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='empty private buckets via s3website return a 403 for /, blocked errordoc')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_private_bucket_list_empty_blockederrordoc():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
@@ -509,17 +446,61 @@ def test_website_private_bucket_list_empty_blockederrordoc():
     body = res.read()
     print(body)
     _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body)
-    ok(errorstring not in body, 'error content should NOT match error.html set content')
+    errorstring = bytes(errorstring, 'utf-8')
+    assert errorstring not in body, 'error content should NOT match error.html set content'

     errorhtml.delete()
     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='empty public buckets via s3website return a 404 for /, blocked errordoc')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
+def test_website_public_bucket_list_pubilc_errordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.make_public()
+    errorhtml = bucket.new_key(f['ErrorDocument_Key'])
+    errorstring = choose_bucket_prefix(template=ERRORDOC_TEMPLATE, max_len=256)
+    errorhtml.set_contents_from_string(errorstring)
+    errorhtml.set_canned_acl('public-read')
+
+    url = get_website_url(proto='http', bucket=bucket.name, path='')
+    o = urlparse(url)
+    host = o.hostname
+    port = s3.main.port
+
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    sock.connect((host, port))
+
+    request = "GET / HTTP/1.1\r\nHost:%s.%s:%s\r\n\r\n" % (bucket.name, host, port)
+    sock.send(request.encode())
+
+    #receive header
+    resp = sock.recv(4096)
+    print(resp)
+
+    #receive body
+    resp = sock.recv(4096)
+    print('payload length=%d' % len(resp))
+    print(resp)
+
+    #check if any additional payload is left
+    resp_len = 0
+    sock.settimeout(2)
+    try:
+        resp = sock.recv(4096)
+        resp_len = len(resp)
+        print('invalid payload length=%d' % resp_len)
+        print(resp)
+    except socket.timeout:
+        print('no invalid payload')
+
+    assert resp_len == 0, 'invalid payload'
+
+    errorhtml.delete()
+    bucket.delete()
+
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_public_bucket_list_empty_blockederrordoc():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
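The new `test_website_public_bucket_list_pubilc_errordoc` (the 'pubilc' typo is in the committed test name) drops to a raw socket so it can detect whether the server sends stray bytes after the declared error body, something `http.client` would silently swallow. Two Python 3 details matter here: the request string must be encoded to bytes before `send()`, and `recv()` returns bytes. A reduced sketch of the trailing-payload check (the endpoint is illustrative):

    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(('localhost', 8000))                          # illustrative endpoint
    sock.send('GET / HTTP/1.1\r\nHost: x\r\n\r\n'.encode())    # str -> bytes
    sock.settimeout(2)
    try:
        trailing = sock.recv(4096)   # bytes; anything here after the body is a bug
    except socket.timeout:
        trailing = b''               # no stray payload arrived before the timeout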
@@ -535,17 +516,14 @@ def test_website_public_bucket_list_empty_blockederrordoc():
     body = res.read()
     print(body)
     _website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchKey', content=_website_expected_default_html(Code='NoSuchKey'), body=body)
-    ok(errorstring not in body, 'error content should match error.html set content')
+    errorstring = bytes(errorstring, 'utf-8')
+    assert errorstring not in body, 'error content should match error.html set content'

     errorhtml.delete()
     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='non-empty public buckets via s3website return page for /, where page is private, blocked errordoc')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_public_bucket_list_private_index_blockederrordoc():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])

@@ -566,18 +544,15 @@ def test_website_public_bucket_list_private_index_blockederrordoc():
     body = res.read()
     print(body)
     _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body)
-    ok(errorstring not in body, 'error content should match error.html set content')
+    errorstring = bytes(errorstring, 'utf-8')
+    assert errorstring not in body, 'error content should match error.html set content'

     indexhtml.delete()
     errorhtml.delete()
     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='non-empty private buckets via s3website return page for /, where page is private, blocked errordoc')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_private_bucket_list_private_index_blockederrordoc():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])

@@ -598,19 +573,16 @@ def test_website_private_bucket_list_private_index_blockederrordoc():
     body = res.read()
     print(body)
     _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body)
-    ok(errorstring not in body, 'error content should match error.html set content')
+    errorstring = bytes(errorstring, 'utf-8')
+    assert errorstring not in body, 'error content should match error.html set content'

     indexhtml.delete()
     errorhtml.delete()
     bucket.delete()

 # ---------- IndexDocument & ErrorDocument, failures with errordoc available
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='empty private buckets via s3website return a 403 for /, good errordoc')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_private_bucket_list_empty_gooderrordoc():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])

@@ -628,12 +600,8 @@ def test_website_private_bucket_list_empty_gooderrordoc():
     errorhtml.delete()
     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='empty public buckets via s3website return a 404 for /, good errordoc')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_public_bucket_list_empty_gooderrordoc():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])

@@ -652,12 +620,8 @@ def test_website_public_bucket_list_empty_gooderrordoc():
     errorhtml.delete()
     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='non-empty public buckets via s3website return page for /, where page is private')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_public_bucket_list_private_index_gooderrordoc():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])

@@ -681,12 +645,8 @@ def test_website_public_bucket_list_private_index_gooderrordoc():
     errorhtml.delete()
     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='non-empty private buckets via s3website return page for /, where page is private')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_private_bucket_list_private_index_gooderrordoc():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])

@@ -711,12 +671,8 @@ def test_website_private_bucket_list_private_index_gooderrordoc():
     bucket.delete()

 # ------ RedirectAll tests
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='RedirectAllRequestsTo without protocol should TODO')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_bucket_private_redirectall_base():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['RedirectAll'])

@@ -728,12 +684,8 @@ def test_website_bucket_private_redirectall_base():

     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='RedirectAllRequestsTo without protocol should TODO')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_bucket_private_redirectall_path():
     bucket = get_new_bucket()
     f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['RedirectAll'])

@@ -747,12 +699,8 @@ def test_website_bucket_private_redirectall_path():

     bucket.delete()

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list')
-@attr(assertion='RedirectAllRequestsTo without protocol should TODO')
-@attr('s3website')
-@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@pytest.mark.s3website
+@pytest.mark.fails_on_dbstore
 def test_website_bucket_private_redirectall_path_upgrade():
     bucket = get_new_bucket()
     x = string.Template(WEBSITE_CONFIGS_XMLFRAG['RedirectAll+Protocol']).safe_substitute(RedirectAllRequestsTo_Protocol='https')

@@ -768,13 +716,9 @@ def test_website_bucket_private_redirectall_path_upgrade():
     bucket.delete()

 # ------ x-amz redirect tests
-@attr(resource='bucket')
+@pytest.mark.s3website
||||||
@attr(method='get')
|
@pytest.mark.s3website_redirect_location
|
||||||
@attr(operation='list')
|
@pytest.mark.fails_on_dbstore
|
||||||
@attr(assertion='x-amz-website-redirect-location should not fire without websiteconf')
|
|
||||||
@attr('s3website')
|
|
||||||
@attr('x-amz-website-redirect-location')
|
|
||||||
@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
|
|
||||||
def test_website_xredirect_nonwebsite():
|
def test_website_xredirect_nonwebsite():
|
||||||
bucket = get_new_bucket()
|
bucket = get_new_bucket()
|
||||||
#f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['RedirectAll'])
|
#f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['RedirectAll'])
|
||||||
|
@ -786,7 +730,7 @@ def test_website_xredirect_nonwebsite():
|
||||||
headers = {'x-amz-website-redirect-location': redirect_dest}
|
headers = {'x-amz-website-redirect-location': redirect_dest}
|
||||||
k.set_contents_from_string(content, headers=headers, policy='public-read')
|
k.set_contents_from_string(content, headers=headers, policy='public-read')
|
||||||
redirect = k.get_redirect()
|
redirect = k.get_redirect()
|
||||||
eq(k.get_redirect(), redirect_dest)
|
assert k.get_redirect() == redirect_dest
|
||||||
|
|
||||||
res = _website_request(bucket.name, '/page')
|
res = _website_request(bucket.name, '/page')
|
||||||
body = res.read()
|
body = res.read()
|
||||||
|
@ -800,13 +744,9 @@ def test_website_xredirect_nonwebsite():
|
||||||
k.delete()
|
k.delete()
|
||||||
bucket.delete()
|
bucket.delete()
|
||||||
|
|
||||||
@attr(resource='bucket')
|
@pytest.mark.s3website
|
||||||
@attr(method='get')
|
@pytest.mark.s3website_redirect_location
|
||||||
@attr(operation='list')
|
@pytest.mark.fails_on_dbstore
|
||||||
@attr(assertion='x-amz-website-redirect-location should fire websiteconf, relative path, public key')
|
|
||||||
@attr('s3website')
|
|
||||||
@attr('x-amz-website-redirect-location')
|
|
||||||
@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
|
|
||||||
def test_website_xredirect_public_relative():
|
def test_website_xredirect_public_relative():
|
||||||
bucket = get_new_bucket()
|
bucket = get_new_bucket()
|
||||||
f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
|
f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
|
||||||
|
@ -818,7 +758,7 @@ def test_website_xredirect_public_relative():
|
||||||
headers = {'x-amz-website-redirect-location': redirect_dest}
|
headers = {'x-amz-website-redirect-location': redirect_dest}
|
||||||
k.set_contents_from_string(content, headers=headers, policy='public-read')
|
k.set_contents_from_string(content, headers=headers, policy='public-read')
|
||||||
redirect = k.get_redirect()
|
redirect = k.get_redirect()
|
||||||
eq(k.get_redirect(), redirect_dest)
|
assert k.get_redirect() == redirect_dest
|
||||||
|
|
||||||
res = _website_request(bucket.name, '/page')
|
res = _website_request(bucket.name, '/page')
|
||||||
#new_url = get_website_url(bucket_name=bucket.name, path=redirect_dest)
|
#new_url = get_website_url(bucket_name=bucket.name, path=redirect_dest)
|
||||||
|
@ -827,13 +767,9 @@ def test_website_xredirect_public_relative():
|
||||||
k.delete()
|
k.delete()
|
||||||
bucket.delete()
|
bucket.delete()
|
||||||
|
|
||||||
@attr(resource='bucket')
|
@pytest.mark.s3website
|
||||||
@attr(method='get')
|
@pytest.mark.s3website_redirect_location
|
||||||
@attr(operation='list')
|
@pytest.mark.fails_on_dbstore
|
||||||
@attr(assertion='x-amz-website-redirect-location should fire websiteconf, absolute, public key')
|
|
||||||
@attr('s3website')
|
|
||||||
@attr('x-amz-website-redirect-location')
|
|
||||||
@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
|
|
||||||
def test_website_xredirect_public_abs():
|
def test_website_xredirect_public_abs():
|
||||||
bucket = get_new_bucket()
|
bucket = get_new_bucket()
|
||||||
f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
|
f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
|
||||||
|
@ -845,7 +781,7 @@ def test_website_xredirect_public_abs():
|
||||||
headers = {'x-amz-website-redirect-location': redirect_dest}
|
headers = {'x-amz-website-redirect-location': redirect_dest}
|
||||||
k.set_contents_from_string(content, headers=headers, policy='public-read')
|
k.set_contents_from_string(content, headers=headers, policy='public-read')
|
||||||
redirect = k.get_redirect()
|
redirect = k.get_redirect()
|
||||||
eq(k.get_redirect(), redirect_dest)
|
assert k.get_redirect() == redirect_dest
|
||||||
|
|
||||||
res = _website_request(bucket.name, '/page')
|
res = _website_request(bucket.name, '/page')
|
||||||
new_url = get_website_url(proto='http', hostname='example.com', path='/foo')
|
new_url = get_website_url(proto='http', hostname='example.com', path='/foo')
|
||||||
|
@ -854,13 +790,9 @@ def test_website_xredirect_public_abs():
|
||||||
k.delete()
|
k.delete()
|
||||||
bucket.delete()
|
bucket.delete()
|
||||||
|
|
||||||
@attr(resource='bucket')
|
@pytest.mark.s3website
|
||||||
@attr(method='get')
|
@pytest.mark.s3website_redirect_location
|
||||||
@attr(operation='list')
|
@pytest.mark.fails_on_dbstore
|
||||||
@attr(assertion='x-amz-website-redirect-location should fire websiteconf, relative path, private key')
|
|
||||||
@attr('s3website')
|
|
||||||
@attr('x-amz-website-redirect-location')
|
|
||||||
@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
|
|
||||||
def test_website_xredirect_private_relative():
|
def test_website_xredirect_private_relative():
|
||||||
bucket = get_new_bucket()
|
bucket = get_new_bucket()
|
||||||
f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
|
f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
|
||||||
|
@ -872,7 +804,7 @@ def test_website_xredirect_private_relative():
|
||||||
headers = {'x-amz-website-redirect-location': redirect_dest}
|
headers = {'x-amz-website-redirect-location': redirect_dest}
|
||||||
k.set_contents_from_string(content, headers=headers, policy='private')
|
k.set_contents_from_string(content, headers=headers, policy='private')
|
||||||
redirect = k.get_redirect()
|
redirect = k.get_redirect()
|
||||||
eq(k.get_redirect(), redirect_dest)
|
assert k.get_redirect() == redirect_dest
|
||||||
|
|
||||||
res = _website_request(bucket.name, '/page')
|
res = _website_request(bucket.name, '/page')
|
||||||
# We get a 403 because the page is private
|
# We get a 403 because the page is private
|
||||||
|
@ -881,13 +813,9 @@ def test_website_xredirect_private_relative():
|
||||||
k.delete()
|
k.delete()
|
||||||
bucket.delete()
|
bucket.delete()
|
||||||
|
|
||||||
@attr(resource='bucket')
|
@pytest.mark.s3website
|
||||||
@attr(method='get')
|
@pytest.mark.s3website_redirect_location
|
||||||
@attr(operation='list')
|
@pytest.mark.fails_on_dbstore
|
||||||
@attr(assertion='x-amz-website-redirect-location should fire websiteconf, absolute, private key')
|
|
||||||
@attr('s3website')
|
|
||||||
@attr('x-amz-website-redirect-location')
|
|
||||||
@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
|
|
||||||
def test_website_xredirect_private_abs():
|
def test_website_xredirect_private_abs():
|
||||||
bucket = get_new_bucket()
|
bucket = get_new_bucket()
|
||||||
f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
|
f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
|
||||||
|
@ -899,7 +827,7 @@ def test_website_xredirect_private_abs():
|
||||||
headers = {'x-amz-website-redirect-location': redirect_dest}
|
headers = {'x-amz-website-redirect-location': redirect_dest}
|
||||||
k.set_contents_from_string(content, headers=headers, policy='private')
|
k.set_contents_from_string(content, headers=headers, policy='private')
|
||||||
redirect = k.get_redirect()
|
redirect = k.get_redirect()
|
||||||
eq(k.get_redirect(), redirect_dest)
|
assert k.get_redirect() == redirect_dest
|
||||||
|
|
||||||
res = _website_request(bucket.name, '/page')
|
res = _website_request(bucket.name, '/page')
|
||||||
new_url = get_website_url(proto='http', hostname='example.com', path='/foo')
|
new_url = get_website_url(proto='http', hostname='example.com', path='/foo')
|
||||||
|
@ -1011,7 +939,7 @@ ROUTING_RULES = {
|
||||||
""",
|
""",
|
||||||
}
|
}
|
||||||
|
|
||||||
for k in ROUTING_RULES.keys():
|
for k in list(ROUTING_RULES.keys()):
|
||||||
if len(ROUTING_RULES[k]) > 0:
|
if len(ROUTING_RULES[k]) > 0:
|
||||||
ROUTING_RULES[k] = "<!-- %s -->\n%s" % (k, ROUTING_RULES[k])
|
ROUTING_RULES[k] = "<!-- %s -->\n%s" % (k, ROUTING_RULES[k])
|
||||||
|
|
||||||
|
@ -1112,8 +1040,6 @@ def routing_teardown(**kwargs):
|
||||||
print('Deleting', str(o))
|
print('Deleting', str(o))
|
||||||
o.delete()
|
o.delete()
|
||||||
|
|
||||||
@common.with_setup_kwargs(setup=routing_setup, teardown=routing_teardown)
|
|
||||||
#@timed(10)
|
|
||||||
def routing_check(*args, **kwargs):
|
def routing_check(*args, **kwargs):
|
||||||
bucket = kwargs['bucket']
|
bucket = kwargs['bucket']
|
||||||
args=args[0]
|
args=args[0]
|
||||||
|
@ -1139,8 +1065,8 @@ def routing_check(*args, **kwargs):
|
||||||
if args['code'] >= 200 and args['code'] < 300:
|
if args['code'] >= 200 and args['code'] < 300:
|
||||||
#body = res.read()
|
#body = res.read()
|
||||||
#print(body)
|
#print(body)
|
||||||
#eq(body, args['content'], 'default content should match index.html set content')
|
#assert body == args['content'], 'default content should match index.html set content'
|
||||||
ok(res.getheader('Content-Length', -1) > 0)
|
assert int(res.getheader('Content-Length', -1)) > 0
|
||||||
elif args['code'] >= 300 and args['code'] < 400:
|
elif args['code'] >= 300 and args['code'] < 400:
|
||||||
_website_expected_redirect_response(res, args['code'], IGNORE_FIELD, new_url)
|
_website_expected_redirect_response(res, args['code'], IGNORE_FIELD, new_url)
|
||||||
elif args['code'] >= 400:
|
elif args['code'] >= 400:
|
||||||
|
@ -1148,9 +1074,9 @@ def routing_check(*args, **kwargs):
|
||||||
else:
|
else:
|
||||||
assert(False)
|
assert(False)
|
||||||
|
|
||||||
@attr('s3website_RoutingRules')
|
@pytest.mark.s3website_routing_rules
|
||||||
@attr('s3website')
|
@pytest.mark.s3website
|
||||||
@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
|
@pytest.mark.fails_on_dbstore
|
||||||
def test_routing_generator():
|
def test_routing_generator():
|
||||||
for t in ROUTING_RULES_TESTS:
|
for t in ROUTING_RULES_TESTS:
|
||||||
if 'xml' in t and 'RoutingRules' in t['xml'] and len(t['xml']['RoutingRules']) > 0:
|
if 'xml' in t and 'RoutingRules' in t['xml'] and len(t['xml']['RoutingRules']) > 0:
|
||||||
|
|
|
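The `@pytest.mark.s3website`, `@pytest.mark.s3website_redirect_location`, `@pytest.mark.s3website_routing_rules`, and `@pytest.mark.fails_on_dbstore` marks introduced above are only selectable without warnings if they are registered. A minimal conftest.py sketch of that registration follows; it is illustrative only, and the repository may register its markers elsewhere (for example in pytest.ini):

    # conftest.py (sketch): register the custom marks used by the website tests
    # so that `pytest -m s3website` selection works without PytestUnknownMarkWarning.
    def pytest_configure(config):
        for marker in ('s3website', 's3website_redirect_location',
                       's3website_routing_rules', 'fails_on_dbstore'):
            config.addinivalue_line('markers', marker)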
@@ -1,11 +1,9 @@
-from nose.tools import eq_ as eq
+from . import utils

-import utils
-
 def test_generate():
     FIVE_MB = 5 * 1024 * 1024
-    eq(len(''.join(utils.generate_random(0))), 0)
-    eq(len(''.join(utils.generate_random(1))), 1)
-    eq(len(''.join(utils.generate_random(FIVE_MB - 1))), FIVE_MB - 1)
-    eq(len(''.join(utils.generate_random(FIVE_MB))), FIVE_MB)
-    eq(len(''.join(utils.generate_random(FIVE_MB + 1))), FIVE_MB + 1)
+    assert len(''.join(utils.generate_random(0))) == 0
+    assert len(''.join(utils.generate_random(1))) == 1
+    assert len(''.join(utils.generate_random(FIVE_MB - 1))) == FIVE_MB - 1
+    assert len(''.join(utils.generate_random(FIVE_MB))) == FIVE_MB
+    assert len(''.join(utils.generate_random(FIVE_MB + 1))) == FIVE_MB + 1
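For context on the `eq(...)` to `assert ... == ...` rewrites in this hunk: nose's `eq_` helper reduces to a plain equality assertion, and pytest's assertion rewriting already reports both operands on failure, so the helper adds nothing. A small sketch, illustrative and not part of the diff:

    # What nose.tools.eq_ amounts to:
    def eq(a, b):
        assert a == b, '%r != %r' % (a, b)

    def test_old_style():
        eq(1 + 1, 2)            # nose style: helper builds the failure message

    def test_new_style():
        assert 1 + 1 == 2       # pytest rewrites this to show both sides itself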
@@ -3,8 +3,6 @@ import requests
 import string
 import time

-from nose.tools import eq_ as eq
-
 def assert_raises(excClass, callableObj, *args, **kwargs):
     """
     Like unittest.TestCase.assertRaises, but returns the exception.
@@ -28,11 +26,11 @@ def generate_random(size, part_size=5*1024*1024):
     chunk = 1024
     allowed = string.ascii_letters
     for x in range(0, size, part_size):
-        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in xrange(chunk)])
+        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
         s = ''
         left = size - x
         this_part_size = min(left, part_size)
-        for y in range(this_part_size / chunk):
+        for y in range(this_part_size // chunk):
             s = s + strpart
         s = s + strpart[:(this_part_size % chunk)]
         yield s
@@ -42,13 +40,22 @@ def generate_random(size, part_size=5*1024*1024):
 # syncs all the regions except for the one passed in
 def region_sync_meta(targets, region):

-    for (k, r) in targets.iteritems():
+    for (k, r) in targets.items():
         if r == region:
             continue
         conf = r.conf
         if conf.sync_agent_addr:
             ret = requests.post('http://{addr}:{port}/metadata/incremental'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port))
-            eq(ret.status_code, 200)
+            assert ret.status_code == 200
         if conf.sync_meta_wait:
             time.sleep(conf.sync_meta_wait)

+
+def get_grantee(policy, permission):
+    '''
+    Given an object/bucket policy, extract the grantee with the required permission
+    '''
+
+    for g in policy.acl.grants:
+        if g.permission == permission:
+            return g.id
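The `this_part_size / chunk` to `this_part_size // chunk` change above is needed because Python 3's `/` is true division: it returns a float, and `range()` rejects floats. A quick illustration with assumed example values, not taken from the diff:

    this_part_size, chunk = 5120, 1024
    assert this_part_size // chunk == 5          # floor division yields an int
    for y in range(this_part_size // chunk):     # range() needs an int on Python 3
        pass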
@@ -1,376 +0,0 @@
-from boto.s3.connection import S3Connection
-from boto.exception import BotoServerError
-from boto.s3.key import Key
-from httplib import BadStatusLine
-from optparse import OptionParser
-from .. import common
-
-import traceback
-import itertools
-import random
-import string
-import struct
-import yaml
-import sys
-import re
-
-
-class DecisionGraphError(Exception):
-    """ Raised when a node in a graph tries to set a header or
-        key that was previously set by another node
-    """
-    def __init__(self, value):
-        self.value = value
-
-    def __str__(self):
-        return repr(self.value)
-
-
-class RecursionError(Exception):
-    """Runaway recursion in string formatting"""
-
-    def __init__(self, msg):
-        self.msg = msg
-
-    def __str__(self):
-        return '{0.__doc__}: {0.msg!r}'.format(self)
-
-
-def assemble_decision(decision_graph, prng):
-    """ Take in a graph describing the possible decision space and a random
-        number generator and traverse the graph to build a decision
-    """
-    return descend_graph(decision_graph, 'start', prng)
-
-
-def descend_graph(decision_graph, node_name, prng):
-    """ Given a graph and a particular node in that graph, set the values in
-        the node's "set" list, pick a choice from the "choice" list, and
-        recurse. Finally, return dictionary of values
-    """
-    node = decision_graph[node_name]
-
-    try:
-        choice = make_choice(node['choices'], prng)
-        if choice == '':
-            decision = {}
-        else:
-            decision = descend_graph(decision_graph, choice, prng)
-    except IndexError:
-        decision = {}
-
-    for key, choices in node['set'].iteritems():
-        if key in decision:
-            raise DecisionGraphError("Node %s tried to set '%s', but that key was already set by a lower node!" %(node_name, key))
-        decision[key] = make_choice(choices, prng)
-
-    if 'headers' in node:
-        decision.setdefault('headers', [])
-
-        for desc in node['headers']:
-            try:
-                (repetition_range, header, value) = desc
-            except ValueError:
-                (header, value) = desc
-                repetition_range = '1'
-
-            try:
-                size_min, size_max = repetition_range.split('-', 1)
-            except ValueError:
-                size_min = size_max = repetition_range
-
-            size_min = int(size_min)
-            size_max = int(size_max)
-
-            num_reps = prng.randint(size_min, size_max)
-            if header in [h for h, v in decision['headers']]:
-                raise DecisionGraphError("Node %s tried to add header '%s', but that header already exists!" %(node_name, header))
-            for _ in xrange(num_reps):
-                decision['headers'].append([header, value])
-
-    return decision
-
-
-def make_choice(choices, prng):
-    """ Given a list of (possibly weighted) options or just a single option!,
-        choose one of the options taking weights into account and return the
-        choice
-    """
-    if isinstance(choices, str):
-        return choices
-    weighted_choices = []
-    for option in choices:
-        if option is None:
-            weighted_choices.append('')
-            continue
-        try:
-            (weight, value) = option.split(None, 1)
-            weight = int(weight)
-        except ValueError:
-            weight = 1
-            value = option
-
-        if value == 'null' or value == 'None':
-            value = ''
-
-        for _ in xrange(weight):
-            weighted_choices.append(value)
-
-    return prng.choice(weighted_choices)
-
-
-def expand_headers(decision, prng):
-    expanded_headers = {}
-    for header in decision['headers']:
-        h = expand(decision, header[0], prng)
-        v = expand(decision, header[1], prng)
-        expanded_headers[h] = v
-    return expanded_headers
-
-
-def expand(decision, value, prng):
-    c = itertools.count()
-    fmt = RepeatExpandingFormatter(prng)
-    new = fmt.vformat(value, [], decision)
-    return new
-
-
-class RepeatExpandingFormatter(string.Formatter):
-    charsets = {
-        'printable_no_whitespace': string.printable.translate(None, string.whitespace),
-        'printable': string.printable,
-        'punctuation': string.punctuation,
-        'whitespace': string.whitespace,
-        'digits': string.digits
-    }
-
-    def __init__(self, prng, _recursion=0):
-        super(RepeatExpandingFormatter, self).__init__()
-        # this class assumes it is always instantiated once per
-        # formatting; use that to detect runaway recursion
-        self.prng = prng
-        self._recursion = _recursion
-
-    def get_value(self, key, args, kwargs):
-        fields = key.split(None, 1)
-        fn = getattr(self, 'special_{name}'.format(name=fields[0]), None)
-        if fn is not None:
-            if len(fields) == 1:
-                fields.append('')
-            return fn(fields[1])
-
-        val = super(RepeatExpandingFormatter, self).get_value(key, args, kwargs)
-        if self._recursion > 5:
-            raise RecursionError(key)
-        fmt = self.__class__(self.prng, _recursion=self._recursion+1)
-
-        n = fmt.vformat(val, args, kwargs)
-        return n
-
-    def special_random(self, args):
-        arg_list = args.split()
-        try:
-            size_min, size_max = arg_list[0].split('-', 1)
-        except ValueError:
-            size_min = size_max = arg_list[0]
-        except IndexError:
-            size_min = '0'
-            size_max = '1000'
-
-        size_min = int(size_min)
-        size_max = int(size_max)
-        length = self.prng.randint(size_min, size_max)
-
-        try:
-            charset_arg = arg_list[1]
-        except IndexError:
-            charset_arg = 'printable'
-
-        if charset_arg == 'binary' or charset_arg == 'binary_no_whitespace':
-            num_bytes = length + 8
-            tmplist = [self.prng.getrandbits(64) for _ in xrange(num_bytes / 8)]
-            tmpstring = struct.pack((num_bytes / 8) * 'Q', *tmplist)
-            if charset_arg == 'binary_no_whitespace':
-                tmpstring = ''.join(c for c in tmpstring if c not in string.whitespace)
-            return tmpstring[0:length]
-        else:
-            charset = self.charsets[charset_arg]
-            return ''.join([self.prng.choice(charset) for _ in xrange(length)]) # Won't scale nicely
-
-
-def parse_options():
-    parser = OptionParser()
-    parser.add_option('-O', '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE')
-    parser.add_option('--seed', dest='seed', type='int', help='initial seed for the random number generator')
-    parser.add_option('--seed-file', dest='seedfile', help='read seeds for specific requests from FILE', metavar='FILE')
-    parser.add_option('-n', dest='num_requests', type='int', help='issue NUM requests before stopping', metavar='NUM')
-    parser.add_option('-v', '--verbose', dest='verbose', action="store_true", help='turn on verbose output')
-    parser.add_option('-d', '--debug', dest='debug', action="store_true", help='turn on debugging (very verbose) output')
-    parser.add_option('--decision-graph', dest='graph_filename', help='file in which to find the request decision graph')
-    parser.add_option('--no-cleanup', dest='cleanup', action="store_false", help='turn off teardown so you can peruse the state of buckets after testing')
-
-    parser.set_defaults(num_requests=5)
-    parser.set_defaults(cleanup=True)
-    parser.set_defaults(graph_filename='request_decision_graph.yml')
-    return parser.parse_args()
-
-
-def randomlist(seed=None):
-    """ Returns an infinite generator of random numbers
-    """
-    rng = random.Random(seed)
-    while True:
-        yield rng.randint(0,100000) #100,000 seeds is enough, right?
-
-
-def populate_buckets(conn, alt):
-    """ Creates buckets and keys for fuzz testing and sets appropriate
-        permissions. Returns a dictionary of the bucket and key names.
-    """
-    breadable = common.get_new_bucket(alt)
-    bwritable = common.get_new_bucket(alt)
-    bnonreadable = common.get_new_bucket(alt)
-
-    oreadable = Key(breadable)
-    owritable = Key(bwritable)
-    ononreadable = Key(breadable)
-    oreadable.set_contents_from_string('oreadable body')
-    owritable.set_contents_from_string('owritable body')
-    ononreadable.set_contents_from_string('ononreadable body')
-
-    breadable.set_acl('public-read')
-    bwritable.set_acl('public-read-write')
-    bnonreadable.set_acl('private')
-    oreadable.set_acl('public-read')
-    owritable.set_acl('public-read-write')
-    ononreadable.set_acl('private')
-
-    return dict(
-        bucket_readable=breadable.name,
-        bucket_writable=bwritable.name,
-        bucket_not_readable=bnonreadable.name,
-        bucket_not_writable=breadable.name,
-        object_readable=oreadable.key,
-        object_writable=owritable.key,
-        object_not_readable=ononreadable.key,
-        object_not_writable=oreadable.key,
-    )
-
-
-def _main():
-    """ The main script
-    """
-    (options, args) = parse_options()
-    random.seed(options.seed if options.seed else None)
-    s3_connection = common.s3.main
-    alt_connection = common.s3.alt
-
-    if options.outfile:
-        OUT = open(options.outfile, 'w')
-    else:
-        OUT = sys.stderr
-
-    VERBOSE = DEBUG = open('/dev/null', 'w')
-    if options.verbose:
-        VERBOSE = OUT
-    if options.debug:
-        DEBUG = OUT
-        VERBOSE = OUT
-
-    request_seeds = None
-    if options.seedfile:
-        FH = open(options.seedfile, 'r')
-        request_seeds = [int(line) for line in FH if line != '\n']
-        print>>OUT, 'Seedfile: %s' %options.seedfile
-        print>>OUT, 'Number of requests: %d' %len(request_seeds)
-    else:
-        if options.seed:
-            print>>OUT, 'Initial Seed: %d' %options.seed
-        print>>OUT, 'Number of requests: %d' %options.num_requests
-        random_list = randomlist(options.seed)
-        request_seeds = itertools.islice(random_list, options.num_requests)
-
-    print>>OUT, 'Decision Graph: %s' %options.graph_filename
-
-    graph_file = open(options.graph_filename, 'r')
-    decision_graph = yaml.safe_load(graph_file)
-
-    constants = populate_buckets(s3_connection, alt_connection)
-    print>>VERBOSE, "Test Buckets/Objects:"
-    for key, value in constants.iteritems():
-        print>>VERBOSE, "\t%s: %s" %(key, value)
-
-    print>>OUT, "Begin Fuzzing..."
-    print>>VERBOSE, '='*80
-    for request_seed in request_seeds:
-        print>>VERBOSE, 'Seed is: %r' %request_seed
-        prng = random.Random(request_seed)
-        decision = assemble_decision(decision_graph, prng)
-        decision.update(constants)
-
-        method = expand(decision, decision['method'], prng)
-        path = expand(decision, decision['urlpath'], prng)
-
-        try:
-            body = expand(decision, decision['body'], prng)
-        except KeyError:
-            body = ''
-
-        try:
-            headers = expand_headers(decision, prng)
-        except KeyError:
-            headers = {}
-
-        print>>VERBOSE, "%r %r" %(method[:100], path[:100])
-        for h, v in headers.iteritems():
-            print>>VERBOSE, "%r: %r" %(h[:50], v[:50])
-        print>>VERBOSE, "%r\n" % body[:100]
-
-        print>>DEBUG, 'FULL REQUEST'
-        print>>DEBUG, 'Method: %r' %method
-        print>>DEBUG, 'Path: %r' %path
-        print>>DEBUG, 'Headers:'
-        for h, v in headers.iteritems():
-            print>>DEBUG, "\t%r: %r" %(h, v)
-        print>>DEBUG, 'Body: %r\n' %body
-
-        failed = False # Let's be optimistic, shall we?
-        try:
-            response = s3_connection.make_request(method, path, data=body, headers=headers, override_num_retries=1)
-            body = response.read()
-        except BotoServerError, e:
-            response = e
-            body = e.body
-            failed = True
-        except BadStatusLine, e:
-            print>>OUT, 'FAILED: failed to parse response (BadStatusLine); probably a NUL byte in your request?'
-            print>>VERBOSE, '='*80
-            continue
-
-        if failed:
-            print>>OUT, 'FAILED:'
-            OLD_VERBOSE = VERBOSE
-            OLD_DEBUG = DEBUG
-            VERBOSE = DEBUG = OUT
-        print>>VERBOSE, 'Seed was: %r' %request_seed
-        print>>VERBOSE, 'Response status code: %d %s' %(response.status, response.reason)
-        print>>DEBUG, 'Body:\n%s' %body
-        print>>VERBOSE, '='*80
-        if failed:
-            VERBOSE = OLD_VERBOSE
-            DEBUG = OLD_DEBUG
-
-    print>>OUT, '...done fuzzing'
-
-    if options.cleanup:
-        common.teardown()
-
-
-def main():
-    common.setup()
-    try:
-        _main()
-    except Exception as e:
-        traceback.print_exc()
-    common.teardown()
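The deleted fuzzer's `make_choice()` implements weighted selection by repeating each value `weight` times before a uniform pick. A Python 3 sketch of the same idea follows; the function name is illustrative, not from the file:

    import random

    def weighted_choice(options, prng):
        # '2 bar' means value 'bar' with weight 2; a bare string has weight 1.
        expanded = []
        for option in options:
            try:
                weight, value = option.split(None, 1)
                weight = int(weight)
            except ValueError:
                weight, value = 1, option
            expanded.extend([value] * weight)
        return prng.choice(expanded)

    prng = random.Random(1)
    print(weighted_choice(['foo', '2 bar', '1 baz'], prng))  # 'bar' roughly half the time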
@@ -1,403 +0,0 @@
-"""
-Unit-test suite for the S3 fuzzer
-
-The fuzzer is a grammar-based random S3 operation generator
-that produces random operation sequences in an effort to
-crash the server. This unit-test suite does not test
-S3 servers, but rather the fuzzer infrastructure.
-
-It works by running the fuzzer off of a simple grammar,
-and checking the producted requests to ensure that they
-include the expected sorts of operations in the expected
-proportions.
-"""
-import sys
-import itertools
-import nose
-import random
-import string
-import yaml
-
-from ..headers import *
-
-from nose.tools import eq_ as eq
-from nose.tools import assert_true
-from nose.plugins.attrib import attr
-
-from ...functional.utils import assert_raises
-
-_decision_graph = {}
-
-def check_access_denied(fn, *args, **kwargs):
-    e = assert_raises(boto.exception.S3ResponseError, fn, *args, **kwargs)
-    eq(e.status, 403)
-    eq(e.reason, 'Forbidden')
-    eq(e.error_code, 'AccessDenied')
-
-
-def build_graph():
-    graph = {}
-    graph['start'] = {
-        'set': {},
-        'choices': ['node2']
-    }
-    graph['leaf'] = {
-        'set': {
-            'key1': 'value1',
-            'key2': 'value2'
-        },
-        'headers': [
-            ['1-2', 'random-header-{random 5-10 printable}', '{random 20-30 punctuation}']
-        ],
-        'choices': []
-    }
-    graph['node1'] = {
-        'set': {
-            'key3': 'value3',
-            'header_val': [
-                '3 h1',
-                '2 h2',
-                'h3'
-            ]
-        },
-        'headers': [
-            ['1-1', 'my-header', '{header_val}'],
-        ],
-        'choices': ['leaf']
-    }
-    graph['node2'] = {
-        'set': {
-            'randkey': 'value-{random 10-15 printable}',
-            'path': '/{bucket_readable}',
-            'indirect_key1': '{key1}'
-        },
-        'choices': ['leaf']
-    }
-    graph['bad_node'] = {
-        'set': {
-            'key1': 'value1'
-        },
-        'choices': ['leaf']
-    }
-    graph['nonexistant_child_node'] = {
-        'set': {},
-        'choices': ['leafy_greens']
-    }
-    graph['weighted_node'] = {
-        'set': {
-            'k1': [
-                'foo',
-                '2 bar',
-                '1 baz'
-            ]
-        },
-        'choices': [
-            'foo',
-            '2 bar',
-            '1 baz'
-        ]
-    }
-    graph['null_choice_node'] = {
-        'set': {},
-        'choices': [None]
-    }
-    graph['repeated_headers_node'] = {
-        'set': {},
-        'headers': [
-            ['1-2', 'random-header-{random 5-10 printable}', '{random 20-30 punctuation}']
-        ],
-        'choices': ['leaf']
-    }
-    graph['weighted_null_choice_node'] = {
-        'set': {},
-        'choices': ['3 null']
-    }
-    return graph
-
-
-#def test_foo():
-    #graph_file = open('request_decision_graph.yml', 'r')
-    #graph = yaml.safe_load(graph_file)
-    #eq(graph['bucket_put_simple']['set']['grantee'], 0)
-
-
-def test_load_graph():
-    graph_file = open('request_decision_graph.yml', 'r')
-    graph = yaml.safe_load(graph_file)
-    graph['start']
-
-
-def test_descend_leaf_node():
-    graph = build_graph()
-    prng = random.Random(1)
-    decision = descend_graph(graph, 'leaf', prng)
-
-    eq(decision['key1'], 'value1')
-    eq(decision['key2'], 'value2')
-    e = assert_raises(KeyError, lambda x: decision[x], 'key3')
-
-
-def test_descend_node():
-    graph = build_graph()
-    prng = random.Random(1)
-    decision = descend_graph(graph, 'node1', prng)
-
-    eq(decision['key1'], 'value1')
-    eq(decision['key2'], 'value2')
-    eq(decision['key3'], 'value3')
-
-
-def test_descend_bad_node():
-    graph = build_graph()
-    prng = random.Random(1)
-    assert_raises(DecisionGraphError, descend_graph, graph, 'bad_node', prng)
-
-
-def test_descend_nonexistant_child():
-    graph = build_graph()
-    prng = random.Random(1)
-    assert_raises(KeyError, descend_graph, graph, 'nonexistant_child_node', prng)
-
-
-def test_expand_random_printable():
-    prng = random.Random(1)
-    got = expand({}, '{random 10-15 printable}', prng)
-    eq(got, '[/pNI$;92@')
-
-
-def test_expand_random_binary():
-    prng = random.Random(1)
-    got = expand({}, '{random 10-15 binary}', prng)
-    eq(got, '\xdfj\xf1\xd80>a\xcd\xc4\xbb')
-
-
-def test_expand_random_printable_no_whitespace():
-    prng = random.Random(1)
-    for _ in xrange(1000):
-        got = expand({}, '{random 500 printable_no_whitespace}', prng)
-        assert_true(reduce(lambda x, y: x and y, [x not in string.whitespace and x in string.printable for x in got]))
-
-
-def test_expand_random_binary_no_whitespace():
-    prng = random.Random(1)
-    for _ in xrange(1000):
-        got = expand({}, '{random 500 binary_no_whitespace}', prng)
-        assert_true(reduce(lambda x, y: x and y, [x not in string.whitespace for x in got]))
-
-
-def test_expand_random_no_args():
-    prng = random.Random(1)
-    for _ in xrange(1000):
-        got = expand({}, '{random}', prng)
-        assert_true(0 <= len(got) <= 1000)
-        assert_true(reduce(lambda x, y: x and y, [x in string.printable for x in got]))
-
-
-def test_expand_random_no_charset():
-    prng = random.Random(1)
-    for _ in xrange(1000):
-        got = expand({}, '{random 10-30}', prng)
-        assert_true(10 <= len(got) <= 30)
-        assert_true(reduce(lambda x, y: x and y, [x in string.printable for x in got]))
-
-
-def test_expand_random_exact_length():
-    prng = random.Random(1)
-    for _ in xrange(1000):
-        got = expand({}, '{random 10 digits}', prng)
-        assert_true(len(got) == 10)
-        assert_true(reduce(lambda x, y: x and y, [x in string.digits for x in got]))
-
-
-def test_expand_random_bad_charset():
-    prng = random.Random(1)
-    assert_raises(KeyError, expand, {}, '{random 10-30 foo}', prng)
-
-
-def test_expand_random_missing_length():
-    prng = random.Random(1)
-    assert_raises(ValueError, expand, {}, '{random printable}', prng)
-
-
-def test_assemble_decision():
-    graph = build_graph()
-    prng = random.Random(1)
-    decision = assemble_decision(graph, prng)
-
-    eq(decision['key1'], 'value1')
-    eq(decision['key2'], 'value2')
-    eq(decision['randkey'], 'value-{random 10-15 printable}')
-    eq(decision['indirect_key1'], '{key1}')
-    eq(decision['path'], '/{bucket_readable}')
-    assert_raises(KeyError, lambda x: decision[x], 'key3')
-
-
-def test_expand_escape():
-    prng = random.Random(1)
-    decision = dict(
-        foo='{{bar}}',
-    )
-    got = expand(decision, '{foo}', prng)
-    eq(got, '{bar}')
-
-
-def test_expand_indirect():
-    prng = random.Random(1)
-    decision = dict(
-        foo='{bar}',
-        bar='quux',
-    )
-    got = expand(decision, '{foo}', prng)
-    eq(got, 'quux')
-
-
-def test_expand_indirect_double():
-    prng = random.Random(1)
-    decision = dict(
-        foo='{bar}',
-        bar='{quux}',
-        quux='thud',
-    )
-    got = expand(decision, '{foo}', prng)
-    eq(got, 'thud')
-
-
-def test_expand_recursive():
-    prng = random.Random(1)
-    decision = dict(
-        foo='{foo}',
-    )
-    e = assert_raises(RecursionError, expand, decision, '{foo}', prng)
-    eq(str(e), "Runaway recursion in string formatting: 'foo'")
-
-
-def test_expand_recursive_mutual():
-    prng = random.Random(1)
-    decision = dict(
-        foo='{bar}',
-        bar='{foo}',
-    )
-    e = assert_raises(RecursionError, expand, decision, '{foo}', prng)
-    eq(str(e), "Runaway recursion in string formatting: 'foo'")
-
-
-def test_expand_recursive_not_too_eager():
-    prng = random.Random(1)
-    decision = dict(
-        foo='bar',
-    )
-    got = expand(decision, 100*'{foo}', prng)
-    eq(got, 100*'bar')
-
-
-def test_make_choice_unweighted_with_space():
-    prng = random.Random(1)
-    choice = make_choice(['foo bar'], prng)
-    eq(choice, 'foo bar')
-
-def test_weighted_choices():
-    graph = build_graph()
-    prng = random.Random(1)
-
-    choices_made = {}
-    for _ in xrange(1000):
-        choice = make_choice(graph['weighted_node']['choices'], prng)
-        if choices_made.has_key(choice):
-            choices_made[choice] += 1
-        else:
-            choices_made[choice] = 1
-
-    foo_percentage = choices_made['foo'] / 1000.0
-    bar_percentage = choices_made['bar'] / 1000.0
-    baz_percentage = choices_made['baz'] / 1000.0
-    nose.tools.assert_almost_equal(foo_percentage, 0.25, 1)
-    nose.tools.assert_almost_equal(bar_percentage, 0.50, 1)
-    nose.tools.assert_almost_equal(baz_percentage, 0.25, 1)
-
-
-def test_null_choices():
-    graph = build_graph()
-    prng = random.Random(1)
-    choice = make_choice(graph['null_choice_node']['choices'], prng)
-
-    eq(choice, '')
-
-
-def test_weighted_null_choices():
-    graph = build_graph()
-    prng = random.Random(1)
-    choice = make_choice(graph['weighted_null_choice_node']['choices'], prng)
-
-    eq(choice, '')
-
-
-def test_null_child():
-    graph = build_graph()
-    prng = random.Random(1)
-    decision = descend_graph(graph, 'null_choice_node', prng)
-
-    eq(decision, {})
-
-
-def test_weighted_set():
-    graph = build_graph()
-    prng = random.Random(1)
-
-    choices_made = {}
-    for _ in xrange(1000):
-        choice = make_choice(graph['weighted_node']['set']['k1'], prng)
-        if choices_made.has_key(choice):
-            choices_made[choice] += 1
-        else:
-            choices_made[choice] = 1
-
-    foo_percentage = choices_made['foo'] / 1000.0
-    bar_percentage = choices_made['bar'] / 1000.0
-    baz_percentage = choices_made['baz'] / 1000.0
-    nose.tools.assert_almost_equal(foo_percentage, 0.25, 1)
-    nose.tools.assert_almost_equal(bar_percentage, 0.50, 1)
-    nose.tools.assert_almost_equal(baz_percentage, 0.25, 1)
-
-
-def test_header_presence():
-    graph = build_graph()
-    prng = random.Random(1)
-    decision = descend_graph(graph, 'node1', prng)
-
-    c1 = itertools.count()
-    c2 = itertools.count()
-    for header, value in decision['headers']:
-        if header == 'my-header':
-            eq(value, '{header_val}')
-            assert_true(next(c1) < 1)
-        elif header == 'random-header-{random 5-10 printable}':
-            eq(value, '{random 20-30 punctuation}')
-            assert_true(next(c2) < 2)
-        else:
-            raise KeyError('unexpected header found: %s' % header)
-
-    assert_true(next(c1))
-    assert_true(next(c2))
-
-
-def test_duplicate_header():
-    graph = build_graph()
-    prng = random.Random(1)
-    assert_raises(DecisionGraphError, descend_graph, graph, 'repeated_headers_node', prng)
-
-
-def test_expand_headers():
-    graph = build_graph()
-    prng = random.Random(1)
-    decision = descend_graph(graph, 'node1', prng)
-    expanded_headers = expand_headers(decision, prng)
-
-    for header, value in expanded_headers.iteritems():
-        if header == 'my-header':
-            assert_true(value in ['h1', 'h2', 'h3'])
-        elif header.startswith('random-header-'):
-            assert_true(20 <= len(value) <= 30)
-            assert_true(string.strip(value, RepeatExpandingFormatter.charsets['punctuation']) is '')
-        else:
-            raise DecisionGraphError('unexpected header found: "%s"' % header)
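The deleted `test_weighted_choices` above tallies outcomes by hand in a dict; on Python 3 the same proportion check can lean on `random.choices` and `collections.Counter`. A sketch, illustrative rather than a replacement shipped in the repo:

    import collections
    import random

    prng = random.Random(1)
    draws = prng.choices(['foo', 'bar', 'baz'], weights=[1, 2, 1], k=1000)
    counts = collections.Counter(draws)
    # With weights 1:2:1, expect roughly 25% / 50% / 25%.
    assert abs(counts['bar'] / 1000 - 0.50) < 0.05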
@@ -1,117 +0,0 @@
-from boto.s3.key import Key
-from optparse import OptionParser
-from . import realistic
-import traceback
-import random
-from . import common
-import sys
-
-
-def parse_opts():
-    parser = OptionParser()
-    parser.add_option('-O', '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE')
-    parser.add_option('-b', '--bucket', dest='bucket', help='push objects to BUCKET', metavar='BUCKET')
-    parser.add_option('--seed', dest='seed', help='optional seed for the random number generator')
-
-    return parser.parse_args()
-
-
-def get_random_files(quantity, mean, stddev, seed):
-    """Create file-like objects with pseudorandom contents.
-       IN:
-           number of files to create
-           mean file size in bytes
-           standard deviation from mean file size
-           seed for PRNG
-       OUT:
-           list of file handles
-    """
-    file_generator = realistic.files(mean, stddev, seed)
-    return [file_generator.next() for _ in xrange(quantity)]
-
-
-def upload_objects(bucket, files, seed):
-    """Upload a bunch of files to an S3 bucket
-       IN:
-         boto S3 bucket object
-         list of file handles to upload
-         seed for PRNG
-       OUT:
-         list of boto S3 key objects
-    """
-    keys = []
-    name_generator = realistic.names(15, 4, seed=seed)
-
-    for fp in files:
-        print >> sys.stderr, 'sending file with size %dB' % fp.size
-        key = Key(bucket)
-        key.key = name_generator.next()
-        key.set_contents_from_file(fp, rewind=True)
-        key.set_acl('public-read')
-        keys.append(key)
-
-    return keys
-
-
-def _main():
-    '''To run the static content load test, make sure you've bootstrapped your
-    test environment and set up your config.yaml file, then run the following:
-        S3TEST_CONF=config.yaml virtualenv/bin/s3tests-generate-objects.py --seed 1234
-
-    This creates a bucket with your S3 credentials (from config.yaml) and
-    fills it with garbage objects as described in the
-    file_generation.groups section of config.yaml. It writes a list of
-    URLS to those objects to the file listed in file_generation.url_file
-    in config.yaml.
-
-    Once you have objcts in your bucket, run the siege benchmarking program:
-        siege --rc ./siege.conf -r 5
-
-    This tells siege to read the ./siege.conf config file which tells it to
-    use the urls in ./urls.txt and log to ./siege.log. It hits each url in
-    urls.txt 5 times (-r flag).
-
-    Results are printed to the terminal and written in CSV format to
-    ./siege.log
-    '''
-    (options, args) = parse_opts()
-
-    #SETUP
-    random.seed(options.seed if options.seed else None)
-    conn = common.s3.main
-
-    if options.outfile:
-        OUTFILE = open(options.outfile, 'w')
-    elif common.config.file_generation.url_file:
-        OUTFILE = open(common.config.file_generation.url_file, 'w')
-    else:
-        OUTFILE = sys.stdout
-
-    if options.bucket:
-        bucket = conn.create_bucket(options.bucket)
-    else:
-        bucket = common.get_new_bucket()
-
-    bucket.set_acl('public-read')
-    keys = []
-    print >> OUTFILE, 'bucket: %s' % bucket.name
-    print >> sys.stderr, 'setup complete, generating files'
-    for profile in common.config.file_generation.groups:
-        seed = random.random()
-        files = get_random_files(profile[0], profile[1], profile[2], seed)
-        keys += upload_objects(bucket, files, seed)
-
-    print >> sys.stderr, 'finished sending files. generating urls'
-    for key in keys:
-        print >> OUTFILE, key.generate_url(0, query_auth=False)
-
-    print >> sys.stderr, 'done'
-
-
-def main():
-    common.setup()
-    try:
-        _main()
-    except Exception as e:
-        traceback.print_exc()
-    common.teardown()
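Note that the deleted script's `file_generator.next()` and `name_generator.next()` calls are Python 2 generator methods; the method was removed in Python 3 in favor of the `next()` built-in. A minimal illustration:

    def names():
        yield 'a'
        yield 'b'

    gen = names()
    assert next(gen) == 'a'   # Python 3 spelling; gen.next() raises AttributeError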
@ -1,265 +0,0 @@
|
||||||
import gevent
|
|
||||||
import gevent.pool
|
|
||||||
import gevent.queue
|
|
||||||
import gevent.monkey; gevent.monkey.patch_all()
|
|
||||||
import itertools
|
|
||||||
import optparse
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
import traceback
|
|
||||||
import random
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
import realistic
|
|
||||||
import common
|
|
||||||
|
|
||||||
NANOSECOND = int(1e9)
|
|
||||||
|
|
||||||
def reader(bucket, worker_id, file_names, queue, rand):
|
|
||||||
while True:
|
|
||||||
objname = rand.choice(file_names)
|
|
||||||
key = bucket.new_key(objname)
|
|
||||||
|
|
||||||
fp = realistic.FileValidator()
|
|
||||||
result = dict(
|
|
||||||
type='r',
|
|
||||||
bucket=bucket.name,
|
|
||||||
key=key.name,
|
|
||||||
worker=worker_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
start = time.time()
|
|
||||||
try:
|
|
||||||
key.get_contents_to_file(fp._file)
|
|
||||||
except gevent.GreenletExit:
|
|
||||||
raise
|
|
||||||
except Exception as e:
|
|
||||||
# stop timer ASAP, even on errors
|
|
||||||
end = time.time()
|
|
||||||
result.update(
|
|
||||||
error=dict(
|
|
||||||
msg=str(e),
|
|
||||||
traceback=traceback.format_exc(),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
# certain kinds of programmer errors make this a busy
|
|
||||||
# loop; let parent greenlet get some time too
|
|
||||||
time.sleep(0)
|
|
||||||
else:
|
|
||||||
end = time.time()
|
|
||||||
|
|
||||||
if not fp.valid():
|
|
||||||
m='md5sum check failed start={s} ({se}) end={e} size={sz} obj={o}'.format(s=time.ctime(start), se=start, e=end, sz=fp._file.tell(), o=objname)
|
|
||||||
result.update(
|
|
||||||
error=dict(
|
|
||||||
msg=m,
|
|
||||||
traceback=traceback.format_exc(),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
print "ERROR:", m
|
|
||||||
else:
|
|
||||||
elapsed = end - start
|
|
||||||
result.update(
|
|
||||||
start=start,
|
|
||||||
duration=int(round(elapsed * NANOSECOND)),
|
|
||||||
)
|
|
||||||
queue.put(result)
|
|
||||||
|
|
||||||
def writer(bucket, worker_id, file_names, files, queue, rand):
|
|
||||||
while True:
|
|
||||||
fp = next(files)
|
|
||||||
fp.seek(0)
|
|
||||||
objname = rand.choice(file_names)
|
|
||||||
key = bucket.new_key(objname)
|
|
||||||
|
|
||||||
result = dict(
|
|
||||||
type='w',
|
|
||||||
bucket=bucket.name,
|
|
||||||
key=key.name,
|
|
||||||
worker=worker_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
start = time.time()
|
|
||||||
try:
|
|
||||||
key.set_contents_from_file(fp)
|
|
||||||
except gevent.GreenletExit:
|
|
||||||
raise
|
|
||||||
except Exception as e:
|
|
||||||
# stop timer ASAP, even on errors
|
|
||||||
end = time.time()
|
|
||||||
            result.update(
                error=dict(
                    msg=str(e),
                    traceback=traceback.format_exc(),
                    ),
                )
            # certain kinds of programmer errors make this a busy
            # loop; let parent greenlet get some time too
            time.sleep(0)
        else:
            end = time.time()

        elapsed = end - start
        result.update(
            start=start,
            duration=int(round(elapsed * NANOSECOND)),
            )

        queue.put(result)


def parse_options():
    parser = optparse.OptionParser(
        usage='%prog [OPTS] <CONFIG_YAML',
        )
    parser.add_option("--no-cleanup", dest="cleanup", action="store_false",
        help="skip cleaning up all created buckets", default=True)

    return parser.parse_args()


def write_file(bucket, file_name, fp):
    """
    Write a single file to the bucket using the file_name.
    This is used during the warmup to initialize the files.
    """
    key = bucket.new_key(file_name)
    key.set_contents_from_file(fp)


def main():
    # parse options
    (options, args) = parse_options()

    if os.isatty(sys.stdin.fileno()):
        raise RuntimeError('Need configuration in stdin.')

    config = common.read_config(sys.stdin)
    conn = common.connect(config.s3)
    bucket = None

    try:
        # setup
        real_stdout = sys.stdout
        sys.stdout = sys.stderr

        # verify all required config items are present
        if 'readwrite' not in config:
            raise RuntimeError('readwrite section not found in config')
        for item in ['readers', 'writers', 'duration', 'files', 'bucket']:
            if item not in config.readwrite:
                raise RuntimeError("Missing readwrite config item: {item}".format(item=item))
        for item in ['num', 'size', 'stddev']:
            if item not in config.readwrite.files:
                raise RuntimeError("Missing readwrite config item: files.{item}".format(item=item))

        seeds = dict(config.readwrite.get('random_seed', {}))
        seeds.setdefault('main', random.randrange(2**32))

        rand = random.Random(seeds['main'])

        for name in ['names', 'contents', 'writer', 'reader']:
            seeds.setdefault(name, rand.randrange(2**32))

        print 'Using random seeds: {seeds}'.format(seeds=seeds)

        # setup bucket and other objects
        bucket_name = common.choose_bucket_prefix(config.readwrite.bucket, max_len=30)
        bucket = conn.create_bucket(bucket_name)
        print "Created bucket: {name}".format(name=bucket.name)

        # check flag for deterministic file name creation
        if not config.readwrite.get('deterministic_file_names'):
            print 'Creating random file names'
            file_names = realistic.names(
                mean=15,
                stddev=4,
                seed=seeds['names'],
                )
            file_names = itertools.islice(file_names, config.readwrite.files.num)
            file_names = list(file_names)
        else:
            print 'Creating file names that are deterministic'
            file_names = []
            for x in xrange(config.readwrite.files.num):
                file_names.append('test_file_{num}'.format(num=x))

        files = realistic.files2(
            mean=1024 * config.readwrite.files.size,
            stddev=1024 * config.readwrite.files.stddev,
            seed=seeds['contents'],
            )
        q = gevent.queue.Queue()

        # warmup - get initial set of files uploaded if there are any writers specified
        if config.readwrite.writers > 0:
            print "Uploading initial set of {num} files".format(num=config.readwrite.files.num)
            warmup_pool = gevent.pool.Pool(size=100)
            for file_name in file_names:
                fp = next(files)
                warmup_pool.spawn(
                    write_file,
                    bucket=bucket,
                    file_name=file_name,
                    fp=fp,
                    )
            warmup_pool.join()

        # main work
        print "Starting main worker loop."
        print "Using file size: {size} +- {stddev}".format(size=config.readwrite.files.size, stddev=config.readwrite.files.stddev)
        print "Spawning {w} writers and {r} readers...".format(w=config.readwrite.writers, r=config.readwrite.readers)
        group = gevent.pool.Group()
        rand_writer = random.Random(seeds['writer'])

        # Don't create random files if deterministic_file_names is set and true
        if not config.readwrite.get('deterministic_file_names'):
            for x in xrange(config.readwrite.writers):
                this_rand = random.Random(rand_writer.randrange(2**32))
                group.spawn(
                    writer,
                    bucket=bucket,
                    worker_id=x,
                    file_names=file_names,
                    files=files,
                    queue=q,
                    rand=this_rand,
                    )

        # Since the loop generating readers already uses config.readwrite.readers
        # and the file names are already generated (randomly or deterministically),
        # this loop needs no additional qualifiers. If zero readers are specified,
        # it will behave as expected (no data is read).
        rand_reader = random.Random(seeds['reader'])
        for x in xrange(config.readwrite.readers):
            this_rand = random.Random(rand_reader.randrange(2**32))
            group.spawn(
                reader,
                bucket=bucket,
                worker_id=x,
                file_names=file_names,
                queue=q,
                rand=this_rand,
                )

        def stop():
            group.kill(block=True)
            q.put(StopIteration)
        gevent.spawn_later(config.readwrite.duration, stop)

        # wait for all the tests to finish
        group.join()
        print 'post-join, queue size {size}'.format(size=q.qsize())

        if q.qsize() > 0:
            for temp_dict in q:
                if 'error' in temp_dict:
                    raise Exception('exception:\n\t{msg}\n\t{trace}'.format(
                        msg=temp_dict['error']['msg'],
                        trace=temp_dict['error']['traceback'],
                        ))
                else:
                    yaml.safe_dump(temp_dict, stream=real_stdout)

    finally:
        # cleanup
        if options.cleanup:
            if bucket is not None:
                common.nuke_bucket(bucket)
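
For context, main() above reads its YAML configuration from stdin. The sketch below generates a minimal config whose keys mirror exactly the checks in main() and the mapping in common.connect(); all values are illustrative placeholders, not settings from this repo.

# Hypothetical config generator; pipe its output into the tool's stdin.
import sys

import yaml

cfg = dict(
    readwrite=dict(
        bucket='readwrite-{random}-',   # template for common.choose_bucket_prefix()
        readers=2,
        writers=2,
        duration=30,                    # seconds until stop() kills the worker group
        files=dict(num=10, size=2000, stddev=500),  # KiB; main() multiplies by 1024
        ),
    s3=dict(host='localhost', port=8000, is_secure=False,
            access_key='PLACEHOLDER', secret_key='PLACEHOLDER'),
    )
yaml.safe_dump(cfg, stream=sys.stdout)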
@@ -1,281 +0,0 @@
import hashlib
import random
import string
import struct
import time
import math
import tempfile
import shutil
import os


NANOSECOND = int(1e9)


def generate_file_contents(size):
    """
    A helper function to generate binary contents for a given size, and
    calculates the sha1 hash of the contents, appending it at the end of the
    blob.
    It uses sha1's hexdigest, which is 40 chars long. So any binary generated
    should remove the last 40 chars from the blob to retrieve the original hash
    and binary so that validity can be proved.
    """
    size = int(size)
    contents = os.urandom(size)
    content_hash = hashlib.sha1(contents).hexdigest()
    return contents + content_hash


class FileValidator(object):

    def __init__(self, f=None):
        self._file = tempfile.SpooledTemporaryFile()
        self.original_hash = None
        self.new_hash = None
        if f:
            f.seek(0)
            shutil.copyfileobj(f, self._file)

    def valid(self):
        """
        Returns True if this file looks valid. The file is valid if the end
        of the file has the sha1 hexdigest for the first part of the file.
        """
        self._file.seek(0)
        contents = self._file.read()
        self.original_hash, binary = contents[-40:], contents[:-40]
        self.new_hash = hashlib.sha1(binary).hexdigest()
        if not self.new_hash == self.original_hash:
            print 'original hash: ', self.original_hash
            print 'new hash: ', self.new_hash
            print 'size: ', self._file.tell()
            return False
        return True

    # XXX not sure if we need all of these
    def seek(self, offset, whence=os.SEEK_SET):
        self._file.seek(offset, whence)

    def tell(self):
        return self._file.tell()

    def read(self, size=-1):
        return self._file.read(size)

    def write(self, data):
        self._file.write(data)
        self._file.seek(0)


class RandomContentFile(object):
    def __init__(self, size, seed):
        self.size = size
        self.seed = seed
        self.random = random.Random(self.seed)

        # Boto likes to seek once more after it's done reading, so we need to
        # save the last chunks/seek value.
        self.last_chunks = self.chunks = None
        self.last_seek = None

        # Let seek initialize the rest of it, rather than dup code
        self.seek(0)

    def _mark_chunk(self):
        self.chunks.append([self.offset, int(round((time.time() - self.last_seek) * NANOSECOND))])

    def seek(self, offset, whence=os.SEEK_SET):
        if whence == os.SEEK_SET:
            self.offset = offset
        elif whence == os.SEEK_END:
            self.offset = self.size + offset
        elif whence == os.SEEK_CUR:
            self.offset += offset

        # only rewinding to the start is supported; the stream is
        # regenerated from the seed on every rewind
        assert self.offset == 0

        self.random.seed(self.seed)
        self.buffer = ''

        self.hash = hashlib.md5()
        self.digest_size = self.hash.digest_size
        self.digest = None

        # Save the last seek time as our start time, and the last chunks
        self.last_chunks = self.chunks
        # Before emptying.
        self.last_seek = time.time()
        self.chunks = []

    def tell(self):
        return self.offset

    def _generate(self):
        # generate and return a chunk of pseudorandom data
        size = min(self.size, 1*1024*1024)  # generate at most 1 MB at a time
        chunks = int(math.ceil(size/8.0))   # number of 8-byte chunks to create

        l = [self.random.getrandbits(64) for _ in xrange(chunks)]
        s = struct.pack(chunks*'Q', *l)
        return s

    def read(self, size=-1):
        if size < 0:
            size = self.size - self.offset

        r = []

        random_count = min(size, self.size - self.offset - self.digest_size)
        if random_count > 0:
            while len(self.buffer) < random_count:
                self.buffer += self._generate()
            self.offset += random_count
            size -= random_count
            data, self.buffer = self.buffer[:random_count], self.buffer[random_count:]
            if self.hash is not None:
                self.hash.update(data)
            r.append(data)

        digest_count = min(size, self.size - self.offset)
        if digest_count > 0:
            if self.digest is None:
                self.digest = self.hash.digest()
                self.hash = None
            self.offset += digest_count
            size -= digest_count
            data = self.digest[:digest_count]
            r.append(data)

        self._mark_chunk()

        return ''.join(r)


class PrecomputedContentFile(object):
    def __init__(self, f):
        self._file = tempfile.SpooledTemporaryFile()
        f.seek(0)
        shutil.copyfileobj(f, self._file)

        self.last_chunks = self.chunks = None
        self.seek(0)

    def seek(self, offset, whence=os.SEEK_SET):
        self._file.seek(offset, whence)

        if self.tell() == 0:
            # only reset the chunks when seeking to the beginning
            self.last_chunks = self.chunks
            self.last_seek = time.time()
            self.chunks = []

    def tell(self):
        return self._file.tell()

    def read(self, size=-1):
        data = self._file.read(size)
        self._mark_chunk()
        return data

    def _mark_chunk(self):
        elapsed = time.time() - self.last_seek
        elapsed_nsec = int(round(elapsed * NANOSECOND))
        self.chunks.append([self.tell(), elapsed_nsec])


class FileVerifier(object):
    def __init__(self):
        self.size = 0
        self.hash = hashlib.md5()
        self.buf = ''
        self.created_at = time.time()
        self.chunks = []

    def _mark_chunk(self):
        self.chunks.append([self.size, int(round((time.time() - self.created_at) * NANOSECOND))])

    def write(self, data):
        self.size += len(data)
        self.buf += data
        digsz = -1 * self.hash.digest_size
        new_data, self.buf = self.buf[0:digsz], self.buf[digsz:]
        self.hash.update(new_data)
        self._mark_chunk()

    def valid(self):
        """
        Returns True if this file looks valid. The file is valid if the end
        of the file has the md5 digest for the first part of the file.
        """
        if self.size < self.hash.digest_size:
            return self.hash.digest().startswith(self.buf)

        return self.buf == self.hash.digest()


def files(mean, stddev, seed=None):
    """
    Yields file-like objects with effectively random contents, where
    the size of each file follows the normal distribution with `mean`
    and `stddev`.

    Beware, the file-likeness is very shallow. You can use boto's
    `key.set_contents_from_file` to send these to S3, but they are not
    full file objects.

    The last 128 bits are the MD5 digest of the previous bytes, for
    verifying round-trip data integrity. For example, if you
    re-download the object and place the contents into a file called
    ``foo``, the following should print two identical lines:

      python -c 'import sys, hashlib; data=sys.stdin.read(); print hashlib.md5(data[:-16]).hexdigest(); print "".join("%02x" % ord(c) for c in data[-16:])' <foo

    Except for objects shorter than 16 bytes, where the second line
    will be proportionally shorter.
    """
    rand = random.Random(seed)
    while True:
        while True:
            size = int(rand.normalvariate(mean, stddev))
            if size >= 0:
                break
        yield RandomContentFile(size=size, seed=rand.getrandbits(32))


def files2(mean, stddev, seed=None, numfiles=10):
    """
    Yields file objects with effectively random contents, where the
    size of each file follows the normal distribution with `mean` and
    `stddev`.

    Rather than continuously generating new files, this pre-computes and
    stores `numfiles` files and yields them in a loop.
    """
    # pre-compute all the files (and save with TemporaryFiles)
    fs = []
    for _ in xrange(numfiles):
        t = tempfile.SpooledTemporaryFile()
        t.write(generate_file_contents(random.normalvariate(mean, stddev)))
        t.seek(0)
        fs.append(t)

    while True:
        for f in fs:
            yield f


def names(mean, stddev, charset=None, seed=None):
    """
    Yields strings that are somewhat plausible as file names, where
    the length of each filename follows the normal distribution with
    `mean` and `stddev`.
    """
    if charset is None:
        charset = string.ascii_lowercase
    rand = random.Random(seed)
    while True:
        while True:
            length = int(rand.normalvariate(mean, stddev))
            if length > 0:
                break
        name = ''.join(rand.choice(charset) for _ in xrange(length))
        yield name
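
The RandomContentFile/FileVerifier pair above implements the self-verifying payload scheme the docstring of files() describes: the stream ends in the MD5 digest of everything before it. A minimal offline round-trip check, in the same Python 2 dialect as the module (shutil.copyfileobj stands in for the S3 upload/download pair, so no network is involved):

# Self-check sketch; everything referenced is defined in realistic.py above.
import shutil

import realistic

source = realistic.RandomContentFile(size=1024, seed=42)  # 1008 random bytes + 16-byte md5 tail
verifier = realistic.FileVerifier()                       # recomputes md5 over all but the tail
shutil.copyfileobj(source, verifier)
assert verifier.valid()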
@@ -1,219 +0,0 @@
import gevent
import gevent.pool
import gevent.queue
import gevent.monkey; gevent.monkey.patch_all()
import itertools
import optparse
import os
import sys
import time
import traceback
import random
import yaml

import realistic
import common

NANOSECOND = int(1e9)


def writer(bucket, objname, fp, queue):
    key = bucket.new_key(objname)

    result = dict(
        type='w',
        bucket=bucket.name,
        key=key.name,
        )

    start = time.time()
    try:
        key.set_contents_from_file(fp, rewind=True)
    except gevent.GreenletExit:
        raise
    except Exception as e:
        # stop timer ASAP, even on errors
        end = time.time()
        result.update(
            error=dict(
                msg=str(e),
                traceback=traceback.format_exc(),
                ),
            )
        # certain kinds of programmer errors make this a busy
        # loop; let parent greenlet get some time too
        time.sleep(0)
    else:
        end = time.time()

    elapsed = end - start
    result.update(
        start=start,
        duration=int(round(elapsed * NANOSECOND)),
        chunks=fp.last_chunks,
        )
    queue.put(result)


def reader(bucket, objname, queue):
    key = bucket.new_key(objname)

    fp = realistic.FileVerifier()
    result = dict(
        type='r',
        bucket=bucket.name,
        key=key.name,
        )

    start = time.time()
    try:
        key.get_contents_to_file(fp)
    except gevent.GreenletExit:
        raise
    except Exception as e:
        # stop timer ASAP, even on errors
        end = time.time()
        result.update(
            error=dict(
                msg=str(e),
                traceback=traceback.format_exc(),
                ),
            )
        # certain kinds of programmer errors make this a busy
        # loop; let parent greenlet get some time too
        time.sleep(0)
    else:
        end = time.time()

        if not fp.valid():
            result.update(
                error=dict(
                    msg='md5sum check failed',
                    ),
                )

    elapsed = end - start
    result.update(
        start=start,
        duration=int(round(elapsed * NANOSECOND)),
        chunks=fp.chunks,
        )
    queue.put(result)


def parse_options():
    parser = optparse.OptionParser(
        usage='%prog [OPTS] <CONFIG_YAML',
        )
    parser.add_option("--no-cleanup", dest="cleanup", action="store_false",
        help="skip cleaning up all created buckets", default=True)

    return parser.parse_args()


def main():
    # parse options
    (options, args) = parse_options()

    if os.isatty(sys.stdin.fileno()):
        raise RuntimeError('Need configuration in stdin.')

    config = common.read_config(sys.stdin)
    conn = common.connect(config.s3)
    bucket = None

    try:
        # setup
        real_stdout = sys.stdout
        sys.stdout = sys.stderr

        # verify all required config items are present
        if 'roundtrip' not in config:
            raise RuntimeError('roundtrip section not found in config')
        for item in ['readers', 'writers', 'duration', 'files', 'bucket']:
            if item not in config.roundtrip:
                raise RuntimeError("Missing roundtrip config item: {item}".format(item=item))
        for item in ['num', 'size', 'stddev']:
            if item not in config.roundtrip.files:
                raise RuntimeError("Missing roundtrip config item: files.{item}".format(item=item))

        seeds = dict(config.roundtrip.get('random_seed', {}))
        seeds.setdefault('main', random.randrange(2**32))

        rand = random.Random(seeds['main'])

        for name in ['names', 'contents', 'writer', 'reader']:
            seeds.setdefault(name, rand.randrange(2**32))

        print 'Using random seeds: {seeds}'.format(seeds=seeds)

        # setup bucket and other objects
        bucket_name = common.choose_bucket_prefix(config.roundtrip.bucket, max_len=30)
        bucket = conn.create_bucket(bucket_name)
        print "Created bucket: {name}".format(name=bucket.name)
        objnames = realistic.names(
            mean=15,
            stddev=4,
            seed=seeds['names'],
            )
        objnames = itertools.islice(objnames, config.roundtrip.files.num)
        objnames = list(objnames)
        files = realistic.files(
            mean=1024 * config.roundtrip.files.size,
            stddev=1024 * config.roundtrip.files.stddev,
            seed=seeds['contents'],
            )
        q = gevent.queue.Queue()

        logger_g = gevent.spawn(yaml.safe_dump_all, q, stream=real_stdout)

        print "Writing {num} objects with {w} workers...".format(
            num=config.roundtrip.files.num,
            w=config.roundtrip.writers,
            )
        pool = gevent.pool.Pool(size=config.roundtrip.writers)
        start = time.time()
        for objname in objnames:
            fp = next(files)
            pool.spawn(
                writer,
                bucket=bucket,
                objname=objname,
                fp=fp,
                queue=q,
                )
        pool.join()
        stop = time.time()
        elapsed = stop - start
        q.put(dict(
            type='write_done',
            duration=int(round(elapsed * NANOSECOND)),
            ))

        print "Reading {num} objects with {w} workers...".format(
            num=config.roundtrip.files.num,
            w=config.roundtrip.readers,
            )
        # avoid accessing them in the same order as the writing
        rand.shuffle(objnames)
        pool = gevent.pool.Pool(size=config.roundtrip.readers)
        start = time.time()
        for objname in objnames:
            pool.spawn(
                reader,
                bucket=bucket,
                objname=objname,
                queue=q,
                )
        pool.join()
        stop = time.time()
        elapsed = stop - start
        q.put(dict(
            type='read_done',
            duration=int(round(elapsed * NANOSECOND)),
            ))

        q.put(StopIteration)
        logger_g.get()

    finally:
        # cleanup
        if options.cleanup:
            if bucket is not None:
                common.nuke_bucket(bucket)
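
The tool above streams one YAML document per operation to stdout via yaml.safe_dump_all, using the fields set in writer() and reader() ('type' of 'w'/'r', 'duration' in nanoseconds, an optional 'error', plus 'write_done'/'read_done' summaries). A hypothetical post-processing sketch, not part of the repo, that averages per-operation latency from that stream:

# Pipe the tool's stdout into this script.
import sys

import yaml

NANOSECOND = int(1e9)

latencies = {'r': [], 'w': []}
for doc in yaml.safe_load_all(sys.stdin):
    if doc and doc.get('type') in latencies and 'error' not in doc:
        latencies[doc['type']].append(doc['duration'] / float(NANOSECOND))

for op, secs in sorted(latencies.items()):
    if secs:
        print '%s: %d ops, mean %.3f s' % (op, len(secs), sum(secs) / len(secs))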
@@ -1,79 +0,0 @@
from s3tests import realistic
import shutil
import tempfile


# XXX not used for now
def create_files(mean=2000):
    return realistic.files2(
        mean=1024 * mean,
        stddev=1024 * 500,
        seed=1256193726,
        numfiles=4,
        )


class TestFiles(object):
    # the size and seed are what we get when generating a bunch of files
    # with pseudo random numbers based on stddev, seed, and mean.

    # this fails, demonstrating the (current) problem
    #def test_random_file_invalid(self):
    #    size = 2506764
    #    seed = 3391518755
    #    source = realistic.RandomContentFile(size=size, seed=seed)
    #    t = tempfile.SpooledTemporaryFile()
    #    shutil.copyfileobj(source, t)
    #    precomputed = realistic.PrecomputedContentFile(t)
    #    assert precomputed.valid()
    #
    #    verifier = realistic.FileVerifier()
    #    shutil.copyfileobj(precomputed, verifier)
    #
    #    assert verifier.valid()

    # this passes
    def test_random_file_valid(self):
        size = 2506001
        seed = 3391518755
        source = realistic.RandomContentFile(size=size, seed=seed)
        t = tempfile.SpooledTemporaryFile()
        shutil.copyfileobj(source, t)
        precomputed = realistic.PrecomputedContentFile(t)

        verifier = realistic.FileVerifier()
        shutil.copyfileobj(precomputed, verifier)

        assert verifier.valid()


# new implementation
class TestFileValidator(object):

    def test_new_file_is_valid(self):
        size = 2506001
        contents = realistic.generate_file_contents(size)
        t = tempfile.SpooledTemporaryFile()
        t.write(contents)
        t.seek(0)
        fp = realistic.FileValidator(t)
        assert fp.valid()

    def test_new_file_is_valid_when_size_is_1(self):
        size = 1
        contents = realistic.generate_file_contents(size)
        t = tempfile.SpooledTemporaryFile()
        t.write(contents)
        t.seek(0)
        fp = realistic.FileValidator(t)
        assert fp.valid()

    def test_new_file_is_valid_on_several_calls(self):
        size = 2506001
        contents = realistic.generate_file_contents(size)
        t = tempfile.SpooledTemporaryFile()
        t.write(contents)
        t.seek(0)
        fp = realistic.FileValidator(t)
        assert fp.valid()
        assert fp.valid()
301  s3tests_boto3/common.py  Normal file
@@ -0,0 +1,301 @@
import boto.s3.connection
import munch
import itertools
import os
import random
import string
import yaml
import re
from lxml import etree

from doctest import Example
from lxml.doctestcompare import LXMLOutputChecker

s3 = munch.Munch()
config = munch.Munch()
prefix = ''

bucket_counter = itertools.count(1)
key_counter = itertools.count(1)


def choose_bucket_prefix(template, max_len=30):
    """
    Choose a prefix for our test buckets, so they're easy to identify.

    Use template and feed it more and more random filler, until it's
    as long as possible but still below max_len.
    """
    rand = ''.join(
        random.choice(string.ascii_lowercase + string.digits)
        for c in range(255)
        )

    while rand:
        s = template.format(random=rand)
        if len(s) <= max_len:
            return s
        rand = rand[:-1]

    raise RuntimeError(
        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
            template=template,
            ),
        )


def nuke_bucket(bucket):
    try:
        bucket.set_canned_acl('private')
        # TODO: deleted_cnt and the while loop is a work around for rgw
        # not sending the
        deleted_cnt = 1
        while deleted_cnt:
            deleted_cnt = 0
            for key in bucket.list():
                print('Cleaning bucket {bucket} key {key}'.format(
                    bucket=bucket,
                    key=key,
                    ))
                key.set_canned_acl('private')
                key.delete()
                deleted_cnt += 1
        bucket.delete()
    except boto.exception.S3ResponseError as e:
        # TODO: workaround for buggy rgw that fails to send
        # error_code, remove
        if (e.status == 403
                and e.error_code is None
                and e.body == ''):
            e.error_code = 'AccessDenied'
        if e.error_code != 'AccessDenied':
            print('GOT UNWANTED ERROR', e.error_code)
            raise
        # seems like we're not the owner of the bucket; ignore
        pass


def nuke_prefixed_buckets():
    for name, conn in list(s3.items()):
        print('Cleaning buckets from connection {name}'.format(name=name))
        for bucket in conn.get_all_buckets():
            if bucket.name.startswith(prefix):
                print('Cleaning bucket {bucket}'.format(bucket=bucket))
                nuke_bucket(bucket)

    print('Done with cleanup of test buckets.')


def read_config(fp):
    config = munch.Munch()
    g = yaml.safe_load_all(fp)
    for new in g:
        config.update(munch.munchify(new))
    return config


def connect(conf):
    mapping = dict(
        port='port',
        host='host',
        is_secure='is_secure',
        access_key='aws_access_key_id',
        secret_key='aws_secret_access_key',
        )
    kwargs = dict((mapping[k], v) for (k, v) in conf.items() if k in mapping)
    # process the calling_format argument
    calling_formats = dict(
        ordinary=boto.s3.connection.OrdinaryCallingFormat(),
        subdomain=boto.s3.connection.SubdomainCallingFormat(),
        vhost=boto.s3.connection.VHostCallingFormat(),
        )
    kwargs['calling_format'] = calling_formats['ordinary']
    if 'calling_format' in conf:
        raw_calling_format = conf['calling_format']
        try:
            kwargs['calling_format'] = calling_formats[raw_calling_format]
        except KeyError:
            raise RuntimeError(
                'calling_format unknown: %r' % raw_calling_format
                )
    # TODO test vhost calling format
    conn = boto.s3.connection.S3Connection(**kwargs)
    return conn


def setup():
    global s3, config, prefix
    s3.clear()
    config.clear()

    try:
        path = os.environ['S3TEST_CONF']
    except KeyError:
        raise RuntimeError(
            'To run tests, point environment '
            + 'variable S3TEST_CONF to a config file.',
            )
    with open(path) as f:
        config.update(read_config(f))

    # These 3 should always be present.
    if 's3' not in config:
        raise RuntimeError('Your config file is missing the s3 section!')
    if 'defaults' not in config.s3:
        raise RuntimeError('Your config file is missing the s3.defaults section!')
    if 'fixtures' not in config:
        raise RuntimeError('Your config file is missing the fixtures section!')

    template = config.fixtures.get('bucket prefix', 'test-{random}-')
    prefix = choose_bucket_prefix(template=template)
    if prefix == '':
        raise RuntimeError("Empty prefix! Aborting!")

    defaults = config.s3.defaults
    for section in list(config.s3.keys()):
        if section == 'defaults':
            continue

        conf = {}
        conf.update(defaults)
        conf.update(config.s3[section])
        conn = connect(conf)
        s3[section] = conn

    # WARNING! we actively delete all buckets we see with the prefix
    # we've chosen! Choose your prefix with care, and don't reuse
    # credentials!

    # We also assume nobody else is going to use buckets with that
    # prefix. This is racy but given enough randomness, should not
    # really fail.
    nuke_prefixed_buckets()


def get_new_bucket(connection=None):
    """
    Get a bucket that exists and is empty.

    Always recreates a bucket from scratch. This is useful to also
    reset ACLs and such.
    """
    if connection is None:
        connection = s3.main
    name = '{prefix}{num}'.format(
        prefix=prefix,
        num=next(bucket_counter),
        )
    # the only way for this to fail with a pre-existing bucket is if
    # someone raced us between setup nuke_prefixed_buckets and here;
    # ignore that as astronomically unlikely
    bucket = connection.create_bucket(name)
    return bucket


def teardown():
    nuke_prefixed_buckets()


def with_setup_kwargs(setup, teardown=None):
    """Decorator to add setup and/or teardown methods to a test function::

      @with_setup_kwargs(setup, teardown)
      def test_something():
          " ... "

    The setup function should return (kwargs) which will be passed to
    the test function, and the teardown function.

    Note that `with_setup_kwargs` is useful *only* for test functions, not for test
    methods or inside of TestCase subclasses.
    """
    def decorate(func):
        kwargs = {}

        def test_wrapped(*args, **kwargs2):
            k2 = kwargs.copy()
            k2.update(kwargs2)
            k2['testname'] = func.__name__
            func(*args, **k2)

        test_wrapped.__name__ = func.__name__

        def setup_wrapped():
            k = setup()
            kwargs.update(k)
            if hasattr(func, 'setup'):
                func.setup()
        test_wrapped.setup = setup_wrapped

        if teardown:
            def teardown_wrapped():
                if hasattr(func, 'teardown'):
                    func.teardown()
                teardown(**kwargs)

            test_wrapped.teardown = teardown_wrapped
        else:
            if hasattr(func, 'teardown'):
                test_wrapped.teardown = func.teardown()
        return test_wrapped
    return decorate


# Demo case for the above; when you run test_gen(),
# _test_gen will run twice,
# with the following stderr printing:
#     setup_func {'b': 2}
#     testcase ('1',) {'b': 2, 'testname': '_test_gen'}
#     teardown_func {'b': 2}
#     setup_func {'b': 2}
#     testcase () {'b': 2, 'testname': '_test_gen'}
#     teardown_func {'b': 2}
#
#def setup_func():
#    kwargs = {'b': 2}
#    print("setup_func", kwargs, file=sys.stderr)
#    return kwargs
#
#def teardown_func(**kwargs):
#    print("teardown_func", kwargs, file=sys.stderr)
#
#@with_setup_kwargs(setup=setup_func, teardown=teardown_func)
#def _test_gen(*args, **kwargs):
#    print("testcase", args, kwargs, file=sys.stderr)
#
#def test_gen():
#    yield _test_gen, '1'
#    yield _test_gen


def trim_xml(xml_str):
    p = etree.XMLParser(remove_blank_text=True)
    elem = etree.XML(xml_str, parser=p)
    return etree.tostring(elem)


def normalize_xml(xml, pretty_print=True):
    if xml is None:
        return xml

    root = etree.fromstring(xml.encode(encoding='ascii'))

    for element in root.iter('*'):
        if element.text is not None and not element.text.strip():
            element.text = None
        if element.text is not None:
            element.text = element.text.strip().replace("\n", "").replace("\r", "")
        if element.tail is not None and not element.tail.strip():
            element.tail = None
        if element.tail is not None:
            element.tail = element.tail.strip().replace("\n", "").replace("\r", "")

    # Sort the elements
    for parent in root.xpath('//*[./*]'):  # Search for parent elements
        parent[:] = sorted(parent, key=lambda x: x.tag)

    xmlstr = etree.tostring(root, encoding="utf-8", xml_declaration=True, pretty_print=pretty_print)
    # there are two different DTD URIs
    xmlstr = re.sub(r'xmlns="[^"]+"', 'xmlns="s3"', xmlstr)
    xmlstr = re.sub(r'xmlns=\'[^\']+\'', 'xmlns="s3"', xmlstr)
    for uri in ['http://doc.s3.amazonaws.com/doc/2006-03-01/', 'http://s3.amazonaws.com/doc/2006-03-01/']:
        xmlstr = xmlstr.replace(uri, 'URI-DTD')
    #xmlstr = re.sub(r'>\s+', '>', xmlstr, count=0, flags=re.MULTILINE)
    return xmlstr


def assert_xml_equal(got, want):
    assert want is not None, 'Wanted XML cannot be None'
    if got is None:
        raise AssertionError('Got input to validate was None')
    checker = LXMLOutputChecker()
    if not checker.check_output(want, got, 0):
        message = checker.output_difference(Example("", want), got, 0)
        raise AssertionError(message)
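
A quick illustration of choose_bucket_prefix() above, with made-up values: the 255 characters of random filler are trimmed one character at a time until the formatted name fits under max_len, so the result is the template padded with as much filler as fits.

from s3tests_boto3 import common

p = common.choose_bucket_prefix(template='test-{random}-', max_len=30)
assert p.startswith('test-') and p.endswith('-')
assert len(p) <= 30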
782  s3tests_boto3/functional/__init__.py  Normal file
@@ -0,0 +1,782 @@
|
||||||
|
import pytest
|
||||||
|
import boto3
|
||||||
|
from botocore import UNSIGNED
|
||||||
|
from botocore.client import Config
|
||||||
|
from botocore.exceptions import ClientError
|
||||||
|
from botocore.handlers import disable_signing
|
||||||
|
import configparser
|
||||||
|
import datetime
|
||||||
|
import time
|
||||||
|
import os
|
||||||
|
import munch
|
||||||
|
import random
|
||||||
|
import string
|
||||||
|
import itertools
|
||||||
|
import urllib3
|
||||||
|
import re
|
||||||
|
|
||||||
|
config = munch.Munch
|
||||||
|
|
||||||
|
# this will be assigned by setup()
|
||||||
|
prefix = None
|
||||||
|
|
||||||
|
def get_prefix():
|
||||||
|
assert prefix is not None
|
||||||
|
return prefix
|
||||||
|
|
||||||
|
def choose_bucket_prefix(template, max_len=30):
|
||||||
|
"""
|
||||||
|
Choose a prefix for our test buckets, so they're easy to identify.
|
||||||
|
|
||||||
|
Use template and feed it more and more random filler, until it's
|
||||||
|
as long as possible but still below max_len.
|
||||||
|
"""
|
||||||
|
rand = ''.join(
|
||||||
|
random.choice(string.ascii_lowercase + string.digits)
|
||||||
|
for c in range(255)
|
||||||
|
)
|
||||||
|
|
||||||
|
while rand:
|
||||||
|
s = template.format(random=rand)
|
||||||
|
if len(s) <= max_len:
|
||||||
|
return s
|
||||||
|
rand = rand[:-1]
|
||||||
|
|
||||||
|
raise RuntimeError(
|
||||||
|
'Bucket prefix template is impossible to fulfill: {template!r}'.format(
|
||||||
|
template=template,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
def get_buckets_list(client=None, prefix=None):
|
||||||
|
if client == None:
|
||||||
|
client = get_client()
|
||||||
|
if prefix == None:
|
||||||
|
prefix = get_prefix()
|
||||||
|
response = client.list_buckets()
|
||||||
|
bucket_dicts = response['Buckets']
|
||||||
|
buckets_list = []
|
||||||
|
for bucket in bucket_dicts:
|
||||||
|
if prefix in bucket['Name']:
|
||||||
|
buckets_list.append(bucket['Name'])
|
||||||
|
|
||||||
|
return buckets_list
|
||||||
|
|
||||||
|
def get_objects_list(bucket, client=None, prefix=None):
|
||||||
|
if client == None:
|
||||||
|
client = get_client()
|
||||||
|
|
||||||
|
if prefix == None:
|
||||||
|
response = client.list_objects(Bucket=bucket)
|
||||||
|
else:
|
||||||
|
response = client.list_objects(Bucket=bucket, Prefix=prefix)
|
||||||
|
objects_list = []
|
||||||
|
|
||||||
|
if 'Contents' in response:
|
||||||
|
contents = response['Contents']
|
||||||
|
for obj in contents:
|
||||||
|
objects_list.append(obj['Key'])
|
||||||
|
|
||||||
|
return objects_list
|
||||||
|
|
||||||
|
# generator function that returns object listings in batches, where each
|
||||||
|
# batch is a list of dicts compatible with delete_objects()
|
||||||
|
def list_versions(client, bucket, batch_size):
|
||||||
|
kwargs = {'Bucket': bucket, 'MaxKeys': batch_size}
|
||||||
|
truncated = True
|
||||||
|
while truncated:
|
||||||
|
listing = client.list_object_versions(**kwargs)
|
||||||
|
|
||||||
|
kwargs['KeyMarker'] = listing.get('NextKeyMarker')
|
||||||
|
kwargs['VersionIdMarker'] = listing.get('NextVersionIdMarker')
|
||||||
|
truncated = listing['IsTruncated']
|
||||||
|
|
||||||
|
objs = listing.get('Versions', []) + listing.get('DeleteMarkers', [])
|
||||||
|
if len(objs):
|
||||||
|
yield [{'Key': o['Key'], 'VersionId': o['VersionId']} for o in objs]
|
||||||
|
|
||||||
|
def nuke_bucket(client, bucket):
|
||||||
|
batch_size = 128
|
||||||
|
max_retain_date = None
|
||||||
|
|
||||||
|
# list and delete objects in batches
|
||||||
|
for objects in list_versions(client, bucket, batch_size):
|
||||||
|
delete = client.delete_objects(Bucket=bucket,
|
||||||
|
Delete={'Objects': objects, 'Quiet': True},
|
||||||
|
BypassGovernanceRetention=True)
|
||||||
|
|
||||||
|
# check for object locks on 403 AccessDenied errors
|
||||||
|
for err in delete.get('Errors', []):
|
||||||
|
if err.get('Code') != 'AccessDenied':
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
res = client.get_object_retention(Bucket=bucket,
|
||||||
|
Key=err['Key'], VersionId=err['VersionId'])
|
||||||
|
retain_date = res['Retention']['RetainUntilDate']
|
||||||
|
if not max_retain_date or max_retain_date < retain_date:
|
||||||
|
max_retain_date = retain_date
|
||||||
|
except ClientError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if max_retain_date:
|
||||||
|
# wait out the retention period (up to 60 seconds)
|
||||||
|
now = datetime.datetime.now(max_retain_date.tzinfo)
|
||||||
|
if max_retain_date > now:
|
||||||
|
delta = max_retain_date - now
|
||||||
|
if delta.total_seconds() > 60:
|
||||||
|
raise RuntimeError('bucket {} still has objects \
|
||||||
|
locked for {} more seconds, not waiting for \
|
||||||
|
bucket cleanup'.format(bucket, delta.total_seconds()))
|
||||||
|
print('nuke_bucket', bucket, 'waiting', delta.total_seconds(),
|
||||||
|
'seconds for object locks to expire')
|
||||||
|
time.sleep(delta.total_seconds())
|
||||||
|
|
||||||
|
for objects in list_versions(client, bucket, batch_size):
|
||||||
|
client.delete_objects(Bucket=bucket,
|
||||||
|
Delete={'Objects': objects, 'Quiet': True},
|
||||||
|
BypassGovernanceRetention=True)
|
||||||
|
|
||||||
|
client.delete_bucket(Bucket=bucket)
|
||||||
|
|
||||||
|
def nuke_prefixed_buckets(prefix, client=None):
|
||||||
|
if client == None:
|
||||||
|
client = get_client()
|
||||||
|
|
||||||
|
buckets = get_buckets_list(client, prefix)
|
||||||
|
|
||||||
|
err = None
|
||||||
|
for bucket_name in buckets:
|
||||||
|
try:
|
||||||
|
nuke_bucket(client, bucket_name)
|
||||||
|
except Exception as e:
|
||||||
|
# The exception shouldn't be raised when doing cleanup. Pass and continue
|
||||||
|
# the bucket cleanup process. Otherwise left buckets wouldn't be cleared
|
||||||
|
# resulting in some kind of resource leak. err is used to hint user some
|
||||||
|
# exception once occurred.
|
||||||
|
err = e
|
||||||
|
pass
|
||||||
|
if err:
|
||||||
|
raise err
|
||||||
|
|
||||||
|
print('Done with cleanup of buckets in tests.')
|
||||||
|
|
||||||
|
def configured_storage_classes():
|
||||||
|
sc = ['STANDARD']
|
||||||
|
|
||||||
|
extra_sc = re.split(r"[\b\W\b]+", config.storage_classes)
|
||||||
|
|
||||||
|
for item in extra_sc:
|
||||||
|
if item != 'STANDARD':
|
||||||
|
sc.append(item)
|
||||||
|
|
||||||
|
sc = [i for i in sc if i]
|
||||||
|
print("storage classes configured: " + str(sc))
|
||||||
|
|
||||||
|
return sc
|
||||||
|
|
||||||
|
def configure():
|
||||||
|
cfg = configparser.RawConfigParser()
|
||||||
|
try:
|
||||||
|
path = os.environ['S3TEST_CONF']
|
||||||
|
except KeyError:
|
||||||
|
raise RuntimeError(
|
||||||
|
'To run tests, point environment '
|
||||||
|
+ 'variable S3TEST_CONF to a config file.',
|
||||||
|
)
|
||||||
|
cfg.read(path)
|
||||||
|
|
||||||
|
if not cfg.defaults():
|
||||||
|
raise RuntimeError('Your config file is missing the DEFAULT section!')
|
||||||
|
if not cfg.has_section("s3 main"):
|
||||||
|
raise RuntimeError('Your config file is missing the "s3 main" section!')
|
||||||
|
if not cfg.has_section("s3 alt"):
|
||||||
|
raise RuntimeError('Your config file is missing the "s3 alt" section!')
|
||||||
|
if not cfg.has_section("s3 tenant"):
|
||||||
|
raise RuntimeError('Your config file is missing the "s3 tenant" section!')
|
||||||
|
|
||||||
|
global prefix
|
||||||
|
|
||||||
|
defaults = cfg.defaults()
|
||||||
|
|
||||||
|
# vars from the DEFAULT section
|
||||||
|
config.default_host = defaults.get("host")
|
||||||
|
config.default_port = int(defaults.get("port"))
|
||||||
|
config.default_is_secure = cfg.getboolean('DEFAULT', "is_secure")
|
||||||
|
|
||||||
|
proto = 'https' if config.default_is_secure else 'http'
|
||||||
|
config.default_endpoint = "%s://%s:%d" % (proto, config.default_host, config.default_port)
|
||||||
|
|
||||||
|
try:
|
||||||
|
config.default_ssl_verify = cfg.getboolean('DEFAULT', "ssl_verify")
|
||||||
|
except configparser.NoOptionError:
|
||||||
|
config.default_ssl_verify = False
|
||||||
|
|
||||||
|
# Disable InsecureRequestWarning reported by urllib3 when ssl_verify is False
|
||||||
|
if not config.default_ssl_verify:
|
||||||
|
urllib3.disable_warnings()
|
||||||
|
|
||||||
|
# vars from the main section
|
||||||
|
config.main_access_key = cfg.get('s3 main',"access_key")
|
||||||
|
config.main_secret_key = cfg.get('s3 main',"secret_key")
|
||||||
|
config.main_display_name = cfg.get('s3 main',"display_name")
|
||||||
|
config.main_user_id = cfg.get('s3 main',"user_id")
|
||||||
|
config.main_email = cfg.get('s3 main',"email")
|
||||||
|
try:
|
||||||
|
config.main_kms_keyid = cfg.get('s3 main',"kms_keyid")
|
||||||
|
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||||
|
config.main_kms_keyid = 'testkey-1'
|
||||||
|
|
||||||
|
try:
|
||||||
|
config.main_kms_keyid2 = cfg.get('s3 main',"kms_keyid2")
|
||||||
|
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||||
|
config.main_kms_keyid2 = 'testkey-2'
|
||||||
|
|
||||||
|
try:
|
||||||
|
config.main_api_name = cfg.get('s3 main',"api_name")
|
||||||
|
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||||
|
config.main_api_name = ""
|
||||||
|
pass
|
||||||
|
|
||||||
|
try:
|
||||||
|
config.storage_classes = cfg.get('s3 main',"storage_classes")
|
||||||
|
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||||
|
config.storage_classes = ""
|
||||||
|
pass
|
||||||
|
|
||||||
|
try:
|
||||||
|
config.lc_debug_interval = int(cfg.get('s3 main',"lc_debug_interval"))
|
||||||
|
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||||
|
config.lc_debug_interval = 10
|
||||||
|
|
||||||
|
config.alt_access_key = cfg.get('s3 alt',"access_key")
|
||||||
|
config.alt_secret_key = cfg.get('s3 alt',"secret_key")
|
||||||
|
config.alt_display_name = cfg.get('s3 alt',"display_name")
|
||||||
|
config.alt_user_id = cfg.get('s3 alt',"user_id")
|
||||||
|
config.alt_email = cfg.get('s3 alt',"email")
|
||||||
|
|
||||||
|
config.tenant_access_key = cfg.get('s3 tenant',"access_key")
|
||||||
|
config.tenant_secret_key = cfg.get('s3 tenant',"secret_key")
|
||||||
|
config.tenant_display_name = cfg.get('s3 tenant',"display_name")
|
||||||
|
config.tenant_user_id = cfg.get('s3 tenant',"user_id")
|
||||||
|
config.tenant_email = cfg.get('s3 tenant',"email")
|
||||||
|
config.tenant_name = cfg.get('s3 tenant',"tenant")
|
||||||
|
|
||||||
|
config.iam_access_key = cfg.get('iam',"access_key")
|
||||||
|
config.iam_secret_key = cfg.get('iam',"secret_key")
|
||||||
|
config.iam_display_name = cfg.get('iam',"display_name")
|
||||||
|
config.iam_user_id = cfg.get('iam',"user_id")
|
||||||
|
config.iam_email = cfg.get('iam',"email")
|
||||||
|
|
||||||
|
config.iam_root_access_key = cfg.get('iam root',"access_key")
|
||||||
|
config.iam_root_secret_key = cfg.get('iam root',"secret_key")
|
||||||
|
config.iam_root_user_id = cfg.get('iam root',"user_id")
|
||||||
|
config.iam_root_email = cfg.get('iam root',"email")
|
||||||
|
|
||||||
|
config.iam_alt_root_access_key = cfg.get('iam alt root',"access_key")
|
||||||
|
config.iam_alt_root_secret_key = cfg.get('iam alt root',"secret_key")
|
||||||
|
config.iam_alt_root_user_id = cfg.get('iam alt root',"user_id")
|
||||||
|
config.iam_alt_root_email = cfg.get('iam alt root',"email")
|
||||||
|
|
||||||
|
# vars from the fixtures section
|
||||||
|
template = cfg.get('fixtures', "bucket prefix", fallback='test-{random}-')
|
||||||
|
prefix = choose_bucket_prefix(template=template)
|
||||||
|
template = cfg.get('fixtures', "iam name prefix", fallback="s3-tests-")
|
||||||
|
config.iam_name_prefix = choose_bucket_prefix(template=template)
|
||||||
|
template = cfg.get('fixtures', "iam path prefix", fallback="/s3-tests/")
|
||||||
|
config.iam_path_prefix = choose_bucket_prefix(template=template)
|
||||||
|
|
||||||
|
if cfg.has_section("s3 cloud"):
|
||||||
|
get_cloud_config(cfg)
|
||||||
|
else:
|
||||||
|
config.cloud_storage_class = None
|
||||||
|
|
||||||
|
def setup():
|
||||||
|
alt_client = get_alt_client()
|
||||||
|
tenant_client = get_tenant_client()
|
||||||
|
nuke_prefixed_buckets(prefix=prefix)
|
||||||
|
nuke_prefixed_buckets(prefix=prefix, client=alt_client)
|
||||||
|
nuke_prefixed_buckets(prefix=prefix, client=tenant_client)
|
||||||
|
|
||||||
|
def teardown():
|
||||||
|
alt_client = get_alt_client()
|
||||||
|
tenant_client = get_tenant_client()
|
||||||
|
nuke_prefixed_buckets(prefix=prefix)
|
||||||
|
nuke_prefixed_buckets(prefix=prefix, client=alt_client)
|
||||||
|
nuke_prefixed_buckets(prefix=prefix, client=tenant_client)
|
||||||
|
try:
|
||||||
|
iam_client = get_iam_client()
|
||||||
|
list_roles_resp = iam_client.list_roles()
|
||||||
|
for role in list_roles_resp['Roles']:
|
||||||
|
list_policies_resp = iam_client.list_role_policies(RoleName=role['RoleName'])
|
||||||
|
for policy in list_policies_resp['PolicyNames']:
|
||||||
|
del_policy_resp = iam_client.delete_role_policy(
|
||||||
|
RoleName=role['RoleName'],
|
||||||
|
PolicyName=policy
|
||||||
|
)
|
||||||
|
del_role_resp = iam_client.delete_role(RoleName=role['RoleName'])
|
||||||
|
list_oidc_resp = iam_client.list_open_id_connect_providers()
|
||||||
|
for oidcprovider in list_oidc_resp['OpenIDConnectProviderList']:
|
||||||
|
del_oidc_resp = iam_client.delete_open_id_connect_provider(
|
||||||
|
OpenIDConnectProviderArn=oidcprovider['Arn']
|
||||||
|
)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@pytest.fixture(scope="package")
|
||||||
|
def configfile():
|
||||||
|
configure()
|
||||||
|
return config
|
||||||
|
|
||||||
|
@pytest.fixture(autouse=True)
|
||||||
|
def setup_teardown(configfile):
|
||||||
|
setup()
|
||||||
|
yield
|
||||||
|
teardown()
|
||||||
|
|
||||||
|
def check_webidentity():
|
||||||
|
cfg = configparser.RawConfigParser()
|
||||||
|
try:
|
||||||
|
path = os.environ['S3TEST_CONF']
|
||||||
|
except KeyError:
|
||||||
|
raise RuntimeError(
|
||||||
|
'To run tests, point environment '
|
||||||
|
+ 'variable S3TEST_CONF to a config file.',
|
||||||
|
)
|
||||||
|
cfg.read(path)
|
||||||
|
if not cfg.has_section("webidentity"):
|
||||||
|
raise RuntimeError('Your config file is missing the "webidentity" section!')
|
||||||
|
|
||||||
|
config.webidentity_thumbprint = cfg.get('webidentity', "thumbprint")
|
||||||
|
config.webidentity_aud = cfg.get('webidentity', "aud")
|
||||||
|
config.webidentity_token = cfg.get('webidentity', "token")
|
||||||
|
config.webidentity_realm = cfg.get('webidentity', "KC_REALM")
|
||||||
|
config.webidentity_sub = cfg.get('webidentity', "sub")
|
||||||
|
config.webidentity_azp = cfg.get('webidentity', "azp")
|
||||||
|
config.webidentity_user_token = cfg.get('webidentity', "user_token")
|
||||||
|
|
||||||
|
def get_cloud_config(cfg):
|
||||||
|
config.cloud_host = cfg.get('s3 cloud',"host")
|
||||||
|
config.cloud_port = int(cfg.get('s3 cloud',"port"))
|
||||||
|
config.cloud_is_secure = cfg.getboolean('s3 cloud', "is_secure")
|
||||||
|
|
||||||
|
proto = 'https' if config.cloud_is_secure else 'http'
|
||||||
|
config.cloud_endpoint = "%s://%s:%d" % (proto, config.cloud_host, config.cloud_port)
|
||||||
|
|
||||||
|
config.cloud_access_key = cfg.get('s3 cloud',"access_key")
|
||||||
|
config.cloud_secret_key = cfg.get('s3 cloud',"secret_key")
|
||||||
|
|
||||||
|
try:
|
||||||
|
config.cloud_storage_class = cfg.get('s3 cloud', "cloud_storage_class")
|
||||||
|
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||||
|
config.cloud_storage_class = None
|
||||||
|
|
||||||
|
try:
|
||||||
|
config.cloud_retain_head_object = cfg.get('s3 cloud',"retain_head_object")
|
||||||
|
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||||
|
config.cloud_retain_head_object = None
|
||||||
|
|
||||||
|
try:
|
||||||
|
config.cloud_target_path = cfg.get('s3 cloud',"target_path")
|
||||||
|
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||||
|
config.cloud_target_path = None
|
||||||
|
|
||||||
|
try:
|
||||||
|
config.cloud_target_storage_class = cfg.get('s3 cloud',"target_storage_class")
|
||||||
|
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||||
|
config.cloud_target_storage_class = 'STANDARD'
|
||||||
|
|
||||||
|
try:
|
||||||
|
config.cloud_regular_storage_class = cfg.get('s3 cloud', "storage_class")
|
||||||
|
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||||
|
config.cloud_regular_storage_class = None
|
||||||
|
|
||||||
|
|
||||||
|
def get_client(client_config=None):
|
||||||
|
if client_config == None:
|
||||||
|
client_config = Config(signature_version='s3v4')
|
||||||
|
|
||||||
|
client = boto3.client(service_name='s3',
|
||||||
|
aws_access_key_id=config.main_access_key,
|
||||||
|
aws_secret_access_key=config.main_secret_key,
|
||||||
|
endpoint_url=config.default_endpoint,
|
||||||
|
use_ssl=config.default_is_secure,
|
||||||
|
verify=config.default_ssl_verify,
|
||||||
|
config=client_config)
|
||||||
|
return client
|
||||||
|
|
||||||
|
def get_v2_client():
|
||||||
|
client = boto3.client(service_name='s3',
|
||||||
|
aws_access_key_id=config.main_access_key,
|
||||||
|
aws_secret_access_key=config.main_secret_key,
|
||||||
|
endpoint_url=config.default_endpoint,
|
||||||
|
use_ssl=config.default_is_secure,
|
||||||
|
verify=config.default_ssl_verify,
|
||||||
|
config=Config(signature_version='s3'))
|
||||||
|
return client
|
||||||
|
|
||||||
|
def get_sts_client(**kwargs):
|
||||||
|
kwargs.setdefault('aws_access_key_id', config.alt_access_key)
|
||||||
|
kwargs.setdefault('aws_secret_access_key', config.alt_secret_key)
|
||||||
|
kwargs.setdefault('config', Config(signature_version='s3v4'))
|
||||||
|
|
||||||
|
client = boto3.client(service_name='sts',
|
||||||
|
endpoint_url=config.default_endpoint,
|
||||||
|
region_name='',
|
||||||
|
use_ssl=config.default_is_secure,
|
||||||
|
verify=config.default_ssl_verify,
|
||||||
|
**kwargs)
|
||||||
|
return client
|
||||||
|
|
||||||
|
def get_iam_client(**kwargs):
|
||||||
|
kwargs.setdefault('aws_access_key_id', config.iam_access_key)
|
||||||
|
kwargs.setdefault('aws_secret_access_key', config.iam_secret_key)
|
||||||
|
|
||||||
|
client = boto3.client(service_name='iam',
|
||||||
|
endpoint_url=config.default_endpoint,
|
||||||
|
region_name='',
|
||||||
|
use_ssl=config.default_is_secure,
|
||||||
|
                          verify=config.default_ssl_verify,
                          **kwargs)
    return client

def get_iam_s3client(**kwargs):
    kwargs.setdefault('aws_access_key_id', config.iam_access_key)
    kwargs.setdefault('aws_secret_access_key', config.iam_secret_key)
    kwargs.setdefault('config', Config(signature_version='s3v4'))

    client = boto3.client(service_name='s3',
                          endpoint_url=config.default_endpoint,
                          use_ssl=config.default_is_secure,
                          verify=config.default_ssl_verify,
                          **kwargs)
    return client

def get_iam_root_client(**kwargs):
    kwargs.setdefault('service_name', 'iam')
    kwargs.setdefault('aws_access_key_id', config.iam_root_access_key)
    kwargs.setdefault('aws_secret_access_key', config.iam_root_secret_key)

    return boto3.client(endpoint_url=config.default_endpoint,
                        region_name='',
                        use_ssl=config.default_is_secure,
                        verify=config.default_ssl_verify,
                        **kwargs)

def get_iam_alt_root_client(**kwargs):
    kwargs.setdefault('service_name', 'iam')
    kwargs.setdefault('aws_access_key_id', config.iam_alt_root_access_key)
    kwargs.setdefault('aws_secret_access_key', config.iam_alt_root_secret_key)

    return boto3.client(endpoint_url=config.default_endpoint,
                        region_name='',
                        use_ssl=config.default_is_secure,
                        verify=config.default_ssl_verify,
                        **kwargs)

def get_alt_client(client_config=None):
    if client_config is None:
        client_config = Config(signature_version='s3v4')

    client = boto3.client(service_name='s3',
                          aws_access_key_id=config.alt_access_key,
                          aws_secret_access_key=config.alt_secret_key,
                          endpoint_url=config.default_endpoint,
                          use_ssl=config.default_is_secure,
                          verify=config.default_ssl_verify,
                          config=client_config)
    return client

def get_cloud_client(client_config=None):
    if client_config is None:
        client_config = Config(signature_version='s3v4')

    client = boto3.client(service_name='s3',
                          aws_access_key_id=config.cloud_access_key,
                          aws_secret_access_key=config.cloud_secret_key,
                          endpoint_url=config.cloud_endpoint,
                          use_ssl=config.cloud_is_secure,
                          config=client_config)
    return client

def get_tenant_client(client_config=None):
    if client_config is None:
        client_config = Config(signature_version='s3v4')

    client = boto3.client(service_name='s3',
                          aws_access_key_id=config.tenant_access_key,
                          aws_secret_access_key=config.tenant_secret_key,
                          endpoint_url=config.default_endpoint,
                          use_ssl=config.default_is_secure,
                          verify=config.default_ssl_verify,
                          config=client_config)
    return client

def get_v2_tenant_client():
    client_config = Config(signature_version='s3')
    client = boto3.client(service_name='s3',
                          aws_access_key_id=config.tenant_access_key,
                          aws_secret_access_key=config.tenant_secret_key,
                          endpoint_url=config.default_endpoint,
                          use_ssl=config.default_is_secure,
                          verify=config.default_ssl_verify,
                          config=client_config)
    return client

def get_tenant_iam_client():
    client = boto3.client(service_name='iam',
                          region_name='us-east-1',
                          aws_access_key_id=config.tenant_access_key,
                          aws_secret_access_key=config.tenant_secret_key,
                          endpoint_url=config.default_endpoint,
                          verify=config.default_ssl_verify,
                          use_ssl=config.default_is_secure)
    return client

def get_alt_iam_client():
    client = boto3.client(service_name='iam',
                          region_name='',
                          aws_access_key_id=config.alt_access_key,
                          aws_secret_access_key=config.alt_secret_key,
                          endpoint_url=config.default_endpoint,
                          verify=config.default_ssl_verify,
                          use_ssl=config.default_is_secure)
    return client

def get_unauthenticated_client():
    client = boto3.client(service_name='s3',
                          aws_access_key_id='',
                          aws_secret_access_key='',
                          endpoint_url=config.default_endpoint,
                          use_ssl=config.default_is_secure,
                          verify=config.default_ssl_verify,
                          config=Config(signature_version=UNSIGNED))
    return client

def get_bad_auth_client(aws_access_key_id='badauth'):
    client = boto3.client(service_name='s3',
                          aws_access_key_id=aws_access_key_id,
                          aws_secret_access_key='roflmao',
                          endpoint_url=config.default_endpoint,
                          use_ssl=config.default_is_secure,
                          verify=config.default_ssl_verify,
                          config=Config(signature_version='s3v4'))
    return client

def get_svc_client(client_config=None, svc='s3'):
    if client_config is None:
        client_config = Config(signature_version='s3v4')

    client = boto3.client(service_name=svc,
                          aws_access_key_id=config.main_access_key,
                          aws_secret_access_key=config.main_secret_key,
                          endpoint_url=config.default_endpoint,
                          use_ssl=config.default_is_secure,
                          verify=config.default_ssl_verify,
                          config=client_config)
    return client
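
All of these factories share one pattern: endpoint, TLS mode, and certificate verification come from the suite's config object, and only the credentials and signature version vary. A minimal interactive sketch of wiring two of them together (illustrative, not part of the suite):

    main = get_client()
    alt = get_alt_client()
    # both identities should reach the same endpoint under their own credentials
    print(main.list_buckets()['Owner']['DisplayName'])
    print(alt.list_buckets()['Owner']['DisplayName'])
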
bucket_counter = itertools.count(1)

def get_new_bucket_name():
    """
    Get a bucket name that probably does not exist.

    We make every attempt to use a unique random prefix, so if a
    bucket by this name happens to exist, it's ok if tests give
    false negatives.
    """
    name = '{prefix}{num}'.format(
        prefix=prefix,
        num=next(bucket_counter),
        )
    return name

def get_new_bucket_resource(name=None):
    """
    Get a bucket that exists and is empty.

    Always recreates a bucket from scratch. This is useful to also
    reset ACLs and such.
    """
    s3 = boto3.resource('s3',
                        aws_access_key_id=config.main_access_key,
                        aws_secret_access_key=config.main_secret_key,
                        endpoint_url=config.default_endpoint,
                        use_ssl=config.default_is_secure,
                        verify=config.default_ssl_verify)
    if name is None:
        name = get_new_bucket_name()
    bucket = s3.Bucket(name)
    bucket_location = bucket.create()
    return bucket

def get_new_bucket(client=None, name=None):
    """
    Get a bucket that exists and is empty.

    Always recreates a bucket from scratch. This is useful to also
    reset ACLs and such.
    """
    if client is None:
        client = get_client()
    if name is None:
        name = get_new_bucket_name()

    client.create_bucket(Bucket=name)
    return name
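
Note the asymmetry between the two creators: get_new_bucket_resource returns a boto3 Bucket resource, while get_new_bucket returns only the name string for use with low-level clients. A short sketch of both (bucket names will carry the configured prefix):

    name = get_new_bucket()                # e.g. '<prefix>1'
    get_client().put_object(Bucket=name, Key='foo', Body='bar')

    bucket = get_new_bucket_resource()     # resource API instead of a name
    bucket.put_object(Key='foo', Body='bar')
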
def get_parameter_name():
    parameter_name = ""
    rand = ''.join(
        random.choice(string.ascii_lowercase + string.digits)
        for c in range(255)
        )
    # trim the random filler one character at a time until it fits
    # within the 10-character budget
    while rand:
        parameter_name = '{random}'.format(random=rand)
        if len(parameter_name) <= 10:
            return parameter_name
        rand = rand[:-1]
    return parameter_name

def get_sts_user_id():
    return config.alt_user_id

def get_config_is_secure():
    return config.default_is_secure

def get_config_host():
    return config.default_host

def get_config_port():
    return config.default_port

def get_config_endpoint():
    return config.default_endpoint

def get_config_ssl_verify():
    return config.default_ssl_verify

def get_main_aws_access_key():
    return config.main_access_key

def get_main_aws_secret_key():
    return config.main_secret_key

def get_main_display_name():
    return config.main_display_name

def get_main_user_id():
    return config.main_user_id

def get_main_email():
    return config.main_email

def get_main_api_name():
    return config.main_api_name

def get_main_kms_keyid():
    return config.main_kms_keyid

def get_secondary_kms_keyid():
    return config.main_kms_keyid2

def get_alt_aws_access_key():
    return config.alt_access_key

def get_alt_aws_secret_key():
    return config.alt_secret_key

def get_alt_display_name():
    return config.alt_display_name

def get_alt_user_id():
    return config.alt_user_id

def get_alt_email():
    return config.alt_email

def get_tenant_aws_access_key():
    return config.tenant_access_key

def get_tenant_aws_secret_key():
    return config.tenant_secret_key

def get_tenant_display_name():
    return config.tenant_display_name

def get_tenant_name():
    return config.tenant_name

def get_tenant_user_id():
    return config.tenant_user_id

def get_tenant_email():
    return config.tenant_email

def get_thumbprint():
    return config.webidentity_thumbprint

def get_aud():
    return config.webidentity_aud

def get_sub():
    return config.webidentity_sub

def get_azp():
    return config.webidentity_azp

def get_token():
    return config.webidentity_token

def get_realm_name():
    return config.webidentity_realm

def get_iam_name_prefix():
    return config.iam_name_prefix

def make_iam_name(name):
    return config.iam_name_prefix + name

def get_iam_path_prefix():
    return config.iam_path_prefix

def get_iam_access_key():
    return config.iam_access_key

def get_iam_secret_key():
    return config.iam_secret_key

def get_iam_root_user_id():
    return config.iam_root_user_id

def get_iam_root_email():
    return config.iam_root_email

def get_iam_alt_root_user_id():
    return config.iam_alt_root_user_id

def get_iam_alt_root_email():
    return config.iam_alt_root_email

def get_user_token():
    return config.webidentity_user_token

def get_cloud_storage_class():
    return config.cloud_storage_class

def get_cloud_retain_head_object():
    return config.cloud_retain_head_object

def get_cloud_regular_storage_class():
    return config.cloud_regular_storage_class

def get_cloud_target_path():
    return config.cloud_target_path

def get_cloud_target_storage_class():
    return config.cloud_target_storage_class

def get_lc_debug_interval():
    return config.lc_debug_interval

s3tests_boto3/functional/iam.py (new file, 199 lines)
@@ -0,0 +1,199 @@

from botocore.exceptions import ClientError
import pytest

from . import (
    configfile,
    get_iam_root_client,
    get_iam_root_user_id,
    get_iam_root_email,
    get_iam_alt_root_client,
    get_iam_alt_root_user_id,
    get_iam_alt_root_email,
    get_iam_path_prefix,
    )

def nuke_user_keys(client, name):
    p = client.get_paginator('list_access_keys')
    for response in p.paginate(UserName=name):
        for key in response['AccessKeyMetadata']:
            try:
                client.delete_access_key(UserName=name, AccessKeyId=key['AccessKeyId'])
            except:
                pass

def nuke_user_policies(client, name):
    p = client.get_paginator('list_user_policies')
    for response in p.paginate(UserName=name):
        for policy in response['PolicyNames']:
            try:
                client.delete_user_policy(UserName=name, PolicyName=policy)
            except:
                pass

def nuke_attached_user_policies(client, name):
    p = client.get_paginator('list_attached_user_policies')
    for response in p.paginate(UserName=name):
        for policy in response['AttachedPolicies']:
            try:
                client.detach_user_policy(UserName=name, PolicyArn=policy['PolicyArn'])
            except:
                pass

def nuke_user(client, name):
    # delete access keys, user policies, etc
    try:
        nuke_user_keys(client, name)
    except:
        pass
    try:
        nuke_user_policies(client, name)
    except:
        pass
    try:
        nuke_attached_user_policies(client, name)
    except:
        pass
    client.delete_user(UserName=name)

def nuke_users(client, **kwargs):
    p = client.get_paginator('list_users')
    for response in p.paginate(**kwargs):
        for user in response['Users']:
            try:
                nuke_user(client, user['UserName'])
            except:
                pass
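
nuke_user works in the order IAM requires: a user's access keys and policies must be gone before delete_user can succeed, and only that final call is allowed to raise, so a genuinely stuck user still fails the teardown loudly. A hypothetical cleanup call (the path prefix shown is illustrative):

    iam = get_iam_root_client()
    # cascades through keys and policies for every user under the prefix
    nuke_users(iam, PathPrefix='/s3tests/')
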
def nuke_group_policies(client, name):
    p = client.get_paginator('list_group_policies')
    for response in p.paginate(GroupName=name):
        for policy in response['PolicyNames']:
            try:
                client.delete_group_policy(GroupName=name, PolicyName=policy)
            except:
                pass

def nuke_attached_group_policies(client, name):
    p = client.get_paginator('list_attached_group_policies')
    for response in p.paginate(GroupName=name):
        for policy in response['AttachedPolicies']:
            try:
                client.detach_group_policy(GroupName=name, PolicyArn=policy['PolicyArn'])
            except:
                pass

def nuke_group_users(client, name):
    p = client.get_paginator('get_group')
    for response in p.paginate(GroupName=name):
        for user in response['Users']:
            try:
                client.remove_user_from_group(GroupName=name, UserName=user['UserName'])
            except:
                pass

def nuke_group(client, name):
    # delete group policies and remove all users
    try:
        nuke_group_policies(client, name)
    except:
        pass
    try:
        nuke_attached_group_policies(client, name)
    except:
        pass
    try:
        nuke_group_users(client, name)
    except:
        pass
    client.delete_group(GroupName=name)

def nuke_groups(client, **kwargs):
    p = client.get_paginator('list_groups')
    for response in p.paginate(**kwargs):
        for group in response['Groups']:
            try:
                nuke_group(client, group['GroupName'])
            except:
                pass

def nuke_role_policies(client, name):
    p = client.get_paginator('list_role_policies')
    for response in p.paginate(RoleName=name):
        for policy in response['PolicyNames']:
            try:
                client.delete_role_policy(RoleName=name, PolicyName=policy)
            except:
                pass

def nuke_attached_role_policies(client, name):
    p = client.get_paginator('list_attached_role_policies')
    for response in p.paginate(RoleName=name):
        for policy in response['AttachedPolicies']:
            try:
                client.detach_role_policy(RoleName=name, PolicyArn=policy['PolicyArn'])
            except:
                pass

def nuke_role(client, name):
    # delete role policies, etc
    try:
        nuke_role_policies(client, name)
    except:
        pass
    try:
        nuke_attached_role_policies(client, name)
    except:
        pass
    client.delete_role(RoleName=name)

def nuke_roles(client, **kwargs):
    p = client.get_paginator('list_roles')
    for response in p.paginate(**kwargs):
        for role in response['Roles']:
            try:
                nuke_role(client, role['RoleName'])
            except:
                pass

def nuke_oidc_providers(client, prefix):
    result = client.list_open_id_connect_providers()
    for provider in result['OpenIDConnectProviderList']:
        arn = provider['Arn']
        if f':oidc-provider{prefix}' in arn:
            try:
                client.delete_open_id_connect_provider(OpenIDConnectProviderArn=arn)
            except:
                pass

# fixture for iam account root user
@pytest.fixture
def iam_root(configfile):
    client = get_iam_root_client()
    try:
        arn = client.get_user()['User']['Arn']
        if not arn.endswith(':root'):
            pytest.skip('[iam root] user does not have :root arn')
    except ClientError:
        pytest.skip('[iam root] user does not belong to an account')

    yield client
    nuke_users(client, PathPrefix=get_iam_path_prefix())
    nuke_groups(client, PathPrefix=get_iam_path_prefix())
    nuke_roles(client, PathPrefix=get_iam_path_prefix())
    nuke_oidc_providers(client, get_iam_path_prefix())

# fixture for iam alt account root user
@pytest.fixture
def iam_alt_root(configfile):
    client = get_iam_alt_root_client()
    try:
        arn = client.get_user()['User']['Arn']
        if not arn.endswith(':root'):
            pytest.skip('[iam alt root] user does not have :root arn')
    except ClientError:
        pytest.skip('[iam alt root] user does not belong to an account')

    yield client
    nuke_users(client, PathPrefix=get_iam_path_prefix())
    nuke_roles(client, PathPrefix=get_iam_path_prefix())
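
Everything before the yield is setup (skipping when the configured credentials are not an account root), and everything after it is teardown, so each test starts from a clean account. A hypothetical test consuming the fixture, in the style of the (suppressed) test_iam.py below; make_iam_name and get_iam_path_prefix come from the package __init__ shown earlier:

    @pytest.mark.iam_account
    def test_account_user_create(iam_root):
        name = make_iam_name('ExampleUser')   # illustrative name
        response = iam_root.create_user(UserName=name, Path=get_iam_path_prefix())
        assert response['User']['UserName'] == name
        # no explicit cleanup: the fixture's nuke_users() teardown removes it
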

s3tests_boto3/functional/policy.py (new file, 46 lines)
@@ -0,0 +1,46 @@

import json

class Statement(object):
    def __init__(self, action, resource, principal={"AWS": "*"}, effect="Allow", condition=None):
        self.principal = principal
        self.action = action
        self.resource = resource
        self.condition = condition
        self.effect = effect

    def to_dict(self):
        d = {
            "Action": self.action,
            "Principal": self.principal,
            "Effect": self.effect,
            "Resource": self.resource,
            }

        if self.condition is not None:
            d["Condition"] = self.condition

        return d

class Policy(object):
    def __init__(self):
        self.statements = []

    def add_statement(self, s):
        self.statements.append(s)
        return self

    def to_json(self):
        policy_dict = {
            "Version": "2012-10-17",
            "Statement": [s.to_dict() for s in self.statements],
            }

        return json.dumps(policy_dict)

def make_json_policy(action, resource, principal={"AWS": "*"}, effect="Allow", conditions=None):
    """
    Helper function to make single-statement policies.
    """
    s = Statement(action, resource, principal, effect=effect, condition=conditions)
    p = Policy()
    return p.add_statement(s).to_json()
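
For a sense of the output, one make_json_policy call yields a complete policy document; the bucket ARN here is illustrative:

    doc = make_json_policy('s3:GetObject', 'arn:aws:s3:::example-bucket/*')
    # {"Version": "2012-10-17",
    #  "Statement": [{"Action": "s3:GetObject", "Principal": {"AWS": "*"},
    #                 "Effect": "Allow", "Resource": "arn:aws:s3:::example-bucket/*"}]}
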

s3tests_boto3/functional/rgw_interactive.py (new file, 92 lines)
@@ -0,0 +1,92 @@

#!/usr/bin/python
import boto3
import os
import random
import string
import itertools

host = "localhost"
port = 8000

## AWS access key
access_key = "0555b35654ad1656d804"

## AWS secret key
secret_key = "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="

prefix = "YOURNAMEHERE-1234-"

endpoint_url = "http://%s:%d" % (host, port)

client = boto3.client(service_name='s3',
                      aws_access_key_id=access_key,
                      aws_secret_access_key=secret_key,
                      endpoint_url=endpoint_url,
                      use_ssl=False,
                      verify=False)

s3 = boto3.resource('s3',
                    use_ssl=False,
                    verify=False,
                    endpoint_url=endpoint_url,
                    aws_access_key_id=access_key,
                    aws_secret_access_key=secret_key)

def choose_bucket_prefix(template, max_len=30):
    """
    Choose a prefix for our test buckets, so they're easy to identify.

    Use template and feed it more and more random filler, until it's
    as long as possible but still below max_len.
    """
    rand = ''.join(
        random.choice(string.ascii_lowercase + string.digits)
        for c in range(255)
        )

    while rand:
        s = template.format(random=rand)
        if len(s) <= max_len:
            return s
        rand = rand[:-1]

    raise RuntimeError(
        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
            template=template,
            ),
        )
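
The trimming loop returns the longest prefix that fits: the 255-character filler shrinks one character at a time until the formatted template first drops to max_len, so the result is exactly max_len characters whenever the template itself is short enough. For example (output illustrative, since the filler is random):

    p = choose_bucket_prefix('test-{random}-')
    # 'test-' + 24 random chars + '-' == exactly 30 characters,
    # e.g. 'test-4kq0zq3v7p1m8c2j9d5xw6ab-'
    assert len(p) == 30
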
bucket_counter = itertools.count(1)

def get_new_bucket_name():
    """
    Get a bucket name that probably does not exist.

    We make every attempt to use a unique random prefix, so if a
    bucket by this name happens to exist, it's ok if tests give
    false negatives.
    """
    name = '{prefix}{num}'.format(
        prefix=prefix,
        num=next(bucket_counter),
        )
    return name

def get_new_bucket(session=boto3, name=None, headers=None):
    """
    Get a bucket that exists and is empty.

    Always recreates a bucket from scratch. This is useful to also
    reset ACLs and such.
    """
    s3 = session.resource('s3',
                          use_ssl=False,
                          verify=False,
                          endpoint_url=endpoint_url,
                          aws_access_key_id=access_key,
                          aws_secret_access_key=secret_key)
    if name is None:
        name = get_new_bucket_name()
    bucket = s3.Bucket(name)
    bucket_location = bucket.create()
    return bucket

s3tests_boto3/functional/test_headers.py (new file, 572 lines)
@@ -0,0 +1,572 @@

import boto3
import pytest
from botocore.exceptions import ClientError
from email.utils import formatdate

from .utils import assert_raises
from .utils import _get_status_and_error_code
from .utils import _get_status

from . import (
    configfile,
    setup_teardown,
    get_client,
    get_v2_client,
    get_new_bucket,
    get_new_bucket_name,
    )

def _add_header_create_object(headers, client=None):
    """ Create a new bucket, add an object w/header customizations
    """
    bucket_name = get_new_bucket()
    if client is None:
        client = get_client()
    key_name = 'foo'

    # pass in custom headers before the PutObject call
    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
    client.meta.events.register('before-call.s3.PutObject', add_headers)
    client.put_object(Bucket=bucket_name, Key=key_name)

    return bucket_name, key_name


def _add_header_create_bad_object(headers, client=None):
    """ Create a new bucket, add an object with a header. This should cause a failure
    """
    bucket_name = get_new_bucket()
    if client is None:
        client = get_client()
    key_name = 'foo'

    # pass in custom headers before the PutObject call
    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
    client.meta.events.register('before-call.s3.PutObject', add_headers)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body='bar')

    return e


def _remove_header_create_object(remove, client=None):
    """ Create a new bucket, add an object without a header
    """
    bucket_name = get_new_bucket()
    if client is None:
        client = get_client()
    key_name = 'foo'

    # remove custom headers before the PutObject call
    def remove_header(**kwargs):
        if remove in kwargs['params']['headers']:
            del kwargs['params']['headers'][remove]

    client.meta.events.register('before-call.s3.PutObject', remove_header)
    client.put_object(Bucket=bucket_name, Key=key_name)

    return bucket_name, key_name

def _remove_header_create_bad_object(remove, client=None):
    """ Create a new bucket, add an object without a header. This should cause a failure
    """
    bucket_name = get_new_bucket()
    if client is None:
        client = get_client()
    key_name = 'foo'

    # remove custom headers before the PutObject call
    def remove_header(**kwargs):
        if remove in kwargs['params']['headers']:
            del kwargs['params']['headers'][remove]

    client.meta.events.register('before-call.s3.PutObject', remove_header)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body='bar')

    return e


def _add_header_create_bucket(headers, client=None):
    """ Create a new bucket, w/header customizations
    """
    bucket_name = get_new_bucket_name()
    if client is None:
        client = get_client()

    # pass in custom headers before the CreateBucket call
    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
    client.meta.events.register('before-call.s3.CreateBucket', add_headers)
    client.create_bucket(Bucket=bucket_name)

    return bucket_name


def _add_header_create_bad_bucket(headers=None, client=None):
    """ Create a new bucket, w/header customizations that should cause a failure
    """
    bucket_name = get_new_bucket_name()
    if client is None:
        client = get_client()

    # pass in custom headers before the CreateBucket call
    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
    client.meta.events.register('before-call.s3.CreateBucket', add_headers)
    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)

    return e


def _remove_header_create_bucket(remove, client=None):
    """ Create a new bucket, without a header
    """
    bucket_name = get_new_bucket_name()
    if client is None:
        client = get_client()

    # remove custom headers before the CreateBucket call
    def remove_header(**kwargs):
        if remove in kwargs['params']['headers']:
            del kwargs['params']['headers'][remove]

    client.meta.events.register('before-call.s3.CreateBucket', remove_header)
    client.create_bucket(Bucket=bucket_name)

    return bucket_name

def _remove_header_create_bad_bucket(remove, client=None):
    """ Create a new bucket, without a header. This should cause a failure
    """
    bucket_name = get_new_bucket_name()
    if client is None:
        client = get_client()

    # remove custom headers before the CreateBucket call
    def remove_header(**kwargs):
        if remove in kwargs['params']['headers']:
            del kwargs['params']['headers'][remove]

    client.meta.events.register('before-call.s3.CreateBucket', remove_header)
    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)

    return e
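
All eight helpers lean on the same botocore mechanism: a handler registered for a 'before-call' event receives the assembled request in kwargs['params'], so mutating params['headers'] changes exactly what goes on the wire, after boto3's own parameter validation has already run. A standalone sketch of the hook (endpoint and credentials are illustrative):

    import boto3

    client = boto3.client('s3', endpoint_url='http://localhost:8000',
                          aws_access_key_id='key', aws_secret_access_key='secret')

    def spy(**kwargs):
        # print the raw headers botocore is about to send for this operation
        print(kwargs['params']['headers'])

    client.meta.events.register('before-call.s3.PutObject', spy)
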
#
# common tests
#

@pytest.mark.auth_common
def test_object_create_bad_md5_invalid_short():
    e = _add_header_create_bad_object({'Content-MD5': 'YWJyYWNhZGFicmE='})
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 400
    assert error_code == 'InvalidDigest'

@pytest.mark.auth_common
def test_object_create_bad_md5_bad():
    e = _add_header_create_bad_object({'Content-MD5': 'rL0Y20xC+Fzt72VPzMSk2A=='})
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 400
    assert error_code == 'BadDigest'

@pytest.mark.auth_common
def test_object_create_bad_md5_empty():
    e = _add_header_create_bad_object({'Content-MD5': ''})
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 400
    assert error_code == 'InvalidDigest'

@pytest.mark.auth_common
def test_object_create_bad_md5_none():
    bucket_name, key_name = _remove_header_create_object('Content-MD5')
    client = get_client()
    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
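
The distinction these tests probe: a Content-MD5 that is not a valid base64 encoding of 16 bytes is rejected as InvalidDigest, while a well-formed value that does not match the body is BadDigest. For reference, a correct value is the base64 of the body's raw MD5 digest (the digest shown is the real one for b'bar'):

    import base64, hashlib

    md5 = base64.b64encode(hashlib.md5(b'bar').digest()).decode()
    # md5 == 'N7UdGUp1E+RbVvZSTy1R8g=='
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', ContentMD5=md5)
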
@pytest.mark.auth_common
def test_object_create_bad_expect_mismatch():
    bucket_name, key_name = _add_header_create_object({'Expect': 200})
    client = get_client()
    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')

@pytest.mark.auth_common
def test_object_create_bad_expect_empty():
    bucket_name, key_name = _add_header_create_object({'Expect': ''})
    client = get_client()
    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')

@pytest.mark.auth_common
def test_object_create_bad_expect_none():
    bucket_name, key_name = _remove_header_create_object('Expect')
    client = get_client()
    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')

@pytest.mark.auth_common
# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
@pytest.mark.fails_on_rgw
def test_object_create_bad_contentlength_empty():
    e = _add_header_create_bad_object({'Content-Length': ''})
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 400

@pytest.mark.auth_common
@pytest.mark.fails_on_mod_proxy_fcgi
def test_object_create_bad_contentlength_negative():
    client = get_client()
    bucket_name = get_new_bucket()
    key_name = 'foo'
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, ContentLength=-1)
    status = _get_status(e.response)
    assert status == 400

@pytest.mark.auth_common
# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
@pytest.mark.fails_on_rgw
def test_object_create_bad_contentlength_none():
    remove = 'Content-Length'
    e = _remove_header_create_bad_object(remove)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 411
    assert error_code == 'MissingContentLength'

@pytest.mark.auth_common
def test_object_create_bad_contenttype_invalid():
    bucket_name, key_name = _add_header_create_object({'Content-Type': 'text/plain'})
    client = get_client()
    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')

@pytest.mark.auth_common
def test_object_create_bad_contenttype_empty():
    client = get_client()
    key_name = 'foo'
    bucket_name = get_new_bucket()
    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar', ContentType='')

@pytest.mark.auth_common
def test_object_create_bad_contenttype_none():
    bucket_name = get_new_bucket()
    key_name = 'foo'
    client = get_client()
    # as long as ContentType isn't specified in put_object it isn't going into the request
    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')


@pytest.mark.auth_common
# TODO: remove 'fails_on_rgw' once we have learned how to remove the authorization header
@pytest.mark.fails_on_rgw
def test_object_create_bad_authorization_empty():
    e = _add_header_create_bad_object({'Authorization': ''})
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403

@pytest.mark.auth_common
# TODO: remove 'fails_on_rgw' once we have learned how to pass both the 'Date' and 'X-Amz-Date' headers during signing, and not just 'X-Amz-Date'
@pytest.mark.fails_on_rgw
def test_object_create_date_and_amz_date():
    date = formatdate(usegmt=True)
    bucket_name, key_name = _add_header_create_object({'Date': date, 'X-Amz-Date': date})
    client = get_client()
    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')

@pytest.mark.auth_common
# TODO: remove 'fails_on_rgw' once we have learned how to pass both the 'Date' and 'X-Amz-Date' headers during signing, and not just 'X-Amz-Date'
@pytest.mark.fails_on_rgw
def test_object_create_amz_date_and_no_date():
    date = formatdate(usegmt=True)
    bucket_name, key_name = _add_header_create_object({'Date': '', 'X-Amz-Date': date})
    client = get_client()
    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')

# the teardown is really messed up here. check it out
@pytest.mark.auth_common
# TODO: remove 'fails_on_rgw' once we have learned how to remove the authorization header
@pytest.mark.fails_on_rgw
def test_object_create_bad_authorization_none():
    e = _remove_header_create_bad_object('Authorization')
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403

@pytest.mark.auth_common
# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
@pytest.mark.fails_on_rgw
def test_bucket_create_contentlength_none():
    remove = 'Content-Length'
    _remove_header_create_bucket(remove)

@pytest.mark.auth_common
# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
@pytest.mark.fails_on_rgw
def test_object_acl_create_contentlength_none():
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    remove = 'Content-Length'
    def remove_header(**kwargs):
        if remove in kwargs['params']['headers']:
            del kwargs['params']['headers'][remove]

    client.meta.events.register('before-call.s3.PutObjectAcl', remove_header)
    client.put_object_acl(Bucket=bucket_name, Key='foo', ACL='public-read')

@pytest.mark.auth_common
def test_bucket_put_bad_canned_acl():
    bucket_name = get_new_bucket()
    client = get_client()

    headers = {'x-amz-acl': 'public-ready'}
    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
    client.meta.events.register('before-call.s3.PutBucketAcl', add_headers)

    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, ACL='public-read')
    status = _get_status(e.response)
    assert status == 400

@pytest.mark.auth_common
def test_bucket_create_bad_expect_mismatch():
    bucket_name = get_new_bucket_name()
    client = get_client()

    headers = {'Expect': 200}
    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
    client.meta.events.register('before-call.s3.CreateBucket', add_headers)
    client.create_bucket(Bucket=bucket_name)

@pytest.mark.auth_common
def test_bucket_create_bad_expect_empty():
    headers = {'Expect': ''}
    _add_header_create_bucket(headers)

@pytest.mark.auth_common
# TODO: The request isn't even making it to the RGW past the frontend
# This test had 'fails_on_rgw' before the move to boto3
@pytest.mark.fails_on_rgw
def test_bucket_create_bad_contentlength_empty():
    headers = {'Content-Length': ''}
    e = _add_header_create_bad_bucket(headers)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 400

@pytest.mark.auth_common
@pytest.mark.fails_on_mod_proxy_fcgi
def test_bucket_create_bad_contentlength_negative():
    headers = {'Content-Length': '-1'}
    e = _add_header_create_bad_bucket(headers)
    status = _get_status(e.response)
    assert status == 400

@pytest.mark.auth_common
# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
@pytest.mark.fails_on_rgw
def test_bucket_create_bad_contentlength_none():
    remove = 'Content-Length'
    _remove_header_create_bucket(remove)

@pytest.mark.auth_common
# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
@pytest.mark.fails_on_rgw
def test_bucket_create_bad_authorization_empty():
    headers = {'Authorization': ''}
    e = _add_header_create_bad_bucket(headers)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'AccessDenied'

@pytest.mark.auth_common
# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
@pytest.mark.fails_on_rgw
def test_bucket_create_bad_authorization_none():
    e = _remove_header_create_bad_bucket('Authorization')
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'AccessDenied'

@pytest.mark.auth_aws2
def test_object_create_bad_md5_invalid_garbage_aws2():
    v2_client = get_v2_client()
    headers = {'Content-MD5': 'AWS HAHAHA'}
    e = _add_header_create_bad_object(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 400
    assert error_code == 'InvalidDigest'

@pytest.mark.auth_aws2
# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the Content-Length header
@pytest.mark.fails_on_rgw
def test_object_create_bad_contentlength_mismatch_below_aws2():
    v2_client = get_v2_client()
    content = 'bar'
    length = len(content) - 1
    headers = {'Content-Length': str(length)}
    e = _add_header_create_bad_object(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 400
    assert error_code == 'BadDigest'

@pytest.mark.auth_aws2
# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
@pytest.mark.fails_on_rgw
def test_object_create_bad_authorization_incorrect_aws2():
    v2_client = get_v2_client()
    headers = {'Authorization': 'AWS AKIAIGR7ZNNBHC5BKSUB:FWeDfwojDSdS2Ztmpfeubhd9isU='}
    e = _add_header_create_bad_object(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'InvalidDigest'

@pytest.mark.auth_aws2
# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
@pytest.mark.fails_on_rgw
def test_object_create_bad_authorization_invalid_aws2():
    v2_client = get_v2_client()
    headers = {'Authorization': 'AWS HAHAHA'}
    e = _add_header_create_bad_object(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 400
    assert error_code == 'InvalidArgument'

@pytest.mark.auth_aws2
def test_object_create_bad_ua_empty_aws2():
    v2_client = get_v2_client()
    headers = {'User-Agent': ''}
    bucket_name, key_name = _add_header_create_object(headers, v2_client)
    v2_client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')

@pytest.mark.auth_aws2
def test_object_create_bad_ua_none_aws2():
    v2_client = get_v2_client()
    remove = 'User-Agent'
    bucket_name, key_name = _remove_header_create_object(remove, v2_client)
    v2_client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')

@pytest.mark.auth_aws2
def test_object_create_bad_date_invalid_aws2():
    v2_client = get_v2_client()
    headers = {'x-amz-date': 'Bad Date'}
    e = _add_header_create_bad_object(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'AccessDenied'

@pytest.mark.auth_aws2
def test_object_create_bad_date_empty_aws2():
    v2_client = get_v2_client()
    headers = {'x-amz-date': ''}
    e = _add_header_create_bad_object(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'AccessDenied'

@pytest.mark.auth_aws2
# TODO: remove 'fails_on_rgw' once we have learned how to remove the date header
@pytest.mark.fails_on_rgw
def test_object_create_bad_date_none_aws2():
    v2_client = get_v2_client()
    remove = 'x-amz-date'
    e = _remove_header_create_bad_object(remove, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'AccessDenied'

@pytest.mark.auth_aws2
def test_object_create_bad_date_before_today_aws2():
    v2_client = get_v2_client()
    headers = {'x-amz-date': 'Tue, 07 Jul 2010 21:53:04 GMT'}
    e = _add_header_create_bad_object(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'RequestTimeTooSkewed'

@pytest.mark.auth_aws2
def test_object_create_bad_date_before_epoch_aws2():
    v2_client = get_v2_client()
    headers = {'x-amz-date': 'Tue, 07 Jul 1950 21:53:04 GMT'}
    e = _add_header_create_bad_object(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'AccessDenied'

@pytest.mark.auth_aws2
def test_object_create_bad_date_after_end_aws2():
    v2_client = get_v2_client()
    headers = {'x-amz-date': 'Tue, 07 Jul 9999 21:53:04 GMT'}
    e = _add_header_create_bad_object(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'RequestTimeTooSkewed'

@pytest.mark.auth_aws2
# TODO: remove 'fails_on_rgw' once we have learned how to remove the date header
@pytest.mark.fails_on_rgw
def test_bucket_create_bad_authorization_invalid_aws2():
    v2_client = get_v2_client()
    headers = {'Authorization': 'AWS HAHAHA'}
    e = _add_header_create_bad_bucket(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 400
    assert error_code == 'InvalidArgument'

@pytest.mark.auth_aws2
def test_bucket_create_bad_ua_empty_aws2():
    v2_client = get_v2_client()
    headers = {'User-Agent': ''}
    _add_header_create_bucket(headers, v2_client)

@pytest.mark.auth_aws2
def test_bucket_create_bad_ua_none_aws2():
    v2_client = get_v2_client()
    remove = 'User-Agent'
    _remove_header_create_bucket(remove, v2_client)

@pytest.mark.auth_aws2
def test_bucket_create_bad_date_invalid_aws2():
    v2_client = get_v2_client()
    headers = {'x-amz-date': 'Bad Date'}
    e = _add_header_create_bad_bucket(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'AccessDenied'

@pytest.mark.auth_aws2
def test_bucket_create_bad_date_empty_aws2():
    v2_client = get_v2_client()
    headers = {'x-amz-date': ''}
    e = _add_header_create_bad_bucket(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'AccessDenied'

@pytest.mark.auth_aws2
# TODO: remove 'fails_on_rgw' once we have learned how to remove the date header
@pytest.mark.fails_on_rgw
def test_bucket_create_bad_date_none_aws2():
    v2_client = get_v2_client()
    remove = 'x-amz-date'
    e = _remove_header_create_bad_bucket(remove, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'AccessDenied'

@pytest.mark.auth_aws2
def test_bucket_create_bad_date_before_today_aws2():
    v2_client = get_v2_client()
    headers = {'x-amz-date': 'Tue, 07 Jul 2010 21:53:04 GMT'}
    e = _add_header_create_bad_bucket(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'RequestTimeTooSkewed'

@pytest.mark.auth_aws2
def test_bucket_create_bad_date_after_today_aws2():
    v2_client = get_v2_client()
    headers = {'x-amz-date': 'Tue, 07 Jul 2030 21:53:04 GMT'}
    e = _add_header_create_bad_bucket(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'RequestTimeTooSkewed'

@pytest.mark.auth_aws2
def test_bucket_create_bad_date_before_epoch_aws2():
    v2_client = get_v2_client()
    headers = {'x-amz-date': 'Tue, 07 Jul 1950 21:53:04 GMT'}
    e = _add_header_create_bad_bucket(headers, v2_client)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'AccessDenied'

s3tests_boto3/functional/test_iam.py (new file, 2922 lines; diff suppressed because it is too large)
s3tests_boto3/functional/test_s3.py (new file, 13917 lines; diff suppressed because it is too large)
s3tests_boto3/functional/test_s3select.py (new file, 1685 lines; diff suppressed because it is too large)

s3tests_boto3/functional/test_sns.py (new file, 159 lines)
@@ -0,0 +1,159 @@

import json
import pytest
from botocore.exceptions import ClientError
from . import (
    configfile,
    get_iam_root_client,
    get_iam_alt_root_client,
    get_new_bucket_name,
    get_prefix,
    nuke_prefixed_buckets,
    )
from .iam import iam_root, iam_alt_root
from .utils import assert_raises, _get_status_and_error_code

def get_new_topic_name():
    return get_new_bucket_name()

def nuke_topics(client, prefix):
    p = client.get_paginator('list_topics')
    for response in p.paginate():
        for topic in response['Topics']:
            arn = topic['TopicArn']
            if prefix not in arn:
                continue  # only delete topics under our prefix
            try:
                client.delete_topic(TopicArn=arn)
            except:
                pass

@pytest.fixture
def sns(iam_root):
    client = get_iam_root_client(service_name='sns')
    yield client
    nuke_topics(client, get_prefix())

@pytest.fixture
def sns_alt(iam_alt_root):
    client = get_iam_alt_root_client(service_name='sns')
    yield client
    nuke_topics(client, get_prefix())

@pytest.fixture
def s3(iam_root):
    client = get_iam_root_client(service_name='s3')
    yield client
    nuke_prefixed_buckets(get_prefix(), client)

@pytest.fixture
def s3_alt(iam_alt_root):
    client = get_iam_alt_root_client(service_name='s3')
    yield client
    nuke_prefixed_buckets(get_prefix(), client)
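
The fixtures chain through iam_root and iam_alt_root, so the SNS tests skip automatically when no account-root credentials are configured, and every topic or bucket created under the test prefix is swept after each test. A hypothetical test in the same style (purely illustrative, not part of the suite):

    @pytest.mark.sns
    def test_topic_attributes_roundtrip(sns):
        arn = sns.create_topic(Name=get_new_topic_name())['TopicArn']
        attrs = sns.get_topic_attributes(TopicArn=arn)['Attributes']
        assert attrs['TopicArn'] == arn
        sns.delete_topic(TopicArn=arn)
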
@pytest.mark.iam_account
@pytest.mark.sns
def test_account_topic(sns):
    name = get_new_topic_name()

    response = sns.create_topic(Name=name)
    arn = response['TopicArn']
    assert arn.startswith('arn:aws:sns:')
    assert arn.endswith(f':{name}')

    response = sns.list_topics()
    assert arn in [p['TopicArn'] for p in response['Topics']]

    sns.set_topic_attributes(TopicArn=arn, AttributeName='Policy', AttributeValue='')

    response = sns.get_topic_attributes(TopicArn=arn)
    assert 'Attributes' in response

    sns.delete_topic(TopicArn=arn)

    response = sns.list_topics()
    assert arn not in [p['TopicArn'] for p in response['Topics']]

    with pytest.raises(sns.exceptions.NotFoundException):
        sns.get_topic_attributes(TopicArn=arn)
    sns.delete_topic(TopicArn=arn)

@pytest.mark.iam_account
@pytest.mark.sns
def test_cross_account_topic(sns, sns_alt):
    name = get_new_topic_name()
    arn = sns.create_topic(Name=name)['TopicArn']

    # not visible to any alt user apis
    with pytest.raises(sns.exceptions.NotFoundException):
        sns_alt.get_topic_attributes(TopicArn=arn)
    with pytest.raises(sns.exceptions.NotFoundException):
        sns_alt.set_topic_attributes(TopicArn=arn, AttributeName='Policy', AttributeValue='')

    # delete returns success
    sns_alt.delete_topic(TopicArn=arn)

    response = sns_alt.list_topics()
    assert arn not in [p['TopicArn'] for p in response['Topics']]

@pytest.mark.iam_account
@pytest.mark.sns
def test_account_topic_publish(sns, s3):
    name = get_new_topic_name()

    response = sns.create_topic(Name=name)
    topic_arn = response['TopicArn']

    bucket = get_new_bucket_name()
    s3.create_bucket(Bucket=bucket)

    config = {'TopicConfigurations': [{
        'Id': 'id',
        'TopicArn': topic_arn,
        'Events': [ 's3:ObjectCreated:*' ],
        }]}
    s3.put_bucket_notification_configuration(
        Bucket=bucket, NotificationConfiguration=config)

@pytest.mark.iam_account
@pytest.mark.iam_cross_account
@pytest.mark.sns
def test_cross_account_topic_publish(sns, s3_alt, iam_alt_root):
    name = get_new_topic_name()

    response = sns.create_topic(Name=name)
    topic_arn = response['TopicArn']

    bucket = get_new_bucket_name()
    s3_alt.create_bucket(Bucket=bucket)

    config = {'TopicConfigurations': [{
        'Id': 'id',
        'TopicArn': topic_arn,
        'Events': [ 's3:ObjectCreated:*' ],
        }]}

    # expect AccessDenied because no resource policy allows cross-account access
    e = assert_raises(ClientError, s3_alt.put_bucket_notification_configuration,
                      Bucket=bucket, NotificationConfiguration=config)
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 403
    assert error_code == 'AccessDenied'

    # add topic policy to allow the alt user
    alt_principal = iam_alt_root.get_user()['User']['Arn']
    policy = json.dumps({
        'Version': '2012-10-17',
        'Statement': [{
            'Effect': 'Allow',
            'Principal': {'AWS': alt_principal},
            'Action': 'sns:Publish',
            'Resource': topic_arn
            }]
        })
    sns.set_topic_attributes(TopicArn=topic_arn, AttributeName='Policy',
                             AttributeValue=policy)

    s3_alt.put_bucket_notification_configuration(
        Bucket=bucket, NotificationConfiguration=config)

s3tests_boto3/functional/test_sts.py (new file, 2104 lines; diff suppressed because it is too large)

s3tests_boto3/functional/test_utils.py (new file, 9 lines)
@@ -0,0 +1,9 @@

from . import utils

def test_generate():
    FIVE_MB = 5 * 1024 * 1024
    assert len(''.join(utils.generate_random(0))) == 0
    assert len(''.join(utils.generate_random(1))) == 1
    assert len(''.join(utils.generate_random(FIVE_MB - 1))) == FIVE_MB - 1
    assert len(''.join(utils.generate_random(FIVE_MB))) == FIVE_MB
    assert len(''.join(utils.generate_random(FIVE_MB + 1))) == FIVE_MB + 1

s3tests_boto3/functional/utils.py (new file, 47 lines)
@@ -0,0 +1,47 @@

import random
import requests
import string
import time

def assert_raises(excClass, callableObj, *args, **kwargs):
    """
    Like unittest.TestCase.assertRaises, but returns the exception.
    """
    try:
        callableObj(*args, **kwargs)
    except excClass as e:
        return e
    else:
        if hasattr(excClass, '__name__'):
            excName = excClass.__name__
        else:
            excName = str(excClass)
        raise AssertionError("%s not raised" % excName)
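
Returning the caught exception is the point: callers inspect e.response for the HTTP status and S3 error code, as the tests above do. A minimal usage sketch (the bucket name is illustrative):

    e = assert_raises(ClientError, client.put_object,
                      Bucket='no-such-bucket', Key='k', Body='v')
    status, error_code = _get_status_and_error_code(e.response)
    assert status == 404
    assert error_code == 'NoSuchBucket'
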
def generate_random(size, part_size=5*1024*1024):
    """
    Generate the specified number of bytes of random data.
    (actually each part is one freshly generated 1 KB chunk, repeated)
    """
    chunk = 1024
    allowed = string.ascii_letters
    for x in range(0, size, part_size):
        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
        s = ''
        left = size - x
        this_part_size = min(left, part_size)
        for y in range(this_part_size // chunk):
            s = s + strpart
        s = s + strpart[:(this_part_size % chunk)]
        yield s
        if (x == size):
            return
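
Because generate_random is a generator yielding one part (at most part_size bytes) at a time, callers can stream arbitrarily large bodies without materializing them; joining the parts is only sensible for small sizes, as test_utils.py above does. A quick check of the repetition scheme:

    data = ''.join(generate_random(7 * 1024))
    assert len(data) == 7 * 1024            # one 7 KB part
    assert data[:1024] == data[1024:2048]   # the part repeats its first KB chunk
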
def _get_status(response):
    status = response['ResponseMetadata']['HTTPStatusCode']
    return status

def _get_status_and_error_code(response):
    status = response['ResponseMetadata']['HTTPStatusCode']
    error_code = response['Error']['Code']
    return status, error_code

setup.py (modified, 14 lines changed)
@@ -14,20 +14,10 @@ setup(

install_requires=[
|
install_requires=[
|
||||||
'boto >=2.0b4',
|
'boto >=2.0b4',
|
||||||
|
'boto3 >=1.0.0',
|
||||||
'PyYAML',
|
'PyYAML',
|
||||||
'bunch >=1.0.0',
|
'munch >=2.0.0',
|
||||||
'gevent >=1.0',
|
'gevent >=1.0',
|
||||||
'isodate >=0.4.4',
|
'isodate >=0.4.4',
|
||||||
],
|
],
|
||||||
|
|
||||||
entry_points={
|
|
||||||
'console_scripts': [
|
|
||||||
's3tests-generate-objects = s3tests.generate_objects:main',
|
|
||||||
's3tests-test-readwrite = s3tests.readwrite:main',
|
|
||||||
's3tests-test-roundtrip = s3tests.roundtrip:main',
|
|
||||||
's3tests-fuzz-headers = s3tests.fuzz.headers:main',
|
|
||||||
's3tests-analysis-rwstats = s3tests.analysis.rwstats:main',
|
|
||||||
],
|
|
||||||
},
|
|
||||||
|
|
||||||
)
|
)
|
||||||
|
|
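The bunch-to-munch swap is part of the Python 3 port: bunch is unmaintained, and munch is its maintained, Python 3-compatible fork with the same attribute-style dict access. A quick illustration (the keys here are mine, not from the suite):

    from munch import Munch

    cfg = Munch(host='localhost', port=8000)
    assert cfg.host == cfg['host']   # attribute and key access are interchangeable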
382  siege.conf
@@ -1,382 +0,0 @@
-# Updated by Siege 2.69, May-24-2010
-# Copyright 2000-2007 by Jeffrey Fulmer, et al.
-#
-# Siege configuration file -- edit as necessary
-# For more information about configuring and running
-# this program, visit: http://www.joedog.org/
-
-#
-# Variable declarations. You can set variables here
-# for use in the directives below. Example:
-# PROXY = proxy.joedog.org
-# Reference variables inside ${} or $(), example:
-# proxy-host = ${PROXY}
-# You can also reference ENVIRONMENT variables without
-# actually declaring them, example:
-# logfile = $(HOME)/var/siege.log
-
-#
-# Signify verbose mode, true turns on verbose output
-# ex: verbose = true|false
-#
-verbose = true
-
-#
-# CSV Verbose format: with this option, you can choose
-# to format verbose output in traditional siege format
-# or comma separated format. The latter will allow you
-# to redirect output to a file for import into a spread
-# sheet, i.e., siege > file.csv
-# ex: csv = true|false (default false)
-#
-csv = true
-
-#
-# Full URL verbose format: By default siege displays
-# the URL path and not the full URL. With this option,
-# you # can instruct siege to show the complete URL.
-# ex: fullurl = true|false (default false)
-#
-# fullurl = true
-
-#
-# Display id: in verbose mode, display the siege user
-# id associated with the HTTP transaction information
-# ex: display-id = true|false
-#
-# display-id =
-
-#
-# Show logfile location. By default, siege displays the
-# logfile location at the end of every run when logging
-# You can turn this message off with this directive.
-# ex: show-logfile = false
-#
-show-logfile = true
-
-#
-# Default logging status, true turns logging on.
-# ex: logging = true|false
-#
-logging = true
-
-#
-# Logfile, the default siege logfile is $PREFIX/var/siege.log
-# This directive allows you to choose an alternative log file.
-# Environment variables may be used as shown in the examples:
-# ex: logfile = /home/jeff/var/log/siege.log
-#     logfile = ${HOME}/var/log/siege.log
-#     logfile = ${LOGFILE}
-#
-logfile = ./siege.log
-
-#
-# HTTP protocol. Options HTTP/1.1 and HTTP/1.0.
-# Some webservers have broken implementation of the
-# 1.1 protocol which skews throughput evaluations.
-# If you notice some siege clients hanging for
-# extended periods of time, change this to HTTP/1.0
-# ex: protocol = HTTP/1.1
-#     protocol = HTTP/1.0
-#
-protocol = HTTP/1.1
-
-#
-# Chunked encoding is required by HTTP/1.1 protocol
-# but siege allows you to turn it off as desired.
-#
-# ex: chunked = true
-#
-chunked = true
-
-#
-# Cache revalidation.
-# Siege supports cache revalidation for both ETag and
-# Last-modified headers. If a copy is still fresh, the
-# server responds with 304.
-# HTTP/1.1 200   0.00 secs:    2326 bytes ==> /apache_pb.gif
-# HTTP/1.1 304   0.00 secs:       0 bytes ==> /apache_pb.gif
-# HTTP/1.1 304   0.00 secs:       0 bytes ==> /apache_pb.gif
-#
-# ex: cache = true
-#
-cache = false
-
-#
-# Connection directive. Options "close" and "keep-alive"
-# Starting with release 2.57b3, siege implements persistent
-# connections in accordance to RFC 2068 using both chunked
-# encoding and content-length directives to determine the
-# page size. To run siege with persistent connections set
-# the connection directive to keep-alive. (Default close)
-# CAUTION: use the keep-alive directive with care.
-# DOUBLE CAUTION: this directive does not work well on HPUX
-# TRIPLE CAUTION: don't use keep-alives until further notice
-# ex: connection = close
-#     connection = keep-alive
-#
-connection = close
-
-#
-# Default number of simulated concurrent users
-# ex: concurrent = 25
-#
-concurrent = 15
-
-#
-# Default duration of the siege. The right hand argument has
-# a modifier which specifies the time units, H=hours, M=minutes,
-# and S=seconds. If a modifier is not specified, then minutes
-# are assumed.
-# ex: time = 50M
-#
-# time =
-
-#
-# Repetitions. The length of siege may be specified in client
-# reps rather then a time duration. Instead of specifying a time
-# span, you can tell each siege instance to hit the server X number
-# of times. So if you chose 'reps = 20' and you've selected 10
-# concurrent users, then siege will hit the server 200 times.
-# ex: reps = 20
-#
-# reps =
-
-#
-# Default URLs file, set at configuration time, the default
-# file is PREFIX/etc/urls.txt. So if you configured siege
-# with --prefix=/usr/local then the urls.txt file is installed
-# int /usr/local/etc/urls.txt. Use the "file = " directive to
-# configure an alternative URLs file. You may use environment
-# variables as shown in the examples below:
-# ex: file = /export/home/jdfulmer/MYURLS.txt
-#     file = $HOME/etc/urls.txt
-#     file = $URLSFILE
-#
-file = ./urls.txt
-
-#
-# Default URL, this is a single URL that you want to test. This
-# is usually set at the command line with the -u option. When
-# used, this option overrides the urls.txt (-f FILE/--file=FILE)
-# option. You will HAVE to comment this out for in order to use
-# the urls.txt file option.
-# ex: url = https://shemp.whoohoo.com/docs/index.jsp
-#
-# url =
-
-#
-# Default delay value, see the siege(1) man page.
-# This value is used for load testing, it is not used
-# for benchmarking.
-# ex: delay = 3
-#
-delay = 1
-
-#
-# Connection timeout value. Set the value in seconds for
-# socket connection timeouts. The default value is 30 seconds.
-# ex: timeout = 30
-#
-# timeout =
-
-#
-# Session expiration: This directive allows you to delete all
-# cookies after you pass through the URLs. This means siege will
-# grab a new session with each run through its URLs. The default
-# value is false.
-# ex: expire-session = true
-#
-# expire-session =
-
-#
-# Failures: This is the number of total connection failures allowed
-# before siege aborts. Connection failures (timeouts, socket failures,
-# etc.) are combined with 400 and 500 level errors in the final stats,
-# but those errors do not count against the abort total. If you set
-# this total to 10, then siege will abort after ten socket timeouts,
-# but it will NOT abort after ten 404s. This is designed to prevent
-# a run-away mess on an unattended siege. The default value is 1024
-# ex: failures = 50
-#
-# failures =
-
-#
-# Internet simulation. If true, siege clients will hit
-# the URLs in the urls.txt file randomly, thereby simulating
-# internet usage. If false, siege will run through the
-# urls.txt file in order from first to last and back again.
-# ex: internet = true
-#
-internet = false
-
-#
-# Default benchmarking value, If true, there is NO delay
-# between server requests, siege runs as fast as the web
-# server and the network will let it. Set this to false
-# for load testing.
-# ex: benchmark = true
-#
-benchmark = false
-
-#
-# Set the siege User-Agent to identify yourself at the
-# host, the default is: JoeDog/1.00 [en] (X11; I; Siege #.##)
-# But that wreaks of corporate techno speak. Feel free
-# to make it more interesting :-) Since Limey is recovering
-# from minor surgery as I write this, I'll dedicate the
-# example to him...
-# ex: user-agent = Limey The Bulldog
-#
-# user-agent =
-
-#
-# Accept-encoding. This option allows you to specify
-# acceptable encodings returned by the server. Use this
-# directive to turn on compression. By default we accept
-# gzip compression.
-#
-# ex: accept-encoding = *
-#     accept-encoding = gzip
-#     accept-encoding = compress;q=0.5;gzip;q=1
-accept-encoding = gzip
-
-#
-# TURN OFF THAT ANNOYING SPINNER!
-# Siege spawns a thread and runs a spinner to entertain you
-# as it collects and computes its stats. If you don't like
-# this feature, you may turn it off here.
-# ex: spinner = false
-#
-spinner = true
-
-#
-# WWW-Authenticate login. When siege hits a webpage
-# that requires basic authentication, it will search its
-# logins for authentication which matches the specific realm
-# requested by the server. If it finds a match, it will send
-# that login information. If it fails to match the realm, it
-# will send the default login information. (Default is "all").
-# You may configure siege with several logins as long as no
-# two realms match. The format for logins is:
-# username:password[:realm] where "realm" is optional.
-# If you do not supply a realm, then it will default to "all"
-# ex: login = jdfulmer:topsecret:Admin
-#     login = jeff:supersecret
-#
-# login =
-
-#
-# WWW-Authenticate username and password. When siege
-# hits a webpage that requires authentication, it will
-# send this user name and password to the server. Note
-# this is NOT form based authentication. You will have
-# to construct URLs for that.
-# ex: username = jdfulmer
-#     password = whoohoo
-#
-# username =
-# password =
-
-#
-# ssl-cert
-# This optional feature allows you to specify a path to a client
-# certificate. It is not neccessary to specify a certificate in
-# order to use https. If you don't know why you would want one,
-# then you probably don't need this feature. Use openssl to
-# generate a certificate and key with the following command:
-# $ openssl req -nodes -new -days 365 -newkey rsa:1024 \
-#           -keyout key.pem -out cert.pem
-# Specify a path to cert.pem as follows:
-# ex: ssl-cert = /home/jeff/.certs/cert.pem
-#
-# ssl-cert =
-
-#
-# ssl-key
-# Use this option to specify the key you generated with the command
-# above. ex: ssl-key = /home/jeff/.certs/key.pem
-# You may actually skip this option and combine both your cert and
-# your key in a single file:
-# $ cat key.pem > client.pem
-# $ cat cert.pem >> client.pem
-# Now set the path for ssl-cert:
-# ex: ssl-cert = /home/jeff/.certs/client.pem
-# (in this scenario, you comment out ssl-key)
-#
-# ssl-key =
-
-#
-# ssl-timeout
-# This option sets a connection timeout for the ssl library
-# ex: ssl-timeout = 30
-#
-# ssl-timeout =
-
-#
-# ssl-ciphers
-# You can use this feature to select a specific ssl cipher
-# for HTTPs. To view the ones available with your library run
-# the following command: openssl ciphers
-# ex: ssl-ciphers = EXP-RC4-MD5
-#
-# ssl-ciphers =
-
-#
-# Login URL. This is the first URL to be hit by every siege
-# client. This feature was designed to allow you to login to
-# a server and establish a session. It will only be hit once
-# so if you need to hit this URL more then once, make sure it
-# also appears in your urls.txt file.
-#
-# ex: login-url = http://eos.haha.com/login.jsp POST name=jeff&pass=foo
-#
-# login-url =
-
-#
-# Proxy protocol. This option allows you to select a proxy
-# server stress testing. The proxy will request the URL(s)
-# specified by -u"my.url.org" OR from the urls.txt file.
-#
-# ex: proxy-host = proxy.whoohoo.org
-#     proxy-port = 8080
-#
-# proxy-host =
-# proxy-port =
-
-#
-# Proxy-Authenticate. When scout hits a proxy server which
-# requires username and password authentication, it will this
-# username and password to the server. The format is username,
-# password and optional realm each separated by a colon. You
-# may enter more than one proxy-login as long as each one has
-# a different realm. If you do not enter a realm, then scout
-# will send that login information to all proxy challenges. If
-# you have more than one proxy-login, then scout will attempt
-# to match the login to the realm.
-# ex: proxy-login: jeff:secret:corporate
-#     proxy-login: jeff:whoohoo
-#
-# proxy-login =
-
-#
-# Redirection support. This option allows to to control
-# whether a Location: hint will be followed. Most users
-# will want to follow redirection information, but sometimes
-# it's desired to just get the Location information.
-#
-# ex: follow-location = false
-#
-# follow-location =
-
-# Zero-length data. siege can be configured to disregard
-# results in which zero bytes are read after the headers.
-# Alternatively, such results can be counted in the final
-# tally of outcomes.
-#
-# ex: zero-data-ok = false
-#
-# zero-data-ok =
-
-#
-# end of siegerc
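With siege.conf gone, the siege-driven load-test path is retired. For reference, under the settings above siege read its targets from ./urls.txt and simulated 15 concurrent users; an invocation in that spirit (flags per siege(1); the repetition count is illustrative) would be:

    siege -c 15 -r 20 -f ./urls.txt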
9  tox.ini  Normal file
@@ -0,0 +1,9 @@
+[tox]
+envlist = py
+
+[testenv]
+deps = -rrequirements.txt
+passenv =
+    S3TEST_CONF
+    S3_USE_SIGV4
+commands = pytest {posargs}
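With this tox.ini, tox builds a virtualenv from requirements.txt and runs pytest inside it; only S3TEST_CONF and S3_USE_SIGV4 are passed through from the caller's environment. Arguments after `--` land in {posargs} and are forwarded to pytest, for example (the config filename is illustrative):

    S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_utils.py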