Merge pull request #337 from ceph/wip-python-3-port

Wip python 3 port
Casey Bodley authored on 2020-01-15 08:48:54 -05:00; committed by GitHub
commit 13452bd25f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
41 changed files with 402 additions and 5104 deletions


@ -4,56 +4,52 @@ set -e
virtualenv="virtualenv"
declare -a packages
if [ -f /etc/debian_version ]; then
packages=(debianutils python-pip python-virtualenv python-dev libevent-dev libffi-dev libxml2-dev libxslt-dev zlib1g-dev)
packages=(debianutils python3-pip python3-virtualenv python3-dev libevent-dev libffi-dev libxml2-dev libxslt-dev zlib1g-dev)
for package in ${packages[@]}; do
if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then
# add a space after old values
missing="${missing:+$missing }$package"
fi
done
if [ -n "$missing" ]; then
echo "$0: missing required DEB packages. Installing via sudo." 1>&2
sudo apt-get -y install $missing
fi
else
packages=(which libevent-devel libffi-devel libxml2-devel libxslt-devel zlib-devel)
if [ -f /etc/fedora-release ]; then
packages+=(python2-pip python2-virtualenv python2-devel)
elif [ -f /etc/redhat-release ]; then
unset ${GREP_OPTIONS}
eval $(cat /etc/os-release | grep VERSION_ID)
if [ ${VERSION_ID:0:1} -lt 8 ]; then
packages+=(python-virtualenv python-devel)
else
packages+=(python2-virtualenv python2-devel)
virtualenv="virtualenv-2"
fi
fi
elif [ -f /etc/redhat-release ]; then
packages=(which python3-virtualenv python36-devel libevent-devel libffi-devel libxml2-devel libxslt-devel zlib-devel)
for package in ${packages[@]}; do
# When the package is python36-devel we change it to python3-devel on Fedora
if [[ ${package} == "python36-devel" && -f /etc/fedora-release ]]; then
package=python36
fi
if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then
missing="${missing:+$missing }$package"
fi
done
if [ -n "$missing" ]; then
echo "$0: missing required RPM packages. Installing via sudo." 1>&2
echo "$0: Missing required RPM packages: ${missing}." 1>&2
sudo yum -y install $missing
fi
else
echo "s3-tests can only be run on Red Hat, Centos, Fedora, Ubunutu, or Debian platforms"
exit 1
fi
${virtualenv} --python=$(which python2) --no-site-packages --distribute virtualenv
# s3-tests only works on python 3.6 not newer versions of python3
${virtualenv} --python=$(which python3.6) --no-site-packages --distribute virtualenv
# avoid pip bugs
./virtualenv/bin/pip install --upgrade pip
./virtualenv/bin/pip3 install --upgrade pip
# slightly old version of setuptools; newer fails w/ requests 0.14.0
./virtualenv/bin/pip install setuptools==32.3.1
./virtualenv/bin/pip3 install setuptools==32.3.1
./virtualenv/bin/pip install -r requirements.txt
./virtualenv/bin/pip3 install -r requirements.txt
# forbid setuptools from using the network because it'll try to use
# easy_install, and we really wanted pip; next line will fail if pip
# requirements.txt does not match setup.py requirements -- sucky but
# good enough for now
./virtualenv/bin/python setup.py develop
./virtualenv/bin/python3 setup.py develop
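
The comments above pin the suite to Python 3.6 and a specific setuptools, so when the suite misbehaves, checking the interpreter first saves time. A minimal sketch of such a guard (hypothetical helper, not part of the repo):

    import sys

    def require_python36():
        # bootstrap builds ./virtualenv against python3.6 (see comment above);
        # anything newer or older means the wrong interpreter is active
        if sys.version_info[:2] != (3, 6):
            raise RuntimeError('expected Python 3.6, got %d.%d' % sys.version_info[:2])

    require_python36()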


@ -1,569 +0,0 @@
#
# FUZZ testing uses a probabilistic grammar to generate
# pseudo-random requests which will be sent to a server
# over long periods of time, with the goal of turning up
# garbage-input and buffer-overflow sensitivities.
#
# Each state ...
# generates/chooses contents for variables
# chooses a next state (from a weighted set of options)
#
# A terminal state is one from which there are no successors,
# at which point a message is generated (from the variables)
# and sent to the server.
#
# The test program doesn't actually know (or care) what
# response should be returned ... since the goal is to
# crash the server.
#
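# For illustration only (not part of this grammar): the smallest graph in
# this schema is a start state that picks one successor, plus a terminal
# state whose empty "choices" list ends the walk and emits the request:
#
#   start:
#     set: {}
#     choices: [simple_get]
#   simple_get:
#     set:
#       method: GET
#     choices: []
#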
start:
set:
garbage:
- '{random 10-3000 printable}'
- '{random 10-1000 binary}'
garbage_no_whitespace:
- '{random 10-3000 printable_no_whitespace}'
- '{random 10-1000 binary_no_whitespace}'
acl_header:
- 'private'
- 'public-read'
- 'public-read-write'
- 'authenticated-read'
- 'bucket-owner-read'
- 'bucket-owner-full-control'
- '{random 3000 letters}'
- '{random 100-1000 binary_no_whitespace}'
choices:
- bucket
- object
bucket:
set:
urlpath: '/{bucket}'
choices:
- 13 bucket_get
- 8 bucket_put
- 5 bucket_delete
- bucket_garbage_method
bucket_garbage_method:
set:
method:
- '{random 1-100 printable}'
- '{random 10-100 binary}'
bucket:
- '{bucket_readable}'
- '{bucket_not_readable}'
- '{bucket_writable}'
- '{bucket_not_writable}'
- '2 {garbage_no_whitespace}'
choices:
- bucket_get_simple
- bucket_get_filtered
- bucket_get_uploads
- bucket_put_create
- bucket_put_versioning
- bucket_put_simple
bucket_delete:
set:
method: DELETE
bucket:
- '{bucket_writable}'
- '{bucket_not_writable}'
- '2 {garbage_no_whitespace}'
query:
- null
- policy
- website
- '2 {garbage_no_whitespace}'
choices: []
bucket_get:
set:
method: GET
bucket:
- '{bucket_readable}'
- '{bucket_not_readable}'
- '2 {garbage_no_whitespace}'
choices:
- 11 bucket_get_simple
- bucket_get_filtered
- bucket_get_uploads
bucket_get_simple:
set:
query:
- acl
- policy
- location
- logging
- notification
- versions
- requestPayment
- versioning
- website
- '2 {garbage_no_whitespace}'
choices: []
bucket_get_uploads:
set:
delimiter:
- null
- '3 delimiter={garbage_no_whitespace}'
prefix:
- null
- '3 prefix={garbage_no_whitespace}'
key_marker:
- null
- 'key-marker={object_readable}'
- 'key-marker={object_not_readable}'
- 'key-marker={invalid_key}'
- 'key-marker={random 100-1000 printable_no_whitespace}'
max_uploads:
- null
- 'max-uploads={random 1-5 binary_no_whitespace}'
- 'max-uploads={random 1-1000 digits}'
upload_id_marker:
- null
- '3 upload-id-marker={random 0-1000 printable_no_whitespace}'
query:
- 'uploads'
- 'uploads&{delimiter}&{prefix}'
- 'uploads&{max_uploads}&{key_marker}&{upload_id_marker}'
- '2 {garbage_no_whitespace}'
choices: []
bucket_get_filtered:
set:
delimiter:
- 'delimiter={garbage_no_whitespace}'
prefix:
- 'prefix={garbage_no_whitespace}'
marker:
- 'marker={object_readable}'
- 'marker={object_not_readable}'
- 'marker={invalid_key}'
- 'marker={random 100-1000 printable_no_whitespace}'
max_keys:
- 'max-keys={random 1-5 binary_no_whitespace}'
- 'max-keys={random 1-1000 digits}'
query:
- null
- '{delimiter}&{prefix}'
- '{max-keys}&{marker}'
- '2 {garbage_no_whitespace}'
choices: []
bucket_put:
set:
bucket:
- '{bucket_writable}'
- '{bucket_not_writable}'
- '2 {garbage_no_whitespace}'
method: PUT
choices:
- bucket_put_simple
- bucket_put_create
- bucket_put_versioning
bucket_put_create:
set:
body:
- '2 {garbage}'
- '<CreateBucketConfiguration><LocationConstraint>{random 2-10 binary}</LocationConstraint></CreateBucketConfiguration>'
headers:
- ['0-5', 'x-amz-acl', '{acl_header}']
choices: []
bucket_put_versioning:
set:
body:
- '{garbage}'
- '4 <VersioningConfiguration>{versioning_status}{mfa_delete_body}</VersioningConfiguration>'
mfa_delete_body:
- null
- '<Status>{random 2-10 binary}</Status>'
- '<Status>{random 2000-3000 printable}</Status>'
versioning_status:
- null
- '<MfaDelete>{random 2-10 binary}</MfaDelete>'
- '<MfaDelete>{random 2000-3000 printable}</MfaDelete>'
mfa_header:
- '{random 10-1000 printable_no_whitespace} {random 10-1000 printable_no_whitespace}'
headers:
- ['0-1', 'x-amz-mfa', '{mfa_header}']
choices: []
bucket_put_simple:
set:
body:
- '{acl_body}'
- '{policy_body}'
- '{logging_body}'
- '{notification_body}'
- '{request_payment_body}'
- '{website_body}'
acl_body:
- null
- '<AccessControlPolicy>{owner}{acl}</AccessControlPolicy>'
owner:
- null
- '7 <Owner>{id}{display_name}</Owner>'
id:
- null
- '<ID>{random 10-200 binary}</ID>'
- '<ID>{random 1000-3000 printable}</ID>'
display_name:
- null
- '2 <DisplayName>{random 10-200 binary}</DisplayName>'
- '2 <DisplayName>{random 1000-3000 printable}</DisplayName>'
- '2 <DisplayName>{random 10-300 letters}@{random 10-300 letters}.{random 2-4 letters}</DisplayName>'
acl:
- null
- '10 <AccessControlList><Grant>{grantee}{permission}</Grant></AccessControlList>'
grantee:
- null
- '7 <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser">{id}{display_name}</Grantee>'
permission:
- null
- '7 <Permission>{permission_value}</Permission>'
permission_value:
- '2 {garbage}'
- FULL_CONTROL
- WRITE
- WRITE_ACP
- READ
- READ_ACP
policy_body:
- null
- '2 {garbage}'
logging_body:
- null
- '<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />'
- '<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01"><LoggingEnabled>{bucket}{target_prefix}{target_grants}</LoggingEnabled></BucketLoggingStatus>'
target_prefix:
- null
- '<TargetPrefix>{random 10-1000 printable}</TargetPrefix>'
- '<TargetPrefix>{random 10-1000 binary}</TargetPrefix>'
target_grants:
- null
- '10 <TargetGrants><Grant>{grantee}{permission}</Grant></TargetGrants>'
notification_body:
- null
- '<NotificationConfiguration />'
- '2 <NotificationConfiguration><TopicConfiguration>{topic}{event}</TopicConfiguration></NotificationConfiguration>'
topic:
- null
- '2 <Topic>{garbage}</Topic>'
event:
- null
- '<Event>s3:ReducedRedundancyLostObject</Event>'
- '2 <Event>{garbage}</Event>'
request_payment_body:
- null
- '<RequestPaymentConfiguration xlmns="http://s3.amazonaws.com/doc/2006-03-01/"><Payer>{payer}</Payer></RequestPaymentConfiguration>'
payer:
- Requester
- BucketOwner
- '2 {garbage}'
website_body:
- null
- '<WebsiteConfiguration>{index_doc}{error_doc}{routing_rules}</WebsiteConfiguration>'
- '<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{index_doc}{error_doc}{routing_rules}</WebsiteConfiguration>'
index_doc:
- null
- '<IndexDocument>{filename}</IndexDocument>'
- '<IndexDocument><Suffix>{filename}</Suffix></IndexDocument>'
filename:
- null
- '2 {garbage}'
- '{random 2-10 printable}.html'
- '{random 100-1000 printable}.html'
- '{random 100-1000 printable_no_whitespace}.html'
error_doc:
- null
- '<ErrorDocument>{filename}</ErrorDocument>'
- '<ErrorDocument><Key>{filename}</Key></ErrorDocument>'
routing_rules:
- null
- ['0-10', '<RoutingRules>{routing_rules_content}</RoutingRules>']
routing_rules_content:
- null
- ['0-1000', '<RoutingRule>{routing_rule}</RoutingRule>']
routing_rule:
- null
- ['0-2', '{routing_rule_condition}{routing_rule_redirect}']
routing_rule_condition:
- null
- ['0-10', '<Condition>{KeyPrefixEquals}{HttpErrorCodeReturnedEquals}</Condition>']
KeyPrefixEquals:
- null
- ['0-2', '<KeyPrefixEquals>{filename}</KeyPrefixEquals>']
HttpErrorCodeReturnedEquals:
- null
- ['0-2', '<HttpErrorCodeReturnedEquals>{HttpErrorCode}</HttpErrorCodeReturnedEquals>']
HttpErrorCode:
- null
- '2 {garbage}'
- '{random 1-10 digits}'
- '{random 1-100 printable}'
routing_rule_redirect:
- null
- '{protocol}{hostname}{ReplaceKeyPrefixWith}{ReplaceKeyWith}{HttpRedirectCode}'
protocol:
- null
- '<Protocol>http</Protocol>'
- '<Protocol>https</Protocol>'
- ['1-5', '<Protocol>{garbage}</Protocol>']
- ['1-5', '<Protocol>{filename}</Protocol>']
hostname:
- null
- ['1-5', '<HostHame>{hostname_val}</HostHame>']
- ['1-5', '<HostHame>{garbage}</HostHame>']
hostname_val:
- null
- '{random 1-255 printable_no_whitespace}'
- '{random 1-255 printable}'
- '{random 1-255 punctuation}'
- '{random 1-255 whitespace}'
- '{garbage}'
ReplaceKeyPrefixWith:
- null
- ['1-5', '<ReplaceKeyPrefixWith>{filename}</ReplaceKeyPrefixWith>']
HttpRedirectCode:
- null
- ['1-5', '<HttpRedirectCode>{random 1-10 digits}</HttpRedirectCode>']
- ['1-5', '<HttpRedirectCode>{random 1-100 printable}</HttpRedirectCode>']
- ['1-5', '<HttpRedirectCode>{filename}</HttpRedirectCode>']
choices: []
object:
set:
urlpath: '/{bucket}/{object}'
range_header:
- null
- 'bytes={random 1-2 digits}-{random 1-4 digits}'
- 'bytes={random 1-1000 binary_no_whitespace}'
if_modified_since_header:
- null
- '2 {garbage_no_whitespace}'
if_match_header:
- null
- '2 {garbage_no_whitespace}'
if_none_match_header:
- null
- '2 {garbage_no_whitespace}'
choices:
- object_delete
- object_get
- object_put
- object_head
- object_garbage_method
object_garbage_method:
set:
method:
- '{random 1-100 printable}'
- '{random 10-100 binary}'
bucket:
- '{bucket_readable}'
- '{bucket_not_readable}'
- '{bucket_writable}'
- '{bucket_not_writable}'
- '2 {garbage_no_whitespace}'
object:
- '{object_readable}'
- '{object_not_readable}'
- '{object_writable}'
- '{object_not_writable}'
- '2 {garbage_no_whitespace}'
choices:
- object_get_query
- object_get_head_simple
object_delete:
set:
method: DELETE
bucket:
- '5 {bucket_writable}'
- '{bucket_not_writable}'
- '{garbage_no_whitespace}'
object:
- '{object_writable}'
- '{object_not_writable}'
- '2 {garbage_no_whitespace}'
choices: []
object_get:
set:
method: GET
bucket:
- '5 {bucket_readable}'
- '{bucket_not_readable}'
- '{garbage_no_whitespace}'
object:
- '{object_readable}'
- '{object_not_readable}'
- '{garbage_no_whitespace}'
choices:
- 5 object_get_head_simple
- 2 object_get_query
object_get_query:
set:
query:
- 'torrent'
- 'acl'
choices: []
object_get_head_simple:
set: {}
headers:
- ['0-1', 'range', '{range_header}']
- ['0-1', 'if-modified-since', '{if_modified_since_header}']
- ['0-1', 'if-unmodified-since', '{if_modified_since_header}']
- ['0-1', 'if-match', '{if_match_header}']
- ['0-1', 'if-none-match', '{if_none_match_header}']
choices: []
object_head:
set:
method: HEAD
bucket:
- '5 {bucket_readable}'
- '{bucket_not_readable}'
- '{garbage_no_whitespace}'
object:
- '{object_readable}'
- '{object_not_readable}'
- '{garbage_no_whitespace}'
choices:
- object_get_head_simple
object_put:
set:
method: PUT
bucket:
- '5 {bucket_writable}'
- '{bucket_not_writable}'
- '{garbage_no_whitespace}'
object:
- '{object_writable}'
- '{object_not_writable}'
- '{garbage_no_whitespace}'
cache_control:
- null
- '{garbage_no_whitespace}'
- 'no-cache'
content_disposition:
- null
- '{garbage_no_whitespace}'
content_encoding:
- null
- '{garbage_no_whitespace}'
content_length:
- '{random 1-20 digits}'
- '{garbage_no_whitespace}'
content_md5:
- null
- '{garbage_no_whitespace}'
content_type:
- null
- 'binary/octet-stream'
- '{garbage_no_whitespace}'
expect:
- null
- '100-continue'
- '{garbage_no_whitespace}'
expires:
- null
- '{random 1-10000000 digits}'
- '{garbage_no_whitespace}'
meta_key:
- null
- 'foo'
- '{garbage_no_whitespace}'
meta_value:
- null
- '{garbage_no_whitespace}'
choices:
- object_put_simple
- object_put_acl
- object_put_copy
object_put_simple:
set: {}
headers:
- ['0-1', 'cache-control', '{cache_control}']
- ['0-1', 'content-disposition', '{content_disposition}']
- ['0-1', 'content-encoding', '{content_encoding}']
- ['0-1', 'content-length', '{content_length}']
- ['0-1', 'content-md5', '{content_md5}']
- ['0-1', 'content-type', '{content_type}']
- ['0-1', 'expect', '{expect}']
- ['0-1', 'expires', '{expires}']
- ['0-1', 'x-amz-acl', '{acl_header}']
- ['0-6', 'x-amz-meta-{meta_key}', '{meta_value}']
choices: []
object_put_acl:
set:
query: 'acl'
body:
- null
- '2 {garbage}'
- '<AccessControlPolicy>{owner}{acl}</AccessControlPolicy>'
owner:
- null
- '7 <Owner>{id}{display_name}</Owner>'
id:
- null
- '<ID>{random 10-200 binary}</ID>'
- '<ID>{random 1000-3000 printable}</ID>'
display_name:
- null
- '2 <DisplayName>{random 10-200 binary}</DisplayName>'
- '2 <DisplayName>{random 1000-3000 printable}</DisplayName>'
- '2 <DisplayName>{random 10-300 letters}@{random 10-300 letters}.{random 2-4 letters}</DisplayName>'
acl:
- null
- '10 <AccessControlList><Grant>{grantee}{permission}</Grant></AccessControlList>'
grantee:
- null
- '7 <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser">{id}{display_name}</Grantee>'
permission:
- null
- '7 <Permission>{permission_value}</Permission>'
permission_value:
- '2 {garbage}'
- FULL_CONTROL
- WRITE
- WRITE_ACP
- READ
- READ_ACP
headers:
- ['0-1', 'cache-control', '{cache_control}']
- ['0-1', 'content-disposition', '{content_disposition}']
- ['0-1', 'content-encoding', '{content_encoding}']
- ['0-1', 'content-length', '{content_length}']
- ['0-1', 'content-md5', '{content_md5}']
- ['0-1', 'content-type', '{content_type}']
- ['0-1', 'expect', '{expect}']
- ['0-1', 'expires', '{expires}']
- ['0-1', 'x-amz-acl', '{acl_header}']
choices: []
object_put_copy:
set: {}
headers:
- ['1-1', 'x-amz-copy-source', '{source_object}']
- ['0-1', 'x-amz-acl', '{acl_header}']
- ['0-1', 'x-amz-metadata-directive', '{metadata_directive}']
- ['0-1', 'x-amz-copy-source-if-match', '{if_match_header}']
- ['0-1', 'x-amz-copy-source-if-none-match', '{if_none_match_header}']
- ['0-1', 'x-amz-copy-source-if-modified-since', '{if_modified_since_header}']
- ['0-1', 'x-amz-copy-source-if-unmodified-since', '{if_modified_since_header}']
choices: []


@ -2,12 +2,11 @@ PyYAML
nose >=1.0.0
boto >=2.6.0
boto3 >=1.0.0
bunch >=1.0.0
munch >=2.0.0
# 0.14 switches to libev, that means bootstrap needs to change too
gevent >=1.0
isodate >=0.4.4
requests >=0.14.0
pytz >=2011k
ordereddict
httplib2
lxml
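
The bunch-to-munch swap above is the dependency half of the port: munch is the maintained, Python 3 compatible fork of bunch with the same attribute-style dict API. A minimal sketch of the API these tests rely on (assuming munch >=2.0.0 as pinned above):

    import munch

    config = munch.Munch()
    config.update(munch.munchify({'s3': {'defaults': {'host': 'localhost'}}}))
    assert config.s3.defaults.host == 'localhost'  # attribute access, as with bunch.Bunch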


@ -1,142 +0,0 @@
#!/usr/bin/python
import sys
import os
import yaml
import optparse
NANOSECONDS = int(1e9)
# Output stats in a format similar to siege
# see http://www.joedog.org/index/siege-home
OUTPUT_FORMAT = """Stats for type: [{type}]
Transactions: {trans:>11} hits
Availability: {avail:>11.2f} %
Elapsed time: {elapsed:>11.2f} secs
Data transferred: {data:>11.2f} MB
Response time: {resp_time:>11.2f} secs
Transaction rate: {trans_rate:>11.2f} trans/sec
Throughput: {data_rate:>11.2f} MB/sec
Concurrency: {conc:>11.2f}
Successful transactions: {trans_success:>11}
Failed transactions: {trans_fail:>11}
Longest transaction: {trans_long:>11.2f}
Shortest transaction: {trans_short:>11.2f}
"""
def parse_options():
usage = "usage: %prog [options]"
parser = optparse.OptionParser(usage=usage)
parser.add_option(
"-f", "--file", dest="input", metavar="FILE",
help="Name of input YAML file. Default uses sys.stdin")
parser.add_option(
"-v", "--verbose", dest="verbose", action="store_true",
help="Enable verbose output")
(options, args) = parser.parse_args()
if not options.input and os.isatty(sys.stdin.fileno()):
parser.error("option -f required if no data is provided "
"in stdin")
return (options, args)
def main():
(options, args) = parse_options()
total = {}
durations = {}
min_time = {}
max_time = {}
errors = {}
success = {}
calculate_stats(options, total, durations, min_time, max_time, errors,
success)
print_results(total, durations, min_time, max_time, errors, success)
def calculate_stats(options, total, durations, min_time, max_time, errors,
success):
print 'Calculating statistics...'
f = sys.stdin
if options.input:
f = file(options.input, 'r')
for item in yaml.safe_load_all(f):
type_ = item.get('type')
if type_ not in ('r', 'w'):
continue # ignore any invalid items
if 'error' in item:
errors[type_] = errors.get(type_, 0) + 1
continue # skip rest of analysis for this item
else:
success[type_] = success.get(type_, 0) + 1
# parse the item
data_size = item['chunks'][-1][0]
duration = item['duration']
start = item['start']
end = start + duration / float(NANOSECONDS)
if options.verbose:
print "[{type}] POSIX time: {start:>18.2f} - {end:<18.2f} " \
"{data:>11.2f} KB".format(
type=type_,
start=start,
end=end,
data=data_size / 1024.0, # convert to KB
)
# update time boundaries
prev = min_time.setdefault(type_, start)
if start < prev:
min_time[type_] = start
prev = max_time.setdefault(type_, end)
if end > prev:
max_time[type_] = end
# save the duration
if type_ not in durations:
durations[type_] = []
durations[type_].append(duration)
# add to running totals
total[type_] = total.get(type_, 0) + data_size
def print_results(total, durations, min_time, max_time, errors, success):
for type_ in total.keys():
trans_success = success.get(type_, 0)
trans_fail = errors.get(type_, 0)
trans = trans_success + trans_fail
avail = trans_success * 100.0 / trans
elapsed = max_time[type_] - min_time[type_]
data = total[type_] / 1024.0 / 1024.0 # convert to MB
resp_time = sum(durations[type_]) / float(NANOSECONDS) / \
len(durations[type_])
trans_rate = trans / elapsed
data_rate = data / elapsed
conc = trans_rate * resp_time
trans_long = max(durations[type_]) / float(NANOSECONDS)
trans_short = min(durations[type_]) / float(NANOSECONDS)
print OUTPUT_FORMAT.format(
type=type_,
trans_success=trans_success,
trans_fail=trans_fail,
trans=trans,
avail=avail,
elapsed=elapsed,
data=data,
resp_time=resp_time,
trans_rate=trans_rate,
data_rate=data_rate,
conc=conc,
trans_long=trans_long,
trans_short=trans_short,
)
if __name__ == '__main__':
main()
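
The derived metrics in print_results follow siege's definitions: availability is the success percentage, transaction rate divides hits by elapsed wall time, and concurrency is rate times mean response time (Little's law). A worked example with invented numbers:

    trans_success, trans_fail = 98, 2
    trans = trans_success + trans_fail     # 100 hits
    avail = trans_success * 100.0 / trans  # 98.0 %
    elapsed = 50.0                         # secs, max_time - min_time
    trans_rate = trans / elapsed           # 2.0 trans/sec
    resp_time = 1.5                        # secs, mean transaction duration
    conc = trans_rate * resp_time          # 3.0 concurrent transactions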


@ -1,5 +1,5 @@
import boto.s3.connection
import bunch
import munch
import itertools
import os
import random
@ -11,8 +11,8 @@ from lxml import etree
from doctest import Example
from lxml.doctestcompare import LXMLOutputChecker
s3 = bunch.Bunch()
config = bunch.Bunch()
s3 = munch.Munch()
config = munch.Munch()
prefix = ''
bucket_counter = itertools.count(1)
@ -51,10 +51,10 @@ def nuke_bucket(bucket):
while deleted_cnt:
deleted_cnt = 0
for key in bucket.list():
print 'Cleaning bucket {bucket} key {key}'.format(
print('Cleaning bucket {bucket} key {key}'.format(
bucket=bucket,
key=key,
)
))
key.set_canned_acl('private')
key.delete()
deleted_cnt += 1
@ -67,26 +67,26 @@ def nuke_bucket(bucket):
and e.body == ''):
e.error_code = 'AccessDenied'
if e.error_code != 'AccessDenied':
print 'GOT UNWANTED ERROR', e.error_code
print('GOT UNWANTED ERROR', e.error_code)
raise
# seems like we're not the owner of the bucket; ignore
pass
def nuke_prefixed_buckets():
for name, conn in s3.items():
print 'Cleaning buckets from connection {name}'.format(name=name)
for name, conn in list(s3.items()):
print('Cleaning buckets from connection {name}'.format(name=name))
for bucket in conn.get_all_buckets():
if bucket.name.startswith(prefix):
print 'Cleaning bucket {bucket}'.format(bucket=bucket)
print('Cleaning bucket {bucket}'.format(bucket=bucket))
nuke_bucket(bucket)
print 'Done with cleanup of test buckets.'
print('Done with cleanup of test buckets.')
def read_config(fp):
config = bunch.Bunch()
config = munch.Munch()
g = yaml.safe_load_all(fp)
for new in g:
config.update(bunch.bunchify(new))
config.update(munch.munchify(new))
return config
def connect(conf):
@ -97,7 +97,7 @@ def connect(conf):
access_key='aws_access_key_id',
secret_key='aws_secret_access_key',
)
kwargs = dict((mapping[k],v) for (k,v) in conf.iteritems() if k in mapping)
kwargs = dict((mapping[k],v) for (k,v) in conf.items() if k in mapping)
#process calling_format argument
calling_formats = dict(
ordinary=boto.s3.connection.OrdinaryCallingFormat(),
@ -105,7 +105,7 @@ def connect(conf):
vhost=boto.s3.connection.VHostCallingFormat(),
)
kwargs['calling_format'] = calling_formats['ordinary']
if conf.has_key('calling_format'):
if 'calling_format' in conf:
raw_calling_format = conf['calling_format']
try:
kwargs['calling_format'] = calling_formats[raw_calling_format]
@ -146,7 +146,7 @@ def setup():
raise RuntimeError("Empty Prefix! Aborting!")
defaults = config.s3.defaults
for section in config.s3.keys():
for section in list(config.s3.keys()):
if section == 'defaults':
continue
@ -258,9 +258,10 @@ def with_setup_kwargs(setup, teardown=None):
# yield _test_gen
def trim_xml(xml_str):
p = etree.XMLParser(remove_blank_text=True)
p = etree.XMLParser(encoding="utf-8", remove_blank_text=True)
xml_str = bytes(xml_str, "utf-8")
elem = etree.XML(xml_str, parser=p)
return etree.tostring(elem)
return etree.tostring(elem, encoding="unicode")
def normalize_xml(xml, pretty_print=True):
if xml is None:
@ -282,7 +283,7 @@ def normalize_xml(xml, pretty_print=True):
for parent in root.xpath('//*[./*]'): # Search for parent elements
parent[:] = sorted(parent,key=lambda x: x.tag)
xmlstr = etree.tostring(root, encoding="utf-8", xml_declaration=True, pretty_print=pretty_print)
xmlstr = etree.tostring(root, encoding="unicode", pretty_print=pretty_print)
# there are two different DTD URIs
xmlstr = re.sub(r'xmlns="[^"]+"', 'xmlns="s3"', xmlstr)
xmlstr = re.sub(r'xmlns=\'[^\']+\'', 'xmlns="s3"', xmlstr)
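
With encoding="unicode", trim_xml and normalize_xml now accept and return str, encoding to bytes only to feed the parser. A sketch of the round trip (input string invented):

    trimmed = trim_xml('<a> <b>x</b> </a>')
    assert isinstance(trimmed, str)      # tostring(..., encoding="unicode") yields str
    assert trimmed == '<a><b>x</b></a>'  # remove_blank_text drops whitespace-only nodes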


@ -1,5 +0,0 @@
from boto.auth_handler import AuthHandler
class AnonymousAuthHandler(AuthHandler):
def add_auth(self, http_request, **kwargs):
return # Nothing to do for anonymous access!


@ -1,21 +1,20 @@
from __future__ import print_function
import sys
import ConfigParser
import configparser
import boto.exception
import boto.s3.connection
import bunch
import munch
import itertools
import os
import random
import string
from httplib import HTTPConnection, HTTPSConnection
from urlparse import urlparse
from http.client import HTTPConnection, HTTPSConnection
from urllib.parse import urlparse
from .utils import region_sync_meta
s3 = bunch.Bunch()
config = bunch.Bunch()
targets = bunch.Bunch()
s3 = munch.Munch()
config = munch.Munch()
targets = munch.Munch()
# this will be assigned by setup()
prefix = None
@ -69,7 +68,7 @@ def nuke_prefixed_buckets_on_conn(prefix, name, conn):
if bucket.name.startswith(prefix):
print('Cleaning bucket {bucket}'.format(bucket=bucket))
success = False
for i in xrange(2):
for i in range(2):
try:
try:
iterator = iter(bucket.list_versions())
@ -116,12 +115,12 @@ def nuke_prefixed_buckets_on_conn(prefix, name, conn):
def nuke_prefixed_buckets(prefix):
# If no regions are specified, use the simple method
if targets.main.master == None:
for name, conn in s3.items():
for name, conn in list(s3.items()):
print('Deleting buckets on {name}'.format(name=name))
nuke_prefixed_buckets_on_conn(prefix, name, conn)
else:
# First, delete all buckets on the master connection
for name, conn in s3.items():
for name, conn in list(s3.items()):
if conn == targets.main.master.connection:
print('Deleting buckets on {name} (master)'.format(name=name))
nuke_prefixed_buckets_on_conn(prefix, name, conn)
@ -131,7 +130,7 @@ def nuke_prefixed_buckets(prefix):
print('region-sync in nuke_prefixed_buckets')
# Now delete remaining buckets on any other connection
for name, conn in s3.items():
for name, conn in list(s3.items()):
if conn != targets.main.master.connection:
print('Deleting buckets on {name} (non-master)'.format(name=name))
nuke_prefixed_buckets_on_conn(prefix, name, conn)
@ -149,46 +148,46 @@ class TargetConfig:
self.sync_meta_wait = 0
try:
self.api_name = cfg.get(section, 'api_name')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
self.port = cfg.getint(section, 'port')
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
pass
try:
self.host=cfg.get(section, 'host')
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
raise RuntimeError(
'host not specified for section {s}'.format(s=section)
)
try:
self.is_master=cfg.getboolean(section, 'is_master')
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
pass
try:
self.is_secure=cfg.getboolean(section, 'is_secure')
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
pass
try:
raw_calling_format = cfg.get(section, 'calling_format')
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
raw_calling_format = 'ordinary'
try:
self.sync_agent_addr = cfg.get(section, 'sync_agent_addr')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
self.sync_agent_port = cfg.getint(section, 'sync_agent_port')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
self.sync_meta_wait = cfg.getint(section, 'sync_meta_wait')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
pass
@ -208,7 +207,7 @@ class TargetConnection:
class RegionsInfo:
def __init__(self):
self.m = bunch.Bunch()
self.m = munch.Munch()
self.master = None
self.secondaries = []
@ -226,21 +225,21 @@ class RegionsInfo:
return self.m[name]
def get(self):
return self.m
def iteritems(self):
return self.m.iteritems()
def items(self):
return self.m.items()
regions = RegionsInfo()
class RegionsConn:
def __init__(self):
self.m = bunch.Bunch()
self.m = munch.Munch()
self.default = None
self.master = None
self.secondaries = []
def iteritems(self):
return self.m.iteritems()
def items(self):
return self.m.items()
def set_default(self, conn):
self.default = conn
@ -260,7 +259,7 @@ _multiprocess_can_split_ = True
def setup():
cfg = ConfigParser.RawConfigParser()
cfg = configparser.RawConfigParser()
try:
path = os.environ['S3TEST_CONF']
except KeyError:
@ -268,8 +267,7 @@ def setup():
'To run tests, point environment '
+ 'variable S3TEST_CONF to a config file.',
)
with file(path) as f:
cfg.readfp(f)
cfg.read(path)
global prefix
global targets
@ -277,19 +275,19 @@ def setup():
try:
template = cfg.get('fixtures', 'bucket prefix')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
template = 'test-{random}-'
prefix = choose_bucket_prefix(template=template)
try:
slow_backend = cfg.getboolean('fixtures', 'slow backend')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
slow_backend = False
# pull the default_region out, if it exists
try:
default_region = cfg.get('fixtures', 'default_region')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
default_region = None
s3.clear()
@ -315,7 +313,7 @@ def setup():
if len(regions.get()) == 0:
regions.add("default", TargetConfig(cfg, section))
config[name] = bunch.Bunch()
config[name] = munch.Munch()
for var in [
'user_id',
'display_name',
@ -329,12 +327,12 @@ def setup():
]:
try:
config[name][var] = cfg.get(section, var)
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
pass
targets[name] = RegionsConn()
for (k, conf) in regions.iteritems():
for (k, conf) in regions.items():
conn = boto.s3.connection.S3Connection(
aws_access_key_id=cfg.get(section, 'access_key'),
aws_secret_access_key=cfg.get(section, 'secret_key'),
@ -475,7 +473,7 @@ def _make_raw_request(host, port, method, path, body=None, request_headers=None,
if request_headers is None:
request_headers = {}
c = class_(host, port, strict=True, timeout=timeout)
c = class_(host, port=port, timeout=timeout)
# TODO: We might have to modify this in future if we need to interact with
# how httplib.request handles Accept-Encoding and Host.


@ -1,10 +1,9 @@
from cStringIO import StringIO
from io import StringIO
import boto.connection
import boto.exception
import boto.s3.connection
import boto.s3.acl
import boto.utils
import bunch
import nose
import operator
import random
@ -15,7 +14,7 @@ import os
import re
from email.utils import formatdate
from urlparse import urlparse
from urllib.parse import urlparse
from boto.s3.connection import S3Connection
@ -24,7 +23,6 @@ from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from .utils import assert_raises
import AnonymousAuth
from email.header import decode_header


@ -1,9 +1,8 @@
from cStringIO import StringIO
from io import StringIO
import boto.exception
import boto.s3.connection
import boto.s3.acl
import boto.s3.lifecycle
import bunch
import datetime
import time
import email.utils
@ -16,7 +15,6 @@ import os
import requests
import base64
import hmac
import sha
import pytz
import json
import httplib2
@ -27,13 +25,13 @@ import random
import re
from collections import defaultdict
from urlparse import urlparse
from urllib.parse import urlparse
from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
import utils
from . import utils
from .utils import assert_raises
from .policy import Policy, Statement, make_json_policy
@ -117,7 +115,7 @@ def check_configure_versioning_retry(bucket, status, expected_string):
read_status = None
for i in xrange(5):
for i in range(5):
try:
read_status = bucket.get_versioning_status()['Versioning']
except KeyError:
@ -330,26 +328,26 @@ def generate_lifecycle_body(rules):
body = '<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration>'
for rule in rules:
body += '<Rule><ID>%s</ID><Status>%s</Status>' % (rule['ID'], rule['Status'])
if 'Prefix' in rule.keys():
if 'Prefix' in list(rule.keys()):
body += '<Prefix>%s</Prefix>' % rule['Prefix']
if 'Filter' in rule.keys():
if 'Filter' in list(rule.keys()):
prefix_str= '' # AWS supports empty filters
if 'Prefix' in rule['Filter'].keys():
if 'Prefix' in list(rule['Filter'].keys()):
prefix_str = '<Prefix>%s</Prefix>' % rule['Filter']['Prefix']
body += '<Filter>%s</Filter>' % prefix_str
if 'Expiration' in rule.keys():
if 'ExpiredObjectDeleteMarker' in rule['Expiration'].keys():
if 'Expiration' in list(rule.keys()):
if 'ExpiredObjectDeleteMarker' in list(rule['Expiration'].keys()):
body += '<Expiration><ExpiredObjectDeleteMarker>%s</ExpiredObjectDeleteMarker></Expiration>' \
% rule['Expiration']['ExpiredObjectDeleteMarker']
elif 'Date' in rule['Expiration'].keys():
elif 'Date' in list(rule['Expiration'].keys()):
body += '<Expiration><Date>%s</Date></Expiration>' % rule['Expiration']['Date']
else:
body += '<Expiration><Days>%d</Days></Expiration>' % rule['Expiration']['Days']
if 'NoncurrentVersionExpiration' in rule.keys():
if 'NoncurrentVersionExpiration' in list(rule.keys()):
body += '<NoncurrentVersionExpiration><NoncurrentDays>%d</NoncurrentDays></NoncurrentVersionExpiration>' % \
rule['NoncurrentVersionExpiration']['NoncurrentDays']
if 'NoncurrentVersionTransition' in rule.keys():
if 'NoncurrentVersionTransition' in list(rule.keys()):
for t in rule['NoncurrentVersionTransition']:
body += '<NoncurrentVersionTransition>'
body += '<NoncurrentDays>%d</NoncurrentDays>' % \
@ -357,7 +355,7 @@ def generate_lifecycle_body(rules):
body += '<StorageClass>%s</StorageClass>' % \
t['StorageClass']
body += '</NoncurrentVersionTransition>'
if 'AbortIncompleteMultipartUpload' in rule.keys():
if 'AbortIncompleteMultipartUpload' in list(rule.keys()):
body += '<AbortIncompleteMultipartUpload><DaysAfterInitiation>%d</DaysAfterInitiation>' \
'</AbortIncompleteMultipartUpload>' % rule['AbortIncompleteMultipartUpload']['DaysAfterInitiation']
body += '</Rule>'
@ -491,11 +489,11 @@ def generate_random(size, part_size=5*1024*1024):
chunk = 1024
allowed = string.ascii_letters
for x in range(0, size, part_size):
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in xrange(chunk)])
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
s = ''
left = size - x
this_part_size = min(left, part_size)
for y in range(this_part_size / chunk):
for y in range(this_part_size // chunk):
s = s + strpart
if this_part_size > len(s):
s = s + strpart[0:this_part_size - len(s)]
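
The / to // switch above is the other recurring pattern in this port: Python 3's / is always true division, and range() rejects the float it produces. A one-line illustration:

    assert 7 / 2 == 3.5  # Python 3: true division
    assert 7 // 2 == 3   # floor division, the int result range() needs
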
@ -535,7 +533,7 @@ def _populate_key(bucket, keyname, size=7*1024*1024, storage_class=None):
key = bucket.new_key(keyname)
if storage_class:
key.storage_class = storage_class
data_str = str(generate_random(size, size).next())
data_str = str(next(generate_random(size, size)))
data = StringIO(data_str)
key.set_contents_from_file(fp=data)
return (key, data_str)
@ -754,7 +752,7 @@ class FakeFile(object):
"""
def __init__(self, char='A', interrupt=None):
self.offset = 0
self.char = char
self.char = bytes(char, 'utf-8')
self.interrupt = interrupt
def seek(self, offset, whence=os.SEEK_SET):
@ -801,7 +799,7 @@ class FakeFileVerifier(object):
if self.char == None:
self.char = data[0]
self.size += size
eq(data, self.char*size)
eq(data.decode(), self.char*size)
def _verify_atomic_key_data(key, size=-1, char=None):
"""


@ -1,4 +1,4 @@
from __future__ import print_function
import sys
import collections
import nose
@ -8,7 +8,7 @@ from pprint import pprint
import time
import boto.exception
from urlparse import urlparse
from urllib.parse import urlparse
from nose.tools import eq_ as eq, ok_ as ok
from nose.plugins.attrib import attr
@ -110,7 +110,7 @@ def get_website_url(**kwargs):
def _test_website_populate_fragment(xml_fragment, fields):
for k in ['RoutingRules']:
if k in fields.keys() and len(fields[k]) > 0:
if k in list(fields.keys()) and len(fields[k]) > 0:
fields[k] = '<%s>%s</%s>' % (k, fields[k], k)
f = {
'IndexDocument_Suffix': choose_bucket_prefix(template='index-{random}.html', max_len=32),
@ -185,7 +185,7 @@ def __website_expected_reponse_status(res, status, reason):
def _website_expected_default_html(**kwargs):
fields = []
for k in kwargs.keys():
for k in list(kwargs.keys()):
# AmazonS3 seems to be inconsistent, some HTML errors include BucketName, but others do not.
if k == 'BucketName':
continue
@ -217,6 +217,7 @@ def _website_expected_error_response(res, bucket_name, status, reason, code, con
content = set([content])
for f in content:
if f is not IGNORE_FIELD and f is not None:
f = bytes(f, 'utf-8')
ok(f in body, 'HTML should contain "%s"' % (f, ))
def _website_expected_redirect_response(res, status, reason, new_url):
@ -237,7 +238,7 @@ def _website_request(bucket_name, path, connect_hostname=None, method='GET', tim
request_headers={}
request_headers['Host'] = o.hostname
request_headers['Accept'] = '*/*'
print('Request: {method} {path}\n{headers}'.format(method=method, path=path, headers=''.join(map(lambda t: t[0]+':'+t[1]+"\n", request_headers.items()))))
print('Request: {method} {path}\n{headers}'.format(method=method, path=path, headers=''.join([t[0]+':'+t[1]+"\n" for t in list(request_headers.items())])))
res = _make_raw_request(connect_hostname, config.main.port, method, path, request_headers=request_headers, secure=False, timeout=timeout)
for (k,v) in res.getheaders():
print(k,v)
@ -293,6 +294,7 @@ def test_website_public_bucket_list_public_index():
res = _website_request(bucket.name, '')
body = res.read()
print(body)
indexstring = bytes(indexstring, 'utf-8')
eq(body, indexstring) # default content should match index.html set content
__website_expected_reponse_status(res, 200, 'OK')
indexhtml.delete()
@ -321,6 +323,7 @@ def test_website_private_bucket_list_public_index():
__website_expected_reponse_status(res, 200, 'OK')
body = res.read()
print(body)
indexstring = bytes(indexstring, 'utf-8')
eq(body, indexstring, 'default content should match index.html set content')
indexhtml.delete()
bucket.delete()
@ -511,6 +514,7 @@ def test_website_private_bucket_list_empty_blockederrordoc():
body = res.read()
print(body)
_website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body)
errorstring = bytes(errorstring, 'utf-8')
ok(errorstring not in body, 'error content should NOT match error.html set content')
errorhtml.delete()
@ -537,6 +541,7 @@ def test_website_public_bucket_list_empty_blockederrordoc():
body = res.read()
print(body)
_website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchKey', content=_website_expected_default_html(Code='NoSuchKey'), body=body)
errorstring = bytes(errorstring, 'utf-8')
ok(errorstring not in body, 'error content should NOT match error.html set content')
errorhtml.delete()
@ -568,6 +573,7 @@ def test_website_public_bucket_list_private_index_blockederrordoc():
body = res.read()
print(body)
_website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body)
errorstring = bytes(errorstring, 'utf-8')
ok(errorstring not in body, 'error content should NOT match error.html set content')
indexhtml.delete()
@ -600,6 +606,7 @@ def test_website_private_bucket_list_private_index_blockederrordoc():
body = res.read()
print(body)
_website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body)
errorstring = bytes(errorstring, 'utf-8')
ok(errorstring not in body, 'error content should NOT match error.html set content')
indexhtml.delete()
@ -1013,7 +1020,7 @@ ROUTING_RULES = {
""",
}
for k in ROUTING_RULES.keys():
for k in list(ROUTING_RULES.keys()):
if len(ROUTING_RULES[k]) > 0:
ROUTING_RULES[k] = "<!-- %s -->\n%s" % (k, ROUTING_RULES[k])
@ -1142,7 +1149,7 @@ def routing_check(*args, **kwargs):
#body = res.read()
#print(body)
#eq(body, args['content'], 'default content should match index.html set content')
ok(res.getheader('Content-Length', -1) > 0)
ok(int(res.getheader('Content-Length', -1)) > 0)
elif args['code'] >= 300 and args['code'] < 400:
_website_expected_redirect_response(res, args['code'], IGNORE_FIELD, new_url)
elif args['code'] >= 400:
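
The repeated bytes(..., 'utf-8') conversions in these website tests follow from the same split: http.client responses read() as bytes under Python 3, so substring checks against the body must be bytes-vs-bytes, and header values such as Content-Length arrive as str, hence the int() above. A minimal sketch:

    # res: an http.client.HTTPResponse as returned above (illustration)
    body = res.read()                      # bytes, not str
    needle = bytes('index.html', 'utf-8')  # convert the str needle first
    found = needle in body                 # bytes-vs-bytes membership test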


@ -1,6 +1,6 @@
from nose.tools import eq_ as eq
import utils
from . import utils
def test_generate():
FIVE_MB = 5 * 1024 * 1024


@ -28,11 +28,11 @@ def generate_random(size, part_size=5*1024*1024):
chunk = 1024
allowed = string.ascii_letters
for x in range(0, size, part_size):
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in xrange(chunk)])
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
s = ''
left = size - x
this_part_size = min(left, part_size)
for y in range(this_part_size / chunk):
for y in range(this_part_size // chunk):
s = s + strpart
s = s + strpart[:(this_part_size % chunk)]
yield s
@ -42,7 +42,7 @@ def generate_random(size, part_size=5*1024*1024):
# syncs all the regions except for the one passed in
def region_sync_meta(targets, region):
for (k, r) in targets.iteritems():
for (k, r) in targets.items():
if r == region:
continue
conf = r.conf


@ -1,376 +0,0 @@
from boto.s3.connection import S3Connection
from boto.exception import BotoServerError
from boto.s3.key import Key
from httplib import BadStatusLine
from optparse import OptionParser
from .. import common
import traceback
import itertools
import random
import string
import struct
import yaml
import sys
import re
class DecisionGraphError(Exception):
""" Raised when a node in a graph tries to set a header or
key that was previously set by another node
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class RecursionError(Exception):
"""Runaway recursion in string formatting"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return '{0.__doc__}: {0.msg!r}'.format(self)
def assemble_decision(decision_graph, prng):
""" Take in a graph describing the possible decision space and a random
number generator and traverse the graph to build a decision
"""
return descend_graph(decision_graph, 'start', prng)
def descend_graph(decision_graph, node_name, prng):
""" Given a graph and a particular node in that graph, set the values in
the node's "set" list, pick a choice from the "choice" list, and
recurse. Finally, return dictionary of values
"""
node = decision_graph[node_name]
try:
choice = make_choice(node['choices'], prng)
if choice == '':
decision = {}
else:
decision = descend_graph(decision_graph, choice, prng)
except IndexError:
decision = {}
for key, choices in node['set'].iteritems():
if key in decision:
raise DecisionGraphError("Node %s tried to set '%s', but that key was already set by a lower node!" %(node_name, key))
decision[key] = make_choice(choices, prng)
if 'headers' in node:
decision.setdefault('headers', [])
for desc in node['headers']:
try:
(repetition_range, header, value) = desc
except ValueError:
(header, value) = desc
repetition_range = '1'
try:
size_min, size_max = repetition_range.split('-', 1)
except ValueError:
size_min = size_max = repetition_range
size_min = int(size_min)
size_max = int(size_max)
num_reps = prng.randint(size_min, size_max)
if header in [h for h, v in decision['headers']]:
raise DecisionGraphError("Node %s tried to add header '%s', but that header already exists!" %(node_name, header))
for _ in xrange(num_reps):
decision['headers'].append([header, value])
return decision
def make_choice(choices, prng):
""" Given a list of (possibly weighted) options or just a single option!,
choose one of the options taking weights into account and return the
choice
"""
if isinstance(choices, str):
return choices
weighted_choices = []
for option in choices:
if option is None:
weighted_choices.append('')
continue
try:
(weight, value) = option.split(None, 1)
weight = int(weight)
except ValueError:
weight = 1
value = option
if value == 'null' or value == 'None':
value = ''
for _ in xrange(weight):
weighted_choices.append(value)
return prng.choice(weighted_choices)
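# Illustration (not part of the original module): the weight parsing above
# means '2 bar' is twice as likely as an unweighted option, e.g.
#
#   make_choice(['foo', '2 bar', '1 baz'], prng)
#
# builds ['foo', 'bar', 'bar', 'baz'] and draws 'bar' about half the time,
# matching the 0.25/0.50/0.25 proportions asserted in the unit tests below.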
def expand_headers(decision, prng):
expanded_headers = {}
for header in decision['headers']:
h = expand(decision, header[0], prng)
v = expand(decision, header[1], prng)
expanded_headers[h] = v
return expanded_headers
def expand(decision, value, prng):
c = itertools.count()
fmt = RepeatExpandingFormatter(prng)
new = fmt.vformat(value, [], decision)
return new
class RepeatExpandingFormatter(string.Formatter):
charsets = {
'printable_no_whitespace': string.printable.translate(None, string.whitespace),
'printable': string.printable,
'punctuation': string.punctuation,
'whitespace': string.whitespace,
'digits': string.digits
}
def __init__(self, prng, _recursion=0):
super(RepeatExpandingFormatter, self).__init__()
# this class assumes it is always instantiated once per
# formatting; use that to detect runaway recursion
self.prng = prng
self._recursion = _recursion
def get_value(self, key, args, kwargs):
fields = key.split(None, 1)
fn = getattr(self, 'special_{name}'.format(name=fields[0]), None)
if fn is not None:
if len(fields) == 1:
fields.append('')
return fn(fields[1])
val = super(RepeatExpandingFormatter, self).get_value(key, args, kwargs)
if self._recursion > 5:
raise RecursionError(key)
fmt = self.__class__(self.prng, _recursion=self._recursion+1)
n = fmt.vformat(val, args, kwargs)
return n
def special_random(self, args):
arg_list = args.split()
try:
size_min, size_max = arg_list[0].split('-', 1)
except ValueError:
size_min = size_max = arg_list[0]
except IndexError:
size_min = '0'
size_max = '1000'
size_min = int(size_min)
size_max = int(size_max)
length = self.prng.randint(size_min, size_max)
try:
charset_arg = arg_list[1]
except IndexError:
charset_arg = 'printable'
if charset_arg == 'binary' or charset_arg == 'binary_no_whitespace':
num_bytes = length + 8
tmplist = [self.prng.getrandbits(64) for _ in xrange(num_bytes / 8)]
tmpstring = struct.pack((num_bytes / 8) * 'Q', *tmplist)
if charset_arg == 'binary_no_whitespace':
tmpstring = ''.join(c for c in tmpstring if c not in string.whitespace)
return tmpstring[0:length]
else:
charset = self.charsets[charset_arg]
return ''.join([self.prng.choice(charset) for _ in xrange(length)]) # Won't scale nicely
def parse_options():
parser = OptionParser()
parser.add_option('-O', '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE')
parser.add_option('--seed', dest='seed', type='int', help='initial seed for the random number generator')
parser.add_option('--seed-file', dest='seedfile', help='read seeds for specific requests from FILE', metavar='FILE')
parser.add_option('-n', dest='num_requests', type='int', help='issue NUM requests before stopping', metavar='NUM')
parser.add_option('-v', '--verbose', dest='verbose', action="store_true", help='turn on verbose output')
parser.add_option('-d', '--debug', dest='debug', action="store_true", help='turn on debugging (very verbose) output')
parser.add_option('--decision-graph', dest='graph_filename', help='file in which to find the request decision graph')
parser.add_option('--no-cleanup', dest='cleanup', action="store_false", help='turn off teardown so you can peruse the state of buckets after testing')
parser.set_defaults(num_requests=5)
parser.set_defaults(cleanup=True)
parser.set_defaults(graph_filename='request_decision_graph.yml')
return parser.parse_args()
def randomlist(seed=None):
""" Returns an infinite generator of random numbers
"""
rng = random.Random(seed)
while True:
yield rng.randint(0,100000) #100,000 seeds is enough, right?
def populate_buckets(conn, alt):
""" Creates buckets and keys for fuzz testing and sets appropriate
permissions. Returns a dictionary of the bucket and key names.
"""
breadable = common.get_new_bucket(alt)
bwritable = common.get_new_bucket(alt)
bnonreadable = common.get_new_bucket(alt)
oreadable = Key(breadable)
owritable = Key(bwritable)
ononreadable = Key(breadable)
oreadable.set_contents_from_string('oreadable body')
owritable.set_contents_from_string('owritable body')
ononreadable.set_contents_from_string('ononreadable body')
breadable.set_acl('public-read')
bwritable.set_acl('public-read-write')
bnonreadable.set_acl('private')
oreadable.set_acl('public-read')
owritable.set_acl('public-read-write')
ononreadable.set_acl('private')
return dict(
bucket_readable=breadable.name,
bucket_writable=bwritable.name,
bucket_not_readable=bnonreadable.name,
bucket_not_writable=breadable.name,
object_readable=oreadable.key,
object_writable=owritable.key,
object_not_readable=ononreadable.key,
object_not_writable=oreadable.key,
)
def _main():
""" The main script
"""
(options, args) = parse_options()
random.seed(options.seed if options.seed else None)
s3_connection = common.s3.main
alt_connection = common.s3.alt
if options.outfile:
OUT = open(options.outfile, 'w')
else:
OUT = sys.stderr
VERBOSE = DEBUG = open('/dev/null', 'w')
if options.verbose:
VERBOSE = OUT
if options.debug:
DEBUG = OUT
VERBOSE = OUT
request_seeds = None
if options.seedfile:
FH = open(options.seedfile, 'r')
request_seeds = [int(line) for line in FH if line != '\n']
print>>OUT, 'Seedfile: %s' %options.seedfile
print>>OUT, 'Number of requests: %d' %len(request_seeds)
else:
if options.seed:
print>>OUT, 'Initial Seed: %d' %options.seed
print>>OUT, 'Number of requests: %d' %options.num_requests
random_list = randomlist(options.seed)
request_seeds = itertools.islice(random_list, options.num_requests)
print>>OUT, 'Decision Graph: %s' %options.graph_filename
graph_file = open(options.graph_filename, 'r')
decision_graph = yaml.safe_load(graph_file)
constants = populate_buckets(s3_connection, alt_connection)
print>>VERBOSE, "Test Buckets/Objects:"
for key, value in constants.iteritems():
print>>VERBOSE, "\t%s: %s" %(key, value)
print>>OUT, "Begin Fuzzing..."
print>>VERBOSE, '='*80
for request_seed in request_seeds:
print>>VERBOSE, 'Seed is: %r' %request_seed
prng = random.Random(request_seed)
decision = assemble_decision(decision_graph, prng)
decision.update(constants)
method = expand(decision, decision['method'], prng)
path = expand(decision, decision['urlpath'], prng)
try:
body = expand(decision, decision['body'], prng)
except KeyError:
body = ''
try:
headers = expand_headers(decision, prng)
except KeyError:
headers = {}
print>>VERBOSE, "%r %r" %(method[:100], path[:100])
for h, v in headers.iteritems():
print>>VERBOSE, "%r: %r" %(h[:50], v[:50])
print>>VERBOSE, "%r\n" % body[:100]
print>>DEBUG, 'FULL REQUEST'
print>>DEBUG, 'Method: %r' %method
print>>DEBUG, 'Path: %r' %path
print>>DEBUG, 'Headers:'
for h, v in headers.iteritems():
print>>DEBUG, "\t%r: %r" %(h, v)
print>>DEBUG, 'Body: %r\n' %body
failed = False # Let's be optimistic, shall we?
try:
response = s3_connection.make_request(method, path, data=body, headers=headers, override_num_retries=1)
body = response.read()
except BotoServerError, e:
response = e
body = e.body
failed = True
except BadStatusLine, e:
print>>OUT, 'FAILED: failed to parse response (BadStatusLine); probably a NUL byte in your request?'
print>>VERBOSE, '='*80
continue
if failed:
print>>OUT, 'FAILED:'
OLD_VERBOSE = VERBOSE
OLD_DEBUG = DEBUG
VERBOSE = DEBUG = OUT
print>>VERBOSE, 'Seed was: %r' %request_seed
print>>VERBOSE, 'Response status code: %d %s' %(response.status, response.reason)
print>>DEBUG, 'Body:\n%s' %body
print>>VERBOSE, '='*80
if failed:
VERBOSE = OLD_VERBOSE
DEBUG = OLD_DEBUG
print>>OUT, '...done fuzzing'
if options.cleanup:
common.teardown()
def main():
common.setup()
try:
_main()
except Exception as e:
traceback.print_exc()
common.teardown()


@ -1,403 +0,0 @@
"""
Unit-test suite for the S3 fuzzer
The fuzzer is a grammar-based random S3 operation generator
that produces random operation sequences in an effort to
crash the server. This unit-test suite does not test
S3 servers, but rather the fuzzer infrastructure.
It works by running the fuzzer off of a simple grammar,
and checking the produced requests to ensure that they
include the expected sorts of operations in the expected
proportions.
"""
import sys
import itertools
import nose
import random
import string
import yaml
from ..headers import *
from nose.tools import eq_ as eq
from nose.tools import assert_true
from nose.plugins.attrib import attr
from ...functional.utils import assert_raises
_decision_graph = {}
def check_access_denied(fn, *args, **kwargs):
e = assert_raises(boto.exception.S3ResponseError, fn, *args, **kwargs)
eq(e.status, 403)
eq(e.reason, 'Forbidden')
eq(e.error_code, 'AccessDenied')
def build_graph():
graph = {}
graph['start'] = {
'set': {},
'choices': ['node2']
}
graph['leaf'] = {
'set': {
'key1': 'value1',
'key2': 'value2'
},
'headers': [
['1-2', 'random-header-{random 5-10 printable}', '{random 20-30 punctuation}']
],
'choices': []
}
graph['node1'] = {
'set': {
'key3': 'value3',
'header_val': [
'3 h1',
'2 h2',
'h3'
]
},
'headers': [
['1-1', 'my-header', '{header_val}'],
],
'choices': ['leaf']
}
graph['node2'] = {
'set': {
'randkey': 'value-{random 10-15 printable}',
'path': '/{bucket_readable}',
'indirect_key1': '{key1}'
},
'choices': ['leaf']
}
graph['bad_node'] = {
'set': {
'key1': 'value1'
},
'choices': ['leaf']
}
graph['nonexistant_child_node'] = {
'set': {},
'choices': ['leafy_greens']
}
graph['weighted_node'] = {
'set': {
'k1': [
'foo',
'2 bar',
'1 baz'
]
},
'choices': [
'foo',
'2 bar',
'1 baz'
]
}
graph['null_choice_node'] = {
'set': {},
'choices': [None]
}
graph['repeated_headers_node'] = {
'set': {},
'headers': [
['1-2', 'random-header-{random 5-10 printable}', '{random 20-30 punctuation}']
],
'choices': ['leaf']
}
graph['weighted_null_choice_node'] = {
'set': {},
'choices': ['3 null']
}
return graph
#def test_foo():
#graph_file = open('request_decision_graph.yml', 'r')
#graph = yaml.safe_load(graph_file)
#eq(graph['bucket_put_simple']['set']['grantee'], 0)
def test_load_graph():
graph_file = open('request_decision_graph.yml', 'r')
graph = yaml.safe_load(graph_file)
graph['start']
def test_descend_leaf_node():
graph = build_graph()
prng = random.Random(1)
decision = descend_graph(graph, 'leaf', prng)
eq(decision['key1'], 'value1')
eq(decision['key2'], 'value2')
e = assert_raises(KeyError, lambda x: decision[x], 'key3')
def test_descend_node():
graph = build_graph()
prng = random.Random(1)
decision = descend_graph(graph, 'node1', prng)
eq(decision['key1'], 'value1')
eq(decision['key2'], 'value2')
eq(decision['key3'], 'value3')
def test_descend_bad_node():
graph = build_graph()
prng = random.Random(1)
assert_raises(DecisionGraphError, descend_graph, graph, 'bad_node', prng)
def test_descend_nonexistant_child():
graph = build_graph()
prng = random.Random(1)
assert_raises(KeyError, descend_graph, graph, 'nonexistant_child_node', prng)
def test_expand_random_printable():
prng = random.Random(1)
got = expand({}, '{random 10-15 printable}', prng)
eq(got, '[/pNI$;92@')
def test_expand_random_binary():
prng = random.Random(1)
got = expand({}, '{random 10-15 binary}', prng)
eq(got, '\xdfj\xf1\xd80>a\xcd\xc4\xbb')
def test_expand_random_printable_no_whitespace():
prng = random.Random(1)
for _ in xrange(1000):
got = expand({}, '{random 500 printable_no_whitespace}', prng)
assert_true(reduce(lambda x, y: x and y, [x not in string.whitespace and x in string.printable for x in got]))
def test_expand_random_binary_no_whitespace():
prng = random.Random(1)
for _ in xrange(1000):
got = expand({}, '{random 500 binary_no_whitespace}', prng)
assert_true(reduce(lambda x, y: x and y, [x not in string.whitespace for x in got]))
def test_expand_random_no_args():
prng = random.Random(1)
for _ in xrange(1000):
got = expand({}, '{random}', prng)
assert_true(0 <= len(got) <= 1000)
assert_true(reduce(lambda x, y: x and y, [x in string.printable for x in got]))
def test_expand_random_no_charset():
prng = random.Random(1)
for _ in xrange(1000):
got = expand({}, '{random 10-30}', prng)
assert_true(10 <= len(got) <= 30)
assert_true(reduce(lambda x, y: x and y, [x in string.printable for x in got]))
def test_expand_random_exact_length():
prng = random.Random(1)
for _ in xrange(1000):
got = expand({}, '{random 10 digits}', prng)
assert_true(len(got) == 10)
assert_true(reduce(lambda x, y: x and y, [x in string.digits for x in got]))
def test_expand_random_bad_charset():
prng = random.Random(1)
assert_raises(KeyError, expand, {}, '{random 10-30 foo}', prng)
def test_expand_random_missing_length():
prng = random.Random(1)
assert_raises(ValueError, expand, {}, '{random printable}', prng)
def test_assemble_decision():
graph = build_graph()
prng = random.Random(1)
decision = assemble_decision(graph, prng)
eq(decision['key1'], 'value1')
eq(decision['key2'], 'value2')
eq(decision['randkey'], 'value-{random 10-15 printable}')
eq(decision['indirect_key1'], '{key1}')
eq(decision['path'], '/{bucket_readable}')
assert_raises(KeyError, lambda x: decision[x], 'key3')
def test_expand_escape():
prng = random.Random(1)
decision = dict(
foo='{{bar}}',
)
got = expand(decision, '{foo}', prng)
eq(got, '{bar}')
def test_expand_indirect():
prng = random.Random(1)
decision = dict(
foo='{bar}',
bar='quux',
)
got = expand(decision, '{foo}', prng)
eq(got, 'quux')
def test_expand_indirect_double():
prng = random.Random(1)
decision = dict(
foo='{bar}',
bar='{quux}',
quux='thud',
)
got = expand(decision, '{foo}', prng)
eq(got, 'thud')
def test_expand_recursive():
prng = random.Random(1)
decision = dict(
foo='{foo}',
)
e = assert_raises(RecursionError, expand, decision, '{foo}', prng)
eq(str(e), "Runaway recursion in string formatting: 'foo'")
def test_expand_recursive_mutual():
prng = random.Random(1)
decision = dict(
foo='{bar}',
bar='{foo}',
)
e = assert_raises(RecursionError, expand, decision, '{foo}', prng)
eq(str(e), "Runaway recursion in string formatting: 'foo'")
def test_expand_recursive_not_too_eager():
prng = random.Random(1)
decision = dict(
foo='bar',
)
got = expand(decision, 100*'{foo}', prng)
eq(got, 100*'bar')
def test_make_choice_unweighted_with_space():
prng = random.Random(1)
choice = make_choice(['foo bar'], prng)
eq(choice, 'foo bar')
def test_weighted_choices():
graph = build_graph()
prng = random.Random(1)
choices_made = {}
for _ in xrange(1000):
choice = make_choice(graph['weighted_node']['choices'], prng)
if choices_made.has_key(choice):
choices_made[choice] += 1
else:
choices_made[choice] = 1
foo_percentage = choices_made['foo'] / 1000.0
bar_percentage = choices_made['bar'] / 1000.0
baz_percentage = choices_made['baz'] / 1000.0
nose.tools.assert_almost_equal(foo_percentage, 0.25, 1)
nose.tools.assert_almost_equal(bar_percentage, 0.50, 1)
nose.tools.assert_almost_equal(baz_percentage, 0.25, 1)
def test_null_choices():
graph = build_graph()
prng = random.Random(1)
choice = make_choice(graph['null_choice_node']['choices'], prng)
eq(choice, '')
def test_weighted_null_choices():
graph = build_graph()
prng = random.Random(1)
choice = make_choice(graph['weighted_null_choice_node']['choices'], prng)
eq(choice, '')
def test_null_child():
graph = build_graph()
prng = random.Random(1)
decision = descend_graph(graph, 'null_choice_node', prng)
eq(decision, {})
def test_weighted_set():
graph = build_graph()
prng = random.Random(1)
choices_made = {}
for _ in xrange(1000):
choice = make_choice(graph['weighted_node']['set']['k1'], prng)
if choices_made.has_key(choice):
choices_made[choice] += 1
else:
choices_made[choice] = 1
foo_percentage = choices_made['foo'] / 1000.0
bar_percentage = choices_made['bar'] / 1000.0
baz_percentage = choices_made['baz'] / 1000.0
nose.tools.assert_almost_equal(foo_percentage, 0.25, 1)
nose.tools.assert_almost_equal(bar_percentage, 0.50, 1)
nose.tools.assert_almost_equal(baz_percentage, 0.25, 1)
def test_header_presence():
graph = build_graph()
prng = random.Random(1)
decision = descend_graph(graph, 'node1', prng)
c1 = itertools.count()
c2 = itertools.count()
for header, value in decision['headers']:
if header == 'my-header':
eq(value, '{header_val}')
assert_true(next(c1) < 1)
elif header == 'random-header-{random 5-10 printable}':
eq(value, '{random 20-30 punctuation}')
assert_true(next(c2) < 2)
else:
raise KeyError('unexpected header found: %s' % header)
assert_true(next(c1))
assert_true(next(c2))
def test_duplicate_header():
graph = build_graph()
prng = random.Random(1)
assert_raises(DecisionGraphError, descend_graph, graph, 'repeated_headers_node', prng)
def test_expand_headers():
graph = build_graph()
prng = random.Random(1)
decision = descend_graph(graph, 'node1', prng)
expanded_headers = expand_headers(decision, prng)
for header, value in expanded_headers.iteritems():
if header == 'my-header':
assert_true(value in ['h1', 'h2', 'h3'])
elif header.startswith('random-header-'):
assert_true(20 <= len(value) <= 30)
assert_true(string.strip(value, RepeatExpandingFormatter.charsets['punctuation']) == '')
else:
raise DecisionGraphError('unexpected header found: "%s"' % header)

View file

@ -1,117 +0,0 @@
from boto.s3.key import Key
from optparse import OptionParser
from . import realistic
import traceback
import random
from . import common
import sys
def parse_opts():
parser = OptionParser()
parser.add_option('-O', '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE')
parser.add_option('-b', '--bucket', dest='bucket', help='push objects to BUCKET', metavar='BUCKET')
parser.add_option('--seed', dest='seed', help='optional seed for the random number generator')
return parser.parse_args()
def get_random_files(quantity, mean, stddev, seed):
"""Create file-like objects with pseudorandom contents.
IN:
number of files to create
mean file size in bytes
standard deviation from mean file size
seed for PRNG
OUT:
list of file handles
"""
file_generator = realistic.files(mean, stddev, seed)
return [file_generator.next() for _ in xrange(quantity)]
def upload_objects(bucket, files, seed):
"""Upload a bunch of files to an S3 bucket
IN:
boto S3 bucket object
list of file handles to upload
seed for PRNG
OUT:
list of boto S3 key objects
"""
keys = []
name_generator = realistic.names(15, 4, seed=seed)
for fp in files:
print >> sys.stderr, 'sending file with size %dB' % fp.size
key = Key(bucket)
key.key = name_generator.next()
key.set_contents_from_file(fp, rewind=True)
key.set_acl('public-read')
keys.append(key)
return keys
def _main():
'''To run the static content load test, make sure you've bootstrapped your
test environment and set up your config.yaml file, then run the following:
S3TEST_CONF=config.yaml virtualenv/bin/s3tests-generate-objects.py --seed 1234
This creates a bucket with your S3 credentials (from config.yaml) and
fills it with garbage objects as described in the
file_generation.groups section of config.yaml. It writes a list of
URLs to those objects to the file listed in file_generation.url_file
in config.yaml.
Once you have objects in your bucket, run the siege benchmarking program:
siege --rc ./siege.conf -r 5
This tells siege to read the ./siege.conf config file which tells it to
use the urls in ./urls.txt and log to ./siege.log. It hits each url in
urls.txt 5 times (-r flag).
Results are printed to the terminal and written in CSV format to
./siege.log
'''
(options, args) = parse_opts()
#SETUP
random.seed(options.seed if options.seed else None)
conn = common.s3.main
if options.outfile:
OUTFILE = open(options.outfile, 'w')
elif common.config.file_generation.url_file:
OUTFILE = open(common.config.file_generation.url_file, 'w')
else:
OUTFILE = sys.stdout
if options.bucket:
bucket = conn.create_bucket(options.bucket)
else:
bucket = common.get_new_bucket()
bucket.set_acl('public-read')
keys = []
print >> OUTFILE, 'bucket: %s' % bucket.name
print >> sys.stderr, 'setup complete, generating files'
for profile in common.config.file_generation.groups:
seed = random.random()
files = get_random_files(profile[0], profile[1], profile[2], seed)
keys += upload_objects(bucket, files, seed)
print >> sys.stderr, 'finished sending files. generating urls'
for key in keys:
print >> OUTFILE, key.generate_url(0, query_auth=False)
print >> sys.stderr, 'done'
def main():
common.setup()
try:
_main()
except Exception as e:
traceback.print_exc()
common.teardown()

View file

@ -1,265 +0,0 @@
import gevent
import gevent.pool
import gevent.queue
import gevent.monkey; gevent.monkey.patch_all()
import itertools
import optparse
import os
import sys
import time
import traceback
import random
import yaml
import realistic
import common
NANOSECOND = int(1e9)
def reader(bucket, worker_id, file_names, queue, rand):
while True:
objname = rand.choice(file_names)
key = bucket.new_key(objname)
fp = realistic.FileValidator()
result = dict(
type='r',
bucket=bucket.name,
key=key.name,
worker=worker_id,
)
start = time.time()
try:
key.get_contents_to_file(fp._file)
except gevent.GreenletExit:
raise
except Exception as e:
# stop timer ASAP, even on errors
end = time.time()
result.update(
error=dict(
msg=str(e),
traceback=traceback.format_exc(),
),
)
# certain kinds of programmer errors make this a busy
# loop; let parent greenlet get some time too
time.sleep(0)
else:
end = time.time()
if not fp.valid():
m='md5sum check failed start={s} ({se}) end={e} size={sz} obj={o}'.format(s=time.ctime(start), se=start, e=end, sz=fp._file.tell(), o=objname)
result.update(
error=dict(
msg=m,
traceback=traceback.format_exc(),
),
)
print "ERROR:", m
else:
elapsed = end - start
result.update(
start=start,
duration=int(round(elapsed * NANOSECOND)),
)
queue.put(result)
def writer(bucket, worker_id, file_names, files, queue, rand):
while True:
fp = next(files)
fp.seek(0)
objname = rand.choice(file_names)
key = bucket.new_key(objname)
result = dict(
type='w',
bucket=bucket.name,
key=key.name,
worker=worker_id,
)
start = time.time()
try:
key.set_contents_from_file(fp)
except gevent.GreenletExit:
raise
except Exception as e:
# stop timer ASAP, even on errors
end = time.time()
result.update(
error=dict(
msg=str(e),
traceback=traceback.format_exc(),
),
)
# certain kinds of programmer errors make this a busy
# loop; let parent greenlet get some time too
time.sleep(0)
else:
end = time.time()
elapsed = end - start
result.update(
start=start,
duration=int(round(elapsed * NANOSECOND)),
)
queue.put(result)
def parse_options():
parser = optparse.OptionParser(
usage='%prog [OPTS] <CONFIG_YAML',
)
parser.add_option("--no-cleanup", dest="cleanup", action="store_false",
help="skip cleaning up all created buckets", default=True)
return parser.parse_args()
def write_file(bucket, file_name, fp):
"""
Write a single file to the bucket using the file_name.
This is used during the warmup to initialize the files.
"""
key = bucket.new_key(file_name)
key.set_contents_from_file(fp)
def main():
# parse options
(options, args) = parse_options()
if os.isatty(sys.stdin.fileno()):
raise RuntimeError('Need configuration in stdin.')
config = common.read_config(sys.stdin)
conn = common.connect(config.s3)
bucket = None
try:
# setup
real_stdout = sys.stdout
sys.stdout = sys.stderr
# verify all required config items are present
if 'readwrite' not in config:
raise RuntimeError('readwrite section not found in config')
for item in ['readers', 'writers', 'duration', 'files', 'bucket']:
if item not in config.readwrite:
raise RuntimeError("Missing readwrite config item: {item}".format(item=item))
for item in ['num', 'size', 'stddev']:
if item not in config.readwrite.files:
raise RuntimeError("Missing readwrite config item: files.{item}".format(item=item))
seeds = dict(config.readwrite.get('random_seed', {}))
seeds.setdefault('main', random.randrange(2**32))
rand = random.Random(seeds['main'])
for name in ['names', 'contents', 'writer', 'reader']:
seeds.setdefault(name, rand.randrange(2**32))
print 'Using random seeds: {seeds}'.format(seeds=seeds)
# setup bucket and other objects
bucket_name = common.choose_bucket_prefix(config.readwrite.bucket, max_len=30)
bucket = conn.create_bucket(bucket_name)
print "Created bucket: {name}".format(name=bucket.name)
# check flag for deterministic file name creation
if not config.readwrite.get('deterministic_file_names'):
print 'Creating random file names'
file_names = realistic.names(
mean=15,
stddev=4,
seed=seeds['names'],
)
file_names = itertools.islice(file_names, config.readwrite.files.num)
file_names = list(file_names)
else:
print 'Creating file names that are deterministic'
file_names = []
for x in xrange(config.readwrite.files.num):
file_names.append('test_file_{num}'.format(num=x))
files = realistic.files2(
mean=1024 * config.readwrite.files.size,
stddev=1024 * config.readwrite.files.stddev,
seed=seeds['contents'],
)
q = gevent.queue.Queue()
# warmup - get initial set of files uploaded if there are any writers specified
if config.readwrite.writers > 0:
print "Uploading initial set of {num} files".format(num=config.readwrite.files.num)
warmup_pool = gevent.pool.Pool(size=100)
for file_name in file_names:
fp = next(files)
warmup_pool.spawn(
write_file,
bucket=bucket,
file_name=file_name,
fp=fp,
)
warmup_pool.join()
# main work
print "Starting main worker loop."
print "Using file size: {size} +- {stddev}".format(size=config.readwrite.files.size, stddev=config.readwrite.files.stddev)
print "Spawning {w} writers and {r} readers...".format(w=config.readwrite.writers, r=config.readwrite.readers)
group = gevent.pool.Group()
rand_writer = random.Random(seeds['writer'])
# Don't spawn writers (which create random files) if deterministic_file_names is set and true
if not config.readwrite.get('deterministic_file_names'):
for x in xrange(config.readwrite.writers):
this_rand = random.Random(rand_writer.randrange(2**32))
group.spawn(
writer,
bucket=bucket,
worker_id=x,
file_names=file_names,
files=files,
queue=q,
rand=this_rand,
)
# Since the loop generating readers already uses config.readwrite.readers
# and the file names are already generated (randomly or deterministically),
# this loop needs no additional qualifiers. If zero readers are specified,
# it will behave as expected (no data is read)
rand_reader = random.Random(seeds['reader'])
for x in xrange(config.readwrite.readers):
this_rand = random.Random(rand_reader.randrange(2**32))
group.spawn(
reader,
bucket=bucket,
worker_id=x,
file_names=file_names,
queue=q,
rand=this_rand,
)
def stop():
group.kill(block=True)
q.put(StopIteration)
gevent.spawn_later(config.readwrite.duration, stop)
# wait for all the tests to finish
group.join()
print 'post-join, queue size {size}'.format(size=q.qsize())
if q.qsize() > 0:
for temp_dict in q:
if 'error' in temp_dict:
raise Exception('exception:\n\t{msg}\n\t{trace}'.format(
msg=temp_dict['error']['msg'],
trace=temp_dict['error']['traceback'])
)
else:
yaml.safe_dump(temp_dict, stream=real_stdout)
finally:
# cleanup
if options.cleanup:
if bucket is not None:
common.nuke_bucket(bucket)
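For reference, a sketch (all values illustrative, not from the original sources) of the minimal stdin configuration that the validation above accepts; note files.size and files.stddev are interpreted as KiB, since main() multiplies both by 1024, and the s3 section is what common.connect() consumes:
import io
sample = io.StringIO("""
s3:
  host: s3.example.com
  port: 443
  is_secure: true
  access_key: EXAMPLE-ACCESS-KEY
  secret_key: EXAMPLE-SECRET-KEY
readwrite:
  bucket: "readwrite-{random}-"
  readers: 2
  writers: 2
  duration: 60
  files:
    num: 10
    size: 1024
    stddev: 256
""")
config = common.read_config(sample)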

View file

@ -1,281 +0,0 @@
import hashlib
import random
import string
import struct
import time
import math
import tempfile
import shutil
import os
NANOSECOND = int(1e9)
def generate_file_contents(size):
"""
A helper function to generate binary contents of a given size, with the
sha1 hash of the contents appended to the end of the blob.
The hash is sha1's hexdigest, which is 40 chars long, so a consumer
should split off the last 40 chars of the blob to recover the original
hash and body and prove validity.
"""
size = int(size)
contents = os.urandom(size)
content_hash = hashlib.sha1(contents).hexdigest()
return contents + content_hash
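As a quick illustration of that layout (a sketch in the module's own Python 2 idiom, not part of the original file): split off the trailing 40-character digest and recompute it over the body.
blob = generate_file_contents(1024)
body, stored_hash = blob[:-40], blob[-40:]
assert hashlib.sha1(body).hexdigest() == stored_hash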
class FileValidator(object):
def __init__(self, f=None):
self._file = tempfile.SpooledTemporaryFile()
self.original_hash = None
self.new_hash = None
if f:
f.seek(0)
shutil.copyfileobj(f, self._file)
def valid(self):
"""
Returns True if this file looks valid. The file is valid if the end
of the file holds the sha1 hexdigest of the rest of the file.
"""
self._file.seek(0)
contents = self._file.read()
self.original_hash, binary = contents[-40:], contents[:-40]
self.new_hash = hashlib.sha1(binary).hexdigest()
if not self.new_hash == self.original_hash:
print 'original hash: ', self.original_hash
print 'new hash: ', self.new_hash
print 'size: ', self._file.tell()
return False
return True
# XXX not sure if we need all of these
def seek(self, offset, whence=os.SEEK_SET):
self._file.seek(offset, whence)
def tell(self):
return self._file.tell()
def read(self, size=-1):
return self._file.read(size)
def write(self, data):
self._file.write(data)
self._file.seek(0)
class RandomContentFile(object):
def __init__(self, size, seed):
self.size = size
self.seed = seed
self.random = random.Random(self.seed)
# Boto likes to seek once more after it's done reading, so we need to save the last chunks/seek value.
self.last_chunks = self.chunks = None
self.last_seek = None
# Let seek initialize the rest of it, rather than dup code
self.seek(0)
def _mark_chunk(self):
self.chunks.append([self.offset, int(round((time.time() - self.last_seek) * NANOSECOND))])
def seek(self, offset, whence=os.SEEK_SET):
if whence == os.SEEK_SET:
self.offset = offset
elif whence == os.SEEK_END:
self.offset = self.size + offset
elif whence == os.SEEK_CUR:
self.offset += offset
assert self.offset == 0
self.random.seed(self.seed)
self.buffer = ''
self.hash = hashlib.md5()
self.digest_size = self.hash.digest_size
self.digest = None
# Save the last seek time as our start time, and the last chunks
self.last_chunks = self.chunks
# Before emptying.
self.last_seek = time.time()
self.chunks = []
def tell(self):
return self.offset
def _generate(self):
# generate and return a chunk of pseudorandom data
size = min(self.size, 1*1024*1024) # generate at most 1 MB at a time
chunks = int(math.ceil(size/8.0)) # number of 8-byte chunks to create
l = [self.random.getrandbits(64) for _ in xrange(chunks)]
s = struct.pack(chunks*'Q', *l)
return s
def read(self, size=-1):
if size < 0:
size = self.size - self.offset
r = []
random_count = min(size, self.size - self.offset - self.digest_size)
if random_count > 0:
while len(self.buffer) < random_count:
self.buffer += self._generate()
self.offset += random_count
size -= random_count
data, self.buffer = self.buffer[:random_count], self.buffer[random_count:]
if self.hash is not None:
self.hash.update(data)
r.append(data)
digest_count = min(size, self.size - self.offset)
if digest_count > 0:
if self.digest is None:
self.digest = self.hash.digest()
self.hash = None
self.offset += digest_count
size -= digest_count
data = self.digest[:digest_count]
r.append(data)
self._mark_chunk()
return ''.join(r)
class PrecomputedContentFile(object):
def __init__(self, f):
self._file = tempfile.SpooledTemporaryFile()
f.seek(0)
shutil.copyfileobj(f, self._file)
self.last_chunks = self.chunks = None
self.seek(0)
def seek(self, offset, whence=os.SEEK_SET):
self._file.seek(offset, whence)
if self.tell() == 0:
# only reset the chunks when seeking to the beginning
self.last_chunks = self.chunks
self.last_seek = time.time()
self.chunks = []
def tell(self):
return self._file.tell()
def read(self, size=-1):
data = self._file.read(size)
self._mark_chunk()
return data
def _mark_chunk(self):
elapsed = time.time() - self.last_seek
elapsed_nsec = int(round(elapsed * NANOSECOND))
self.chunks.append([self.tell(), elapsed_nsec])
class FileVerifier(object):
def __init__(self):
self.size = 0
self.hash = hashlib.md5()
self.buf = ''
self.created_at = time.time()
self.chunks = []
def _mark_chunk(self):
self.chunks.append([self.size, int(round((time.time() - self.created_at) * NANOSECOND))])
def write(self, data):
self.size += len(data)
self.buf += data
digsz = -1*self.hash.digest_size
new_data, self.buf = self.buf[0:digsz], self.buf[digsz:]
self.hash.update(new_data)
self._mark_chunk()
def valid(self):
"""
Returns True if this file looks valid. The file is valid if the end
of the file has the md5 digest for the first part of the file.
"""
if self.size < self.hash.digest_size:
return self.hash.digest().startswith(self.buf)
return self.buf == self.hash.digest()
def files(mean, stddev, seed=None):
"""
Yields file-like objects with effectively random contents, where
the size of each file follows the normal distribution with `mean`
and `stddev`.
Beware, the file-likeness is very shallow. You can use boto's
`key.set_contents_from_file` to send these to S3, but they are not
full file objects.
The last 128 bits are the MD5 digest of the previous bytes, for
verifying round-trip data integrity. For example, if you
re-download the object and place the contents into a file called
``foo``, the following should print two identical lines:
python -c 'import sys, hashlib; data=sys.stdin.read(); print hashlib.md5(data[:-16]).hexdigest(); print "".join("%02x" % ord(c) for c in data[-16:])' <foo
Except for objects shorter than 16 bytes, where the second line
will be proportionally shorter.
"""
rand = random.Random(seed)
while True:
while True:
size = int(rand.normalvariate(mean, stddev))
if size >= 0:
break
yield RandomContentFile(size=size, seed=rand.getrandbits(32))
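A minimal round-trip sketch (sizes and seed are illustrative): stream one generated file into a FileVerifier, the same producer/consumer pairing the readwrite workers drive over S3.
src = next(files(mean=4096, stddev=1024, seed=42))
verifier = FileVerifier()
shutil.copyfileobj(src, verifier)
assert verifier.valid()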
def files2(mean, stddev, seed=None, numfiles=10):
"""
Yields file objects with effectively random contents, where the
size of each file follows the normal distribution with `mean` and
`stddev`.
Rather than continuously generating new files, this pre-computes and
stores `numfiles` files and yields them in a loop.
"""
# pre-compute all the files (and save with TemporaryFiles)
fs = []
for _ in xrange(numfiles):
t = tempfile.SpooledTemporaryFile()
t.write(generate_file_contents(random.normalvariate(mean, stddev)))
t.seek(0)
fs.append(t)
while True:
for f in fs:
yield f
def names(mean, stddev, charset=None, seed=None):
"""
Yields strings that are somewhat plausible as file names, where
the length of each filename follows the normal distribution with
`mean` and `stddev`.
"""
if charset is None:
charset = string.ascii_lowercase
rand = random.Random(seed)
while True:
while True:
length = int(rand.normalvariate(mean, stddev))
if length > 0:
break
name = ''.join(rand.choice(charset) for _ in xrange(length))
yield name

View file

@ -1,219 +0,0 @@
import gevent
import gevent.pool
import gevent.queue
import gevent.monkey; gevent.monkey.patch_all()
import itertools
import optparse
import os
import sys
import time
import traceback
import random
import yaml
import realistic
import common
NANOSECOND = int(1e9)
def writer(bucket, objname, fp, queue):
key = bucket.new_key(objname)
result = dict(
type='w',
bucket=bucket.name,
key=key.name,
)
start = time.time()
try:
key.set_contents_from_file(fp, rewind=True)
except gevent.GreenletExit:
raise
except Exception as e:
# stop timer ASAP, even on errors
end = time.time()
result.update(
error=dict(
msg=str(e),
traceback=traceback.format_exc(),
),
)
# certain kinds of programmer errors make this a busy
# loop; let parent greenlet get some time too
time.sleep(0)
else:
end = time.time()
elapsed = end - start
result.update(
start=start,
duration=int(round(elapsed * NANOSECOND)),
chunks=fp.last_chunks,
)
queue.put(result)
def reader(bucket, objname, queue):
key = bucket.new_key(objname)
fp = realistic.FileVerifier()
result = dict(
type='r',
bucket=bucket.name,
key=key.name,
)
start = time.time()
try:
key.get_contents_to_file(fp)
except gevent.GreenletExit:
raise
except Exception as e:
# stop timer ASAP, even on errors
end = time.time()
result.update(
error=dict(
msg=str(e),
traceback=traceback.format_exc(),
),
)
# certain kinds of programmer errors make this a busy
# loop; let parent greenlet get some time too
time.sleep(0)
else:
end = time.time()
if not fp.valid():
result.update(
error=dict(
msg='md5sum check failed',
),
)
elapsed = end - start
result.update(
start=start,
duration=int(round(elapsed * NANOSECOND)),
chunks=fp.chunks,
)
queue.put(result)
def parse_options():
parser = optparse.OptionParser(
usage='%prog [OPTS] <CONFIG_YAML',
)
parser.add_option("--no-cleanup", dest="cleanup", action="store_false",
help="skip cleaning up all created buckets", default=True)
return parser.parse_args()
def main():
# parse options
(options, args) = parse_options()
if os.isatty(sys.stdin.fileno()):
raise RuntimeError('Need configuration in stdin.')
config = common.read_config(sys.stdin)
conn = common.connect(config.s3)
bucket = None
try:
# setup
real_stdout = sys.stdout
sys.stdout = sys.stderr
# verify all required config items are present
if 'roundtrip' not in config:
raise RuntimeError('roundtrip section not found in config')
for item in ['readers', 'writers', 'duration', 'files', 'bucket']:
if item not in config.roundtrip:
raise RuntimeError("Missing roundtrip config item: {item}".format(item=item))
for item in ['num', 'size', 'stddev']:
if item not in config.roundtrip.files:
raise RuntimeError("Missing roundtrip config item: files.{item}".format(item=item))
seeds = dict(config.roundtrip.get('random_seed', {}))
seeds.setdefault('main', random.randrange(2**32))
rand = random.Random(seeds['main'])
for name in ['names', 'contents', 'writer', 'reader']:
seeds.setdefault(name, rand.randrange(2**32))
print 'Using random seeds: {seeds}'.format(seeds=seeds)
# setup bucket and other objects
bucket_name = common.choose_bucket_prefix(config.roundtrip.bucket, max_len=30)
bucket = conn.create_bucket(bucket_name)
print "Created bucket: {name}".format(name=bucket.name)
objnames = realistic.names(
mean=15,
stddev=4,
seed=seeds['names'],
)
objnames = itertools.islice(objnames, config.roundtrip.files.num)
objnames = list(objnames)
files = realistic.files(
mean=1024 * config.roundtrip.files.size,
stddev=1024 * config.roundtrip.files.stddev,
seed=seeds['contents'],
)
q = gevent.queue.Queue()
logger_g = gevent.spawn(yaml.safe_dump_all, q, stream=real_stdout)
print "Writing {num} objects with {w} workers...".format(
num=config.roundtrip.files.num,
w=config.roundtrip.writers,
)
pool = gevent.pool.Pool(size=config.roundtrip.writers)
start = time.time()
for objname in objnames:
fp = next(files)
pool.spawn(
writer,
bucket=bucket,
objname=objname,
fp=fp,
queue=q,
)
pool.join()
stop = time.time()
elapsed = stop - start
q.put(dict(
type='write_done',
duration=int(round(elapsed * NANOSECOND)),
))
print "Reading {num} objects with {w} workers...".format(
num=config.roundtrip.files.num,
w=config.roundtrip.readers,
)
# avoid accessing them in the same order as the writing
rand.shuffle(objnames)
pool = gevent.pool.Pool(size=config.roundtrip.readers)
start = time.time()
for objname in objnames:
pool.spawn(
reader,
bucket=bucket,
objname=objname,
queue=q,
)
pool.join()
stop = time.time()
elapsed = stop - start
q.put(dict(
type='read_done',
duration=int(round(elapsed * NANOSECOND)),
))
q.put(StopIteration)
logger_g.get()
finally:
# cleanup
if options.cleanup:
if bucket is not None:
common.nuke_bucket(bucket)

View file

@ -1,79 +0,0 @@
from s3tests import realistic
import shutil
import tempfile
# XXX not used for now
def create_files(mean=2000):
return realistic.files2(
mean=1024 * mean,
stddev=1024 * 500,
seed=1256193726,
numfiles=4,
)
class TestFiles(object):
# the size and seed are what we get when generating a bunch of files
# with pseudorandom numbers based on stddev, seed, and mean.
# this fails, demonstrating the (current) problem
#def test_random_file_invalid(self):
# size = 2506764
# seed = 3391518755
# source = realistic.RandomContentFile(size=size, seed=seed)
# t = tempfile.SpooledTemporaryFile()
# shutil.copyfileobj(source, t)
# precomputed = realistic.PrecomputedContentFile(t)
# assert precomputed.valid()
# verifier = realistic.FileVerifier()
# shutil.copyfileobj(precomputed, verifier)
# assert verifier.valid()
# this passes
def test_random_file_valid(self):
size = 2506001
seed = 3391518755
source = realistic.RandomContentFile(size=size, seed=seed)
t = tempfile.SpooledTemporaryFile()
shutil.copyfileobj(source, t)
precomputed = realistic.PrecomputedContentFile(t)
verifier = realistic.FileVerifier()
shutil.copyfileobj(precomputed, verifier)
assert verifier.valid()
# new implementation
class TestFileValidator(object):
def test_new_file_is_valid(self):
size = 2506001
contents = realistic.generate_file_contents(size)
t = tempfile.SpooledTemporaryFile()
t.write(contents)
t.seek(0)
fp = realistic.FileValidator(t)
assert fp.valid()
def test_new_file_is_valid_when_size_is_1(self):
size = 1
contents = realistic.generate_file_contents(size)
t = tempfile.SpooledTemporaryFile()
t.write(contents)
t.seek(0)
fp = realistic.FileValidator(t)
assert fp.valid()
def test_new_file_is_valid_on_several_calls(self):
size = 2506001
contents = realistic.generate_file_contents(size)
t = tempfile.SpooledTemporaryFile()
t.write(contents)
t.seek(0)
fp = realistic.FileValidator(t)
assert fp.valid()
assert fp.valid()

View file

@ -1,142 +0,0 @@
#!/usr/bin/python
import sys
import os
import yaml
import optparse
NANOSECONDS = int(1e9)
# Output stats in a format similar to siege
# see http://www.joedog.org/index/siege-home
OUTPUT_FORMAT = """Stats for type: [{type}]
Transactions: {trans:>11} hits
Availability: {avail:>11.2f} %
Elapsed time: {elapsed:>11.2f} secs
Data transferred: {data:>11.2f} MB
Response time: {resp_time:>11.2f} secs
Transaction rate: {trans_rate:>11.2f} trans/sec
Throughput: {data_rate:>11.2f} MB/sec
Concurrency: {conc:>11.2f}
Successful transactions: {trans_success:>11}
Failed transactions: {trans_fail:>11}
Longest transaction: {trans_long:>11.2f}
Shortest transaction: {trans_short:>11.2f}
"""
def parse_options():
usage = "usage: %prog [options]"
parser = optparse.OptionParser(usage=usage)
parser.add_option(
"-f", "--file", dest="input", metavar="FILE",
help="Name of input YAML file. Default uses sys.stdin")
parser.add_option(
"-v", "--verbose", dest="verbose", action="store_true",
help="Enable verbose output")
(options, args) = parser.parse_args()
if not options.input and os.isatty(sys.stdin.fileno()):
parser.error("option -f required if no data is provided "
"in stdin")
return (options, args)
def main():
(options, args) = parse_options()
total = {}
durations = {}
min_time = {}
max_time = {}
errors = {}
success = {}
calculate_stats(options, total, durations, min_time, max_time, errors,
success)
print_results(total, durations, min_time, max_time, errors, success)
def calculate_stats(options, total, durations, min_time, max_time, errors,
success):
print 'Calculating statistics...'
f = sys.stdin
if options.input:
f = file(options.input, 'r')
for item in yaml.safe_load_all(f):
type_ = item.get('type')
if type_ not in ('r', 'w'):
continue # ignore any invalid items
if 'error' in item:
errors[type_] = errors.get(type_, 0) + 1
continue # skip rest of analysis for this item
else:
success[type_] = success.get(type_, 0) + 1
# parse the item
data_size = item['chunks'][-1][0]
duration = item['duration']
start = item['start']
end = start + duration / float(NANOSECONDS)
if options.verbose:
print "[{type}] POSIX time: {start:>18.2f} - {end:<18.2f} " \
"{data:>11.2f} KB".format(
type=type_,
start=start,
end=end,
data=data_size / 1024.0, # convert to KB
)
# update time boundaries
prev = min_time.setdefault(type_, start)
if start < prev:
min_time[type_] = start
prev = max_time.setdefault(type_, end)
if end > prev:
max_time[type_] = end
# save the duration
if type_ not in durations:
durations[type_] = []
durations[type_].append(duration)
# add to running totals
total[type_] = total.get(type_, 0) + data_size
def print_results(total, durations, min_time, max_time, errors, success):
for type_ in total.keys():
trans_success = success.get(type_, 0)
trans_fail = errors.get(type_, 0)
trans = trans_success + trans_fail
avail = trans_success * 100.0 / trans
elapsed = max_time[type_] - min_time[type_]
data = total[type_] / 1024.0 / 1024.0 # convert to MB
resp_time = sum(durations[type_]) / float(NANOSECONDS) / \
len(durations[type_])
trans_rate = trans / elapsed
data_rate = data / elapsed
conc = trans_rate * resp_time
trans_long = max(durations[type_]) / float(NANOSECONDS)
trans_short = min(durations[type_]) / float(NANOSECONDS)
print OUTPUT_FORMAT.format(
type=type_,
trans_success=trans_success,
trans_fail=trans_fail,
trans=trans,
avail=avail,
elapsed=elapsed,
data=data,
resp_time=resp_time,
trans_rate=trans_rate,
data_rate=data_rate,
conc=conc,
trans_long=trans_long,
trans_short=trans_short,
)
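The Concurrency line reported above is Little's law applied to the aggregates computed here: average in-flight transactions equal transaction rate times mean response time. A worked example with illustrative numbers:
trans_rate = 20.0  # transactions per second
resp_time = 0.5    # mean seconds per transaction
conc = trans_rate * resp_time  # ~10 transactions in flight on average
assert conc == 10.0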
if __name__ == '__main__':
main()

View file

@ -1,5 +1,5 @@
import boto.s3.connection
import bunch
import munch
import itertools
import os
import random
@ -11,8 +11,8 @@ from lxml import etree
from doctest import Example
from lxml.doctestcompare import LXMLOutputChecker
s3 = bunch.Bunch()
config = bunch.Bunch()
s3 = munch.Munch()
config = munch.Munch()
prefix = ''
bucket_counter = itertools.count(1)
@ -51,10 +51,10 @@ def nuke_bucket(bucket):
while deleted_cnt:
deleted_cnt = 0
for key in bucket.list():
print 'Cleaning bucket {bucket} key {key}'.format(
print('Cleaning bucket {bucket} key {key}'.format(
bucket=bucket,
key=key,
)
))
key.set_canned_acl('private')
key.delete()
deleted_cnt += 1
@ -67,26 +67,26 @@ def nuke_bucket(bucket):
and e.body == ''):
e.error_code = 'AccessDenied'
if e.error_code != 'AccessDenied':
print 'GOT UNWANTED ERROR', e.error_code
print('GOT UNWANTED ERROR', e.error_code)
raise
# seems like we're not the owner of the bucket; ignore
pass
def nuke_prefixed_buckets():
for name, conn in s3.items():
print 'Cleaning buckets from connection {name}'.format(name=name)
for name, conn in list(s3.items()):
print('Cleaning buckets from connection {name}'.format(name=name))
for bucket in conn.get_all_buckets():
if bucket.name.startswith(prefix):
print 'Cleaning bucket {bucket}'.format(bucket=bucket)
print('Cleaning bucket {bucket}'.format(bucket=bucket))
nuke_bucket(bucket)
print 'Done with cleanup of test buckets.'
print('Done with cleanup of test buckets.')
def read_config(fp):
config = bunch.Bunch()
config = munch.Munch()
g = yaml.safe_load_all(fp)
for new in g:
config.update(bunch.bunchify(new))
config.update(munch.munchify(new))
return config
def connect(conf):
@ -97,7 +97,7 @@ def connect(conf):
access_key='aws_access_key_id',
secret_key='aws_secret_access_key',
)
kwargs = dict((mapping[k],v) for (k,v) in conf.iteritems() if k in mapping)
kwargs = dict((mapping[k],v) for (k,v) in conf.items() if k in mapping)
#process calling_format argument
calling_formats = dict(
ordinary=boto.s3.connection.OrdinaryCallingFormat(),
@ -105,7 +105,7 @@ def connect(conf):
vhost=boto.s3.connection.VHostCallingFormat(),
)
kwargs['calling_format'] = calling_formats['ordinary']
if conf.has_key('calling_format'):
if 'calling_format' in conf:
raw_calling_format = conf['calling_format']
try:
kwargs['calling_format'] = calling_formats[raw_calling_format]
@ -146,7 +146,7 @@ def setup():
raise RuntimeError("Empty Prefix! Aborting!")
defaults = config.s3.defaults
for section in config.s3.keys():
for section in list(config.s3.keys()):
if section == 'defaults':
continue

View file

@ -3,14 +3,14 @@ from botocore import UNSIGNED
from botocore.client import Config
from botocore.exceptions import ClientError
from botocore.handlers import disable_signing
import ConfigParser
import configparser
import os
import bunch
import munch
import random
import string
import itertools
config = bunch.Bunch
config = munch.Munch
# this will be assigned by setup()
prefix = None
@ -125,17 +125,17 @@ def nuke_prefixed_buckets(prefix, client=None):
for obj in delete_markers:
response = client.delete_object(Bucket=bucket_name,Key=obj[0],VersionId=obj[1])
try:
client.delete_bucket(Bucket=bucket_name)
except ClientError, e:
response = client.delete_bucket(Bucket=bucket_name)
except ClientError as e:
# if DELETE times out, the retry may see NoSuchBucket
if e.response['Error']['Code'] != 'NoSuchBucket':
raise e
if e.response['Error']['Code'] != 'NoSuchBucket':
raise
pass
print('Done with cleanup of buckets in tests.')
def setup():
cfg = ConfigParser.RawConfigParser()
cfg = configparser.RawConfigParser()
try:
path = os.environ['S3TEST_CONF']
except KeyError:
@ -143,8 +143,7 @@ def setup():
'To run tests, point environment '
+ 'variable S3TEST_CONF to a config file.',
)
with file(path) as f:
cfg.readfp(f)
cfg.read(path)
if not cfg.defaults():
raise RuntimeError('Your config file is missing the DEFAULT section!')
@ -175,16 +174,17 @@ def setup():
config.main_email = cfg.get('s3 main',"email")
try:
config.main_kms_keyid = cfg.get('s3 main',"kms_keyid")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
config.main_kms_keyid = 'testkey-1'
try:
config.main_kms_keyid2 = cfg.get('s3 main',"kms_keyid2")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
config.main_kms_keyid2 = 'testkey-2'
try:
config.main_api_name = cfg.get('s3 main',"api_name")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
config.main_api_name = ""
pass
@ -203,7 +203,7 @@ def setup():
# vars from the fixtures section
try:
template = cfg.get('fixtures', "bucket prefix")
except (ConfigParser.NoOptionError):
except (configparser.NoOptionError):
template = 'test-{random}-'
prefix = choose_bucket_prefix(template=template)

View file

@ -289,7 +289,7 @@ def test_object_create_bad_contentlength_mismatch_above():
key_name = 'foo'
headers = {'Content-Length': str(length)}
add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
client.meta.events.register('before-sign.s3.PutObject', add_headers_before_sign)
client.meta.events.register('before-sign.s3.PutObject', add_headers)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body=content)
status, error_code = _get_status_and_error_code(e.response)

File diff suppressed because it is too large

View file

@ -1,6 +1,6 @@
from nose.tools import eq_ as eq
import utils
from . import utils
def test_generate():
FIVE_MB = 5 * 1024 * 1024

View file

@ -28,11 +28,11 @@ def generate_random(size, part_size=5*1024*1024):
chunk = 1024
allowed = string.ascii_letters
for x in range(0, size, part_size):
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in xrange(chunk)])
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
s = ''
left = size - x
this_part_size = min(left, part_size)
for y in range(this_part_size / chunk):
for y in range(this_part_size // chunk):
s = s + strpart
s = s + strpart[:(this_part_size % chunk)]
yield s

View file

@ -1,376 +0,0 @@
from boto.s3.connection import S3Connection
from boto.exception import BotoServerError
from boto.s3.key import Key
from httplib import BadStatusLine
from optparse import OptionParser
from .. import common
import traceback
import itertools
import random
import string
import struct
import yaml
import sys
import re
class DecisionGraphError(Exception):
""" Raised when a node in a graph tries to set a header or
key that was previously set by another node
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class RecursionError(Exception):
"""Runaway recursion in string formatting"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return '{0.__doc__}: {0.msg!r}'.format(self)
def assemble_decision(decision_graph, prng):
""" Take in a graph describing the possible decision space and a random
number generator and traverse the graph to build a decision
"""
return descend_graph(decision_graph, 'start', prng)
def descend_graph(decision_graph, node_name, prng):
""" Given a graph and a particular node in that graph, set the values in
the node's "set" list, pick a choice from the "choice" list, and
recurse. Finally, return dictionary of values
"""
node = decision_graph[node_name]
try:
choice = make_choice(node['choices'], prng)
if choice == '':
decision = {}
else:
decision = descend_graph(decision_graph, choice, prng)
except IndexError:
decision = {}
for key, choices in node['set'].iteritems():
if key in decision:
raise DecisionGraphError("Node %s tried to set '%s', but that key was already set by a lower node!" %(node_name, key))
decision[key] = make_choice(choices, prng)
if 'headers' in node:
decision.setdefault('headers', [])
for desc in node['headers']:
try:
(repetition_range, header, value) = desc
except ValueError:
(header, value) = desc
repetition_range = '1'
try:
size_min, size_max = repetition_range.split('-', 1)
except ValueError:
size_min = size_max = repetition_range
size_min = int(size_min)
size_max = int(size_max)
num_reps = prng.randint(size_min, size_max)
if header in [h for h, v in decision['headers']]:
raise DecisionGraphError("Node %s tried to add header '%s', but that header already exists!" %(node_name, header))
for _ in xrange(num_reps):
decision['headers'].append([header, value])
return decision
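A header spec may optionally lead with a repetition range, parsed above; a brief sketch (node and header names are hypothetical) showing how '1-2' emits the header once or twice:
prng = random.Random(1)
node = {'set': {}, 'choices': [], 'headers': [['1-2', 'x-amz-fuzz', '{random 5 digits}']]}
decision = descend_graph({'n': node}, 'n', prng)
assert 1 <= len(decision['headers']) <= 2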
def make_choice(choices, prng):
""" Given a list of (possibly weighted) options or just a single option!,
choose one of the options taking weights into account and return the
choice
"""
if isinstance(choices, str):
return choices
weighted_choices = []
for option in choices:
if option is None:
weighted_choices.append('')
continue
try:
(weight, value) = option.split(None, 1)
weight = int(weight)
except ValueError:
weight = 1
value = option
if value == 'null' or value == 'None':
value = ''
for _ in xrange(weight):
weighted_choices.append(value)
return prng.choice(weighted_choices)
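A small sketch of the weighting rule above: a leading integer repeats an option in the candidate pool, so ['foo', '2 bar', '1 baz'] expands to ['foo', 'bar', 'bar', 'baz'] and 'bar' should win about half the time (the same distribution test_weighted_choices checks):
prng = random.Random(1)
picks = [make_choice(['foo', '2 bar', '1 baz'], prng) for _ in xrange(1000)]
assert abs(picks.count('bar') / 1000.0 - 0.5) < 0.1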
def expand_headers(decision, prng):
expanded_headers = {}
for header in decision['headers']:
h = expand(decision, header[0], prng)
v = expand(decision, header[1], prng)
expanded_headers[h] = v
return expanded_headers
def expand(decision, value, prng):
c = itertools.count()
fmt = RepeatExpandingFormatter(prng)
new = fmt.vformat(value, [], decision)
return new
class RepeatExpandingFormatter(string.Formatter):
charsets = {
'printable_no_whitespace': string.printable.translate(None, string.whitespace),
'printable': string.printable,
'punctuation': string.punctuation,
'whitespace': string.whitespace,
'digits': string.digits
}
def __init__(self, prng, _recursion=0):
super(RepeatExpandingFormatter, self).__init__()
# this class assumes it is always instantiated once per
# formatting; use that to detect runaway recursion
self.prng = prng
self._recursion = _recursion
def get_value(self, key, args, kwargs):
fields = key.split(None, 1)
fn = getattr(self, 'special_{name}'.format(name=fields[0]), None)
if fn is not None:
if len(fields) == 1:
fields.append('')
return fn(fields[1])
val = super(RepeatExpandingFormatter, self).get_value(key, args, kwargs)
if self._recursion > 5:
raise RecursionError(key)
fmt = self.__class__(self.prng, _recursion=self._recursion+1)
n = fmt.vformat(val, args, kwargs)
return n
def special_random(self, args):
arg_list = args.split()
try:
size_min, size_max = arg_list[0].split('-', 1)
except ValueError:
size_min = size_max = arg_list[0]
except IndexError:
size_min = '0'
size_max = '1000'
size_min = int(size_min)
size_max = int(size_max)
length = self.prng.randint(size_min, size_max)
try:
charset_arg = arg_list[1]
except IndexError:
charset_arg = 'printable'
if charset_arg == 'binary' or charset_arg == 'binary_no_whitespace':
num_bytes = length + 8
tmplist = [self.prng.getrandbits(64) for _ in xrange(num_bytes / 8)]
tmpstring = struct.pack((num_bytes / 8) * 'Q', *tmplist)
if charset_arg == 'binary_no_whitespace':
tmpstring = ''.join(c for c in tmpstring if c not in string.whitespace)
return tmpstring[0:length]
else:
charset = self.charsets[charset_arg]
return ''.join([self.prng.choice(charset) for _ in xrange(length)]) # Won't scale nicely
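Taken together, a brief sketch of the template grammar this formatter implements: '{name}' keys are looked up in the decision dict and expanded recursively, while '{random <len>[-<max>] [<charset>]}' generates fresh data on each occurrence (bucket and path values here are illustrative):
prng = random.Random(1)
decision = {'bucket_readable': 'test-bucket', 'urlpath': '/{bucket_readable}'}
assert expand(decision, '{urlpath}', prng) == '/test-bucket'
assert len(expand(decision, '{random 10 digits}', prng)) == 10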
def parse_options():
parser = OptionParser()
parser.add_option('-O', '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE')
parser.add_option('--seed', dest='seed', type='int', help='initial seed for the random number generator')
parser.add_option('--seed-file', dest='seedfile', help='read seeds for specific requests from FILE', metavar='FILE')
parser.add_option('-n', dest='num_requests', type='int', help='issue NUM requests before stopping', metavar='NUM')
parser.add_option('-v', '--verbose', dest='verbose', action="store_true", help='turn on verbose output')
parser.add_option('-d', '--debug', dest='debug', action="store_true", help='turn on debugging (very verbose) output')
parser.add_option('--decision-graph', dest='graph_filename', help='file in which to find the request decision graph')
parser.add_option('--no-cleanup', dest='cleanup', action="store_false", help='turn off teardown so you can peruse the state of buckets after testing')
parser.set_defaults(num_requests=5)
parser.set_defaults(cleanup=True)
parser.set_defaults(graph_filename='request_decision_graph.yml')
return parser.parse_args()
def randomlist(seed=None):
""" Returns an infinite generator of random numbers
"""
rng = random.Random(seed)
while True:
yield rng.randint(0,100000) #100,000 seeds is enough, right?
def populate_buckets(conn, alt):
""" Creates buckets and keys for fuzz testing and sets appropriate
permissions. Returns a dictionary of the bucket and key names.
"""
breadable = common.get_new_bucket(alt)
bwritable = common.get_new_bucket(alt)
bnonreadable = common.get_new_bucket(alt)
oreadable = Key(breadable)
owritable = Key(bwritable)
ononreadable = Key(breadable)
oreadable.set_contents_from_string('oreadable body')
owritable.set_contents_from_string('owritable body')
ononreadable.set_contents_from_string('ononreadable body')
breadable.set_acl('public-read')
bwritable.set_acl('public-read-write')
bnonreadable.set_acl('private')
oreadable.set_acl('public-read')
owritable.set_acl('public-read-write')
ononreadable.set_acl('private')
return dict(
bucket_readable=breadable.name,
bucket_writable=bwritable.name,
bucket_not_readable=bnonreadable.name,
bucket_not_writable=breadable.name,
object_readable=oreadable.key,
object_writable=owritable.key,
object_not_readable=ononreadable.key,
object_not_writable=oreadable.key,
)
def _main():
""" The main script
"""
(options, args) = parse_options()
random.seed(options.seed if options.seed else None)
s3_connection = common.s3.main
alt_connection = common.s3.alt
if options.outfile:
OUT = open(options.outfile, 'w')
else:
OUT = sys.stderr
VERBOSE = DEBUG = open('/dev/null', 'w')
if options.verbose:
VERBOSE = OUT
if options.debug:
DEBUG = OUT
VERBOSE = OUT
request_seeds = None
if options.seedfile:
FH = open(options.seedfile, 'r')
request_seeds = [int(line) for line in FH if line != '\n']
print>>OUT, 'Seedfile: %s' %options.seedfile
print>>OUT, 'Number of requests: %d' %len(request_seeds)
else:
if options.seed:
print>>OUT, 'Initial Seed: %d' %options.seed
print>>OUT, 'Number of requests: %d' %options.num_requests
random_list = randomlist(options.seed)
request_seeds = itertools.islice(random_list, options.num_requests)
print>>OUT, 'Decision Graph: %s' %options.graph_filename
graph_file = open(options.graph_filename, 'r')
decision_graph = yaml.safe_load(graph_file)
constants = populate_buckets(s3_connection, alt_connection)
print>>VERBOSE, "Test Buckets/Objects:"
for key, value in constants.iteritems():
print>>VERBOSE, "\t%s: %s" %(key, value)
print>>OUT, "Begin Fuzzing..."
print>>VERBOSE, '='*80
for request_seed in request_seeds:
print>>VERBOSE, 'Seed is: %r' %request_seed
prng = random.Random(request_seed)
decision = assemble_decision(decision_graph, prng)
decision.update(constants)
method = expand(decision, decision['method'], prng)
path = expand(decision, decision['urlpath'], prng)
try:
body = expand(decision, decision['body'], prng)
except KeyError:
body = ''
try:
headers = expand_headers(decision, prng)
except KeyError:
headers = {}
print>>VERBOSE, "%r %r" %(method[:100], path[:100])
for h, v in headers.iteritems():
print>>VERBOSE, "%r: %r" %(h[:50], v[:50])
print>>VERBOSE, "%r\n" % body[:100]
print>>DEBUG, 'FULL REQUEST'
print>>DEBUG, 'Method: %r' %method
print>>DEBUG, 'Path: %r' %path
print>>DEBUG, 'Headers:'
for h, v in headers.iteritems():
print>>DEBUG, "\t%r: %r" %(h, v)
print>>DEBUG, 'Body: %r\n' %body
failed = False # Let's be optimistic, shall we?
try:
response = s3_connection.make_request(method, path, data=body, headers=headers, override_num_retries=1)
body = response.read()
except BotoServerError, e:
response = e
body = e.body
failed = True
except BadStatusLine, e:
print>>OUT, 'FAILED: failed to parse response (BadStatusLine); probably a NUL byte in your request?'
print>>VERBOSE, '='*80
continue
if failed:
print>>OUT, 'FAILED:'
OLD_VERBOSE = VERBOSE
OLD_DEBUG = DEBUG
VERBOSE = DEBUG = OUT
print>>VERBOSE, 'Seed was: %r' %request_seed
print>>VERBOSE, 'Response status code: %d %s' %(response.status, response.reason)
print>>DEBUG, 'Body:\n%s' %body
print>>VERBOSE, '='*80
if failed:
VERBOSE = OLD_VERBOSE
DEBUG = OLD_DEBUG
print>>OUT, '...done fuzzing'
if options.cleanup:
common.teardown()
def main():
common.setup()
try:
_main()
except Exception as e:
traceback.print_exc()
common.teardown()

View file

@ -1,403 +0,0 @@
"""
Unit-test suite for the S3 fuzzer
The fuzzer is a grammar-based random S3 operation generator
that produces random operation sequences in an effort to
crash the server. This unit-test suite does not test
S3 servers, but rather the fuzzer infrastructure.
It works by running the fuzzer off of a simple grammar,
and checking the produced requests to ensure that they
include the expected sorts of operations in the expected
proportions.
"""
import sys
import itertools
import nose
import random
import string
import yaml
from ..headers import *
from nose.tools import eq_ as eq
from nose.tools import assert_true
from nose.plugins.attrib import attr
from ...functional.utils import assert_raises
_decision_graph = {}
def check_access_denied(fn, *args, **kwargs):
e = assert_raises(boto.exception.S3ResponseError, fn, *args, **kwargs)
eq(e.status, 403)
eq(e.reason, 'Forbidden')
eq(e.error_code, 'AccessDenied')
def build_graph():
graph = {}
graph['start'] = {
'set': {},
'choices': ['node2']
}
graph['leaf'] = {
'set': {
'key1': 'value1',
'key2': 'value2'
},
'headers': [
['1-2', 'random-header-{random 5-10 printable}', '{random 20-30 punctuation}']
],
'choices': []
}
graph['node1'] = {
'set': {
'key3': 'value3',
'header_val': [
'3 h1',
'2 h2',
'h3'
]
},
'headers': [
['1-1', 'my-header', '{header_val}'],
],
'choices': ['leaf']
}
graph['node2'] = {
'set': {
'randkey': 'value-{random 10-15 printable}',
'path': '/{bucket_readable}',
'indirect_key1': '{key1}'
},
'choices': ['leaf']
}
graph['bad_node'] = {
'set': {
'key1': 'value1'
},
'choices': ['leaf']
}
graph['nonexistant_child_node'] = {
'set': {},
'choices': ['leafy_greens']
}
graph['weighted_node'] = {
'set': {
'k1': [
'foo',
'2 bar',
'1 baz'
]
},
'choices': [
'foo',
'2 bar',
'1 baz'
]
}
graph['null_choice_node'] = {
'set': {},
'choices': [None]
}
graph['repeated_headers_node'] = {
'set': {},
'headers': [
['1-2', 'random-header-{random 5-10 printable}', '{random 20-30 punctuation}']
],
'choices': ['leaf']
}
graph['weighted_null_choice_node'] = {
'set': {},
'choices': ['3 null']
}
return graph
#def test_foo():
#graph_file = open('request_decision_graph.yml', 'r')
#graph = yaml.safe_load(graph_file)
#eq(graph['bucket_put_simple']['set']['grantee'], 0)
def test_load_graph():
graph_file = open('request_decision_graph.yml', 'r')
graph = yaml.safe_load(graph_file)
graph['start']
def test_descend_leaf_node():
graph = build_graph()
prng = random.Random(1)
decision = descend_graph(graph, 'leaf', prng)
eq(decision['key1'], 'value1')
eq(decision['key2'], 'value2')
e = assert_raises(KeyError, lambda x: decision[x], 'key3')
def test_descend_node():
graph = build_graph()
prng = random.Random(1)
decision = descend_graph(graph, 'node1', prng)
eq(decision['key1'], 'value1')
eq(decision['key2'], 'value2')
eq(decision['key3'], 'value3')
def test_descend_bad_node():
graph = build_graph()
prng = random.Random(1)
assert_raises(DecisionGraphError, descend_graph, graph, 'bad_node', prng)
def test_descend_nonexistant_child():
graph = build_graph()
prng = random.Random(1)
assert_raises(KeyError, descend_graph, graph, 'nonexistant_child_node', prng)
def test_expand_random_printable():
prng = random.Random(1)
got = expand({}, '{random 10-15 printable}', prng)
eq(got, '[/pNI$;92@')
def test_expand_random_binary():
prng = random.Random(1)
got = expand({}, '{random 10-15 binary}', prng)
eq(got, '\xdfj\xf1\xd80>a\xcd\xc4\xbb')
def test_expand_random_printable_no_whitespace():
prng = random.Random(1)
for _ in xrange(1000):
got = expand({}, '{random 500 printable_no_whitespace}', prng)
assert_true(reduce(lambda x, y: x and y, [x not in string.whitespace and x in string.printable for x in got]))
def test_expand_random_binary_no_whitespace():
prng = random.Random(1)
for _ in xrange(1000):
got = expand({}, '{random 500 binary_no_whitespace}', prng)
assert_true(reduce(lambda x, y: x and y, [x not in string.whitespace for x in got]))
def test_expand_random_no_args():
prng = random.Random(1)
for _ in xrange(1000):
got = expand({}, '{random}', prng)
assert_true(0 <= len(got) <= 1000)
assert_true(reduce(lambda x, y: x and y, [x in string.printable for x in got]))
def test_expand_random_no_charset():
prng = random.Random(1)
for _ in xrange(1000):
got = expand({}, '{random 10-30}', prng)
assert_true(10 <= len(got) <= 30)
assert_true(reduce(lambda x, y: x and y, [x in string.printable for x in got]))
def test_expand_random_exact_length():
prng = random.Random(1)
for _ in xrange(1000):
got = expand({}, '{random 10 digits}', prng)
assert_true(len(got) == 10)
assert_true(reduce(lambda x, y: x and y, [x in string.digits for x in got]))
def test_expand_random_bad_charset():
prng = random.Random(1)
assert_raises(KeyError, expand, {}, '{random 10-30 foo}', prng)
def test_expand_random_missing_length():
prng = random.Random(1)
assert_raises(ValueError, expand, {}, '{random printable}', prng)
def test_assemble_decision():
graph = build_graph()
prng = random.Random(1)
decision = assemble_decision(graph, prng)
eq(decision['key1'], 'value1')
eq(decision['key2'], 'value2')
eq(decision['randkey'], 'value-{random 10-15 printable}')
eq(decision['indirect_key1'], '{key1}')
eq(decision['path'], '/{bucket_readable}')
assert_raises(KeyError, lambda x: decision[x], 'key3')
def test_expand_escape():
prng = random.Random(1)
decision = dict(
foo='{{bar}}',
)
got = expand(decision, '{foo}', prng)
eq(got, '{bar}')
def test_expand_indirect():
prng = random.Random(1)
decision = dict(
foo='{bar}',
bar='quux',
)
got = expand(decision, '{foo}', prng)
eq(got, 'quux')
def test_expand_indirect_double():
prng = random.Random(1)
decision = dict(
foo='{bar}',
bar='{quux}',
quux='thud',
)
got = expand(decision, '{foo}', prng)
eq(got, 'thud')
def test_expand_recursive():
prng = random.Random(1)
decision = dict(
foo='{foo}',
)
e = assert_raises(RecursionError, expand, decision, '{foo}', prng)
eq(str(e), "Runaway recursion in string formatting: 'foo'")
def test_expand_recursive_mutual():
prng = random.Random(1)
decision = dict(
foo='{bar}',
bar='{foo}',
)
e = assert_raises(RecursionError, expand, decision, '{foo}', prng)
eq(str(e), "Runaway recursion in string formatting: 'foo'")
def test_expand_recursive_not_too_eager():
prng = random.Random(1)
decision = dict(
foo='bar',
)
got = expand(decision, 100*'{foo}', prng)
eq(got, 100*'bar')
def test_make_choice_unweighted_with_space():
prng = random.Random(1)
choice = make_choice(['foo bar'], prng)
eq(choice, 'foo bar')
def test_weighted_choices():
graph = build_graph()
prng = random.Random(1)
choices_made = {}
for _ in xrange(1000):
choice = make_choice(graph['weighted_node']['choices'], prng)
if choices_made.has_key(choice):
choices_made[choice] += 1
else:
choices_made[choice] = 1
foo_percentage = choices_made['foo'] / 1000.0
bar_percentage = choices_made['bar'] / 1000.0
baz_percentage = choices_made['baz'] / 1000.0
nose.tools.assert_almost_equal(foo_percentage, 0.25, 1)
nose.tools.assert_almost_equal(bar_percentage, 0.50, 1)
nose.tools.assert_almost_equal(baz_percentage, 0.25, 1)
def test_null_choices():
graph = build_graph()
prng = random.Random(1)
choice = make_choice(graph['null_choice_node']['choices'], prng)
eq(choice, '')
def test_weighted_null_choices():
graph = build_graph()
prng = random.Random(1)
choice = make_choice(graph['weighted_null_choice_node']['choices'], prng)
eq(choice, '')
def test_null_child():
graph = build_graph()
prng = random.Random(1)
decision = descend_graph(graph, 'null_choice_node', prng)
eq(decision, {})
def test_weighted_set():
graph = build_graph()
prng = random.Random(1)
choices_made = {}
for _ in xrange(1000):
choice = make_choice(graph['weighted_node']['set']['k1'], prng)
if choices_made.has_key(choice):
choices_made[choice] += 1
else:
choices_made[choice] = 1
foo_percentage = choices_made['foo'] / 1000.0
bar_percentage = choices_made['bar'] / 1000.0
baz_percentage = choices_made['baz'] / 1000.0
nose.tools.assert_almost_equal(foo_percentage, 0.25, 1)
nose.tools.assert_almost_equal(bar_percentage, 0.50, 1)
nose.tools.assert_almost_equal(baz_percentage, 0.25, 1)
def test_header_presence():
graph = build_graph()
prng = random.Random(1)
decision = descend_graph(graph, 'node1', prng)
c1 = itertools.count()
c2 = itertools.count()
for header, value in decision['headers']:
if header == 'my-header':
eq(value, '{header_val}')
assert_true(next(c1) < 1)
elif header == 'random-header-{random 5-10 printable}':
eq(value, '{random 20-30 punctuation}')
assert_true(next(c2) < 2)
else:
raise KeyError('unexpected header found: %s' % header)
assert_true(next(c1))
assert_true(next(c2))
def test_duplicate_header():
graph = build_graph()
prng = random.Random(1)
assert_raises(DecisionGraphError, descend_graph, graph, 'repeated_headers_node', prng)
def test_expand_headers():
graph = build_graph()
prng = random.Random(1)
decision = descend_graph(graph, 'node1', prng)
expanded_headers = expand_headers(decision, prng)
for header, value in expanded_headers.iteritems():
if header == 'my-header':
assert_true(value in ['h1', 'h2', 'h3'])
elif header.startswith('random-header-'):
assert_true(20 <= len(value) <= 30)
assert_true(string.strip(value, RepeatExpandingFormatter.charsets['punctuation']) == '')
else:
raise DecisionGraphError('unexpected header found: "%s"' % header)
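
Taken together, the expand tests above pin down a small template
language: '{key}' references resolve against the decision dict, doubled
braces escape a literal brace, and runaway self-reference is cut off
with an error. A minimal sketch of that behavior, assuming a fixed
recursion cap (expand_sketch and max_depth are hypothetical names, not
the fuzzer's API):

    import string

    def expand_sketch(decision, template, depth=0, max_depth=10):
        # resolve {key} references against the decision dict, recursively;
        # doubled braces ({{ and }}) come back as literal braces
        out = []
        for literal, field, _spec, _conv in string.Formatter().parse(template):
            out.append(literal)
            if field is not None:
                if depth >= max_depth:
                    raise RecursionError(
                        'Runaway recursion in string formatting: %r' % field)
                out.append(expand_sketch(decision, decision[field],
                                         depth + 1, max_depth))
        return ''.join(out)

For the dicts used above this reproduces the expected results, e.g.
expand_sketch(dict(foo='{bar}', bar='quux'), '{foo}') returns 'quux'.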


@ -1,117 +0,0 @@
from boto.s3.key import Key
from optparse import OptionParser
from . import realistic
import traceback
import random
from . import common
import sys
def parse_opts():
parser = OptionParser()
parser.add_option('-O', '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE')
parser.add_option('-b', '--bucket', dest='bucket', help='push objects to BUCKET', metavar='BUCKET')
parser.add_option('--seed', dest='seed', help='optional seed for the random number generator')
return parser.parse_args()
def get_random_files(quantity, mean, stddev, seed):
"""Create file-like objects with pseudorandom contents.
IN:
number of files to create
mean file size in bytes
standard deviation from mean file size
seed for PRNG
OUT:
list of file handles
"""
file_generator = realistic.files(mean, stddev, seed)
return [file_generator.next() for _ in xrange(quantity)]
def upload_objects(bucket, files, seed):
"""Upload a bunch of files to an S3 bucket
IN:
boto S3 bucket object
list of file handles to upload
seed for PRNG
OUT:
list of boto S3 key objects
"""
keys = []
name_generator = realistic.names(15, 4, seed=seed)
for fp in files:
print >> sys.stderr, 'sending file with size %dB' % fp.size
key = Key(bucket)
key.key = name_generator.next()
key.set_contents_from_file(fp, rewind=True)
key.set_acl('public-read')
keys.append(key)
return keys
def _main():
'''To run the static content load test, make sure you've bootstrapped your
test environment and set up your config.yaml file, then run the following:
S3TEST_CONF=config.yaml virtualenv/bin/s3tests-generate-objects.py --seed 1234
This creates a bucket with your S3 credentials (from config.yaml) and
fills it with garbage objects as described in the
file_generation.groups section of config.yaml. It writes a list of
URLS to those objects to the file listed in file_generation.url_file
in config.yaml.
Once you have objects in your bucket, run the siege benchmarking program:
siege --rc ./siege.conf -r 5
This tells siege to read the ./siege.conf config file which tells it to
use the urls in ./urls.txt and log to ./siege.log. It hits each url in
urls.txt 5 times (-r flag).
Results are printed to the terminal and written in CSV format to
./siege.log
'''
(options, args) = parse_opts()
#SETUP
random.seed(options.seed if options.seed else None)
conn = common.s3.main
if options.outfile:
OUTFILE = open(options.outfile, 'w')
elif common.config.file_generation.url_file:
OUTFILE = open(common.config.file_generation.url_file, 'w')
else:
OUTFILE = sys.stdout
if options.bucket:
bucket = conn.create_bucket(options.bucket)
else:
bucket = common.get_new_bucket()
bucket.set_acl('public-read')
keys = []
print >> OUTFILE, 'bucket: %s' % bucket.name
print >> sys.stderr, 'setup complete, generating files'
for profile in common.config.file_generation.groups:
seed = random.random()
files = get_random_files(profile[0], profile[1], profile[2], seed)
keys += upload_objects(bucket, files, seed)
print >> sys.stderr, 'finished sending files. generating urls'
for key in keys:
print >> OUTFILE, key.generate_url(0, query_auth=False)
print >> sys.stderr, 'done'
def main():
common.setup()
try:
_main()
except Exception as e:
traceback.print_exc()
common.teardown()
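
For reference, a hypothetical config.yaml fragment matching the shape
_main() reads: each entry in file_generation.groups is consumed above as
a (quantity, mean, stddev) triple of object count and byte sizes, and
url_file is where the generated URLs land. The concrete values are made
up:

    file_generation:
      url_file: ./urls.txt
      groups:
        - [10, 2048, 512]        # 10 objects, mean 2048 B, stddev 512 B
        - [2, 1048576, 102400]   # 2 larger objects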


@ -1,265 +0,0 @@
import gevent
import gevent.pool
import gevent.queue
import gevent.monkey; gevent.monkey.patch_all()
import itertools
import optparse
import os
import sys
import time
import traceback
import random
import yaml
import realistic
import common
NANOSECOND = int(1e9)
def reader(bucket, worker_id, file_names, queue, rand):
while True:
objname = rand.choice(file_names)
key = bucket.new_key(objname)
fp = realistic.FileValidator()
result = dict(
type='r',
bucket=bucket.name,
key=key.name,
worker=worker_id,
)
start = time.time()
try:
key.get_contents_to_file(fp._file)
except gevent.GreenletExit:
raise
except Exception as e:
# stop timer ASAP, even on errors
end = time.time()
result.update(
error=dict(
msg=str(e),
traceback=traceback.format_exc(),
),
)
# certain kinds of programmer errors make this a busy
# loop; let parent greenlet get some time too
time.sleep(0)
else:
end = time.time()
if not fp.valid():
m='md5sum check failed start={s} ({se}) end={e} size={sz} obj={o}'.format(s=time.ctime(start), se=start, e=end, sz=fp._file.tell(), o=objname)
result.update(
error=dict(
msg=m,
traceback=traceback.format_exc(),
),
)
print "ERROR:", m
else:
elapsed = end - start
result.update(
start=start,
duration=int(round(elapsed * NANOSECOND)),
)
queue.put(result)
def writer(bucket, worker_id, file_names, files, queue, rand):
while True:
fp = next(files)
fp.seek(0)
objname = rand.choice(file_names)
key = bucket.new_key(objname)
result = dict(
type='w',
bucket=bucket.name,
key=key.name,
worker=worker_id,
)
start = time.time()
try:
key.set_contents_from_file(fp)
except gevent.GreenletExit:
raise
except Exception as e:
# stop timer ASAP, even on errors
end = time.time()
result.update(
error=dict(
msg=str(e),
traceback=traceback.format_exc(),
),
)
# certain kinds of programmer errors make this a busy
# loop; let parent greenlet get some time too
time.sleep(0)
else:
end = time.time()
elapsed = end - start
result.update(
start=start,
duration=int(round(elapsed * NANOSECOND)),
)
queue.put(result)
def parse_options():
parser = optparse.OptionParser(
usage='%prog [OPTS] <CONFIG_YAML',
)
parser.add_option("--no-cleanup", dest="cleanup", action="store_false",
help="skip cleaning up all created buckets", default=True)
return parser.parse_args()
def write_file(bucket, file_name, fp):
"""
Write a single file to the bucket using the file_name.
This is used during the warmup to initialize the files.
"""
key = bucket.new_key(file_name)
key.set_contents_from_file(fp)
def main():
# parse options
(options, args) = parse_options()
if os.isatty(sys.stdin.fileno()):
raise RuntimeError('Need configuration in stdin.')
config = common.read_config(sys.stdin)
conn = common.connect(config.s3)
bucket = None
try:
# setup
real_stdout = sys.stdout
sys.stdout = sys.stderr
# verify all required config items are present
if 'readwrite' not in config:
raise RuntimeError('readwrite section not found in config')
for item in ['readers', 'writers', 'duration', 'files', 'bucket']:
if item not in config.readwrite:
raise RuntimeError("Missing readwrite config item: {item}".format(item=item))
for item in ['num', 'size', 'stddev']:
if item not in config.readwrite.files:
raise RuntimeError("Missing readwrite config item: files.{item}".format(item=item))
seeds = dict(config.readwrite.get('random_seed', {}))
seeds.setdefault('main', random.randrange(2**32))
rand = random.Random(seeds['main'])
for name in ['names', 'contents', 'writer', 'reader']:
seeds.setdefault(name, rand.randrange(2**32))
print 'Using random seeds: {seeds}'.format(seeds=seeds)
# setup bucket and other objects
bucket_name = common.choose_bucket_prefix(config.readwrite.bucket, max_len=30)
bucket = conn.create_bucket(bucket_name)
print "Created bucket: {name}".format(name=bucket.name)
# check flag for deterministic file name creation
if not config.readwrite.get('deterministic_file_names'):
print 'Creating random file names'
file_names = realistic.names(
mean=15,
stddev=4,
seed=seeds['names'],
)
file_names = itertools.islice(file_names, config.readwrite.files.num)
file_names = list(file_names)
else:
print 'Creating file names that are deterministic'
file_names = []
for x in xrange(config.readwrite.files.num):
file_names.append('test_file_{num}'.format(num=x))
files = realistic.files2(
mean=1024 * config.readwrite.files.size,
stddev=1024 * config.readwrite.files.stddev,
seed=seeds['contents'],
)
q = gevent.queue.Queue()
# warmup - get initial set of files uploaded if there are any writers specified
if config.readwrite.writers > 0:
print "Uploading initial set of {num} files".format(num=config.readwrite.files.num)
warmup_pool = gevent.pool.Pool(size=100)
for file_name in file_names:
fp = next(files)
warmup_pool.spawn(
write_file,
bucket=bucket,
file_name=file_name,
fp=fp,
)
warmup_pool.join()
# main work
print "Starting main worker loop."
print "Using file size: {size} +- {stddev}".format(size=config.readwrite.files.size, stddev=config.readwrite.files.stddev)
print "Spawning {w} writers and {r} readers...".format(w=config.readwrite.writers, r=config.readwrite.readers)
group = gevent.pool.Group()
rand_writer = random.Random(seeds['writer'])
# Don't create random files if deterministic_file_names is set and true
if not config.readwrite.get('deterministic_file_names'):
for x in xrange(config.readwrite.writers):
this_rand = random.Random(rand_writer.randrange(2**32))
group.spawn(
writer,
bucket=bucket,
worker_id=x,
file_names=file_names,
files=files,
queue=q,
rand=this_rand,
)
# Since the loop generating readers already uses config.readwrite.readers
# and the file names are already generated (randomly or deterministically),
# this loop needs no additional qualifiers. If zero readers are specified,
# it will behave as expected (no data is read)
rand_reader = random.Random(seeds['reader'])
for x in xrange(config.readwrite.readers):
this_rand = random.Random(rand_reader.randrange(2**32))
group.spawn(
reader,
bucket=bucket,
worker_id=x,
file_names=file_names,
queue=q,
rand=this_rand,
)
def stop():
group.kill(block=True)
q.put(StopIteration)
gevent.spawn_later(config.readwrite.duration, stop)
# wait for all the tests to finish
group.join()
print 'post-join, queue size {size}'.format(size=q.qsize())
if q.qsize() > 0:
for temp_dict in q:
if 'error' in temp_dict:
raise Exception('exception:\n\t{msg}\n\t{trace}'.format(
msg=temp_dict['error']['msg'],
trace=temp_dict['error']['traceback'])
)
else:
yaml.safe_dump(temp_dict, stream=real_stdout)
finally:
# cleanup
if options.cleanup:
if bucket is not None:
common.nuke_bucket(bucket)
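
One detail worth calling out from main() above: every role (names,
contents, writer, reader) gets its own PRNG seed derived
deterministically from a single main seed, so a whole run can be
replayed by pinning readwrite.random_seed.main in the config. A
standalone sketch of that derivation (the literal 1234 is made up):

    import random

    # derive stable per-role seeds from one main seed, as done above
    seeds = {'main': 1234}
    rand = random.Random(seeds['main'])
    for name in ['names', 'contents', 'writer', 'reader']:
        seeds.setdefault(name, rand.randrange(2**32))
    print(seeds)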


@ -1,281 +0,0 @@
import hashlib
import random
import string
import struct
import time
import math
import tempfile
import shutil
import os
NANOSECOND = int(1e9)
def generate_file_contents(size):
"""
A helper function to generate binary contents for a given size, and
append the sha1 hash of those contents to the end of the blob.
It uses sha1's hexdigest, which is 40 chars long, so any consumer
should strip the last 40 chars from the blob to retrieve the original
hash and binary so that validity can be proved.
"""
size = int(size)
contents = os.urandom(size)
content_hash = hashlib.sha1(contents).hexdigest()
return contents + content_hash
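# A hedged round-trip illustration of the layout documented above (the
# payload followed by its 40-char sha1 hexdigest); Python 2 str
# semantics, matching this module:
#
#   blob = generate_file_contents(1024)
#   body, tail = blob[:-40], blob[-40:]
#   assert hashlib.sha1(body).hexdigest() == tail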
class FileValidator(object):
def __init__(self, f=None):
self._file = tempfile.SpooledTemporaryFile()
self.original_hash = None
self.new_hash = None
if f:
f.seek(0)
shutil.copyfileobj(f, self._file)
def valid(self):
"""
Returns True if this file looks valid. The file is valid if it ends
with the sha1 hexdigest of the rest of its contents.
"""
self._file.seek(0)
contents = self._file.read()
self.original_hash, binary = contents[-40:], contents[:-40]
self.new_hash = hashlib.sha1(binary).hexdigest()
if not self.new_hash == self.original_hash:
print 'original hash: ', self.original_hash
print 'new hash: ', self.new_hash
print 'size: ', self._file.tell()
return False
return True
# XXX not sure if we need all of these
def seek(self, offset, whence=os.SEEK_SET):
self._file.seek(offset, whence)
def tell(self):
return self._file.tell()
def read(self, size=-1):
return self._file.read(size)
def write(self, data):
self._file.write(data)
self._file.seek(0)
class RandomContentFile(object):
def __init__(self, size, seed):
self.size = size
self.seed = seed
self.random = random.Random(self.seed)
# Boto likes to seek once more after it's done reading, so we need to save the last chunks/seek value.
self.last_chunks = self.chunks = None
self.last_seek = None
# Let seek initialize the rest of it, rather than dup code
self.seek(0)
def _mark_chunk(self):
self.chunks.append([self.offset, int(round((time.time() - self.last_seek) * NANOSECOND))])
def seek(self, offset, whence=os.SEEK_SET):
if whence == os.SEEK_SET:
self.offset = offset
elif whence == os.SEEK_END:
self.offset = self.size + offset
elif whence == os.SEEK_CUR:
self.offset += offset
assert self.offset == 0
self.random.seed(self.seed)
self.buffer = ''
self.hash = hashlib.md5()
self.digest_size = self.hash.digest_size
self.digest = None
# Save the last seek time as our start time, and the last chunks
self.last_chunks = self.chunks
# Before emptying.
self.last_seek = time.time()
self.chunks = []
def tell(self):
return self.offset
def _generate(self):
# generate and return a chunk of pseudorandom data
size = min(self.size, 1*1024*1024) # generate at most 1 MB at a time
chunks = int(math.ceil(size/8.0)) # number of 8-byte chunks to create
l = [self.random.getrandbits(64) for _ in xrange(chunks)]
s = struct.pack(chunks*'Q', *l)
return s
def read(self, size=-1):
if size < 0:
size = self.size - self.offset
r = []
random_count = min(size, self.size - self.offset - self.digest_size)
if random_count > 0:
while len(self.buffer) < random_count:
self.buffer += self._generate()
self.offset += random_count
size -= random_count
data, self.buffer = self.buffer[:random_count], self.buffer[random_count:]
if self.hash is not None:
self.hash.update(data)
r.append(data)
digest_count = min(size, self.size - self.offset)
if digest_count > 0:
if self.digest is None:
self.digest = self.hash.digest()
self.hash = None
self.offset += digest_count
size -= digest_count
data = self.digest[:digest_count]
r.append(data)
self._mark_chunk()
return ''.join(r)
class PrecomputedContentFile(object):
def __init__(self, f):
self._file = tempfile.SpooledTemporaryFile()
f.seek(0)
shutil.copyfileobj(f, self._file)
self.last_chunks = self.chunks = None
self.seek(0)
def seek(self, offset, whence=os.SEEK_SET):
self._file.seek(offset, whence)
if self.tell() == 0:
# only reset the chunks when seeking to the beginning
self.last_chunks = self.chunks
self.last_seek = time.time()
self.chunks = []
def tell(self):
return self._file.tell()
def read(self, size=-1):
data = self._file.read(size)
self._mark_chunk()
return data
def _mark_chunk(self):
elapsed = time.time() - self.last_seek
elapsed_nsec = int(round(elapsed * NANOSECOND))
self.chunks.append([self.tell(), elapsed_nsec])
class FileVerifier(object):
def __init__(self):
self.size = 0
self.hash = hashlib.md5()
self.buf = ''
self.created_at = time.time()
self.chunks = []
def _mark_chunk(self):
self.chunks.append([self.size, int(round((time.time() - self.created_at) * NANOSECOND))])
def write(self, data):
self.size += len(data)
self.buf += data
digsz = -1*self.hash.digest_size
new_data, self.buf = self.buf[0:digsz], self.buf[digsz:]
self.hash.update(new_data)
self._mark_chunk()
def valid(self):
"""
Returns True if this file looks valid. The file is valid if the end
of the file has the md5 digest for the first part of the file.
"""
if self.size < self.hash.digest_size:
return self.hash.digest().startswith(self.buf)
return self.buf == self.hash.digest()
def files(mean, stddev, seed=None):
"""
Yields file-like objects with effectively random contents, where
the size of each file follows the normal distribution with `mean`
and `stddev`.
Beware, the file-likeness is very shallow. You can use boto's
`key.set_contents_from_file` to send these to S3, but they are not
full file objects.
The last 128 bits are the MD5 digest of the previous bytes, for
verifying round-trip data integrity. For example, if you
re-download the object and place the contents into a file called
``foo``, the following should print two identical lines:
python -c 'import sys, hashlib; data=sys.stdin.read(); print hashlib.md5(data[:-16]).hexdigest(); print "".join("%02x" % ord(c) for c in data[-16:])' <foo
Except for objects shorter than 16 bytes, where the second line
will be proportionally shorter.
"""
rand = random.Random(seed)
while True:
while True:
size = int(rand.normalvariate(mean, stddev))
if size >= 0:
break
yield RandomContentFile(size=size, seed=rand.getrandbits(32))
def files2(mean, stddev, seed=None, numfiles=10):
"""
Yields file objects with effectively random contents, where the
size of each file follows the normal distribution with `mean` and
`stddev`.
Rather than continuously generating new files, this pre-computes and
stores `numfiles` files and yields them in a loop.
"""
# pre-compute all the files (and save with TemporaryFiles)
fs = []
for _ in xrange(numfiles):
t = tempfile.SpooledTemporaryFile()
t.write(generate_file_contents(random.normalvariate(mean, stddev)))
t.seek(0)
fs.append(t)
while True:
for f in fs:
yield f
def names(mean, stddev, charset=None, seed=None):
"""
Yields strings that are somewhat plausible as file names, where
the length of each filename follows the normal distribution with
`mean` and `stddev`.
"""
if charset is None:
charset = string.ascii_lowercase
rand = random.Random(seed)
while True:
while True:
length = int(rand.normalvariate(mean, stddev))
if length > 0:
break
name = ''.join(rand.choice(charset) for _ in xrange(length))
yield name
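
To make the contract between the generators and the verifiers above
concrete, a short round-trip sketch under this module's Python 2 str
semantics; nothing here is new API, only classes defined above:

    src = RandomContentFile(size=4096, seed=42)
    data = src.read()    # pseudorandom payload + 16-byte md5 trailer
    v = FileVerifier()
    v.write(data)
    assert v.valid()     # trailer matches the md5 of the payload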


@ -1,219 +0,0 @@
import gevent
import gevent.pool
import gevent.queue
import gevent.monkey; gevent.monkey.patch_all()
import itertools
import optparse
import os
import sys
import time
import traceback
import random
import yaml
import realistic
import common
NANOSECOND = int(1e9)
def writer(bucket, objname, fp, queue):
key = bucket.new_key(objname)
result = dict(
type='w',
bucket=bucket.name,
key=key.name,
)
start = time.time()
try:
key.set_contents_from_file(fp, rewind=True)
except gevent.GreenletExit:
raise
except Exception as e:
# stop timer ASAP, even on errors
end = time.time()
result.update(
error=dict(
msg=str(e),
traceback=traceback.format_exc(),
),
)
# certain kinds of programmer errors make this a busy
# loop; let parent greenlet get some time too
time.sleep(0)
else:
end = time.time()
elapsed = end - start
result.update(
start=start,
duration=int(round(elapsed * NANOSECOND)),
chunks=fp.last_chunks,
)
queue.put(result)
def reader(bucket, objname, queue):
key = bucket.new_key(objname)
fp = realistic.FileVerifier()
result = dict(
type='r',
bucket=bucket.name,
key=key.name,
)
start = time.time()
try:
key.get_contents_to_file(fp)
except gevent.GreenletExit:
raise
except Exception as e:
# stop timer ASAP, even on errors
end = time.time()
result.update(
error=dict(
msg=str(e),
traceback=traceback.format_exc(),
),
)
# certain kinds of programmer errors make this a busy
# loop; let parent greenlet get some time too
time.sleep(0)
else:
end = time.time()
if not fp.valid():
result.update(
error=dict(
msg='md5sum check failed',
),
)
elapsed = end - start
result.update(
start=start,
duration=int(round(elapsed * NANOSECOND)),
chunks=fp.chunks,
)
queue.put(result)
def parse_options():
parser = optparse.OptionParser(
usage='%prog [OPTS] <CONFIG_YAML',
)
parser.add_option("--no-cleanup", dest="cleanup", action="store_false",
help="skip cleaning up all created buckets", default=True)
return parser.parse_args()
def main():
# parse options
(options, args) = parse_options()
if os.isatty(sys.stdin.fileno()):
raise RuntimeError('Need configuration in stdin.')
config = common.read_config(sys.stdin)
conn = common.connect(config.s3)
bucket = None
try:
# setup
real_stdout = sys.stdout
sys.stdout = sys.stderr
# verify all required config items are present
if 'roundtrip' not in config:
raise RuntimeError('roundtrip section not found in config')
for item in ['readers', 'writers', 'duration', 'files', 'bucket']:
if item not in config.roundtrip:
raise RuntimeError("Missing roundtrip config item: {item}".format(item=item))
for item in ['num', 'size', 'stddev']:
if item not in config.roundtrip.files:
raise RuntimeError("Missing roundtrip config item: files.{item}".format(item=item))
seeds = dict(config.roundtrip.get('random_seed', {}))
seeds.setdefault('main', random.randrange(2**32))
rand = random.Random(seeds['main'])
for name in ['names', 'contents', 'writer', 'reader']:
seeds.setdefault(name, rand.randrange(2**32))
print 'Using random seeds: {seeds}'.format(seeds=seeds)
# setup bucket and other objects
bucket_name = common.choose_bucket_prefix(config.roundtrip.bucket, max_len=30)
bucket = conn.create_bucket(bucket_name)
print "Created bucket: {name}".format(name=bucket.name)
objnames = realistic.names(
mean=15,
stddev=4,
seed=seeds['names'],
)
objnames = itertools.islice(objnames, config.roundtrip.files.num)
objnames = list(objnames)
files = realistic.files(
mean=1024 * config.roundtrip.files.size,
stddev=1024 * config.roundtrip.files.stddev,
seed=seeds['contents'],
)
q = gevent.queue.Queue()
logger_g = gevent.spawn(yaml.safe_dump_all, q, stream=real_stdout)
print "Writing {num} objects with {w} workers...".format(
num=config.roundtrip.files.num,
w=config.roundtrip.writers,
)
pool = gevent.pool.Pool(size=config.roundtrip.writers)
start = time.time()
for objname in objnames:
fp = next(files)
pool.spawn(
writer,
bucket=bucket,
objname=objname,
fp=fp,
queue=q,
)
pool.join()
stop = time.time()
elapsed = stop - start
q.put(dict(
type='write_done',
duration=int(round(elapsed * NANOSECOND)),
))
print "Reading {num} objects with {w} workers...".format(
num=config.roundtrip.files.num,
w=config.roundtrip.readers,
)
# avoid accessing them in the same order as the writing
rand.shuffle(objnames)
pool = gevent.pool.Pool(size=config.roundtrip.readers)
start = time.time()
for objname in objnames:
pool.spawn(
reader,
bucket=bucket,
objname=objname,
queue=q,
)
pool.join()
stop = time.time()
elapsed = stop - start
q.put(dict(
type='read_done',
duration=int(round(elapsed * NANOSECOND)),
))
q.put(StopIteration)
logger_g.get()
finally:
# cleanup
if options.cleanup:
if bucket is not None:
common.nuke_bucket(bucket)
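
Since results go to stdout as a YAML document stream (via
yaml.safe_dump_all above), phase timings can be pulled back out of a
saved run; a minimal sketch, reading the stream from stdin:

    import sys
    import yaml

    # print elapsed seconds for each phase marker in the stream
    for rec in yaml.safe_load_all(sys.stdin):
        if rec and rec.get('type') in ('write_done', 'read_done'):
            print('%s: %.2fs' % (rec['type'], rec['duration'] / 1e9))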


@ -1,79 +0,0 @@
from s3tests import realistic
import shutil
import tempfile
# XXX not used for now
def create_files(mean=2000):
return realistic.files2(
mean=1024 * mean,
stddev=1024 * 500,
seed=1256193726,
numfiles=4,
)
class TestFiles(object):
# the size and seed are what we get when generating a bunch of files
# with pseudorandom numbers based on stddev, seed, and mean.
# this fails, demonstrating the (current) problem
#def test_random_file_invalid(self):
# size = 2506764
# seed = 3391518755
# source = realistic.RandomContentFile(size=size, seed=seed)
# t = tempfile.SpooledTemporaryFile()
# shutil.copyfileobj(source, t)
# precomputed = realistic.PrecomputedContentFile(t)
# assert precomputed.valid()
# verifier = realistic.FileVerifier()
# shutil.copyfileobj(precomputed, verifier)
# assert verifier.valid()
# this passes
def test_random_file_valid(self):
size = 2506001
seed = 3391518755
source = realistic.RandomContentFile(size=size, seed=seed)
t = tempfile.SpooledTemporaryFile()
shutil.copyfileobj(source, t)
precomputed = realistic.PrecomputedContentFile(t)
verifier = realistic.FileVerifier()
shutil.copyfileobj(precomputed, verifier)
assert verifier.valid()
# new implementation
class TestFileValidator(object):
def test_new_file_is_valid(self):
size = 2506001
contents = realistic.generate_file_contents(size)
t = tempfile.SpooledTemporaryFile()
t.write(contents)
t.seek(0)
fp = realistic.FileValidator(t)
assert fp.valid()
def test_new_file_is_valid_when_size_is_1(self):
size = 1
contents = realistic.generate_file_contents(size)
t = tempfile.SpooledTemporaryFile()
t.write(contents)
t.seek(0)
fp = realistic.FileValidator(t)
assert fp.valid()
def test_new_file_is_valid_on_several_calls(self):
size = 2506001
contents = realistic.generate_file_contents(size)
t = tempfile.SpooledTemporaryFile()
t.write(contents)
t.seek(0)
fp = realistic.FileValidator(t)
assert fp.valid()
assert fp.valid()


@ -16,19 +16,8 @@ setup(
'boto >=2.0b4',
'boto3 >=1.0.0',
'PyYAML',
'bunch >=1.0.0',
'munch >=2.0.0',
'gevent >=1.0',
'isodate >=0.4.4',
],
entry_points={
'console_scripts': [
's3tests-generate-objects = s3tests.generate_objects:main',
's3tests-test-readwrite = s3tests.readwrite:main',
's3tests-test-roundtrip = s3tests.roundtrip:main',
's3tests-fuzz-headers = s3tests.fuzz.headers:main',
's3tests-analysis-rwstats = s3tests.analysis.rwstats:main',
],
},
)


@ -1,382 +0,0 @@
# Updated by Siege 2.69, May-24-2010
# Copyright 2000-2007 by Jeffrey Fulmer, et al.
#
# Siege configuration file -- edit as necessary
# For more information about configuring and running
# this program, visit: http://www.joedog.org/
#
# Variable declarations. You can set variables here
# for use in the directives below. Example:
# PROXY = proxy.joedog.org
# Reference variables inside ${} or $(), example:
# proxy-host = ${PROXY}
# You can also reference ENVIRONMENT variables without
# actually declaring them, example:
# logfile = $(HOME)/var/siege.log
#
# Signify verbose mode, true turns on verbose output
# ex: verbose = true|false
#
verbose = true
#
# CSV Verbose format: with this option, you can choose
# to format verbose output in traditional siege format
# or comma separated format. The latter will allow you
# to redirect output to a file for import into a
# spreadsheet, i.e., siege > file.csv
# ex: csv = true|false (default false)
#
csv = true
#
# Full URL verbose format: By default siege displays
# the URL path and not the full URL. With this option,
# you can instruct siege to show the complete URL.
# ex: fullurl = true|false (default false)
#
# fullurl = true
#
# Display id: in verbose mode, display the siege user
# id associated with the HTTP transaction information
# ex: display-id = true|false
#
# display-id =
#
# Show logfile location. By default, siege displays the
# logfile location at the end of every run when logging
# You can turn this message off with this directive.
# ex: show-logfile = false
#
show-logfile = true
#
# Default logging status, true turns logging on.
# ex: logging = true|false
#
logging = true
#
# Logfile, the default siege logfile is $PREFIX/var/siege.log
# This directive allows you to choose an alternative log file.
# Environment variables may be used as shown in the examples:
# ex: logfile = /home/jeff/var/log/siege.log
# logfile = ${HOME}/var/log/siege.log
# logfile = ${LOGFILE}
#
logfile = ./siege.log
#
# HTTP protocol. Options HTTP/1.1 and HTTP/1.0.
# Some webservers have a broken implementation of the
# 1.1 protocol which skews throughput evaluations.
# If you notice some siege clients hanging for
# extended periods of time, change this to HTTP/1.0
# ex: protocol = HTTP/1.1
# protocol = HTTP/1.0
#
protocol = HTTP/1.1
#
# Chunked encoding is required by HTTP/1.1 protocol
# but siege allows you to turn it off as desired.
#
# ex: chunked = true
#
chunked = true
#
# Cache revalidation.
# Siege supports cache revalidation for both ETag and
# Last-modified headers. If a copy is still fresh, the
# server responds with 304.
# HTTP/1.1 200 0.00 secs: 2326 bytes ==> /apache_pb.gif
# HTTP/1.1 304 0.00 secs: 0 bytes ==> /apache_pb.gif
# HTTP/1.1 304 0.00 secs: 0 bytes ==> /apache_pb.gif
#
# ex: cache = true
#
cache = false
#
# Connection directive. Options "close" and "keep-alive"
# Starting with release 2.57b3, siege implements persistent
# connections in accordance to RFC 2068 using both chunked
# encoding and content-length directives to determine the
# page size. To run siege with persistent connections set
# the connection directive to keep-alive. (Default close)
# CAUTION: use the keep-alive directive with care.
# DOUBLE CAUTION: this directive does not work well on HPUX
# TRIPLE CAUTION: don't use keep-alives until further notice
# ex: connection = close
# connection = keep-alive
#
connection = close
#
# Default number of simulated concurrent users
# ex: concurrent = 25
#
concurrent = 15
#
# Default duration of the siege. The right hand argument has
# a modifier which specifies the time units, H=hours, M=minutes,
# and S=seconds. If a modifier is not specified, then minutes
# are assumed.
# ex: time = 50M
#
# time =
#
# Repetitions. The length of siege may be specified in client
# reps rather than a time duration. Instead of specifying a time
# span, you can tell each siege instance to hit the server X number
# of times. So if you choose 'reps = 20' and you've selected 10
# concurrent users, then siege will hit the server 200 times.
# ex: reps = 20
#
# reps =
#
# Default URLs file, set at configuration time, the default
# file is PREFIX/etc/urls.txt. So if you configured siege
# with --prefix=/usr/local then the urls.txt file is installed
# int /usr/local/etc/urls.txt. Use the "file = " directive to
# configure an alternative URLs file. You may use environment
# variables as shown in the examples below:
# ex: file = /export/home/jdfulmer/MYURLS.txt
# file = $HOME/etc/urls.txt
# file = $URLSFILE
#
file = ./urls.txt
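#
# For reference, urls.txt is simply one URL per line; a hypothetical
# example (these URLs are made up):
# http://localhost:8000/my-bucket/object-one
# http://localhost:8000/my-bucket/object-two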
#
# Default URL, this is a single URL that you want to test. This
# is usually set at the command line with the -u option. When
# used, this option overrides the urls.txt (-f FILE/--file=FILE)
# option. You will HAVE to comment this out in order to use
# the urls.txt file option.
# ex: url = https://shemp.whoohoo.com/docs/index.jsp
#
# url =
#
# Default delay value, see the siege(1) man page.
# This value is used for load testing, it is not used
# for benchmarking.
# ex: delay = 3
#
delay = 1
#
# Connection timeout value. Set the value in seconds for
# socket connection timeouts. The default value is 30 seconds.
# ex: timeout = 30
#
# timeout =
#
# Session expiration: This directive allows you to delete all
# cookies after you pass through the URLs. This means siege will
# grab a new session with each run through its URLs. The default
# value is false.
# ex: expire-session = true
#
# expire-session =
#
# Failures: This is the number of total connection failures allowed
# before siege aborts. Connection failures (timeouts, socket failures,
# etc.) are combined with 400 and 500 level errors in the final stats,
# but those errors do not count against the abort total. If you set
# this total to 10, then siege will abort after ten socket timeouts,
# but it will NOT abort after ten 404s. This is designed to prevent
# a run-away mess on an unattended siege. The default value is 1024
# ex: failures = 50
#
# failures =
#
# Internet simulation. If true, siege clients will hit
# the URLs in the urls.txt file randomly, thereby simulating
# internet usage. If false, siege will run through the
# urls.txt file in order from first to last and back again.
# ex: internet = true
#
internet = false
#
# Default benchmarking value, If true, there is NO delay
# between server requests, siege runs as fast as the web
# server and the network will let it. Set this to false
# for load testing.
# ex: benchmark = true
#
benchmark = false
#
# Set the siege User-Agent to identify yourself at the
# host, the default is: JoeDog/1.00 [en] (X11; I; Siege #.##)
# But that reeks of corporate techno speak. Feel free
# to make it more interesting :-) Since Limey is recovering
# from minor surgery as I write this, I'll dedicate the
# example to him...
# ex: user-agent = Limey The Bulldog
#
# user-agent =
#
# Accept-encoding. This option allows you to specify
# acceptable encodings returned by the server. Use this
# directive to turn on compression. By default we accept
# gzip compression.
#
# ex: accept-encoding = *
# accept-encoding = gzip
# accept-encoding = compress;q=0.5;gzip;q=1
accept-encoding = gzip
#
# TURN OFF THAT ANNOYING SPINNER!
# Siege spawns a thread and runs a spinner to entertain you
# as it collects and computes its stats. If you don't like
# this feature, you may turn it off here.
# ex: spinner = false
#
spinner = true
#
# WWW-Authenticate login. When siege hits a webpage
# that requires basic authentication, it will search its
# logins for authentication which matches the specific realm
# requested by the server. If it finds a match, it will send
# that login information. If it fails to match the realm, it
# will send the default login information. (Default is "all").
# You may configure siege with several logins as long as no
# two realms match. The format for logins is:
# username:password[:realm] where "realm" is optional.
# If you do not supply a realm, then it will default to "all"
# ex: login = jdfulmer:topsecret:Admin
# login = jeff:supersecret
#
# login =
#
# WWW-Authenticate username and password. When siege
# hits a webpage that requires authentication, it will
# send this user name and password to the server. Note
# this is NOT form based authentication. You will have
# to construct URLs for that.
# ex: username = jdfulmer
# password = whoohoo
#
# username =
# password =
#
# ssl-cert
# This optional feature allows you to specify a path to a client
# certificate. It is not necessary to specify a certificate in
# order to use https. If you don't know why you would want one,
# then you probably don't need this feature. Use openssl to
# generate a certificate and key with the following command:
# $ openssl req -nodes -new -days 365 -newkey rsa:1024 \
# -keyout key.pem -out cert.pem
# Specify a path to cert.pem as follows:
# ex: ssl-cert = /home/jeff/.certs/cert.pem
#
# ssl-cert =
#
# ssl-key
# Use this option to specify the key you generated with the command
# above. ex: ssl-key = /home/jeff/.certs/key.pem
# You may actually skip this option and combine both your cert and
# your key in a single file:
# $ cat key.pem > client.pem
# $ cat cert.pem >> client.pem
# Now set the path for ssl-cert:
# ex: ssl-cert = /home/jeff/.certs/client.pem
# (in this scenario, you comment out ssl-key)
#
# ssl-key =
#
# ssl-timeout
# This option sets a connection timeout for the ssl library
# ex: ssl-timeout = 30
#
# ssl-timeout =
#
# ssl-ciphers
# You can use this feature to select a specific ssl cipher
# for HTTPs. To view the ones available with your library run
# the following command: openssl ciphers
# ex: ssl-ciphers = EXP-RC4-MD5
#
# ssl-ciphers =
#
# Login URL. This is the first URL to be hit by every siege
# client. This feature was designed to allow you to login to
# a server and establish a session. It will only be hit once
# so if you need to hit this URL more than once, make sure it
# also appears in your urls.txt file.
#
# ex: login-url = http://eos.haha.com/login.jsp POST name=jeff&pass=foo
#
# login-url =
#
# Proxy protocol. This option allows you to select a proxy
# server for stress testing. The proxy will request the URL(s)
# specified by -u"my.url.org" OR from the urls.txt file.
#
# ex: proxy-host = proxy.whoohoo.org
# proxy-port = 8080
#
# proxy-host =
# proxy-port =
#
# Proxy-Authenticate. When siege hits a proxy server which
# requires username and password authentication, it will send this
# username and password to the server. The format is username,
# password and optional realm each separated by a colon. You
# may enter more than one proxy-login as long as each one has
# a different realm. If you do not enter a realm, then siege
# will send that login information to all proxy challenges. If
# you have more than one proxy-login, then siege will attempt
# to match the login to the realm.
# ex: proxy-login: jeff:secret:corporate
# proxy-login: jeff:whoohoo
#
# proxy-login =
#
# Redirection support. This option allows you to control
# whether a Location: hint will be followed. Most users
# will want to follow redirection information, but sometimes
# it's desired to just get the Location information.
#
# ex: follow-location = false
#
# follow-location =
# Zero-length data. siege can be configured to disregard
# results in which zero bytes are read after the headers.
# Alternatively, such results can be counted in the final
# tally of outcomes.
#
# ex: zero-data-ok = false
#
# zero-data-ok =
#
# end of siegerc