Port functional tests from python 2 to python 3

Add the fails_on_rgw attribute to tests that do not
pass yet: some tests from the master branch still
fail on the rgw, and others are waiting on rgw
tracker issues to be resolved.

Signed-off-by: Ali Maredia <amaredia@redhat.com>
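A minimal sketch of how such a marker is typically applied, using the nose attrib plugin these tests already import; the test name and body are hypothetical, only the fails_on_rgw attribute comes from this commit:

    from nose.plugins.attrib import attr
    from nose.tools import eq_ as eq

    @attr('fails_on_rgw')   # filter out on RGW runs, e.g. nosetests -a '!fails_on_rgw'
    def test_example_not_yet_passing_on_rgw():
        # hypothetical placeholder; real tests live in the functional suites below
        eq(1 + 1, 2)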
Adam C. Emerson 2019-03-22 13:58:30 -04:00 committed by Ali Maredia
parent 92f056532b
commit be9935ba1a
30 changed files with 597 additions and 561 deletions

View file

@ -4,56 +4,52 @@ set -e
virtualenv="virtualenv"
declare -a packages
if [ -f /etc/debian_version ]; then
packages=(debianutils python-pip python-virtualenv python-dev libevent-dev libffi-dev libxml2-dev libxslt-dev zlib1g-dev)
packages=(debianutils python3-pip python3-virtualenv python3-dev libevent-dev libffi-dev libxml2-dev libxslt-dev zlib1g-dev)
for package in ${packages[@]}; do
if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then
# add a space after old values
missing="${missing:+$missing }$package"
fi
done
if [ -n "$missing" ]; then
echo "$0: missing required DEB packages. Installing via sudo." 1>&2
sudo apt-get -y install $missing
fi
else
packages=(which libevent-devel libffi-devel libxml2-devel libxslt-devel zlib-devel)
if [ -f /etc/fedora-release ]; then
packages+=(python2-pip python2-virtualenv python2-devel)
elif [ -f /etc/redhat-release ]; then
unset ${GREP_OPTIONS}
eval $(cat /etc/os-release | grep VERSION_ID)
if [ ${VERSION_ID:0:1} -lt 8 ]; then
packages+=(python-virtualenv python-devel)
else
packages+=(python2-virtualenv python2-devel)
virtualenv="virtualenv-2"
fi
fi
elif [ -f /etc/redhat-release ]; then
packages=(which python3-virtualenv python36-devel libevent-devel libffi-devel libxml2-devel libxslt-devel zlib-devel)
for package in ${packages[@]}; do
# When the package is python36-devel we change it to python3-devel on Fedora
if [[ ${package} == "python36-devel" && -f /etc/fedora-release ]]; then
package=python36
fi
if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then
missing="${missing:+$missing }$package"
fi
done
if [ -n "$missing" ]; then
echo "$0: missing required RPM packages. Installing via sudo." 1>&2
echo "$0: Missing required RPM packages: ${missing}." 1>&2
sudo yum -y install $missing
fi
else
echo "s3-tests can only be run on Red Hat, Centos, Fedora, Ubunutu, or Debian platforms"
exit 1
fi
${virtualenv} --python=$(which python2) --no-site-packages --distribute virtualenv
# s3-tests only works on python 3.6 not newer versions of python3
${virtualenv} --python=$(which python3.6) --no-site-packages --distribute virtualenv
# avoid pip bugs
./virtualenv/bin/pip install --upgrade pip
./virtualenv/bin/pip3 install --upgrade pip
# slightly old version of setuptools; newer fails w/ requests 0.14.0
./virtualenv/bin/pip install setuptools==32.3.1
./virtualenv/bin/pip3 install setuptools==32.3.1
./virtualenv/bin/pip install -r requirements.txt
./virtualenv/bin/pip3 install -r requirements.txt
# forbid setuptools from using the network because it'll try to use
# easy_install, and we really wanted pip; next line will fail if pip
# requirements.txt does not match setup.py requirements -- sucky but
# good enough for now
./virtualenv/bin/python setup.py develop
./virtualenv/bin/python3 setup.py develop

View file

@ -2,12 +2,11 @@ PyYAML
nose >=1.0.0
boto >=2.6.0
boto3 >=1.0.0
bunch >=1.0.0
munch >=2.0.0
# 0.14 switches to libev, that means bootstrap needs to change too
gevent >=1.0
isodate >=0.4.4
requests >=0.14.0
pytz >=2011k
ordereddict
httplib2
lxml
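munch is a maintained, Python 3 compatible fork of bunch with the same attribute-style dictionary access, which is why the swap throughout this commit is largely mechanical. A small sketch of the behaviour the test helpers rely on (the keys here are illustrative):

    from munch import Munch, munchify

    config = Munch()
    config.host = 's3.example.com'        # attribute access writes an ordinary dict key
    assert config['host'] == 's3.example.com'

    # munchify recursively converts nested plain dicts, much like bunch.bunchify did
    nested = munchify({'s3': {'defaults': {'port': 8000}}})
    assert nested.s3.defaults.port == 8000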

View file

@ -57,7 +57,7 @@ def main():
def calculate_stats(options, total, durations, min_time, max_time, errors,
success):
print 'Calculating statistics...'
print('Calculating statistics...')
f = sys.stdin
if options.input:
@ -81,13 +81,13 @@ def calculate_stats(options, total, durations, min_time, max_time, errors,
end = start + duration / float(NANOSECONDS)
if options.verbose:
print "[{type}] POSIX time: {start:>18.2f} - {end:<18.2f} " \
print("[{type}] POSIX time: {start:>18.2f} - {end:<18.2f} " \
"{data:>11.2f} KB".format(
type=type_,
start=start,
end=end,
data=data_size / 1024.0, # convert to KB
)
))
# update time boundaries
prev = min_time.setdefault(type_, start)
@ -106,7 +106,7 @@ def calculate_stats(options, total, durations, min_time, max_time, errors,
total[type_] = total.get(type_, 0) + data_size
def print_results(total, durations, min_time, max_time, errors, success):
for type_ in total.keys():
for type_ in list(total.keys()):
trans_success = success.get(type_, 0)
trans_fail = errors.get(type_, 0)
trans = trans_success + trans_fail
@ -121,7 +121,7 @@ def print_results(total, durations, min_time, max_time, errors, success):
trans_long = max(durations[type_]) / float(NANOSECONDS)
trans_short = min(durations[type_]) / float(NANOSECONDS)
print OUTPUT_FORMAT.format(
print(OUTPUT_FORMAT.format(
type=type_,
trans_success=trans_success,
trans_fail=trans_fail,
@ -135,7 +135,7 @@ def print_results(total, durations, min_time, max_time, errors, success):
conc=conc,
trans_long=trans_long,
trans_short=trans_short,
)
))
if __name__ == '__main__':
main()
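The repeated keys() -> list(keys()) change is a plain Python 3 idiom: dict.keys() now returns a live view rather than a list, so an explicit copy is the conservative rewrite whenever the old list semantics are wanted, and it is required if the loop mutates the dict. A short sketch:

    stats = {'read': 3, 'write': 0}

    # Python 3: deleting entries while iterating the live view raises
    # RuntimeError("dictionary changed size during iteration")
    for key in list(stats.keys()):        # a snapshot of the keys is safe
        if stats[key] == 0:
            del stats[key]

    assert stats == {'read': 3}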

View file

@ -1,5 +1,5 @@
import boto.s3.connection
import bunch
import munch
import itertools
import os
import random
@ -11,8 +11,8 @@ from lxml import etree
from doctest import Example
from lxml.doctestcompare import LXMLOutputChecker
s3 = bunch.Bunch()
config = bunch.Bunch()
s3 = munch.Munch()
config = munch.Munch()
prefix = ''
bucket_counter = itertools.count(1)
@ -51,10 +51,10 @@ def nuke_bucket(bucket):
while deleted_cnt:
deleted_cnt = 0
for key in bucket.list():
print 'Cleaning bucket {bucket} key {key}'.format(
print('Cleaning bucket {bucket} key {key}'.format(
bucket=bucket,
key=key,
)
))
key.set_canned_acl('private')
key.delete()
deleted_cnt += 1
@ -67,26 +67,26 @@ def nuke_bucket(bucket):
and e.body == ''):
e.error_code = 'AccessDenied'
if e.error_code != 'AccessDenied':
print 'GOT UNWANTED ERROR', e.error_code
print('GOT UNWANTED ERROR', e.error_code)
raise
# seems like we're not the owner of the bucket; ignore
pass
def nuke_prefixed_buckets():
for name, conn in s3.items():
print 'Cleaning buckets from connection {name}'.format(name=name)
for name, conn in list(s3.items()):
print('Cleaning buckets from connection {name}'.format(name=name))
for bucket in conn.get_all_buckets():
if bucket.name.startswith(prefix):
print 'Cleaning bucket {bucket}'.format(bucket=bucket)
print('Cleaning bucket {bucket}'.format(bucket=bucket))
nuke_bucket(bucket)
print 'Done with cleanup of test buckets.'
print('Done with cleanup of test buckets.')
def read_config(fp):
config = bunch.Bunch()
config = munch.Munch()
g = yaml.safe_load_all(fp)
for new in g:
config.update(bunch.bunchify(new))
config.update(munch.Munchify(new))
return config
def connect(conf):
@ -97,7 +97,7 @@ def connect(conf):
access_key='aws_access_key_id',
secret_key='aws_secret_access_key',
)
kwargs = dict((mapping[k],v) for (k,v) in conf.iteritems() if k in mapping)
kwargs = dict((mapping[k],v) for (k,v) in conf.items() if k in mapping)
#process calling_format argument
calling_formats = dict(
ordinary=boto.s3.connection.OrdinaryCallingFormat(),
@ -105,7 +105,7 @@ def connect(conf):
vhost=boto.s3.connection.VHostCallingFormat(),
)
kwargs['calling_format'] = calling_formats['ordinary']
if conf.has_key('calling_format'):
if 'calling_format' in conf:
raw_calling_format = conf['calling_format']
try:
kwargs['calling_format'] = calling_formats[raw_calling_format]
@ -146,7 +146,7 @@ def setup():
raise RuntimeError("Empty Prefix! Aborting!")
defaults = config.s3.defaults
for section in config.s3.keys():
for section in list(config.s3.keys()):
if section == 'defaults':
continue
@ -258,9 +258,10 @@ def with_setup_kwargs(setup, teardown=None):
# yield _test_gen
def trim_xml(xml_str):
p = etree.XMLParser(remove_blank_text=True)
p = etree.XMLParser(encoding="utf-8", remove_blank_text=True)
xml_str = bytes(xml_str, "utf-8")
elem = etree.XML(xml_str, parser=p)
return etree.tostring(elem)
return etree.tostring(elem, encoding="unicode")
def normalize_xml(xml, pretty_print=True):
if xml is None:
@ -282,7 +283,7 @@ def normalize_xml(xml, pretty_print=True):
for parent in root.xpath('//*[./*]'): # Search for parent elements
parent[:] = sorted(parent,key=lambda x: x.tag)
xmlstr = etree.tostring(root, encoding="utf-8", xml_declaration=True, pretty_print=pretty_print)
xmlstr = etree.tostring(root, encoding="unicode", pretty_print=pretty_print)
# there are two different DTD URIs
xmlstr = re.sub(r'xmlns="[^"]+"', 'xmlns="s3"', xmlstr)
xmlstr = re.sub(r'xmlns=\'[^\']+\'', 'xmlns="s3"', xmlstr)
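The trim_xml/normalize_xml changes follow from lxml's stricter str/bytes split on Python 3: a document carrying an encoding declaration must be handed to the parser as bytes, and tostring() returns bytes unless encoding="unicode" is requested. A standalone sketch of that behaviour:

    from lxml import etree

    parser = etree.XMLParser(encoding="utf-8", remove_blank_text=True)
    doc = '<?xml version="1.0" encoding="UTF-8"?><Root> <Child/> </Root>'

    # encode the str first; passing it as-is raises ValueError on Python 3
    elem = etree.XML(doc.encode("utf-8"), parser=parser)

    etree.tostring(elem)                      # bytes by default
    etree.tostring(elem, encoding="unicode")  # str, which the ported helpers now return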

View file

@ -1,21 +1,20 @@
from __future__ import print_function
import sys
import ConfigParser
import configparser
import boto.exception
import boto.s3.connection
import bunch
import munch
import itertools
import os
import random
import string
from httplib import HTTPConnection, HTTPSConnection
from urlparse import urlparse
from http.client import HTTPConnection, HTTPSConnection
from urllib.parse import urlparse
from .utils import region_sync_meta
s3 = bunch.Bunch()
config = bunch.Bunch()
targets = bunch.Bunch()
s3 = munch.Munch()
config = munch.Munch()
targets = munch.Munch()
# this will be assigned by setup()
prefix = None
@ -69,7 +68,7 @@ def nuke_prefixed_buckets_on_conn(prefix, name, conn):
if bucket.name.startswith(prefix):
print('Cleaning bucket {bucket}'.format(bucket=bucket))
success = False
for i in xrange(2):
for i in range(2):
try:
try:
iterator = iter(bucket.list_versions())
@ -116,12 +115,12 @@ def nuke_prefixed_buckets_on_conn(prefix, name, conn):
def nuke_prefixed_buckets(prefix):
# If no regions are specified, use the simple method
if targets.main.master == None:
for name, conn in s3.items():
for name, conn in list(s3.items()):
print('Deleting buckets on {name}'.format(name=name))
nuke_prefixed_buckets_on_conn(prefix, name, conn)
else:
# First, delete all buckets on the master connection
for name, conn in s3.items():
for name, conn in list(s3.items()):
if conn == targets.main.master.connection:
print('Deleting buckets on {name} (master)'.format(name=name))
nuke_prefixed_buckets_on_conn(prefix, name, conn)
@ -131,7 +130,7 @@ def nuke_prefixed_buckets(prefix):
print('region-sync in nuke_prefixed_buckets')
# Now delete remaining buckets on any other connection
for name, conn in s3.items():
for name, conn in list(s3.items()):
if conn != targets.main.master.connection:
print('Deleting buckets on {name} (non-master)'.format(name=name))
nuke_prefixed_buckets_on_conn(prefix, name, conn)
@ -149,46 +148,46 @@ class TargetConfig:
self.sync_meta_wait = 0
try:
self.api_name = cfg.get(section, 'api_name')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
self.port = cfg.getint(section, 'port')
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
pass
try:
self.host=cfg.get(section, 'host')
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
raise RuntimeError(
'host not specified for section {s}'.format(s=section)
)
try:
self.is_master=cfg.getboolean(section, 'is_master')
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
pass
try:
self.is_secure=cfg.getboolean(section, 'is_secure')
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
pass
try:
raw_calling_format = cfg.get(section, 'calling_format')
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
raw_calling_format = 'ordinary'
try:
self.sync_agent_addr = cfg.get(section, 'sync_agent_addr')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
self.sync_agent_port = cfg.getint(section, 'sync_agent_port')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
self.sync_meta_wait = cfg.getint(section, 'sync_meta_wait')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
pass
@ -208,7 +207,7 @@ class TargetConnection:
class RegionsInfo:
def __init__(self):
self.m = bunch.Bunch()
self.m = munch.Munch()
self.master = None
self.secondaries = []
@ -226,21 +225,21 @@ class RegionsInfo:
return self.m[name]
def get(self):
return self.m
def iteritems(self):
return self.m.iteritems()
def items(self):
return self.m.items()
regions = RegionsInfo()
class RegionsConn:
def __init__(self):
self.m = bunch.Bunch()
self.m = munch.Munch()
self.default = None
self.master = None
self.secondaries = []
def iteritems(self):
return self.m.iteritems()
def items(self):
return self.m.items()
def set_default(self, conn):
self.default = conn
@ -260,7 +259,7 @@ _multiprocess_can_split_ = True
def setup():
cfg = ConfigParser.RawConfigParser()
cfg = configparser.RawConfigParser()
try:
path = os.environ['S3TEST_CONF']
except KeyError:
@ -268,8 +267,7 @@ def setup():
'To run tests, point environment '
+ 'variable S3TEST_CONF to a config file.',
)
with file(path) as f:
cfg.readfp(f)
cfg.read(path)
global prefix
global targets
@ -277,19 +275,19 @@ def setup():
try:
template = cfg.get('fixtures', 'bucket prefix')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
template = 'test-{random}-'
prefix = choose_bucket_prefix(template=template)
try:
slow_backend = cfg.getboolean('fixtures', 'slow backend')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
slow_backend = False
# pull the default_region out, if it exists
try:
default_region = cfg.get('fixtures', 'default_region')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
default_region = None
s3.clear()
@ -315,7 +313,7 @@ def setup():
if len(regions.get()) == 0:
regions.add("default", TargetConfig(cfg, section))
config[name] = bunch.Bunch()
config[name] = munch.Munch()
for var in [
'user_id',
'display_name',
@ -329,12 +327,12 @@ def setup():
]:
try:
config[name][var] = cfg.get(section, var)
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
pass
targets[name] = RegionsConn()
for (k, conf) in regions.iteritems():
for (k, conf) in regions.items():
conn = boto.s3.connection.S3Connection(
aws_access_key_id=cfg.get(section, 'access_key'),
aws_secret_access_key=cfg.get(section, 'secret_key'),
@ -475,7 +473,7 @@ def _make_raw_request(host, port, method, path, body=None, request_headers=None,
if request_headers is None:
request_headers = {}
c = class_(host, port, strict=True, timeout=timeout)
c = class_(host, port=port, timeout=timeout)
# TODO: We might have to modify this in future if we need to interact with
# how httplib.request handles Accept-Encoding and Host.
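The ConfigParser -> configparser change is the Python 3 module rename; the file() builtin is gone, so the old readfp(file(path)) pattern becomes cfg.read(path), and the exception classes keep their names under the new module. A standalone sketch (the section, option, and fallback below are only illustrative):

    import configparser

    cfg = configparser.RawConfigParser()
    cfg.read('/path/to/s3tests.conf')      # hypothetical path; S3TEST_CONF points at the real file

    try:
        host = cfg.get('s3 main', 'host')
    except (configparser.NoSectionError, configparser.NoOptionError):
        host = 'localhost'                  # fall back, as the setup() helpers do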

View file

@ -1,10 +1,9 @@
from cStringIO import StringIO
from io import StringIO
import boto.connection
import boto.exception
import boto.s3.connection
import boto.s3.acl
import boto.utils
import bunch
import nose
import operator
import random
@ -15,7 +14,7 @@ import os
import re
from email.utils import formatdate
from urlparse import urlparse
from urllib.parse import urlparse
from boto.s3.connection import S3Connection
@ -24,7 +23,7 @@ from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from .utils import assert_raises
import AnonymousAuth
from . import AnonymousAuth
from email.header import decode_header

View file

@ -1,9 +1,8 @@
from cStringIO import StringIO
from io import StringIO
import boto.exception
import boto.s3.connection
import boto.s3.acl
import boto.s3.lifecycle
import bunch
import datetime
import time
import email.utils
@ -16,7 +15,6 @@ import os
import requests
import base64
import hmac
import sha
import pytz
import json
import httplib2
@ -27,13 +25,13 @@ import random
import re
from collections import defaultdict
from urlparse import urlparse
from urllib.parse import urlparse
from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
import utils
from . import utils
from .utils import assert_raises
from .policy import Policy, Statement, make_json_policy
@ -117,7 +115,7 @@ def check_configure_versioning_retry(bucket, status, expected_string):
read_status = None
for i in xrange(5):
for i in range(5):
try:
read_status = bucket.get_versioning_status()['Versioning']
except KeyError:
@ -330,26 +328,26 @@ def generate_lifecycle_body(rules):
body = '<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration>'
for rule in rules:
body += '<Rule><ID>%s</ID><Status>%s</Status>' % (rule['ID'], rule['Status'])
if 'Prefix' in rule.keys():
if 'Prefix' in list(rule.keys()):
body += '<Prefix>%s</Prefix>' % rule['Prefix']
if 'Filter' in rule.keys():
if 'Filter' in list(rule.keys()):
prefix_str= '' # AWS supports empty filters
if 'Prefix' in rule['Filter'].keys():
if 'Prefix' in list(rule['Filter'].keys()):
prefix_str = '<Prefix>%s</Prefix>' % rule['Filter']['Prefix']
body += '<Filter>%s</Filter>' % prefix_str
if 'Expiration' in rule.keys():
if 'ExpiredObjectDeleteMarker' in rule['Expiration'].keys():
if 'Expiration' in list(rule.keys()):
if 'ExpiredObjectDeleteMarker' in list(rule['Expiration'].keys()):
body += '<Expiration><ExpiredObjectDeleteMarker>%s</ExpiredObjectDeleteMarker></Expiration>' \
% rule['Expiration']['ExpiredObjectDeleteMarker']
elif 'Date' in rule['Expiration'].keys():
elif 'Date' in list(rule['Expiration'].keys()):
body += '<Expiration><Date>%s</Date></Expiration>' % rule['Expiration']['Date']
else:
body += '<Expiration><Days>%d</Days></Expiration>' % rule['Expiration']['Days']
if 'NoncurrentVersionExpiration' in rule.keys():
if 'NoncurrentVersionExpiration' in list(rule.keys()):
body += '<NoncurrentVersionExpiration><NoncurrentDays>%d</NoncurrentDays></NoncurrentVersionExpiration>' % \
rule['NoncurrentVersionExpiration']['NoncurrentDays']
if 'NoncurrentVersionTransition' in rule.keys():
if 'NoncurrentVersionTransition' in list(rule.keys()):
for t in rule['NoncurrentVersionTransition']:
body += '<NoncurrentVersionTransition>'
body += '<NoncurrentDays>%d</NoncurrentDays>' % \
@ -357,7 +355,7 @@ def generate_lifecycle_body(rules):
body += '<StorageClass>%s</StorageClass>' % \
t['StorageClass']
body += '</NoncurrentVersionTransition>'
if 'AbortIncompleteMultipartUpload' in rule.keys():
if 'AbortIncompleteMultipartUpload' in list(rule.keys()):
body += '<AbortIncompleteMultipartUpload><DaysAfterInitiation>%d</DaysAfterInitiation>' \
'</AbortIncompleteMultipartUpload>' % rule['AbortIncompleteMultipartUpload']['DaysAfterInitiation']
body += '</Rule>'
@ -491,11 +489,11 @@ def generate_random(size, part_size=5*1024*1024):
chunk = 1024
allowed = string.ascii_letters
for x in range(0, size, part_size):
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in xrange(chunk)])
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
s = ''
left = size - x
this_part_size = min(left, part_size)
for y in range(this_part_size / chunk):
for y in range(this_part_size // chunk):
s = s + strpart
if this_part_size > len(s):
s = s + strpart[0:this_part_size - len(s)]
@ -535,7 +533,7 @@ def _populate_key(bucket, keyname, size=7*1024*1024, storage_class=None):
key = bucket.new_key(keyname)
if storage_class:
key.storage_class = storage_class
data_str = str(generate_random(size, size).next())
data_str = str(next(generate_random(size, size)))
data = StringIO(data_str)
key.set_contents_from_file(fp=data)
return (key, data_str)
@ -754,7 +752,7 @@ class FakeFile(object):
"""
def __init__(self, char='A', interrupt=None):
self.offset = 0
self.char = char
self.char = bytes(char, 'utf-8')
self.interrupt = interrupt
def seek(self, offset, whence=os.SEEK_SET):
@ -801,7 +799,7 @@ class FakeFileVerifier(object):
if self.char == None:
self.char = data[0]
self.size += size
eq(data, self.char*size)
eq(data.decode(), self.char*size)
def _verify_atomic_key_data(key, size=-1, char=None):
"""

View file

@ -1,4 +1,4 @@
from __future__ import print_function
import sys
import collections
import nose
@ -8,7 +8,7 @@ from pprint import pprint
import time
import boto.exception
from urlparse import urlparse
from urllib.parse import urlparse
from nose.tools import eq_ as eq, ok_ as ok
from nose.plugins.attrib import attr
@ -110,7 +110,7 @@ def get_website_url(**kwargs):
def _test_website_populate_fragment(xml_fragment, fields):
for k in ['RoutingRules']:
if k in fields.keys() and len(fields[k]) > 0:
if k in list(fields.keys()) and len(fields[k]) > 0:
fields[k] = '<%s>%s</%s>' % (k, fields[k], k)
f = {
'IndexDocument_Suffix': choose_bucket_prefix(template='index-{random}.html', max_len=32),
@ -185,7 +185,7 @@ def __website_expected_reponse_status(res, status, reason):
def _website_expected_default_html(**kwargs):
fields = []
for k in kwargs.keys():
for k in list(kwargs.keys()):
# AmazonS3 seems to be inconsistent, some HTML errors include BucketName, but others do not.
if k is 'BucketName':
continue
@ -217,6 +217,7 @@ def _website_expected_error_response(res, bucket_name, status, reason, code, con
content = set([content])
for f in content:
if f is not IGNORE_FIELD and f is not None:
f = bytes(f, 'utf-8')
ok(f in body, 'HTML should contain "%s"' % (f, ))
def _website_expected_redirect_response(res, status, reason, new_url):
@ -237,7 +238,7 @@ def _website_request(bucket_name, path, connect_hostname=None, method='GET', tim
request_headers={}
request_headers['Host'] = o.hostname
request_headers['Accept'] = '*/*'
print('Request: {method} {path}\n{headers}'.format(method=method, path=path, headers=''.join(map(lambda t: t[0]+':'+t[1]+"\n", request_headers.items()))))
print('Request: {method} {path}\n{headers}'.format(method=method, path=path, headers=''.join([t[0]+':'+t[1]+"\n" for t in list(request_headers.items())])))
res = _make_raw_request(connect_hostname, config.main.port, method, path, request_headers=request_headers, secure=False, timeout=timeout)
for (k,v) in res.getheaders():
print(k,v)
@ -293,6 +294,7 @@ def test_website_public_bucket_list_public_index():
res = _website_request(bucket.name, '')
body = res.read()
print(body)
indexstring = bytes(indexstring, 'utf-8')
eq(body, indexstring) # default content should match index.html set content
__website_expected_reponse_status(res, 200, 'OK')
indexhtml.delete()
@ -321,6 +323,7 @@ def test_website_private_bucket_list_public_index():
__website_expected_reponse_status(res, 200, 'OK')
body = res.read()
print(body)
indexstring = bytes(indexstring, 'utf-8')
eq(body, indexstring, 'default content should match index.html set content')
indexhtml.delete()
bucket.delete()
@ -511,6 +514,7 @@ def test_website_private_bucket_list_empty_blockederrordoc():
body = res.read()
print(body)
_website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body)
errorstring = bytes(errorstring, 'utf-8')
ok(errorstring not in body, 'error content should NOT match error.html set content')
errorhtml.delete()
@ -537,6 +541,7 @@ def test_website_public_bucket_list_empty_blockederrordoc():
body = res.read()
print(body)
_website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchKey', content=_website_expected_default_html(Code='NoSuchKey'), body=body)
errorstring = bytes(errorstring, 'utf-8')
ok(errorstring not in body, 'error content should match error.html set content')
errorhtml.delete()
@ -568,6 +573,7 @@ def test_website_public_bucket_list_private_index_blockederrordoc():
body = res.read()
print(body)
_website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body)
errorstring = bytes(errorstring, 'utf-8')
ok(errorstring not in body, 'error content should match error.html set content')
indexhtml.delete()
@ -600,6 +606,7 @@ def test_website_private_bucket_list_private_index_blockederrordoc():
body = res.read()
print(body)
_website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body)
errorstring = bytes(errorstring, 'utf-8')
ok(errorstring not in body, 'error content should match error.html set content')
indexhtml.delete()
@ -1013,7 +1020,7 @@ ROUTING_RULES = {
""",
}
for k in ROUTING_RULES.keys():
for k in list(ROUTING_RULES.keys()):
if len(ROUTING_RULES[k]) > 0:
ROUTING_RULES[k] = "<!-- %s -->\n%s" % (k, ROUTING_RULES[k])
@ -1142,7 +1149,7 @@ def routing_check(*args, **kwargs):
#body = res.read()
#print(body)
#eq(body, args['content'], 'default content should match index.html set content')
ok(res.getheader('Content-Length', -1) > 0)
ok(int(res.getheader('Content-Length', -1)) > 0)
elif args['code'] >= 300 and args['code'] < 400:
_website_expected_redirect_response(res, args['code'], IGNORE_FIELD, new_url)
elif args['code'] >= 400:

View file

@ -1,6 +1,6 @@
from nose.tools import eq_ as eq
import utils
from . import utils
def test_generate():
FIVE_MB = 5 * 1024 * 1024

View file

@ -28,11 +28,11 @@ def generate_random(size, part_size=5*1024*1024):
chunk = 1024
allowed = string.ascii_letters
for x in range(0, size, part_size):
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in xrange(chunk)])
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
s = ''
left = size - x
this_part_size = min(left, part_size)
for y in range(this_part_size / chunk):
for y in range(this_part_size // chunk):
s = s + strpart
s = s + strpart[:(this_part_size % chunk)]
yield s
@ -42,7 +42,7 @@ def generate_random(size, part_size=5*1024*1024):
# syncs all the regions except for the one passed in
def region_sync_meta(targets, region):
for (k, r) in targets.iteritems():
for (k, r) in targets.items():
if r == region:
continue
conf = r.conf
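The / -> // change in generate_random() is the Python 3 division rule: / is always true division and yields a float, while range() still needs an int. A tiny sketch:

    this_part_size, chunk = 5 * 1024 * 1024, 1024

    this_part_size / chunk      # 5120.0 -- true division returns a float on Python 3
    this_part_size // chunk     # 5120   -- floor division keeps the int that range() needs

    # range(this_part_size / chunk) would raise TypeError, hence the //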

View file

@ -1,7 +1,7 @@
from boto.s3.connection import S3Connection
from boto.exception import BotoServerError
from boto.s3.key import Key
from httplib import BadStatusLine
from http.client import BadStatusLine
from optparse import OptionParser
from .. import common
@ -59,7 +59,7 @@ def descend_graph(decision_graph, node_name, prng):
except IndexError:
decision = {}
for key, choices in node['set'].iteritems():
for key, choices in node['set'].items():
if key in decision:
raise DecisionGraphError("Node %s tried to set '%s', but that key was already set by a lower node!" %(node_name, key))
decision[key] = make_choice(choices, prng)
@ -85,7 +85,7 @@ def descend_graph(decision_graph, node_name, prng):
num_reps = prng.randint(size_min, size_max)
if header in [h for h, v in decision['headers']]:
raise DecisionGraphError("Node %s tried to add header '%s', but that header already exists!" %(node_name, header))
for _ in xrange(num_reps):
for _ in range(num_reps):
decision['headers'].append([header, value])
return decision
@ -113,7 +113,7 @@ def make_choice(choices, prng):
if value == 'null' or value == 'None':
value = ''
for _ in xrange(weight):
for _ in range(weight):
weighted_choices.append(value)
return prng.choice(weighted_choices)
@ -137,7 +137,8 @@ def expand(decision, value, prng):
class RepeatExpandingFormatter(string.Formatter):
charsets = {
'printable_no_whitespace': string.printable.translate(None, string.whitespace),
'printable_no_whitespace': string.printable.translate(
"".maketrans('', '', string.whitespace)),
'printable': string.printable,
'punctuation': string.punctuation,
'whitespace': string.whitespace,
@ -188,14 +189,15 @@ class RepeatExpandingFormatter(string.Formatter):
if charset_arg == 'binary' or charset_arg == 'binary_no_whitespace':
num_bytes = length + 8
tmplist = [self.prng.getrandbits(64) for _ in xrange(num_bytes / 8)]
tmpstring = struct.pack((num_bytes / 8) * 'Q', *tmplist)
tmplist = [self.prng.getrandbits(64) for _ in range(num_bytes // 8)]
tmpstring = struct.pack((num_bytes // 8) * 'Q', *tmplist)
if charset_arg == 'binary_no_whitespace':
tmpstring = ''.join(c for c in tmpstring if c not in string.whitespace)
tmpstring = b''.join([c] for c in tmpstring if c not in bytes(
string.whitespace, 'utf-8'))
return tmpstring[0:length]
else:
charset = self.charsets[charset_arg]
return ''.join([self.prng.choice(charset) for _ in xrange(length)]) # Won't scale nicely
return ''.join([self.prng.choice(charset) for _ in range(length)]) # Won't scale nicely
def parse_options():
@ -281,29 +283,29 @@ def _main():
if options.seedfile:
FH = open(options.seedfile, 'r')
request_seeds = [int(line) for line in FH if line != '\n']
print>>OUT, 'Seedfile: %s' %options.seedfile
print>>OUT, 'Number of requests: %d' %len(request_seeds)
print('Seedfile: %s' %options.seedfile, file=OUT)
print('Number of requests: %d' %len(request_seeds), file=OUT)
else:
if options.seed:
print>>OUT, 'Initial Seed: %d' %options.seed
print>>OUT, 'Number of requests: %d' %options.num_requests
print('Initial Seed: %d' %options.seed, file=OUT)
print('Number of requests: %d' %options.num_requests, file=OUT)
random_list = randomlist(options.seed)
request_seeds = itertools.islice(random_list, options.num_requests)
print>>OUT, 'Decision Graph: %s' %options.graph_filename
print('Decision Graph: %s' %options.graph_filename, file=OUT)
graph_file = open(options.graph_filename, 'r')
decision_graph = yaml.safe_load(graph_file)
constants = populate_buckets(s3_connection, alt_connection)
print>>VERBOSE, "Test Buckets/Objects:"
for key, value in constants.iteritems():
print>>VERBOSE, "\t%s: %s" %(key, value)
print("Test Buckets/Objects:", file=VERBOSE)
for key, value in constants.items():
print("\t%s: %s" %(key, value), file=VERBOSE)
print>>OUT, "Begin Fuzzing..."
print>>VERBOSE, '='*80
print("Begin Fuzzing...", file=OUT)
print('='*80, file=VERBOSE)
for request_seed in request_seeds:
print>>VERBOSE, 'Seed is: %r' %request_seed
print('Seed is: %r' %request_seed, file=VERBOSE)
prng = random.Random(request_seed)
decision = assemble_decision(decision_graph, prng)
decision.update(constants)
@ -321,46 +323,46 @@ def _main():
except KeyError:
headers = {}
print>>VERBOSE, "%r %r" %(method[:100], path[:100])
for h, v in headers.iteritems():
print>>VERBOSE, "%r: %r" %(h[:50], v[:50])
print>>VERBOSE, "%r\n" % body[:100]
print("%r %r" %(method[:100], path[:100]), file=VERBOSE)
for h, v in headers.items():
print("%r: %r" %(h[:50], v[:50]), file=VERBOSE)
print("%r\n" % body[:100], file=VERBOSE)
print>>DEBUG, 'FULL REQUEST'
print>>DEBUG, 'Method: %r' %method
print>>DEBUG, 'Path: %r' %path
print>>DEBUG, 'Headers:'
for h, v in headers.iteritems():
print>>DEBUG, "\t%r: %r" %(h, v)
print>>DEBUG, 'Body: %r\n' %body
print('FULL REQUEST', file=DEBUG)
print('Method: %r' %method, file=DEBUG)
print('Path: %r' %path, file=DEBUG)
print('Headers:', file=DEBUG)
for h, v in headers.items():
print("\t%r: %r" %(h, v), file=DEBUG)
print('Body: %r\n' %body, file=DEBUG)
failed = False # Let's be optimistic, shall we?
try:
response = s3_connection.make_request(method, path, data=body, headers=headers, override_num_retries=1)
body = response.read()
except BotoServerError, e:
except BotoServerError as e:
response = e
body = e.body
failed = True
except BadStatusLine, e:
print>>OUT, 'FAILED: failed to parse response (BadStatusLine); probably a NUL byte in your request?'
print>>VERBOSE, '='*80
except BadStatusLine as e:
print('FAILED: failed to parse response (BadStatusLine); probably a NUL byte in your request?', file=OUT)
print('='*80, file=VERBOSE)
continue
if failed:
print>>OUT, 'FAILED:'
print('FAILED:', file=OUT)
OLD_VERBOSE = VERBOSE
OLD_DEBUG = DEBUG
VERBOSE = DEBUG = OUT
print>>VERBOSE, 'Seed was: %r' %request_seed
print>>VERBOSE, 'Response status code: %d %s' %(response.status, response.reason)
print>>DEBUG, 'Body:\n%s' %body
print>>VERBOSE, '='*80
print('Seed was: %r' %request_seed, file=VERBOSE)
print('Response status code: %d %s' %(response.status, response.reason), file=VERBOSE)
print('Body:\n%s' %body, file=DEBUG)
print('='*80, file=VERBOSE)
if failed:
VERBOSE = OLD_VERBOSE
DEBUG = OLD_DEBUG
print>>OUT, '...done fuzzing'
print('...done fuzzing', file=OUT)
if options.cleanup:
common.teardown()
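The charsets table change is another direct Python 2/3 difference: str.translate() lost its second deletechars argument, so characters to drop are now encoded in the table built by str.maketrans(). A short sketch:

    import string

    # Python 2 spelling: string.printable.translate(None, string.whitespace)
    table = str.maketrans('', '', string.whitespace)
    printable_no_ws = string.printable.translate(table)

    assert not any(c in string.whitespace for c in printable_no_ws)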

View file

@ -25,6 +25,7 @@ from nose.tools import assert_true
from nose.plugins.attrib import attr
from ...functional.utils import assert_raises
from functools import reduce
_decision_graph = {}
@ -173,21 +174,21 @@ def test_expand_random_binary():
def test_expand_random_printable_no_whitespace():
prng = random.Random(1)
for _ in xrange(1000):
for _ in range(1000):
got = expand({}, '{random 500 printable_no_whitespace}', prng)
assert_true(reduce(lambda x, y: x and y, [x not in string.whitespace and x in string.printable for x in got]))
def test_expand_random_binary_no_whitespace():
prng = random.Random(1)
for _ in xrange(1000):
for _ in range(1000):
got = expand({}, '{random 500 binary_no_whitespace}', prng)
assert_true(reduce(lambda x, y: x and y, [x not in string.whitespace for x in got]))
def test_expand_random_no_args():
prng = random.Random(1)
for _ in xrange(1000):
for _ in range(1000):
got = expand({}, '{random}', prng)
assert_true(0 <= len(got) <= 1000)
assert_true(reduce(lambda x, y: x and y, [x in string.printable for x in got]))
@ -195,7 +196,7 @@ def test_expand_random_no_args():
def test_expand_random_no_charset():
prng = random.Random(1)
for _ in xrange(1000):
for _ in range(1000):
got = expand({}, '{random 10-30}', prng)
assert_true(10 <= len(got) <= 30)
assert_true(reduce(lambda x, y: x and y, [x in string.printable for x in got]))
@ -203,7 +204,7 @@ def test_expand_random_no_charset():
def test_expand_random_exact_length():
prng = random.Random(1)
for _ in xrange(1000):
for _ in range(1000):
got = expand({}, '{random 10 digits}', prng)
assert_true(len(got) == 10)
assert_true(reduce(lambda x, y: x and y, [x in string.digits for x in got]))
@ -300,9 +301,9 @@ def test_weighted_choices():
prng = random.Random(1)
choices_made = {}
for _ in xrange(1000):
for _ in range(1000):
choice = make_choice(graph['weighted_node']['choices'], prng)
if choices_made.has_key(choice):
if choice in choices_made:
choices_made[choice] += 1
else:
choices_made[choice] = 1
@ -344,9 +345,9 @@ def test_weighted_set():
prng = random.Random(1)
choices_made = {}
for _ in xrange(1000):
for _ in range(1000):
choice = make_choice(graph['weighted_node']['set']['k1'], prng)
if choices_made.has_key(choice):
if choice in choices_made:
choices_made[choice] += 1
else:
choices_made[choice] = 1
@ -392,7 +393,7 @@ def test_expand_headers():
decision = descend_graph(graph, 'node1', prng)
expanded_headers = expand_headers(decision, prng)
for header, value in expanded_headers.iteritems():
for header, value in expanded_headers.items():
if header == 'my-header':
assert_true(value in ['h1', 'h2', 'h3'])
elif header.startswith('random-header-'):

View file

@ -27,7 +27,7 @@ def get_random_files(quantity, mean, stddev, seed):
list of file handles
"""
file_generator = realistic.files(mean, stddev, seed)
return [file_generator.next() for _ in xrange(quantity)]
return [next(file_generator) for _ in range(quantity)]
def upload_objects(bucket, files, seed):
@ -43,9 +43,9 @@ def upload_objects(bucket, files, seed):
name_generator = realistic.names(15, 4, seed=seed)
for fp in files:
print >> sys.stderr, 'sending file with size %dB' % fp.size
print('sending file with size %dB' % fp.size, file=sys.stderr)
key = Key(bucket)
key.key = name_generator.next()
key.key = next(name_generator)
key.set_contents_from_file(fp, rewind=True)
key.set_acl('public-read')
keys.append(key)
@ -94,18 +94,18 @@ def _main():
bucket.set_acl('public-read')
keys = []
print >> OUTFILE, 'bucket: %s' % bucket.name
print >> sys.stderr, 'setup complete, generating files'
print('bucket: %s' % bucket.name, file=OUTFILE)
print('setup complete, generating files', file=sys.stderr)
for profile in common.config.file_generation.groups:
seed = random.random()
files = get_random_files(profile[0], profile[1], profile[2], seed)
keys += upload_objects(bucket, files, seed)
print >> sys.stderr, 'finished sending files. generating urls'
print('finished sending files. generating urls', file=sys.stderr)
for key in keys:
print >> OUTFILE, key.generate_url(0, query_auth=False)
print(key.generate_url(0, query_auth=False), file=OUTFILE)
print >> sys.stderr, 'done'
print('done', file=sys.stderr)
def main():
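Generators lost their .next() method in Python 3; the builtin next() is the portable spelling, as the name_generator and file_generator calls above now use. A tiny sketch:

    def names():
        n = 0
        while True:
            yield 'test_file_{num}'.format(num=n)
            n += 1

    gen = names()
    first = next(gen)       # Python 3 spelling; gen.next() raises AttributeError
    assert first == 'test_file_0'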

View file

@ -11,8 +11,8 @@ import traceback
import random
import yaml
import realistic
import common
from . import realistic
from . import common
NANOSECOND = int(1e9)
@ -57,7 +57,7 @@ def reader(bucket, worker_id, file_names, queue, rand):
traceback=traceback.format_exc(),
),
)
print "ERROR:", m
print("ERROR:", m)
else:
elapsed = end - start
result.update(
@ -158,16 +158,16 @@ def main():
for name in ['names', 'contents', 'writer', 'reader']:
seeds.setdefault(name, rand.randrange(2**32))
print 'Using random seeds: {seeds}'.format(seeds=seeds)
print('Using random seeds: {seeds}'.format(seeds=seeds))
# setup bucket and other objects
bucket_name = common.choose_bucket_prefix(config.readwrite.bucket, max_len=30)
bucket = conn.create_bucket(bucket_name)
print "Created bucket: {name}".format(name=bucket.name)
print("Created bucket: {name}".format(name=bucket.name))
# check flag for deterministic file name creation
if not config.readwrite.get('deterministic_file_names'):
print 'Creating random file names'
print('Creating random file names')
file_names = realistic.names(
mean=15,
stddev=4,
@ -176,9 +176,9 @@ def main():
file_names = itertools.islice(file_names, config.readwrite.files.num)
file_names = list(file_names)
else:
print 'Creating file names that are deterministic'
print('Creating file names that are deterministic')
file_names = []
for x in xrange(config.readwrite.files.num):
for x in range(config.readwrite.files.num):
file_names.append('test_file_{num}'.format(num=x))
files = realistic.files2(
@ -191,7 +191,7 @@ def main():
# warmup - get initial set of files uploaded if there are any writers specified
if config.readwrite.writers > 0:
print "Uploading initial set of {num} files".format(num=config.readwrite.files.num)
print("Uploading initial set of {num} files".format(num=config.readwrite.files.num))
warmup_pool = gevent.pool.Pool(size=100)
for file_name in file_names:
fp = next(files)
@ -204,15 +204,15 @@ def main():
warmup_pool.join()
# main work
print "Starting main worker loop."
print "Using file size: {size} +- {stddev}".format(size=config.readwrite.files.size, stddev=config.readwrite.files.stddev)
print "Spawning {w} writers and {r} readers...".format(w=config.readwrite.writers, r=config.readwrite.readers)
print("Starting main worker loop.")
print("Using file size: {size} +- {stddev}".format(size=config.readwrite.files.size, stddev=config.readwrite.files.stddev))
print("Spawning {w} writers and {r} readers...".format(w=config.readwrite.writers, r=config.readwrite.readers))
group = gevent.pool.Group()
rand_writer = random.Random(seeds['writer'])
# Don't create random files if deterministic_files_names is set and true
if not config.readwrite.get('deterministic_file_names'):
for x in xrange(config.readwrite.writers):
for x in range(config.readwrite.writers):
this_rand = random.Random(rand_writer.randrange(2**32))
group.spawn(
writer,
@ -229,7 +229,7 @@ def main():
# this loop needs no additional qualifiers. If zero readers are specified,
# it will behave as expected (no data is read)
rand_reader = random.Random(seeds['reader'])
for x in xrange(config.readwrite.readers):
for x in range(config.readwrite.readers):
this_rand = random.Random(rand_reader.randrange(2**32))
group.spawn(
reader,
@ -246,7 +246,7 @@ def main():
# wait for all the tests to finish
group.join()
print 'post-join, queue size {size}'.format(size=q.qsize())
print('post-join, queue size {size}'.format(size=q.qsize()))
if q.qsize() > 0:
for temp_dict in q:

View file

@ -47,9 +47,9 @@ class FileValidator(object):
self.original_hash, binary = contents[-40:], contents[:-40]
self.new_hash = hashlib.sha1(binary).hexdigest()
if not self.new_hash == self.original_hash:
print 'original hash: ', self.original_hash
print 'new hash: ', self.new_hash
print 'size: ', self._file.tell()
print('original hash: ', self.original_hash)
print('new hash: ', self.new_hash)
print('size: ', self._file.tell())
return False
return True
@ -115,7 +115,7 @@ class RandomContentFile(object):
size = min(self.size, 1*1024*1024) # generate at most 1 MB at a time
chunks = int(math.ceil(size/8.0)) # number of 8-byte chunks to create
l = [self.random.getrandbits(64) for _ in xrange(chunks)]
l = [self.random.getrandbits(64) for _ in range(chunks)]
s = struct.pack(chunks*'Q', *l)
return s
@ -252,7 +252,7 @@ def files2(mean, stddev, seed=None, numfiles=10):
"""
# pre-compute all the files (and save with TemporaryFiles)
fs = []
for _ in xrange(numfiles):
for _ in range(numfiles):
t = tempfile.SpooledTemporaryFile()
t.write(generate_file_contents(random.normalvariate(mean, stddev)))
t.seek(0)
@ -277,5 +277,5 @@ def names(mean, stddev, charset=None, seed=None):
length = int(rand.normalvariate(mean, stddev))
if length > 0:
break
name = ''.join(rand.choice(charset) for _ in xrange(length))
name = ''.join(rand.choice(charset) for _ in range(length))
yield name

View file

@ -11,8 +11,8 @@ import traceback
import random
import yaml
import realistic
import common
from . import realistic
from . import common
NANOSECOND = int(1e9)
@ -141,12 +141,12 @@ def main():
for name in ['names', 'contents', 'writer', 'reader']:
seeds.setdefault(name, rand.randrange(2**32))
print 'Using random seeds: {seeds}'.format(seeds=seeds)
print('Using random seeds: {seeds}'.format(seeds=seeds))
# setup bucket and other objects
bucket_name = common.choose_bucket_prefix(config.roundtrip.bucket, max_len=30)
bucket = conn.create_bucket(bucket_name)
print "Created bucket: {name}".format(name=bucket.name)
print("Created bucket: {name}".format(name=bucket.name))
objnames = realistic.names(
mean=15,
stddev=4,
@ -163,10 +163,10 @@ def main():
logger_g = gevent.spawn(yaml.safe_dump_all, q, stream=real_stdout)
print "Writing {num} objects with {w} workers...".format(
print("Writing {num} objects with {w} workers...".format(
num=config.roundtrip.files.num,
w=config.roundtrip.writers,
)
))
pool = gevent.pool.Pool(size=config.roundtrip.writers)
start = time.time()
for objname in objnames:
@ -186,10 +186,10 @@ def main():
duration=int(round(elapsed * NANOSECOND)),
))
print "Reading {num} objects with {w} workers...".format(
print("Reading {num} objects with {w} workers...".format(
num=config.roundtrip.files.num,
w=config.roundtrip.readers,
)
))
# avoid accessing them in the same order as the writing
rand.shuffle(objnames)
pool = gevent.pool.Pool(size=config.roundtrip.readers)

View file

@ -57,7 +57,7 @@ def main():
def calculate_stats(options, total, durations, min_time, max_time, errors,
success):
print 'Calculating statistics...'
print('Calculating statistics...')
f = sys.stdin
if options.input:
@ -81,13 +81,13 @@ def calculate_stats(options, total, durations, min_time, max_time, errors,
end = start + duration / float(NANOSECONDS)
if options.verbose:
print "[{type}] POSIX time: {start:>18.2f} - {end:<18.2f} " \
print("[{type}] POSIX time: {start:>18.2f} - {end:<18.2f} " \
"{data:>11.2f} KB".format(
type=type_,
start=start,
end=end,
data=data_size / 1024.0, # convert to KB
)
))
# update time boundaries
prev = min_time.setdefault(type_, start)
@ -106,7 +106,7 @@ def calculate_stats(options, total, durations, min_time, max_time, errors,
total[type_] = total.get(type_, 0) + data_size
def print_results(total, durations, min_time, max_time, errors, success):
for type_ in total.keys():
for type_ in list(total.keys()):
trans_success = success.get(type_, 0)
trans_fail = errors.get(type_, 0)
trans = trans_success + trans_fail
@ -121,7 +121,7 @@ def print_results(total, durations, min_time, max_time, errors, success):
trans_long = max(durations[type_]) / float(NANOSECONDS)
trans_short = min(durations[type_]) / float(NANOSECONDS)
print OUTPUT_FORMAT.format(
print(OUTPUT_FORMAT.format(
type=type_,
trans_success=trans_success,
trans_fail=trans_fail,
@ -135,7 +135,7 @@ def print_results(total, durations, min_time, max_time, errors, success):
conc=conc,
trans_long=trans_long,
trans_short=trans_short,
)
))
if __name__ == '__main__':
main()

View file

@ -1,5 +1,5 @@
import boto.s3.connection
import bunch
import munch
import itertools
import os
import random
@ -11,8 +11,8 @@ from lxml import etree
from doctest import Example
from lxml.doctestcompare import LXMLOutputChecker
s3 = bunch.Bunch()
config = bunch.Bunch()
s3 = munch.Munch()
config = munch.Munch()
prefix = ''
bucket_counter = itertools.count(1)
@ -51,10 +51,10 @@ def nuke_bucket(bucket):
while deleted_cnt:
deleted_cnt = 0
for key in bucket.list():
print 'Cleaning bucket {bucket} key {key}'.format(
print('Cleaning bucket {bucket} key {key}'.format(
bucket=bucket,
key=key,
)
))
key.set_canned_acl('private')
key.delete()
deleted_cnt += 1
@ -67,26 +67,26 @@ def nuke_bucket(bucket):
and e.body == ''):
e.error_code = 'AccessDenied'
if e.error_code != 'AccessDenied':
print 'GOT UNWANTED ERROR', e.error_code
print('GOT UNWANTED ERROR', e.error_code)
raise
# seems like we're not the owner of the bucket; ignore
pass
def nuke_prefixed_buckets():
for name, conn in s3.items():
print 'Cleaning buckets from connection {name}'.format(name=name)
for name, conn in list(s3.items()):
print('Cleaning buckets from connection {name}'.format(name=name))
for bucket in conn.get_all_buckets():
if bucket.name.startswith(prefix):
print 'Cleaning bucket {bucket}'.format(bucket=bucket)
print('Cleaning bucket {bucket}'.format(bucket=bucket))
nuke_bucket(bucket)
print 'Done with cleanup of test buckets.'
print('Done with cleanup of test buckets.')
def read_config(fp):
config = bunch.Bunch()
config = munch.Munch()
g = yaml.safe_load_all(fp)
for new in g:
config.update(bunch.bunchify(new))
config.update(munch.Munchify(new))
return config
def connect(conf):
@ -97,7 +97,7 @@ def connect(conf):
access_key='aws_access_key_id',
secret_key='aws_secret_access_key',
)
kwargs = dict((mapping[k],v) for (k,v) in conf.iteritems() if k in mapping)
kwargs = dict((mapping[k],v) for (k,v) in conf.items() if k in mapping)
#process calling_format argument
calling_formats = dict(
ordinary=boto.s3.connection.OrdinaryCallingFormat(),
@ -105,7 +105,7 @@ def connect(conf):
vhost=boto.s3.connection.VHostCallingFormat(),
)
kwargs['calling_format'] = calling_formats['ordinary']
if conf.has_key('calling_format'):
if 'calling_format' in conf:
raw_calling_format = conf['calling_format']
try:
kwargs['calling_format'] = calling_formats[raw_calling_format]
@ -146,7 +146,7 @@ def setup():
raise RuntimeError("Empty Prefix! Aborting!")
defaults = config.s3.defaults
for section in config.s3.keys():
for section in list(config.s3.keys()):
if section == 'defaults':
continue

View file

@ -3,14 +3,14 @@ from botocore import UNSIGNED
from botocore.client import Config
from botocore.exceptions import ClientError
from botocore.handlers import disable_signing
import ConfigParser
import configparser
import os
import bunch
import munch
import random
import string
import itertools
config = bunch.Bunch
config = munch.Munch
# this will be assigned by setup()
prefix = None
@ -125,17 +125,17 @@ def nuke_prefixed_buckets(prefix, client=None):
for obj in delete_markers:
response = client.delete_object(Bucket=bucket_name,Key=obj[0],VersionId=obj[1])
try:
client.delete_bucket(Bucket=bucket_name)
except ClientError, e:
response = client.delete_bucket(Bucket=bucket_name)
except ClientError:
# if DELETE times out, the retry may see NoSuchBucket
if e.response['Error']['Code'] != 'NoSuchBucket':
raise e
if response['Error']['Code'] != 'NoSuchBucket':
raise ClientError
pass
print('Done with cleanup of buckets in tests.')
def setup():
cfg = ConfigParser.RawConfigParser()
cfg = configparser.RawConfigParser()
try:
path = os.environ['S3TEST_CONF']
except KeyError:
@ -143,8 +143,7 @@ def setup():
'To run tests, point environment '
+ 'variable S3TEST_CONF to a config file.',
)
with file(path) as f:
cfg.readfp(f)
cfg.read(path)
if not cfg.defaults():
raise RuntimeError('Your config file is missing the DEFAULT section!')
@ -175,16 +174,17 @@ def setup():
config.main_email = cfg.get('s3 main',"email")
try:
config.main_kms_keyid = cfg.get('s3 main',"kms_keyid")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
config.main_kms_keyid = 'testkey-1'
try:
config.main_kms_keyid2 = cfg.get('s3 main',"kms_keyid2")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
config.main_kms_keyid2 = 'testkey-2'
try:
config.main_api_name = cfg.get('s3 main',"api_name")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
except (configparser.NoSectionError, configparser.NoOptionError):
config.main_api_name = ""
pass
@ -203,7 +203,7 @@ def setup():
# vars from the fixtures section
try:
template = cfg.get('fixtures', "bucket prefix")
except (ConfigParser.NoOptionError):
except (configparser.NoOptionError):
template = 'test-{random}-'
prefix = choose_bucket_prefix(template=template)
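The comma form of exception binding (except ClientError, e:) no longer parses on Python 3; as is the only accepted spelling, which the cleanup code above now uses. A minimal sketch with botocore's ClientError (the helper name is hypothetical):

    from botocore.exceptions import ClientError

    def delete_bucket_quietly(client, name):
        try:
            client.delete_bucket(Bucket=name)
        except ClientError as e:    # "except ClientError, e:" is a SyntaxError on Python 3
            if e.response['Error']['Code'] != 'NoSuchBucket':
                raise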

View file

@ -289,7 +289,7 @@ def test_object_create_bad_contentlength_mismatch_above():
key_name = 'foo'
headers = {'Content-Length': str(length)}
add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
client.meta.events.register('before-sign.s3.PutObject', add_headers_before_sign)
client.meta.events.register('before-sign.s3.PutObject', add_headers)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body=content)
status, error_code = _get_status_and_error_code(e.response)

File diff suppressed because it is too large

View file

@ -1,6 +1,6 @@
from nose.tools import eq_ as eq
import utils
from . import utils
def test_generate():
FIVE_MB = 5 * 1024 * 1024

View file

@ -28,11 +28,11 @@ def generate_random(size, part_size=5*1024*1024):
chunk = 1024
allowed = string.ascii_letters
for x in range(0, size, part_size):
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in xrange(chunk)])
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
s = ''
left = size - x
this_part_size = min(left, part_size)
for y in range(this_part_size / chunk):
for y in range(this_part_size // chunk):
s = s + strpart
s = s + strpart[:(this_part_size % chunk)]
yield s

View file

@ -1,7 +1,7 @@
from boto.s3.connection import S3Connection
from boto.exception import BotoServerError
from boto.s3.key import Key
from httplib import BadStatusLine
from http.client import BadStatusLine
from optparse import OptionParser
from .. import common
@ -59,7 +59,7 @@ def descend_graph(decision_graph, node_name, prng):
except IndexError:
decision = {}
for key, choices in node['set'].iteritems():
for key, choices in node['set'].items():
if key in decision:
raise DecisionGraphError("Node %s tried to set '%s', but that key was already set by a lower node!" %(node_name, key))
decision[key] = make_choice(choices, prng)
@ -85,7 +85,7 @@ def descend_graph(decision_graph, node_name, prng):
num_reps = prng.randint(size_min, size_max)
if header in [h for h, v in decision['headers']]:
raise DecisionGraphError("Node %s tried to add header '%s', but that header already exists!" %(node_name, header))
for _ in xrange(num_reps):
for _ in range(num_reps):
decision['headers'].append([header, value])
return decision
@ -113,7 +113,7 @@ def make_choice(choices, prng):
if value == 'null' or value == 'None':
value = ''
for _ in xrange(weight):
for _ in range(weight):
weighted_choices.append(value)
return prng.choice(weighted_choices)
@ -137,7 +137,8 @@ def expand(decision, value, prng):
class RepeatExpandingFormatter(string.Formatter):
charsets = {
'printable_no_whitespace': string.printable.translate(None, string.whitespace),
'printable_no_whitespace': string.printable.translate(
"".maketrans('', '', string.whitespace)),
'printable': string.printable,
'punctuation': string.punctuation,
'whitespace': string.whitespace,
@ -188,14 +189,15 @@ class RepeatExpandingFormatter(string.Formatter):
if charset_arg == 'binary' or charset_arg == 'binary_no_whitespace':
num_bytes = length + 8
tmplist = [self.prng.getrandbits(64) for _ in xrange(num_bytes / 8)]
tmpstring = struct.pack((num_bytes / 8) * 'Q', *tmplist)
tmplist = [self.prng.getrandbits(64) for _ in range(num_bytes // 8)]
tmpstring = struct.pack((num_bytes // 8) * 'Q', *tmplist)
if charset_arg == 'binary_no_whitespace':
tmpstring = ''.join(c for c in tmpstring if c not in string.whitespace)
tmpstring = ''.join([c] for c in tmpstring if c not in bytes(
string.whitespace, 'utf-8'))
return tmpstring[0:length]
else:
charset = self.charsets[charset_arg]
return ''.join([self.prng.choice(charset) for _ in xrange(length)]) # Won't scale nicely
return ''.join([self.prng.choice(charset) for _ in range(length)]) # Won't scale nicely
def parse_options():
@ -281,29 +283,29 @@ def _main():
if options.seedfile:
FH = open(options.seedfile, 'r')
request_seeds = [int(line) for line in FH if line != '\n']
print>>OUT, 'Seedfile: %s' %options.seedfile
print>>OUT, 'Number of requests: %d' %len(request_seeds)
print('Seedfile: %s' %options.seedfile, file=OUT)
print('Number of requests: %d' %len(request_seeds), file=OUT)
else:
if options.seed:
print>>OUT, 'Initial Seed: %d' %options.seed
print>>OUT, 'Number of requests: %d' %options.num_requests
print('Initial Seed: %d' %options.seed, file=OUT)
print('Number of requests: %d' %options.num_requests, file=OUT)
random_list = randomlist(options.seed)
request_seeds = itertools.islice(random_list, options.num_requests)
print>>OUT, 'Decision Graph: %s' %options.graph_filename
print('Decision Graph: %s' %options.graph_filename, file=OUT)
graph_file = open(options.graph_filename, 'r')
decision_graph = yaml.safe_load(graph_file)
constants = populate_buckets(s3_connection, alt_connection)
print>>VERBOSE, "Test Buckets/Objects:"
for key, value in constants.iteritems():
print>>VERBOSE, "\t%s: %s" %(key, value)
print("Test Buckets/Objects:", file=VERBOSE)
for key, value in constants.items():
print("\t%s: %s" %(key, value), file=VERBOSE)
print>>OUT, "Begin Fuzzing..."
print>>VERBOSE, '='*80
print("Begin Fuzzing...", file=OUT)
print('='*80, file=VERBOSE)
for request_seed in request_seeds:
print>>VERBOSE, 'Seed is: %r' %request_seed
print('Seed is: %r' %request_seed, file=VERBOSE)
prng = random.Random(request_seed)
decision = assemble_decision(decision_graph, prng)
decision.update(constants)
@ -321,46 +323,46 @@ def _main():
except KeyError:
headers = {}
print>>VERBOSE, "%r %r" %(method[:100], path[:100])
for h, v in headers.iteritems():
print>>VERBOSE, "%r: %r" %(h[:50], v[:50])
print>>VERBOSE, "%r\n" % body[:100]
print("%r %r" %(method[:100], path[:100]), file=VERBOSE)
for h, v in headers.items():
print("%r: %r" %(h[:50], v[:50]), file=VERBOSE)
print("%r\n" % body[:100], file=VERBOSE)
print>>DEBUG, 'FULL REQUEST'
print>>DEBUG, 'Method: %r' %method
print>>DEBUG, 'Path: %r' %path
print>>DEBUG, 'Headers:'
for h, v in headers.iteritems():
print>>DEBUG, "\t%r: %r" %(h, v)
print>>DEBUG, 'Body: %r\n' %body
print('FULL REQUEST', file=DEBUG)
print('Method: %r' %method, file=DEBUG)
print('Path: %r' %path, file=DEBUG)
print('Headers:', file=DEBUG)
for h, v in headers.items():
print("\t%r: %r" %(h, v), file=DEBUG)
print('Body: %r\n' %body, file=DEBUG)
failed = False # Let's be optimistic, shall we?
try:
response = s3_connection.make_request(method, path, data=body, headers=headers, override_num_retries=1)
body = response.read()
except BotoServerError, e:
except BotoServerError as e:
response = e
body = e.body
failed = True
except BadStatusLine, e:
print>>OUT, 'FAILED: failed to parse response (BadStatusLine); probably a NUL byte in your request?'
print>>VERBOSE, '='*80
except BadStatusLine as e:
print('FAILED: failed to parse response (BadStatusLine); probably a NUL byte in your request?', file=OUT)
print('='*80, file=VERBOSE)
continue
if failed:
print>>OUT, 'FAILED:'
print('FAILED:', file=OUT)
OLD_VERBOSE = VERBOSE
OLD_DEBUG = DEBUG
VERBOSE = DEBUG = OUT
print>>VERBOSE, 'Seed was: %r' %request_seed
print>>VERBOSE, 'Response status code: %d %s' %(response.status, response.reason)
print>>DEBUG, 'Body:\n%s' %body
print>>VERBOSE, '='*80
print('Seed was: %r' %request_seed, file=VERBOSE)
print('Response status code: %d %s' %(response.status, response.reason), file=VERBOSE)
print('Body:\n%s' %body, file=DEBUG)
print('='*80, file=VERBOSE)
if failed:
VERBOSE = OLD_VERBOSE
DEBUG = OLD_DEBUG
print>>OUT, '...done fuzzing'
print('...done fuzzing', file=OUT)
if options.cleanup:
common.teardown()

View file

@ -25,6 +25,7 @@ from nose.tools import assert_true
from nose.plugins.attrib import attr
from ...functional.utils import assert_raises
from functools import reduce
_decision_graph = {}
@ -173,21 +174,21 @@ def test_expand_random_binary():
def test_expand_random_printable_no_whitespace():
prng = random.Random(1)
for _ in xrange(1000):
for _ in range(1000):
got = expand({}, '{random 500 printable_no_whitespace}', prng)
assert_true(reduce(lambda x, y: x and y, [x not in string.whitespace and x in string.printable for x in got]))
def test_expand_random_binary_no_whitespace():
prng = random.Random(1)
for _ in xrange(1000):
for _ in range(1000):
got = expand({}, '{random 500 binary_no_whitespace}', prng)
assert_true(reduce(lambda x, y: x and y, [x not in string.whitespace for x in got]))
def test_expand_random_no_args():
prng = random.Random(1)
for _ in xrange(1000):
for _ in range(1000):
got = expand({}, '{random}', prng)
assert_true(0 <= len(got) <= 1000)
assert_true(reduce(lambda x, y: x and y, [x in string.printable for x in got]))
@ -195,7 +196,7 @@ def test_expand_random_no_args():
def test_expand_random_no_charset():
prng = random.Random(1)
for _ in xrange(1000):
for _ in range(1000):
got = expand({}, '{random 10-30}', prng)
assert_true(10 <= len(got) <= 30)
assert_true(reduce(lambda x, y: x and y, [x in string.printable for x in got]))
@ -203,7 +204,7 @@ def test_expand_random_no_charset():
def test_expand_random_exact_length():
prng = random.Random(1)
for _ in xrange(1000):
for _ in range(1000):
got = expand({}, '{random 10 digits}', prng)
assert_true(len(got) == 10)
assert_true(reduce(lambda x, y: x and y, [x in string.digits for x in got]))
@ -300,9 +301,9 @@ def test_weighted_choices():
prng = random.Random(1)
choices_made = {}
for _ in xrange(1000):
for _ in range(1000):
choice = make_choice(graph['weighted_node']['choices'], prng)
if choices_made.has_key(choice):
if choice in choices_made:
choices_made[choice] += 1
else:
choices_made[choice] = 1
@ -344,9 +345,9 @@ def test_weighted_set():
prng = random.Random(1)
choices_made = {}
for _ in xrange(1000):
for _ in range(1000):
choice = make_choice(graph['weighted_node']['set']['k1'], prng)
if choices_made.has_key(choice):
if choice in choices_made:
choices_made[choice] += 1
else:
choices_made[choice] = 1
@ -392,7 +393,7 @@ def test_expand_headers():
decision = descend_graph(graph, 'node1', prng)
expanded_headers = expand_headers(decision, prng)
for header, value in expanded_headers.iteritems():
for header, value in expanded_headers.items():
if header == 'my-header':
assert_true(value in ['h1', 'h2', 'h3'])
elif header.startswith('random-header-'):

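These test changes are the same three substitutions throughout: `xrange()` is gone (use `range()`), `dict.has_key(k)` is gone (use `k in dict`), and `iteritems()` became `items()`. A small self-contained sketch of all three together; the dictionary, charset, and loop count here are invented for illustration:

import random
import string

prng = random.Random(1)
choices_made = {}
for _ in range(1000):                    # xrange() does not exist in Python 3
    choice = prng.choice(string.ascii_lowercase)
    if choice in choices_made:           # has_key() was removed; use "in"
        choices_made[choice] += 1
    else:
        choices_made[choice] = 1

for key, value in choices_made.items():  # iteritems() became items()
    print('%s: %d' % (key, value))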
View file

@ -27,7 +27,7 @@ def get_random_files(quantity, mean, stddev, seed):
list of file handles
"""
file_generator = realistic.files(mean, stddev, seed)
return [file_generator.next() for _ in xrange(quantity)]
return [next(file_generator) for _ in range(quantity)]
def upload_objects(bucket, files, seed):
@ -43,9 +43,9 @@ def upload_objects(bucket, files, seed):
name_generator = realistic.names(15, 4, seed=seed)
for fp in files:
print >> sys.stderr, 'sending file with size %dB' % fp.size
print('sending file with size %dB' % fp.size, file=sys.stderr)
key = Key(bucket)
key.key = name_generator.next()
key.key = next(name_generator)
key.set_contents_from_file(fp, rewind=True)
key.set_acl('public-read')
keys.append(key)
@ -94,18 +94,18 @@ def _main():
bucket.set_acl('public-read')
keys = []
print >> OUTFILE, 'bucket: %s' % bucket.name
print >> sys.stderr, 'setup complete, generating files'
print('bucket: %s' % bucket.name, file=OUTFILE)
print('setup complete, generating files', file=sys.stderr)
for profile in common.config.file_generation.groups:
seed = random.random()
files = get_random_files(profile[0], profile[1], profile[2], seed)
keys += upload_objects(bucket, files, seed)
print >> sys.stderr, 'finished sending files. generating urls'
print('finished sending files. generating urls', file=sys.stderr)
for key in keys:
print >> OUTFILE, key.generate_url(0, query_auth=False)
print(key.generate_url(0, query_auth=False), file=OUTFILE)
print >> sys.stderr, 'done'
print('done', file=sys.stderr)
def main():

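Another conversion in this file is the generator protocol: the `.next()` method was removed in Python 3, so `file_generator.next()` and `name_generator.next()` become calls to the built-in `next()`. A toy sketch, with a trivial generator standing in for realistic.names():

def names(prefix, count):
    # toy stand-in for realistic.names(); yields deterministic names
    for i in range(count):
        yield '%s-%d' % (prefix, i)

name_generator = names('obj', 3)
first = next(name_generator)                      # was name_generator.next()
rest = [next(name_generator) for _ in range(2)]
print(first, rest)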
View file

@ -11,8 +11,8 @@ import traceback
import random
import yaml
import realistic
import common
from . import realistic
from . import common
NANOSECOND = int(1e9)
@ -57,7 +57,7 @@ def reader(bucket, worker_id, file_names, queue, rand):
traceback=traceback.format_exc(),
),
)
print "ERROR:", m
print("ERROR:", m)
else:
elapsed = end - start
result.update(
@ -158,16 +158,16 @@ def main():
for name in ['names', 'contents', 'writer', 'reader']:
seeds.setdefault(name, rand.randrange(2**32))
print 'Using random seeds: {seeds}'.format(seeds=seeds)
print('Using random seeds: {seeds}'.format(seeds=seeds))
# setup bucket and other objects
bucket_name = common.choose_bucket_prefix(config.readwrite.bucket, max_len=30)
bucket = conn.create_bucket(bucket_name)
print "Created bucket: {name}".format(name=bucket.name)
print("Created bucket: {name}".format(name=bucket.name))
# check flag for deterministic file name creation
if not config.readwrite.get('deterministic_file_names'):
print 'Creating random file names'
print('Creating random file names')
file_names = realistic.names(
mean=15,
stddev=4,
@ -176,9 +176,9 @@ def main():
file_names = itertools.islice(file_names, config.readwrite.files.num)
file_names = list(file_names)
else:
print 'Creating file names that are deterministic'
print('Creating file names that are deterministic')
file_names = []
for x in xrange(config.readwrite.files.num):
for x in range(config.readwrite.files.num):
file_names.append('test_file_{num}'.format(num=x))
files = realistic.files2(
@ -191,7 +191,7 @@ def main():
# warmup - get initial set of files uploaded if there are any writers specified
if config.readwrite.writers > 0:
print "Uploading initial set of {num} files".format(num=config.readwrite.files.num)
print("Uploading initial set of {num} files".format(num=config.readwrite.files.num))
warmup_pool = gevent.pool.Pool(size=100)
for file_name in file_names:
fp = next(files)
@ -204,15 +204,15 @@ def main():
warmup_pool.join()
# main work
print "Starting main worker loop."
print "Using file size: {size} +- {stddev}".format(size=config.readwrite.files.size, stddev=config.readwrite.files.stddev)
print "Spawning {w} writers and {r} readers...".format(w=config.readwrite.writers, r=config.readwrite.readers)
print("Starting main worker loop.")
print("Using file size: {size} +- {stddev}".format(size=config.readwrite.files.size, stddev=config.readwrite.files.stddev))
print("Spawning {w} writers and {r} readers...".format(w=config.readwrite.writers, r=config.readwrite.readers))
group = gevent.pool.Group()
rand_writer = random.Random(seeds['writer'])
# Don't create random files if deterministic_file_names is set and true
if not config.readwrite.get('deterministic_file_names'):
for x in xrange(config.readwrite.writers):
for x in range(config.readwrite.writers):
this_rand = random.Random(rand_writer.randrange(2**32))
group.spawn(
writer,
@ -229,7 +229,7 @@ def main():
# this loop needs no additional qualifiers. If zero readers are specified,
# it will behave as expected (no data is read)
rand_reader = random.Random(seeds['reader'])
for x in xrange(config.readwrite.readers):
for x in range(config.readwrite.readers):
this_rand = random.Random(rand_reader.randrange(2**32))
group.spawn(
reader,
@ -246,7 +246,7 @@ def main():
# wait for all the tests to finish
group.join()
print 'post-join, queue size {size}'.format(size=q.qsize())
print('post-join, queue size {size}'.format(size=q.qsize()))
if q.qsize() > 0:
for temp_dict in q:

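Besides the print and xrange changes, this file switches from Python 2's implicit relative imports to the explicit `from . import ...` form, which is the only spelling Python 3 accepts for intra-package imports. A sketch of the layout this assumes (directory and module names are illustrative); note the relative form only resolves when the module is imported as part of the package, e.g. via `python -m s3tests.readwrite`, not when run as a loose script:

# s3tests/
#     __init__.py
#     common.py
#     realistic.py
#     readwrite.py   <- this module
#
# Python 2 also accepted the implicit form "import realistic"; Python 3
# requires the relative import to be spelled out:
from . import realistic
from . import common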
View file

@ -47,9 +47,9 @@ class FileValidator(object):
self.original_hash, binary = contents[-40:], contents[:-40]
self.new_hash = hashlib.sha1(binary).hexdigest()
if not self.new_hash == self.original_hash:
print 'original hash: ', self.original_hash
print 'new hash: ', self.new_hash
print 'size: ', self._file.tell()
print('original hash: ', self.original_hash)
print('new hash: ', self.new_hash)
print('size: ', self._file.tell())
return False
return True
@ -115,7 +115,7 @@ class RandomContentFile(object):
size = min(self.size, 1*1024*1024) # generate at most 1 MB at a time
chunks = int(math.ceil(size/8.0)) # number of 8-byte chunks to create
l = [self.random.getrandbits(64) for _ in xrange(chunks)]
l = [self.random.getrandbits(64) for _ in range(chunks)]
s = struct.pack(chunks*'Q', *l)
return s
@ -252,7 +252,7 @@ def files2(mean, stddev, seed=None, numfiles=10):
"""
# pre-compute all the files (and save with TemporaryFiles)
fs = []
for _ in xrange(numfiles):
for _ in range(numfiles):
t = tempfile.SpooledTemporaryFile()
t.write(generate_file_contents(random.normalvariate(mean, stddev)))
t.seek(0)
@ -277,5 +277,5 @@ def names(mean, stddev, charset=None, seed=None):
length = int(rand.normalvariate(mean, stddev))
if length > 0:
break
name = ''.join(rand.choice(charset) for _ in xrange(length))
name = ''.join(rand.choice(charset) for _ in range(length))
yield name

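The payload generator touched above fills files with random 64-bit words via struct.pack; under Python 3 the packed result is bytes rather than str, which is what the file-like consumers of these chunks expect. A standalone sketch of the technique (the size and seed are arbitrary):

import math
import random
import struct

def random_chunk(rng, size):
    # Round the requested size up to whole 8-byte words, then pack that many
    # 64-bit random integers; struct.pack('Q', ...) emits 8 bytes per value.
    chunks = int(math.ceil(size / 8.0))
    words = [rng.getrandbits(64) for _ in range(chunks)]
    return struct.pack(chunks * 'Q', *words)

rng = random.Random(1)
payload = random_chunk(rng, 20)
print(type(payload), len(payload))   # <class 'bytes'> 24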
View file

@ -11,8 +11,8 @@ import traceback
import random
import yaml
import realistic
import common
from . import realistic
from . import common
NANOSECOND = int(1e9)
@ -141,12 +141,12 @@ def main():
for name in ['names', 'contents', 'writer', 'reader']:
seeds.setdefault(name, rand.randrange(2**32))
print 'Using random seeds: {seeds}'.format(seeds=seeds)
print('Using random seeds: {seeds}'.format(seeds=seeds))
# setup bucket and other objects
bucket_name = common.choose_bucket_prefix(config.roundtrip.bucket, max_len=30)
bucket = conn.create_bucket(bucket_name)
print "Created bucket: {name}".format(name=bucket.name)
print("Created bucket: {name}".format(name=bucket.name))
objnames = realistic.names(
mean=15,
stddev=4,
@ -163,10 +163,10 @@ def main():
logger_g = gevent.spawn(yaml.safe_dump_all, q, stream=real_stdout)
print "Writing {num} objects with {w} workers...".format(
print("Writing {num} objects with {w} workers...".format(
num=config.roundtrip.files.num,
w=config.roundtrip.writers,
)
))
pool = gevent.pool.Pool(size=config.roundtrip.writers)
start = time.time()
for objname in objnames:
@ -186,10 +186,10 @@ def main():
duration=int(round(elapsed * NANOSECOND)),
))
print "Reading {num} objects with {w} workers...".format(
print("Reading {num} objects with {w} workers...".format(
num=config.roundtrip.files.num,
w=config.roundtrip.readers,
)
))
# avoid accessing them in the same order as the writing
rand.shuffle(objnames)
pool = gevent.pool.Pool(size=config.roundtrip.readers)

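One subtlety in this hunk: when the old multi-line `print ...format(` statement gains the surrounding `print(...)` call, the closing parenthesis of format() has to stay inside it, which is why the lone `)` becomes `))`. A minimal sketch with placeholder values:

num, w = 10, 3
# the inner ")" closes .format(), the outer one closes print()
print("Writing {num} objects with {w} workers...".format(
    num=num,
    w=w,
))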
View file

@ -16,7 +16,7 @@ setup(
'boto >=2.0b4',
'boto3 >=1.0.0',
'PyYAML',
'bunch >=1.0.0',
'munch >=2.0.0',
'gevent >=1.0',
'isodate >=0.4.4',
],
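The bunch dependency is replaced by munch here and in requirements.txt; munch is the maintained fork and keeps the same attribute-style access over dicts, so configuration objects built from YAML keep working unchanged. A small usage sketch with invented config keys:

from munch import Munch

config = Munch()
config.readwrite = Munch(writers=3, readers=10)
# attribute access and mapping access are interchangeable, as with bunch
print(config.readwrite.writers, config['readwrite']['readers'])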