Whitespace cleanup.

Tommi Virtanen 2011-07-11 13:19:54 -07:00
parent e91a3aea66
commit 3fe595abd4
6 changed files with 58 additions and 52 deletions

@@ -1,5 +1,5 @@
 from boto.auth_handler import AuthHandler

 class AnonymousAuthHandler(AuthHandler):
     def add_auth(self, http_request, **kwargs):
         return # Nothing to do for anonymous access!
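
Note on the file above: add_auth() is boto's per-request signing hook, so a handler whose add_auth() does nothing sends the request unsigned, i.e. anonymously. A minimal sketch of wiring it up by hand, assuming boto's connection objects keep their negotiated handler on the _auth_handler attribute (the empty credentials are placeholders):

    import boto.s3.connection

    conn = boto.s3.connection.S3Connection(
        aws_access_key_id='',
        aws_secret_access_key='',
        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
    )
    # Swap in the no-op handler so requests go out unsigned; the
    # host/config/provider arguments are unused by this handler.
    conn._auth_handler = AnonymousAuthHandler(None, None, None)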

@@ -15,7 +15,7 @@ bucket_counter = itertools.count(1)
 def choose_bucket_prefix(template, max_len=30):
     """
     Choose a prefix for our test buckets, so they're easy to identify.

     Use template and feed it more and more random filler, until it's
     as long as possible but still below max_len.
     """
@@ -23,13 +23,13 @@ def choose_bucket_prefix(template, max_len=30):
         random.choice(string.ascii_lowercase + string.digits)
         for c in range(255)
         )

     while rand:
         s = template.format(random=rand)
         if len(s) <= max_len:
             return s
         rand = rand[:-1]

     raise RuntimeError(
         'Bucket prefix template is impossible to fulfill: {template!r}'.format(
             template=template,
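
As a concrete illustration of the loop above (the template here is hypothetical, not the suite's default):

    choose_bucket_prefix(template='test-{random}-', max_len=30)
    # -> e.g. 'test-ysg6b29mat0zieyhnp5c3lq7-'

The 255 random characters are trimmed from the right, one at a time, until the formatted result first fits max_len, so the returned prefix is as long and as unique as the limit allows.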
@@ -64,14 +64,14 @@ def nuke_prefixed_buckets():
                 raise
             # seems like we're not the owner of the bucket; ignore
             pass

     print 'Done with cleanup of test buckets.'

 def setup():
     global s3, config, prefix
     s3.clear()
     config.clear()

     try:
         path = os.environ['S3TEST_CONF']
     except KeyError:
@@ -83,7 +83,7 @@ def setup():
         g = yaml.safe_load_all(f)
         for new in g:
             config.update(bunch.bunchify(new))

     # These 3 should always be present.
     if not config.has_key('s3'):
         raise RuntimeError('Your config file is missing the s3 section!');
@@ -91,7 +91,7 @@ def setup():
         raise RuntimeError('Your config file is missing the s3.defaults section!');
     if not config.has_key('fixtures'):
         raise RuntimeError('Your config file is missing the fixtures section!');

     if config.fixtures.has_key('bucket prefix'):
         template = config.fixtures['bucket prefix']
     else:
@@ -99,40 +99,40 @@ def setup():
     prefix = choose_bucket_prefix(template=template)
     if prefix == '':
         raise RuntimeError, "Empty Prefix! Aborting!"

     defaults = config.s3.defaults
     for section in config.s3.keys():
         if section == 'defaults':
             continue

         section_config = config.s3[section]
         kwargs = bunch.Bunch()
         conn_args = bunch.Bunch(
-            port = 'port',
-            host = 'host',
-            is_secure = 'is_secure',
-            access_key = 'aws_access_key_id',
-            secret_key = 'aws_secret_access_key',
+            port='port',
+            host='host',
+            is_secure='is_secure',
+            access_key='aws_access_key_id',
+            secret_key='aws_secret_access_key',
             )
         for cfg_key in conn_args.keys():
             conn_key = conn_args[cfg_key]
             if section_config.has_key(cfg_key):
                 kwargs[conn_key] = section_config[cfg_key]
             elif defaults.has_key(cfg_key):
                 kwargs[conn_key] = defaults[cfg_key]

         conn = boto.s3.connection.S3Connection(
             # TODO support & test all variations
             calling_format=boto.s3.connection.OrdinaryCallingFormat(),
             **kwargs
             )
         s3[section] = conn

     # WARNING! we actively delete all buckets we see with the prefix
     # we've chosen! Choose your prefix with care, and don't reuse
     # credentials!

     # We also assume nobody else is going to use buckets with that
     # prefix. This is racy but given enough randomness, should not
     # really fail.
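
The conn_args mapping above renames config-file keys to S3Connection keyword arguments, with per-section values overriding config.s3.defaults. A YAML config shaped roughly like this (hostname and keys are placeholders) would yield an s3['main'] connection:

    s3:
      defaults:
        host: s3.example.com
        port: 80
        is_secure: no
      main:
        access_key: AKIAEXAMPLEKEY
        secret_key: examplesecretkey
    fixtures:
      bucket prefix: 'test-{random}-'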
@@ -141,7 +141,7 @@ def setup():
 def get_new_bucket(connection=None):
     """
     Get a bucket that exists and is empty.

     Always recreates a bucket from scratch. This is useful to also
     reset ACLs and such.
     """
@@ -158,5 +158,4 @@ def get_new_bucket(connection=None):
     return bucket

 def teardown():
     nuke_prefixed_buckets()
-

@@ -11,8 +11,8 @@ import sys
 def parse_opts():
     parser = OptionParser();

-    parser.add_option('-O' , '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE')
-    parser.add_option('-b' , '--bucket', dest='bucket', help='push objects to BUCKET', metavar='BUCKET')
+    parser.add_option('-O', '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE')
+    parser.add_option('-b', '--bucket', dest='bucket', help='push objects to BUCKET', metavar='BUCKET')
     parser.add_option('--seed', dest='seed', help='optional seed for the random number generator')

     return parser.parse_args()
@@ -42,7 +42,7 @@ def upload_objects(bucket, files, seed):
     list of boto S3 key objects
     """
     keys = []
-    name_generator = realistic.names(15, 4,seed=seed)
+    name_generator = realistic.names(15, 4, seed=seed)

     for fp in files:
         print >> sys.stderr, 'sending file with size %dB' % fp.size
@@ -113,4 +113,3 @@ if __name__ == '__main__':
     except Exception as e:
         traceback.print_exc()
         common.teardown()
-
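
Put together, the options above give an invocation roughly like the following (bucket name and seed are placeholders; the script is the generate_objects module that the load tester in the next file imports):

    python generate_objects.py -b my-test-bucket -O objects.txt --seed 42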

@@ -12,9 +12,10 @@ import realistic
 import common

 class Result:
     TYPE_NONE = 0
     TYPE_READER = 1
     TYPE_WRITER = 2
+
     def __init__(self, name, type=TYPE_NONE, time=0, success=True, size=0, details=''):
         self.name = name
         self.type = type
@@ -24,7 +25,7 @@ class Result:
         self.details = details

     def __repr__(self):
-        type_dict = {Result.TYPE_NONE : 'None', Result.TYPE_READER : 'Reader', Result.TYPE_WRITER : 'Writer'}
+        type_dict = {Result.TYPE_NONE: 'None', Result.TYPE_READER: 'Reader', Result.TYPE_WRITER: 'Writer'}
         type_s = type_dict[self.type]
         if self.success:
             status = 'Success'
@@ -37,7 +38,7 @@ class Result:
             name=self.name,
             size=self.size,
             time=self.time,
-            mbps=(self.size/self.time/1024.0),
+            mbps=self.size / self.time / 1024.0,
             details=self.details
             )
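
A note on units: reader() and writer() below record size in kilobytes (fp.size / 1024), so size / time / 1024.0 in the mbps field above works out to megabytes per second; for example, 2048 KB moved in 4.0 seconds reports 2048 / 4.0 / 1024.0 = 0.5.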
@@ -52,13 +53,15 @@ def reader(seconds, bucket, name=None, queue=None):
             end = time.clock()
             elapsed = end - start
             if queue:
-                queue.put(Result(name,
-                    type=Result.TYPE_READER,
-                    time=elapsed,
-                    success=fp.valid(),
-                    size=(fp.size/1024)
-                    )
-                )
+                queue.put(
+                    Result(
+                        name,
+                        type=Result.TYPE_READER,
+                        time=elapsed,
+                        success=fp.valid(),
+                        size=fp.size / 1024,
+                        ),
+                    )
             count += 1
         if count == 0:
             gevent.sleep(1)
@@ -71,7 +74,12 @@ def writer(seconds, bucket, name=None, queue=None, quantity=1, file_size=1, file
         if file_name_seed != None:
             r2 = file_name_seed

-        files = generate_objects.get_random_files(quantity, 1024*file_size, 1024*file_stddev, r)
+        files = generate_objects.get_random_files(
+            quantity=quantity,
+            mean=1024 * file_size,
+            stddev=1024 * file_stddev,
+            seed=r,
+            )

         start = time.clock()
         generate_objects.upload_objects(bucket, files, r2)
@@ -79,7 +87,7 @@ def writer(seconds, bucket, name=None, queue=None, quantity=1, file_size=1, file
         elapsed = end - start

         if queue:
             queue.put(Result(name,
                 type=Result.TYPE_WRITER,
                 time=elapsed,
                 size=sum([(file.size/1024) for file in files]),
@@ -108,16 +116,16 @@ def parse_options():
     return parser.parse_args()

 def main():
     # parse options
     (options, args) = parse_options()

     try:
         # setup
         common.setup()
         bucket = common.get_new_bucket()
         print "Created bucket: {name}".format(name=bucket.name)
         r = None
         if (options.rewrite):
             r = random.randint(0, 65535)

         q = gevent.queue.Queue()
@@ -189,4 +197,3 @@ def main():

 if __name__ == "__main__":
     main()
-

@@ -95,7 +95,7 @@ def files(mean, stddev, seed=None):
     re-download the object and place the contents into a file called
     ``foo``, the following should print two identical lines:

         python -c 'import sys, hashlib; data=sys.stdin.read(); print hashlib.md5(data[:-16]).hexdigest(); print "".join("%02x" % ord(c) for c in data[-16:])' <foo

     Except for objects shorter than 16 bytes, where the second line
     will be proportionally shorter.
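
The one-liner restates the generated-object format: the last 16 bytes of each object are the raw MD5 digest of everything before them. The same check as a short Python 2 sketch (a restatement, not code from the suite):

    import hashlib

    def verify_tail(data):
        # The trailing 16 bytes must equal the MD5 digest of the payload.
        return hashlib.md5(data[:-16]).digest() == data[-16:]

    assert verify_tail(open('foo', 'rb').read())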

@@ -200,7 +200,7 @@ def check_grants(got, want):
     eq(len(got), len(want))
     got = sorted(got, key=operator.attrgetter('id'))
     want = sorted(want, key=operator.itemgetter('id'))
-    for g,w in zip(got, want):
+    for g, w in zip(got, want):
         w = dict(w)
         eq(g.permission, w.pop('permission'))
         eq(g.id, w.pop('id'))
@@ -944,7 +944,7 @@ def generate_random(mb_size):
         s = ''
         left = mb_size - x
         this_part_size = min(left, part_size_mb)
         for y in range(this_part_size * mb / chunk):
             s = s + strpart
         yield s
         if (x == mb_size):
@@ -989,9 +989,10 @@ def test_list_multipart_upload():

 def _simple_http_req_100_cont(host, port, method, resource):
     req = '{method} {resource} HTTP/1.1\r\nHost: {host}\r\nAccept-Encoding: identity\r\nContent-Length: 123\r\nExpect: 100-continue\r\n\r\n'.format(
-            method = method,
-            resource = resource,
-            host = host)
+            method=method,
+            resource=resource,
+            host=host,
+            )

     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     s.settimeout(5)
@@ -1014,7 +1015,7 @@ def _simple_http_req_100_cont(host, port, method, resource):
 def test_100_continue():
     bucket = get_new_bucket()
     objname = 'testobj'
-    resource = '/{bucket}/{obj}'.format(bucket = bucket.name, obj = objname)
+    resource = '/{bucket}/{obj}'.format(bucket=bucket.name, obj=objname)

     status = _simple_http_req_100_cont(s3.main.host, s3.main.port, 'PUT', resource)
     eq(status, '403')
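
For context: Expect: 100-continue (RFC 2616) declares a request body and lets the client wait for the server's interim '100 Continue' status before sending it. _simple_http_req_100_cont() sends only the headers and returns whichever status line comes back, so for this unauthorized PUT the server is expected to answer with a final 403 rather than the interim 100, which is what the assertion above checks.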
@@ -1075,7 +1076,7 @@ def _test_atomic_write(file_size):
     # create <file_size> file of A's
     fp_a = FakeFile(file_size, 'A')
     key.set_contents_from_file(fp_a)

     # verify A's
     _verify_atomic_key_data(key, file_size, 'A')
@@ -1102,7 +1103,7 @@ def _test_atomic_dual_write(file_size):
     bucket = get_new_bucket()
     objname = 'testobj'
     key = bucket.new_key(objname)

     # get a second key object (for the same key)
     # so both can be writing without interfering
     key2 = bucket.new_key(objname)