pep8 cleanup.

Tommi Virtanen 2011-07-19 09:09:51 -07:00
parent e0575e4efe
commit fe749adebf
6 changed files with 21 additions and 23 deletions

@@ -13,11 +13,11 @@ prefix = ''
 # For those scripts that use a context, these are pretty univerally needed.
 context = bunch.Bunch(
-    bucket = None,
+    bucket=None,
     # Save stdout/stderr in case they get fudged with.
-    real_stdout = sys.stdout,
-    real_stderr = sys.stderr,
+    real_stdout=sys.stdout,
+    real_stderr=sys.stderr,
 )
 bucket_counter = itertools.count(1)
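
Nearly every hunk in this commit applies the same PEP 8 rule, reported by the
pep8 tool as E251: no spaces around '=' when it binds a keyword argument. A
minimal before/after sketch, reusing the names from the hunk above:

    import bunch

    # E251: unexpected spaces around keyword '=' (the old style)
    context = bunch.Bunch(
        bucket = None,
    )

    # PEP 8 style: keyword arguments bind with no surrounding spaces
    context = bunch.Bunch(
        bucket=None,
    )
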
@@ -56,7 +56,7 @@ def nuke_prefixed_buckets():
         try:
             bucket.set_canned_acl('private')
             # TODO: deleted_cnt and the while loop is a work around for rgw
             # not sending the
             deleted_cnt = 1
             while deleted_cnt:
                 deleted_cnt = 0
@@ -181,4 +181,3 @@ def fill_pools(*args):
 def get_next_key(bucket=None):
     return bucket.new_key("seqkey-{num}".format(num=next(key_counter)))
-
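
The -181,4 +181,3 hunk drops one line and adds none: almost certainly the
blank line at the end of the file, which the pep8 tool flags as W391.
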
@@ -11,9 +11,9 @@ from ..realistic import FileVerifier
 # Make sure context has somewhere to store what we need
 context.update(bunch.Bunch(
-    needs_first_read = collections.deque(),
-    all_keys = [],
-    files_iter = None,
+    needs_first_read=collections.deque(),
+    all_keys=[],
+    files_iter=None,
 ))

@@ -8,7 +8,7 @@ import yaml
 from ..common import context
 context.update(bunch.Bunch(
-    result_queue = collections.deque(),
+    result_queue=collections.deque(),
 ))
@@ -108,4 +108,3 @@ class ResultsLogger(gevent.Greenlet):
         result = context.result_queue.popleft()
         yrep = yaml.dump(result)
         self.outfile.write(yrep + "---\n")
-

@@ -44,7 +44,7 @@ class Result:
             size=self.size,
             time=self.time,
             mbps=mbps,
-            details=self.details
+            details=self.details,
             )

 def reader(seconds, bucket, name=None, queue=None):
@@ -144,11 +144,11 @@ def main():
             file_size=options.file_size,
             file_stddev=options.stddev,
             quantity=options.quantity,
-            file_name_seed=r
+            file_name_seed=r,
             ) for x in xrange(options.num_writers)]
     greenlets += [gevent.spawn(reader, options.duration, bucket,
             name=x,
-            queue=q
+            queue=q,
             ) for x in xrange(options.num_readers)]
     gevent.spawn_later(options.duration, lambda: q.put(StopIteration))
@@ -177,19 +177,19 @@ def main():
     print "--- Stats ---"
     print "Total Read: {read} MB ({mbps} MB/s)".format(
         read=(total_read/1024.0),
-        mbps=(total_read/1024.0/options.duration)
+        mbps=(total_read/1024.0/options.duration),
         )
     print "Total Write: {write} MB ({mbps} MB/s)".format(
         write=(total_write/1024.0),
-        mbps=(total_write/1024.0/options.duration)
+        mbps=(total_write/1024.0/options.duration),
         )
     print "Read filures: {num} ({percent}%)".format(
         num=read_failure,
-        percent=(100.0*read_failure/max(read_failure+read_success, 1))
+        percent=(100.0*read_failure/max(read_failure+read_success, 1)),
         )
     print "Write failures: {num} ({percent}%)".format(
         num=write_failure,
-        percent=(100.0*write_failure/max(write_failure+write_success, 1))
+        percent=(100.0*write_failure/max(write_failure+write_success, 1)),
         )

     gevent.joinall(greenlets, timeout=1)
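
The other recurring change in this file is a trailing comma after the last
keyword argument of a multi-line call. A small sketch, with hypothetical
values, of why that style helps:

    # With a trailing comma, appending another argument later touches only
    # one line in a diff, and every argument line has the same shape.
    stats = dict(
        read=12.5,
        write=100.0,
        mbps=0.4,
    )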

@@ -165,13 +165,13 @@ def files_varied(groups, unlimited=False):
     # Build the sets for our yield
     for num, size, stddev in groups:
-        assert num and size
+        assert num and size #TODO
         file_sets.append(bunch.Bunch(
             num = num,
             size = size,
             stddev = stddev,
-            files = files(size, stddev, time.time())
+            files = files(size, stddev, time.time()),
             ))
         total_num += num
@@ -199,4 +199,4 @@ def files_varied(groups, unlimited=False):
                 yield next(file_set.files)
     if not ok:
-        raise RuntimeError, "Couldn't find a match."
+        raise RuntimeError("Couldn't find a match.")
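
This last change is more than style: the comma form of raise (pep8's W602)
is a syntax error in Python 3, while the call form works in both Python 2
and 3. A minimal sketch:

    # Old statement form, Python 2 only:
    #     raise RuntimeError, "Couldn't find a match."
    try:
        raise RuntimeError("Couldn't find a match.")
    except RuntimeError as err:
        print err  # prints: Couldn't find a match.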

@@ -17,16 +17,16 @@ from common.results import ResultsLogger
 # Set up the common context to use our information. Wee.
 context.update(bunch.Bunch(
     # Set to False when it's time to exit main loop.
-    running = True,
+    running=True,
     # The pools our tasks run in.
-    greenlet_pools = bunch.Bunch(
+    greenlet_pools=bunch.Bunch(
         writer=None,
         reader=None,
     ),
     # The greenlet that keeps logs going.
-    results_logger = None,
+    results_logger=None,
 ))
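
A note on the bunch package used throughout these files: bunch.Bunch is a
dict subclass whose keys double as attributes, which is why the shared
context can be built from keyword arguments and later read as, say,
context.running. A short usage sketch, assuming the bunch package is
installed:

    import bunch

    context = bunch.Bunch(
        running=True,
        results_logger=None,
    )
    print context.running      # attribute access: True
    print context['running']   # still an ordinary dict lookup: True
    context.update(bunch.Bunch(running=False))  # merge more shared state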