Move end test logic out of individual workers.
The parent waits for the test duration anyway, so just have it unconditionally kill the workers when time is up. The workers already had timeouts aborting them; they may be killed in the middle of an operation, but that doesn't matter, because time is up either way. This also avoids oddities with the queue, where items could show up after the StopIteration sentinel, and it avoids workers potentially not exiting within the 1-second join timeout the old code had.
parent: dff6b568e4
commit: 71cfd4956c

1 changed file with 49 additions and 49 deletions
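As a rough illustration of the shutdown pattern this commit moves to, the standalone sketch below (not code from the repository; worker(), run(), and all timing values are made-up placeholders) shows a parent greenlet that owns the test duration: workers loop forever and report into a queue, while a single stop() callback scheduled with gevent.spawn_later() kills the whole gevent.pool.Group and only then enqueues the StopIteration sentinel.

# Minimal sketch (not the repository's code): worker(), run(), and all
# timing values here are illustrative placeholders.
import gevent
import gevent.pool
import gevent.queue


def worker(name, queue):
    # Stand-in for reader()/writer(): loop forever and report results.
    # The worker no longer watches the clock; the parent ends the test.
    n = 0
    while True:
        gevent.sleep(0.1)      # pretend to perform one operation
        n += 1
        queue.put((name, n))   # report a result to the parent


def run(duration=2, num_workers=3):
    q = gevent.queue.Queue()
    group = gevent.pool.Group()
    for x in range(num_workers):
        group.spawn(worker, x, q)

    def stop():
        # Time is up: kill every worker, even mid-operation.
        group.kill(block=True)
        # Only after all workers are dead does the sentinel go on the
        # queue, so no result can ever be queued behind it.
        q.put(StopIteration)

    gevent.spawn_later(duration, stop)

    results = 0
    while True:
        item = q.get()
        if item is StopIteration:
            break
        results += 1
    print('collected %d results' % results)


if __name__ == '__main__':
    run()

Because the sentinel is queued only after group.kill(block=True) returns, the consuming loop can treat StopIteration as a hard end-of-results marker, which is exactly the queue oddity the commit message calls out.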
@@ -48,8 +48,7 @@ class Result:
             details=self.details,
             )
 
-def reader(seconds, bucket, name=None, queue=None):
-    with gevent.Timeout(seconds, False):
-        while (1):
-            count = 0
-            for key in bucket.list():
+def reader(bucket, name=None, queue=None):
+    while (1):
+        count = 0
+        for key in bucket.list():
@@ -72,8 +71,7 @@ def reader(seconds, bucket, name=None, queue=None):
-            if count == 0:
-                gevent.sleep(1)
+        if count == 0:
+            gevent.sleep(1)
 
-def writer(seconds, bucket, name=None, queue=None, quantity=1, file_size=1, file_stddev=0, file_name_seed=None):
-    with gevent.Timeout(seconds, False):
-        while (1):
-            r = random.randint(0, 65535)
-            r2 = r
+def writer(bucket, name=None, queue=None, quantity=1, file_size=1, file_stddev=0, file_name_seed=None):
+    while (1):
+        r = random.randint(0, 65535)
+        r2 = r
@@ -140,7 +138,7 @@ def main():
         print "Spawning {r} readers and {w} writers...".format(r=options.num_readers, w=options.num_writers)
         group = gevent.pool.Group()
         for x in xrange(options.num_writers):
-            group.spawn(writer, options.duration, bucket,
+            group.spawn(writer, bucket,
                 name=x,
                 queue=q,
                 file_size=options.file_size,
@@ -149,11 +147,14 @@ def main():
                 file_name_seed=r,
                 )
         for x in xrange(options.num_readers):
-            group.spawn(reader, options.duration, bucket,
+            group.spawn(reader, bucket,
                 name=x,
                 queue=q,
                 )
-        gevent.spawn_later(options.duration, lambda: q.put(StopIteration))
+        def stop():
+            group.kill(block=True)
+            q.put(StopIteration)
+        gevent.spawn_later(options.duration, stop)
 
         total_read = 0
         total_write = 0
@@ -195,7 +196,6 @@ def main():
             percent=(100.0*write_failure/max(write_failure+write_success, 1)),
             )
 
-        group.join(timeout=1)
     except Exception as e:
         print e
     finally: