mirror of https://github.com/ceph/s3-tests.git

Merge remote branch 'newdream/utilities'

commit 2edd78ebbc

10 changed files with 1036 additions and 0 deletions
.gitignore (vendored, +2)

@@ -9,3 +9,5 @@
 *.pyo
 /virtualenv
+
+config.yml
common.py (new file, 162 lines)

@@ -0,0 +1,162 @@
import boto.s3.connection
import bunch
import itertools
import os
import random
import string
import yaml

s3 = bunch.Bunch()
config = bunch.Bunch()
prefix = ''

bucket_counter = itertools.count(1)


def choose_bucket_prefix(template, max_len=30):
    """
    Choose a prefix for our test buckets, so they're easy to identify.

    Use template and feed it more and more random filler, until it's
    as long as possible but still below max_len.
    """
    rand = ''.join(
        random.choice(string.ascii_lowercase + string.digits)
        for c in range(255)
        )

    while rand:
        s = template.format(random=rand)
        if len(s) <= max_len:
            return s
        rand = rand[:-1]

    raise RuntimeError(
        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
            template=template,
            ),
        )


def nuke_prefixed_buckets():
    for name, conn in s3.items():
        print 'Cleaning buckets from connection {name}'.format(name=name)
        for bucket in conn.get_all_buckets():
            if bucket.name.startswith(prefix):
                print 'Cleaning bucket {bucket}'.format(bucket=bucket)
                try:
                    bucket.set_canned_acl('private')
                    for key in bucket.list():
                        print 'Cleaning bucket {bucket} key {key}'.format(
                            bucket=bucket,
                            key=key,
                            )
                        key.set_canned_acl('private')
                        key.delete()
                    bucket.delete()
                except boto.exception.S3ResponseError as e:
                    # TODO workaround for buggy rgw that fails to send
                    # error_code, remove
                    if (e.status == 403
                        and e.error_code is None
                        and e.body == ''):
                        e.error_code = 'AccessDenied'
                    if e.error_code != 'AccessDenied':
                        print 'GOT UNWANTED ERROR', e.error_code
                        raise
                    # seems like we're not the owner of the bucket; ignore
                    pass

    print 'Done with cleanup of test buckets.'


def setup():
    global s3, config, prefix
    s3.clear()
    config.clear()

    try:
        path = os.environ['S3TEST_CONF']
    except KeyError:
        raise RuntimeError(
            'To run tests, point environment '
            + 'variable S3TEST_CONF to a config file.',
            )
    with file(path) as f:
        g = yaml.safe_load_all(f)
        for new in g:
            config.update(bunch.bunchify(new))

    # These 3 should always be present.
    if not config.has_key('s3'):
        raise RuntimeError('Your config file is missing the s3 section!')
    if not config.s3.has_key('defaults'):
        raise RuntimeError('Your config file is missing the s3.defaults section!')
    if not config.has_key('fixtures'):
        raise RuntimeError('Your config file is missing the fixtures section!')

    if config.fixtures.has_key('bucket prefix'):
        template = config.fixtures['bucket prefix']
    else:
        template = 'test-{random}-'
    prefix = choose_bucket_prefix(template=template)
    if prefix == '':
        raise RuntimeError("Empty Prefix! Aborting!")

    defaults = config.s3.defaults
    for section in config.s3.keys():
        if section == 'defaults':
            continue
        section_config = config.s3[section]

        kwargs = bunch.Bunch()
        conn_args = bunch.Bunch(
            port='port',
            host='host',
            is_secure='is_secure',
            access_key='aws_access_key_id',
            secret_key='aws_secret_access_key',
            )
        for cfg_key in conn_args.keys():
            conn_key = conn_args[cfg_key]

            if section_config.has_key(cfg_key):
                kwargs[conn_key] = section_config[cfg_key]
            elif defaults.has_key(cfg_key):
                kwargs[conn_key] = defaults[cfg_key]

        conn = boto.s3.connection.S3Connection(
            # TODO support & test all variations
            calling_format=boto.s3.connection.OrdinaryCallingFormat(),
            **kwargs
            )
        s3[section] = conn

    # WARNING! we actively delete all buckets we see with the prefix
    # we've chosen! Choose your prefix with care, and don't reuse
    # credentials!

    # We also assume nobody else is going to use buckets with that
    # prefix. This is racy but given enough randomness, should not
    # really fail.
    nuke_prefixed_buckets()


def get_new_bucket(connection=None):
    """
    Get a bucket that exists and is empty.

    Always recreates a bucket from scratch. This is useful to also
    reset ACLs and such.
    """
    if connection is None:
        connection = s3.main
    name = '{prefix}{num}'.format(
        prefix=prefix,
        num=next(bucket_counter),
        )
    # the only way for this to fail with a pre-existing bucket is if
    # someone raced us between setup nuke_prefixed_buckets and here;
    # ignore that as astronomically unlikely
    bucket = connection.create_bucket(name)
    return bucket


def teardown():
    nuke_prefixed_buckets()
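For context, a minimal sketch (not part of this commit) of how a script or
test would be expected to drive common.py; 'main' is the connection name that
get_new_bucket() assumes by default, and the key calls are plain boto APIs:

    import common

    common.setup()                    # reads S3TEST_CONF, removes stale prefixed buckets
    bucket = common.get_new_bucket()  # fresh, empty bucket on the 'main' connection
    key = bucket.new_key('hello.txt')
    key.set_contents_from_string('hello')
    common.teardown()                 # deletes everything under the chosen prefix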
config.yml.SAMPLE (new file, 59 lines)

@@ -0,0 +1,59 @@
fixtures:
    ## All the buckets created will start with this prefix;
    ## {random} will be filled with random characters to pad
    ## the prefix to 30 characters long, and avoid collisions
    bucket prefix: YOURNAMEHERE-{random}-

file_generation:
    groups:
        ## File generation works by creating N groups of files. Each group of
        ## files is defined by three elements: number of files, avg(filesize),
        ## and stddev(filesize) -- in that order.
        - [1, 2, 3]
        - [4, 5, 6]

s3:
    ## This section contains all the connection information

    defaults:
        ## This section contains the defaults for all of the other connections
        ## below. You can also place these variables directly there.

        ## Replace with e.g. "localhost" to run against local software
        host: s3.amazonaws.com

        ## Uncomment the port to use something other than 80
        # port: 8080

        ## Say "no" to disable TLS.
        is_secure: yes

    ## The tests assume two accounts are defined, "main" and "alt". You
    ## may add other connections to be instantiated as well, however
    ## any additional ones will not be used unless your tests use them.

    main:
        ## The User ID that the S3 provider gives you. For AWS, this is
        ## typically a 64-char hexstring.
        user_id: AWS_USER_ID

        ## Display name typically looks more like a unix login, "jdoe" etc
        display_name: AWS_DISPLAY_NAME

        ## The email for this account.
        email: AWS_EMAIL

        ## Replace these with your access keys.
        access_key: AWS_ACCESS_KEY
        secret_key: AWS_SECRET_KEY

    alt:
        ## Another user account, used for ACL-related tests.
        user_id: AWS_USER_ID
        display_name: AWS_DISPLAY_NAME
        email: AWS_EMAIL
        access_key: AWS_ACCESS_KEY
        secret_key: AWS_SECRET_KEY
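A note on how this file is consumed (a sketch of the setup() logic above,
assuming the file is named config.yml): the file pointed at by S3TEST_CONF may
hold several YAML documents, and each is bunchified and merged in order, so a
later document can override the top-level keys of an earlier one:

    import bunch
    import yaml

    config = bunch.Bunch()
    with open('config.yml') as f:
        for doc in yaml.safe_load_all(f):   # iterates over '---'-separated documents
            config.update(bunch.bunchify(doc))
    print config.s3.defaults.host           # bunch allows attribute-style access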
generate_objects.conf (new file, 2 lines)

@@ -0,0 +1,2 @@
- [10, 2000, 200]
generate_objects.py (new executable, 121 lines)

@@ -0,0 +1,121 @@
#! /usr/bin/python

from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from optparse import OptionParser
from realistic import RandomContentFile
import realistic
import traceback
import random
import common
import yaml
import boto
import sys


def parse_opts():
    parser = OptionParser()
    parser.add_option('-O', '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE')
    parser.add_option('-b', '--bucket', dest='bucket', help='push objects to BUCKET', metavar='BUCKET')
    parser.add_option('--seed', dest='seed', help='optional seed for the random number generator')

    return parser.parse_args()


def get_random_files(quantity, mean, stddev, seed):
    """Create file-like objects with pseudorandom contents.
    IN:
        number of files to create
        mean file size in bytes
        standard deviation from mean file size
        seed for PRNG
    OUT:
        list of file handles
    """
    file_generator = realistic.files(mean, stddev, seed)
    return [file_generator.next() for _ in xrange(quantity)]


def upload_objects(bucket, files, seed):
    """Upload a bunch of files to an S3 bucket.
    IN:
        boto S3 bucket object
        list of file handles to upload
        seed for PRNG
    OUT:
        list of boto S3 key objects
    """
    keys = []
    name_generator = realistic.names(15, 4, seed=seed)

    for fp in files:
        print >> sys.stderr, 'sending file with size %dB' % fp.size
        key = Key(bucket)
        key.key = name_generator.next()
        key.set_contents_from_file(fp)
        keys.append(key)

    return keys


def main():
    '''To run the static content load test, make sure you've bootstrapped your
    test environment and set up your config.yml file, then run the following:
        S3TEST_CONF=config.yml virtualenv/bin/python generate_objects.py -O urls.txt --seed 1234

    This creates a bucket with your S3 credentials (from config.yml) and
    fills it with garbage objects as described in generate_objects.conf.
    It writes a list of URLs to those objects to ./urls.txt.

    Once you have objects in your bucket, run the siege benchmarking program:
        siege -rc ./siege.conf -r 5

    This tells siege to read the ./siege.conf config file, which tells it to
    use the urls in ./urls.txt and log to ./siege.log. It hits each url in
    urls.txt 5 times (-r flag).

    Results are printed to the terminal and written in CSV format to
    ./siege.log.
    '''
    (options, args) = parse_opts()

    # SETUP
    random.seed(options.seed if options.seed else None)
    conn = common.s3.main

    if options.outfile:
        OUTFILE = open(options.outfile, 'w')
    elif common.config.file_generation.url_file:
        OUTFILE = open(common.config.file_generation.url_file, 'w')
    else:
        OUTFILE = sys.stdout

    if options.bucket:
        bucket = conn.create_bucket(options.bucket)
    else:
        bucket = common.get_new_bucket()

    keys = []
    print >> OUTFILE, 'bucket: %s' % bucket.name
    print >> sys.stderr, 'setup complete, generating files'
    for profile in common.config.file_generation.groups:
        seed = random.random()
        files = get_random_files(profile[0], profile[1], profile[2], seed)
        keys += upload_objects(bucket, files, seed)

    print >> sys.stderr, 'finished sending files. generating urls'
    for key in keys:
        print >> OUTFILE, key.generate_url(30758400)  # valid for roughly one year

    print >> sys.stderr, 'done'


if __name__ == '__main__':
    common.setup()
    try:
        main()
    except Exception as e:
        traceback.print_exc()
    common.teardown()
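The groups in generate_objects.conf use the same [number of files, mean size,
stddev] triple format documented in config.yml.SAMPLE. A sketch of how main()
above consumes one such triple (seed value arbitrary, bucket an existing boto
Bucket):

    profile = [10, 2000, 200]   # 10 files, mean 2000 bytes, stddev 200 bytes
    files = get_random_files(profile[0], profile[1], profile[2], seed=1234)
    keys = upload_objects(bucket, files, seed=1234)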
rand_readwrite.py (new executable, 192 lines)

@@ -0,0 +1,192 @@
#!/usr/bin/python

import gevent
import gevent.queue
import gevent.monkey; gevent.monkey.patch_all()
import optparse
import time
import random

import generate_objects
import realistic
import common


class Result:
    TYPE_NONE = 0
    TYPE_READER = 1
    TYPE_WRITER = 2
    def __init__(self, name, type=TYPE_NONE, time=0, success=True, size=0, details=''):
        self.name = name
        self.type = type
        self.time = time
        self.success = success
        self.size = size
        self.details = details

    def __repr__(self):
        type_dict = {Result.TYPE_NONE: 'None', Result.TYPE_READER: 'Reader', Result.TYPE_WRITER: 'Writer'}
        type_s = type_dict[self.type]
        if self.success:
            status = 'Success'
        else:
            status = 'FAILURE'

        return "<Result: [{success}] {type}{name} -- {size} KB in {time}s = {mbps} MB/s {details}>".format(
            success=status,
            type=type_s,
            name=self.name,
            size=self.size,
            time=self.time,
            mbps=(self.size/self.time/1024.0),
            details=self.details,
            )


def reader(seconds, bucket, name=None, queue=None):
    with gevent.Timeout(seconds, False):
        while (1):
            count = 0
            for key in bucket.list():
                fp = realistic.FileVerifier()
                start = time.clock()
                key.get_contents_to_file(fp)
                end = time.clock()
                elapsed = end - start
                if queue:
                    queue.put(Result(name,
                        type=Result.TYPE_READER,
                        time=elapsed,
                        success=fp.valid(),
                        size=(fp.size/1024),
                        ))
                count += 1
            if count == 0:
                gevent.sleep(1)


def writer(seconds, bucket, name=None, queue=None, quantity=1, file_size=1, file_stddev=0, file_name_seed=None):
    with gevent.Timeout(seconds, False):
        while (1):
            r = random.randint(0, 65535)
            r2 = r
            if file_name_seed != None:
                r2 = file_name_seed

            files = generate_objects.get_random_files(quantity, 1024*file_size, 1024*file_stddev, r)

            start = time.clock()
            keys = generate_objects.upload_objects(bucket, files, r2)
            end = time.clock()
            elapsed = end - start

            if queue:
                queue.put(Result(name,
                    type=Result.TYPE_WRITER,
                    time=elapsed,
                    size=sum([(file.size/1024) for file in files]),
                    ))


def parse_options():
    parser = optparse.OptionParser()
    parser.add_option("-t", "--time", dest="duration", type="float",
        help="duration to run tests (seconds)", default=5, metavar="SECS")
    parser.add_option("-r", "--read", dest="num_readers", type="int",
        help="number of reader threads", default=0, metavar="NUM")
    parser.add_option("-w", "--write", dest="num_writers", type="int",
        help="number of writer threads", default=2, metavar="NUM")
    parser.add_option("-s", "--size", dest="file_size", type="float",
        help="file size to use, in kb", default=1024, metavar="KB")
    parser.add_option("-q", "--quantity", dest="quantity", type="int",
        help="number of files per batch", default=1, metavar="NUM")
    parser.add_option("-d", "--stddev", dest="stddev", type="float",
        help="stddev of file size", default=0, metavar="KB")
    parser.add_option("-W", "--rewrite", dest="rewrite", action="store_true",
        help="rewrite the same files (total=quantity)")
    parser.add_option("--no-cleanup", dest="cleanup", action="store_false",
        help="skip cleaning up all created buckets", default=True)

    return parser.parse_args()


def main():
    # parse options
    (options, args) = parse_options()

    try:
        # setup
        common.setup()
        bucket = common.get_new_bucket()
        print "Created bucket: {name}".format(name=bucket.name)
        r = None
        if (options.rewrite):
            r = random.randint(0, 65535)
        q = gevent.queue.Queue()

        # main work
        print "Using file size: {size} +- {stddev}".format(size=options.file_size, stddev=options.stddev)
        print "Spawning {r} readers and {w} writers...".format(r=options.num_readers, w=options.num_writers)
        greenlets = []
        greenlets += [gevent.spawn(writer, options.duration, bucket,
            name=x,
            queue=q,
            file_size=options.file_size,
            file_stddev=options.stddev,
            quantity=options.quantity,
            file_name_seed=r,
            ) for x in xrange(options.num_writers)]
        greenlets += [gevent.spawn(reader, options.duration, bucket,
            name=x,
            queue=q,
            ) for x in xrange(options.num_readers)]
        gevent.spawn_later(options.duration, lambda: q.put(StopIteration))

        total_read = 0
        total_write = 0
        read_success = 0
        read_failure = 0
        write_success = 0
        write_failure = 0
        for item in q:
            print item
            if item.type == Result.TYPE_READER:
                if item.success:
                    read_success += 1
                    total_read += item.size
                else:
                    read_failure += 1
            elif item.type == Result.TYPE_WRITER:
                if item.success:
                    write_success += 1
                    total_write += item.size
                else:
                    write_failure += 1

        # overall stats
        print "--- Stats ---"
        print "Total Read: {read} MB ({mbps} MB/s)".format(
            read=(total_read/1024.0),
            mbps=(total_read/1024.0/options.duration),
            )
        print "Total Write: {write} MB ({mbps} MB/s)".format(
            write=(total_write/1024.0),
            mbps=(total_write/1024.0/options.duration),
            )
        print "Read failures: {num} ({percent}%)".format(
            num=read_failure,
            percent=(100.0*read_failure/max(read_failure+read_success, 1)),
            )
        print "Write failures: {num} ({percent}%)".format(
            num=write_failure,
            percent=(100.0*write_failure/max(write_failure+write_success, 1)),
            )

        gevent.joinall(greenlets, timeout=1)
    except Exception as e:
        print e
    finally:
        # cleanup
        if options.cleanup:
            common.teardown()


if __name__ == "__main__":
    main()
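The consumer loop in main() relies on a gevent queue idiom worth calling out:
putting StopIteration on the queue ends iteration for anyone looping over it.
A standalone sketch of the pattern (timings arbitrary):

    import gevent
    import gevent.queue

    q = gevent.queue.Queue()

    def producer():
        for i in range(3):
            q.put(i)

    gevent.spawn(producer)
    gevent.spawn_later(0.5, lambda: q.put(StopIteration))  # sentinel ends the loop
    for item in q:
        print item      # prints 0, 1, 2, then the for-loop exits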
realistic.py (new file, 113 lines)

@@ -0,0 +1,113 @@
import hashlib
import random
import string


class RandomContentFile(object):
    def __init__(self, size, seed):
        self.seed = seed
        self.random = random.Random(self.seed)
        self.offset = 0
        self.size = size
        self.hash = hashlib.md5()
        self.digest_size = self.hash.digest_size
        self.digest = None

    def seek(self, offset):
        assert offset == 0
        self.random.seed(self.seed)
        self.offset = offset

    def tell(self):
        return self.offset

    def read(self, size=-1):
        if size < 0:
            size = self.size - self.offset

        r = []

        # first serve pseudorandom payload bytes, hashing them as we go
        random_count = min(size, self.size - self.offset - self.digest_size)
        if random_count > 0:
            self.offset += random_count
            size -= random_count
            data = ''.join(chr(self.random.getrandbits(8)) for _ in xrange(random_count))
            if self.hash is not None:
                self.hash.update(data)
            r.append(data)

        # then serve the digest of the payload as the tail of the file
        digest_count = min(size, self.size - self.offset)
        if digest_count > 0:
            if self.digest is None:
                self.digest = self.hash.digest()
                self.hash = None
            self.offset += digest_count
            size -= digest_count
            data = self.digest[:digest_count]
            r.append(data)

        return ''.join(r)


class FileVerifier(object):
    def __init__(self):
        self.size = 0
        self.hash = hashlib.md5()
        self.buf = ''

    def write(self, data):
        # buffer the last digest_size bytes; hash everything before them
        self.size += len(data)
        self.buf += data
        digsz = -1*self.hash.digest_size
        new_data, self.buf = self.buf[0:digsz], self.buf[digsz:]
        self.hash.update(new_data)

    def valid(self):
        """
        Returns True if this file looks valid. The file is valid if the end
        of the file has the md5 digest for the first part of the file.
        """
        return self.buf == self.hash.digest()


def files(mean, stddev, seed=None):
    """
    Yields file-like objects with effectively random contents, where
    the size of each file follows the normal distribution with `mean`
    and `stddev`.

    Beware, the file-likeness is very shallow. You can use boto's
    `key.set_contents_from_file` to send these to S3, but they are not
    full file objects.

    The last 128 bits are the MD5 digest of the previous bytes, for
    verifying round-trip data integrity. For example, if you
    re-download the object and place the contents into a file called
    ``foo``, the following should print two identical lines:

      python -c 'import sys, hashlib; data=sys.stdin.read(); print hashlib.md5(data[:-16]).hexdigest(); print "".join("%02x" % ord(c) for c in data[-16:])' <foo

    Except for objects shorter than 16 bytes, where the second line
    will be proportionally shorter.
    """
    rand = random.Random(seed)
    while True:
        while True:
            size = int(rand.normalvariate(mean, stddev))
            if size >= 0:
                break
        yield RandomContentFile(size=size, seed=rand.getrandbits(32))


def names(mean, stddev, charset=None, seed=None):
    """
    Yields strings that are somewhat plausible as file names, where
    the length of each filename follows the normal distribution with
    `mean` and `stddev`.
    """
    if charset is None:
        charset = string.ascii_lowercase
    rand = random.Random(seed)
    while True:
        while True:
            length = int(rand.normalvariate(mean, stddev))
            if length >= 0:
                break
        name = ''.join(rand.choice(charset) for _ in xrange(length))
        yield name
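A round-trip sanity check (again a sketch, not part of the commit): generate
one file, read it out the way an upload would, and verify the digest tail:

    import realistic

    gen = realistic.files(mean=1024, stddev=128, seed=42)
    src = gen.next()
    data = src.read()           # random payload followed by its 16-byte MD5

    v = realistic.FileVerifier()
    v.write(data)
    assert v.valid()            # the trailing digest matches the body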
requirements.txt (+1)

@@ -1,3 +1,4 @@
+PyYAML
 nose >=1.0.0
 boto >=2.0b4
 bunch >=1.0.0
siege.conf (new file, 382 lines)

@@ -0,0 +1,382 @@
# Updated by Siege 2.69, May-24-2010
# Copyright 2000-2007 by Jeffrey Fulmer, et al.
#
# Siege configuration file -- edit as necessary
# For more information about configuring and running
# this program, visit: http://www.joedog.org/

#
# Variable declarations. You can set variables here
# for use in the directives below. Example:
# PROXY = proxy.joedog.org
# Reference variables inside ${} or $(), example:
# proxy-host = ${PROXY}
# You can also reference ENVIRONMENT variables without
# actually declaring them, example:
# logfile = $(HOME)/var/siege.log

#
# Signify verbose mode, true turns on verbose output
# ex: verbose = true|false
#
verbose = true

#
# CSV Verbose format: with this option, you can choose
# to format verbose output in traditional siege format
# or comma separated format. The latter will allow you
# to redirect output to a file for import into a spread
# sheet, i.e., siege > file.csv
# ex: csv = true|false (default false)
#
csv = true

#
# Full URL verbose format: By default siege displays
# the URL path and not the full URL. With this option,
# you can instruct siege to show the complete URL.
# ex: fullurl = true|false (default false)
#
# fullurl = true

#
# Display id: in verbose mode, display the siege user
# id associated with the HTTP transaction information
# ex: display-id = true|false
#
# display-id =

#
# Show logfile location. By default, siege displays the
# logfile location at the end of every run when logging.
# You can turn this message off with this directive.
# ex: show-logfile = false
#
show-logfile = true

#
# Default logging status, true turns logging on.
# ex: logging = true|false
#
logging = true

#
# Logfile, the default siege logfile is $PREFIX/var/siege.log
# This directive allows you to choose an alternative log file.
# Environment variables may be used as shown in the examples:
# ex: logfile = /home/jeff/var/log/siege.log
#     logfile = ${HOME}/var/log/siege.log
#     logfile = ${LOGFILE}
#
logfile = ./siege.log

#
# HTTP protocol. Options HTTP/1.1 and HTTP/1.0.
# Some webservers have broken implementations of the
# 1.1 protocol which skew throughput evaluations.
# If you notice some siege clients hanging for
# extended periods of time, change this to HTTP/1.0
# ex: protocol = HTTP/1.1
#     protocol = HTTP/1.0
#
protocol = HTTP/1.1

#
# Chunked encoding is required by the HTTP/1.1 protocol,
# but siege allows you to turn it off as desired.
#
# ex: chunked = true
#
chunked = true

#
# Cache revalidation.
# Siege supports cache revalidation for both ETag and
# Last-modified headers. If a copy is still fresh, the
# server responds with 304.
# HTTP/1.1 200  0.00 secs:    2326 bytes ==> /apache_pb.gif
# HTTP/1.1 304  0.00 secs:       0 bytes ==> /apache_pb.gif
# HTTP/1.1 304  0.00 secs:       0 bytes ==> /apache_pb.gif
#
# ex: cache = true
#
cache = false

#
# Connection directive. Options "close" and "keep-alive"
# Starting with release 2.57b3, siege implements persistent
# connections in accordance with RFC 2068, using both chunked
# encoding and content-length directives to determine the
# page size. To run siege with persistent connections, set
# the connection directive to keep-alive. (Default close)
# CAUTION:        use the keep-alive directive with care.
# DOUBLE CAUTION: this directive does not work well on HPUX
# TRIPLE CAUTION: don't use keep-alives until further notice
# ex: connection = close
#     connection = keep-alive
#
connection = close

#
# Default number of simulated concurrent users
# ex: concurrent = 25
#
concurrent = 15

#
# Default duration of the siege. The right hand argument has
# a modifier which specifies the time units, H=hours, M=minutes,
# and S=seconds. If a modifier is not specified, then minutes
# are assumed.
# ex: time = 50M
#
# time =

#
# Repetitions. The length of siege may be specified in client
# reps rather than a time duration. Instead of specifying a time
# span, you can tell each siege instance to hit the server X number
# of times. So if you chose 'reps = 20' and you've selected 10
# concurrent users, then siege will hit the server 200 times.
# ex: reps = 20
#
# reps =

#
# Default URLs file, set at configuration time, the default
# file is PREFIX/etc/urls.txt. So if you configured siege
# with --prefix=/usr/local then the urls.txt file is installed
# in /usr/local/etc/urls.txt. Use the "file = " directive to
# configure an alternative URLs file. You may use environment
# variables as shown in the examples below:
# ex: file = /export/home/jdfulmer/MYURLS.txt
#     file = $HOME/etc/urls.txt
#     file = $URLSFILE
#
file = ./urls.txt

#
# Default URL, this is a single URL that you want to test. This
# is usually set at the command line with the -u option. When
# used, this option overrides the urls.txt (-f FILE/--file=FILE)
# option. You will HAVE to comment this out in order to use
# the urls.txt file option.
# ex: url = https://shemp.whoohoo.com/docs/index.jsp
#
# url =

#
# Default delay value, see the siege(1) man page.
# This value is used for load testing, it is not used
# for benchmarking.
# ex: delay = 3
#
delay = 1

#
# Connection timeout value. Set the value in seconds for
# socket connection timeouts. The default value is 30 seconds.
# ex: timeout = 30
#
# timeout =

#
# Session expiration: This directive allows you to delete all
# cookies after you pass through the URLs. This means siege will
# grab a new session with each run through its URLs. The default
# value is false.
# ex: expire-session = true
#
# expire-session =

#
# Failures: This is the number of total connection failures allowed
# before siege aborts. Connection failures (timeouts, socket failures,
# etc.) are combined with 400 and 500 level errors in the final stats,
# but those errors do not count against the abort total. If you set
# this total to 10, then siege will abort after ten socket timeouts,
# but it will NOT abort after ten 404s. This is designed to prevent
# a run-away mess on an unattended siege. The default value is 1024
# ex: failures = 50
#
# failures =

#
# Internet simulation. If true, siege clients will hit
# the URLs in the urls.txt file randomly, thereby simulating
# internet usage. If false, siege will run through the
# urls.txt file in order from first to last and back again.
# ex: internet = true
#
internet = false

#
# Default benchmarking value. If true, there is NO delay
# between server requests; siege runs as fast as the web
# server and the network will let it. Set this to false
# for load testing.
# ex: benchmark = true
#
benchmark = false

#
# Set the siege User-Agent to identify yourself at the
# host, the default is: JoeDog/1.00 [en] (X11; I; Siege #.##)
# But that reeks of corporate techno speak. Feel free
# to make it more interesting :-) Since Limey is recovering
# from minor surgery as I write this, I'll dedicate the
# example to him...
# ex: user-agent = Limey The Bulldog
#
# user-agent =

#
# Accept-encoding. This option allows you to specify
# acceptable encodings returned by the server. Use this
# directive to turn on compression. By default we accept
# gzip compression.
#
# ex: accept-encoding = *
#     accept-encoding = gzip
#     accept-encoding = compress;q=0.5;gzip;q=1
accept-encoding = gzip

#
# TURN OFF THAT ANNOYING SPINNER!
# Siege spawns a thread and runs a spinner to entertain you
# as it collects and computes its stats. If you don't like
# this feature, you may turn it off here.
# ex: spinner = false
#
spinner = true

#
# WWW-Authenticate login. When siege hits a webpage
# that requires basic authentication, it will search its
# logins for authentication which matches the specific realm
# requested by the server. If it finds a match, it will send
# that login information. If it fails to match the realm, it
# will send the default login information. (Default is "all").
# You may configure siege with several logins as long as no
# two realms match. The format for logins is:
# username:password[:realm] where "realm" is optional.
# If you do not supply a realm, then it will default to "all"
# ex: login = jdfulmer:topsecret:Admin
#     login = jeff:supersecret
#
# login =

#
# WWW-Authenticate username and password. When siege
# hits a webpage that requires authentication, it will
# send this user name and password to the server. Note
# this is NOT form based authentication. You will have
# to construct URLs for that.
# ex: username = jdfulmer
#     password = whoohoo
#
# username =
# password =

#
# ssl-cert
# This optional feature allows you to specify a path to a client
# certificate. It is not necessary to specify a certificate in
# order to use https. If you don't know why you would want one,
# then you probably don't need this feature. Use openssl to
# generate a certificate and key with the following command:
# $ openssl req -nodes -new -days 365 -newkey rsa:1024 \
#   -keyout key.pem -out cert.pem
# Specify a path to cert.pem as follows:
# ex: ssl-cert = /home/jeff/.certs/cert.pem
#
# ssl-cert =

#
# ssl-key
# Use this option to specify the key you generated with the command
# above. ex: ssl-key = /home/jeff/.certs/key.pem
# You may actually skip this option and combine both your cert and
# your key in a single file:
# $ cat key.pem > client.pem
# $ cat cert.pem >> client.pem
# Now set the path for ssl-cert:
# ex: ssl-cert = /home/jeff/.certs/client.pem
# (in this scenario, you comment out ssl-key)
#
# ssl-key =

#
# ssl-timeout
# This option sets a connection timeout for the ssl library
# ex: ssl-timeout = 30
#
# ssl-timeout =

#
# ssl-ciphers
# You can use this feature to select a specific ssl cipher
# for HTTPs. To view the ones available with your library run
# the following command: openssl ciphers
# ex: ssl-ciphers = EXP-RC4-MD5
#
# ssl-ciphers =

#
# Login URL. This is the first URL to be hit by every siege
# client. This feature was designed to allow you to login to
# a server and establish a session. It will only be hit once,
# so if you need to hit this URL more than once, make sure it
# also appears in your urls.txt file.
#
# ex: login-url = http://eos.haha.com/login.jsp POST name=jeff&pass=foo
#
# login-url =

#
# Proxy protocol. This option allows you to select a proxy
# server for stress testing. The proxy will request the URL(s)
# specified by -u"my.url.org" OR from the urls.txt file.
#
# ex: proxy-host = proxy.whoohoo.org
#     proxy-port = 8080
#
# proxy-host =
# proxy-port =

#
# Proxy-Authenticate. When scout hits a proxy server which
# requires username and password authentication, it will send
# this username and password to the server. The format is username,
# password and optional realm, each separated by a colon. You
# may enter more than one proxy-login as long as each one has
# a different realm. If you do not enter a realm, then scout
# will send that login information to all proxy challenges. If
# you have more than one proxy-login, then scout will attempt
# to match the login to the realm.
# ex: proxy-login: jeff:secret:corporate
#     proxy-login: jeff:whoohoo
#
# proxy-login =

#
# Redirection support. This option allows you to control
# whether a Location: hint will be followed. Most users
# will want to follow redirection information, but sometimes
# it's desired to just get the Location information.
#
# ex: follow-location = false
#
# follow-location =

# Zero-length data. siege can be configured to disregard
# results in which zero bytes are read after the headers.
# Alternatively, such results can be counted in the final
# tally of outcomes.
#
# ex: zero-data-ok = false
#
# zero-data-ok =

#
# end of siegerc
@@ -62,11 +62,13 @@ def nuke_prefixed_buckets(prefix):
         if bucket.name.startswith(prefix):
             print 'Cleaning bucket {bucket}'.format(bucket=bucket)
             try:
+                bucket.set_canned_acl('private')
                 for key in bucket.list():
                     print 'Cleaning bucket {bucket} key {key}'.format(
                         bucket=bucket,
                         key=key,
                         )
+                    key.set_canned_acl('private')
                     key.delete()
                 bucket.delete()
             except boto.exception.S3ResponseError as e: