From 82693cea225e8900e8716f27363929aaa1ecdcd7 Mon Sep 17 00:00:00 2001
From: Wesley Spikes
Date: Wed, 6 Jul 2011 15:58:24 -0700
Subject: [PATCH 01/10] Adding common, and a sample config.yml

Introduces a new dependency on PyYAML
---
 .gitignore        |   2 +
 common.py         | 162 ++++++++++++++++++++++++++++++++++++++++++++++
 config.yml.SAMPLE |  59 +++++++++++++++++
 requirements.txt  |   1 +
 4 files changed, 224 insertions(+)
 create mode 100644 common.py
 create mode 100644 config.yml.SAMPLE

diff --git a/.gitignore b/.gitignore
index f2f6baa..2b930f3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,5 @@
 *.pyo
 
 /virtualenv
+
+config.yml
diff --git a/common.py b/common.py
new file mode 100644
index 0000000..1cc4480
--- /dev/null
+++ b/common.py
@@ -0,0 +1,162 @@
+import boto.s3.connection
+import bunch
+import itertools
+import os
+import random
+import string
+import yaml
+
+s3 = bunch.Bunch()
+config = bunch.Bunch()
+prefix = ''
+
+bucket_counter = itertools.count(1)
+
+def choose_bucket_prefix(template, max_len=30):
+    """
+    Choose a prefix for our test buckets, so they're easy to identify.
+
+    Use template and feed it more and more random filler, until it's
+    as long as possible but still below max_len.
+    """
+    rand = ''.join(
+        random.choice(string.ascii_lowercase + string.digits)
+        for c in range(255)
+        )
+
+    while rand:
+        s = template.format(random=rand)
+        if len(s) <= max_len:
+            return s
+        rand = rand[:-1]
+
+    raise RuntimeError(
+        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
+            template=template,
+            ),
+        )
+
+def nuke_prefixed_buckets():
+    for name, conn in s3.items():
+        print 'Cleaning buckets from connection {name}'.format(name=name)
+        for bucket in conn.get_all_buckets():
+            if bucket.name.startswith(prefix):
+                print 'Cleaning bucket {bucket}'.format(bucket=bucket)
+                try:
+                    bucket.set_canned_acl('private')
+                    for key in bucket.list():
+                        print 'Cleaning bucket {bucket} key {key}'.format(
+                            bucket=bucket,
+                            key=key,
+                            )
+                        key.set_canned_acl('private')
+                        key.delete()
+                    bucket.delete()
+                except boto.exception.S3ResponseError as e:
+                    # TODO workaround for buggy rgw that fails to send
+                    # error_code, remove
+                    if (e.status == 403
+                        and e.error_code is None
+                        and e.body == ''):
+                        e.error_code = 'AccessDenied'
+                    if e.error_code != 'AccessDenied':
+                        print 'GOT UNWANTED ERROR', e.error_code
+                        raise
+                    # seems like we're not the owner of the bucket; ignore
+                    pass
+
+    print 'Done with cleanup of test buckets.'
+
+def setup():
+    global s3, config, prefix
+    s3.clear()
+    config.clear()
+
+    try:
+        path = os.environ['S3TEST_CONF']
+    except KeyError:
+        raise RuntimeError(
+            'To run tests, point environment '
+            + 'variable S3TEST_CONF to a config file.',
+            )
+    with file(path) as f:
+        g = yaml.safe_load_all(f)
+        for new in g:
+            config.update(bunch.bunchify(new))
+
+    # These 3 should always be present.
+    if not config.has_key('s3'):
+        raise RuntimeError('Your config file is missing the s3 section!');
+    if not config.s3.has_key('defaults'):
+        raise RuntimeError('Your config file is missing the s3.defaults section!');
+    if not config.has_key('fixtures'):
+        raise RuntimeError('Your config file is missing the fixtures section!');
+
+    if config.fixtures.has_key('bucket prefix'):
+        template = config.fixtures['bucket prefix']
+    else:
+        template = 'test-{random}-'
+    prefix = choose_bucket_prefix(template=template)
+    if prefix == '':
+        raise RuntimeError, "Empty Prefix! Aborting!"
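+    # At this point prefix is the filled-in template, e.g.
+    # 'test-3f9a2c-', at most 30 characters long. The rest of setup()
+    # opens one S3Connection per configured s3 section, then deletes
+    # any leftover buckets that start with the prefix.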
+
+    defaults = config.s3.defaults
+    for section in config.s3.keys():
+        if section == 'defaults':
+            continue
+        section_config = config.s3[section]
+
+        kwargs = bunch.Bunch()
+        conn_args = bunch.Bunch(
+            port = 'port',
+            host = 'host',
+            is_secure = 'is_secure',
+            access_key = 'aws_access_key_id',
+            secret_key = 'aws_secret_access_key',
+            )
+        for cfg_key in conn_args.keys():
+            conn_key = conn_args[cfg_key]
+
+            if section_config.has_key(cfg_key):
+                kwargs[conn_key] = section_config[cfg_key]
+            elif defaults.has_key(cfg_key):
+                kwargs[conn_key] = defaults[cfg_key]
+
+        conn = boto.s3.connection.S3Connection(
+            # TODO support & test all variations
+            calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+            **kwargs
+            )
+        s3[section] = conn
+
+    # WARNING! we actively delete all buckets we see with the prefix
+    # we've chosen! Choose your prefix with care, and don't reuse
+    # credentials!
+
+    # We also assume nobody else is going to use buckets with that
+    # prefix. This is racy but given enough randomness, should not
+    # really fail.
+    nuke_prefixed_buckets()
+
+def get_new_bucket(connection=None):
+    """
+    Get a bucket that exists and is empty.
+
+    Always recreates a bucket from scratch. This is useful to also
+    reset ACLs and such.
+    """
+    if connection is None:
+        connection = s3.main
+    name = '{prefix}{num}'.format(
+        prefix=prefix,
+        num=next(bucket_counter),
+        )
+    # the only way for this to fail with a pre-existing bucket is if
+    # someone raced us between setup's nuke_prefixed_buckets and here;
+    # ignore that as astronomically unlikely
+    bucket = connection.create_bucket(name)
+    return bucket
+
+
+if __name__ == '__main__':
+    setup()
diff --git a/config.yml.SAMPLE b/config.yml.SAMPLE
new file mode 100644
index 0000000..a733709
--- /dev/null
+++ b/config.yml.SAMPLE
@@ -0,0 +1,59 @@
+fixtures:
+## All the buckets created will start with this prefix;
+## {random} will be filled with random characters to pad
+## the prefix to 30 characters long, and avoid collisions
+  bucket prefix: YOURNAMEHERE-{random}-
+
+file_generation:
+  groups:
+## File generation works by creating N groups of files. Each group of
+## files is defined by three elements: number of files, avg(filesize),
+## and stddev(filesize) -- in that order.
+    - [1, 2, 3]
+    - [4, 5, 6]
+
+s3:
+## This section contains all the connection information
+
+  defaults:
+## This section contains the defaults for all of the other connections
+## below. You can also place these variables directly there.
+
+## Replace with e.g. "localhost" to run against local software
+    host: s3.amazonaws.com
+
+## Uncomment the port to use something other than 80
+#    port: 8080
+
+## Say "no" to disable TLS.
+    is_secure: yes
+
+## The tests assume two accounts are defined, "main" and "alt". You
+## may add other connections to be instantiated as well, however
+## any additional ones will not be used unless your tests use them.
+
+  main:
+
+## The User ID that the S3 provider gives you. For AWS, this is
+## typically a 64-char hexstring.
+    user_id: AWS_USER_ID
+
+## Display name typically looks more like a unix login, "jdoe" etc
+    display_name: AWS_DISPLAY_NAME
+
+## The email for this account.
+    email: AWS_EMAIL
+
+## Replace these with your access keys.
+    access_key: AWS_ACCESS_KEY
+    secret_key: AWS_SECRET_KEY
+
+  alt:
+## Another user account, used for ACL-related tests.
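+## Use a different account from "main" here; ACL tests are not
+## meaningful if both sections point at the same user.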
+ + user_id: AWS_USER_ID + display_name: AWS_DISPLAY_NAME + email: AWS_EMAIL + access_key: AWS_ACCESS_KEY + secret_key: AWS_SECRET_KEY + diff --git a/requirements.txt b/requirements.txt index 2104908..75d18c2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ +PyYAML nose >=1.0.0 boto >=2.0b4 bunch >=1.0.0 From 3ee10d3dbe600ca6dabc40f62ec4f69c382b2e1f Mon Sep 17 00:00:00 2001 From: Wesley Spikes Date: Wed, 6 Jul 2011 16:03:47 -0700 Subject: [PATCH 02/10] test_s3 should reset permissions prior to attempting delete --- test_s3.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test_s3.py b/test_s3.py index ab4a60a..7abdb53 100644 --- a/test_s3.py +++ b/test_s3.py @@ -62,11 +62,13 @@ def nuke_prefixed_buckets(prefix): if bucket.name.startswith(prefix): print 'Cleaning bucket {bucket}'.format(bucket=bucket) try: + bucket.set_canned_acl('private') for key in bucket.list(): print 'Cleaning bucket {bucket} key {key}'.format( bucket=bucket, key=key, ) + key.set_canned_acl('private') key.delete() bucket.delete() except boto.exception.S3ResponseError as e: From 262f1eecd12c0400a463f577c63f0b9fa5d7255f Mon Sep 17 00:00:00 2001 From: Wesley Spikes Date: Thu, 7 Jul 2011 16:05:13 -0700 Subject: [PATCH 03/10] Add teardown to common. --- common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common.py b/common.py index 1cc4480..02be9de 100644 --- a/common.py +++ b/common.py @@ -157,6 +157,6 @@ def get_new_bucket(connection=None): bucket = connection.create_bucket(name) return bucket +def teardown(): + nuke_prefixed_buckets() -if __name__ == '__main__': - setup() From 3a3cbb3d25f26fe71716773766289524d7be7c70 Mon Sep 17 00:00:00 2001 From: Kyle Marsh Date: Wed, 29 Jun 2011 11:16:42 -0700 Subject: [PATCH 04/10] DHO QA: Random Object Generation Script Script to generate garbage objects and push them to a bucket. Script takes a config file on the command line (and some other command line options using optparse) and generates a bunch of objects in an S3 bucket. Also prints public URLs to stdout. Number and sizes of the objects are determined by a yaml config file with each line looking like this: - [A, B, C] A: Number of files in this group B: Mean size of files in this group (in bytes) C: Standard deviation (normal distribution) of file sizes in this group command line options are: - S3 access key - S3 secret key - seed for PRNG - output file to write URLs to - flag to add md5 checksum to url list --- generate_objects.conf | 3 + generate_objects.py | 124 ++++++++++++++++++++++++++++++++++++++++++ realistic.py | 93 +++++++++++++++++++++++++++++++ 3 files changed, 220 insertions(+) create mode 100644 generate_objects.conf create mode 100755 generate_objects.py create mode 100644 realistic.py diff --git a/generate_objects.conf b/generate_objects.conf new file mode 100644 index 0000000..2be00e3 --- /dev/null +++ b/generate_objects.conf @@ -0,0 +1,3 @@ +- [3, 20, 5] +- [3, 30, 2] + diff --git a/generate_objects.py b/generate_objects.py new file mode 100755 index 0000000..e0c94c1 --- /dev/null +++ b/generate_objects.py @@ -0,0 +1,124 @@ +#! 
/usr/bin/python + +from boto.s3.connection import OrdinaryCallingFormat +from boto.s3.connection import S3Connection +from boto.s3.key import Key +from optparse import OptionParser +from realistic import RandomContentFile +import realistic +import random +import yaml +import boto +import sys + +DHO_HOST = 'objects.dreamhost.com' + +def parse_opts(): + parser = OptionParser(); + parser.add_option('-O' , '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE') + parser.add_option('-a' , '--access-key', dest='access_key', help='use S3 access key KEY', metavar='KEY') + parser.add_option('-s' , '--secret-key', dest='secret_key', help='use S3 secret key KEY', metavar='KEY') + parser.add_option('-b' , '--bucket', dest='bucket', help='push objects to BUCKET', metavar='BUCKET') + parser.add_option('--checksum', dest='checksum', action='store_true', help='include the md5 checksum with the object urls') + parser.add_option('--host', dest='host', help='use S3 gateway at HOST', metavar='HOST') + parser.add_option('--seed', dest='seed', help='optional seed for the random number generator') + + parser.set_defaults(host=DHO_HOST) + + return parser.parse_args() + + +def parse_config(config_files): + configurations = [] + for file in config_files: + FILE = open(file, 'r') + configurations = configurations + yaml.load(FILE.read()) + FILE.close() + return configurations + + +def get_bucket(conn, existing_bucket): + if existing_bucket: + return conn.get_bucket(existing_bucket) + else: + goop = '%x' % random.getrandbits(64) + bucket = conn.create_bucket(goop) + bucket.set_acl('public-read') + return bucket + + +def connect_s3(host, access_key, secret_key): + conn = S3Connection( + calling_format = OrdinaryCallingFormat(), + is_secure = False, + host = host, + aws_access_key_id = access_key, + aws_secret_access_key = secret_key) + + return conn + + +def generate_objects(bucket, quantity, mean, stddev, seed, checksum=False): + """Generate random objects with sizes across a normal distribution + specified by mean and standard deviation and write them to bucket. + IN: + boto S3 bucket object + Number of files + mean file size in bytes + standard deviation from mean file size + seed for RNG + flag to tell the method to append md5 checksums to the output + OUT: + list of urls (strings) to objects valid for 1 hour. + If "checksum" is true, each output string consists of the url + followed by the md5 checksum. + """ + urls = [] + file_generator = realistic.files(mean, stddev, seed) + name_generator = realistic.names(15, 4,seed=seed) + for _ in xrange(quantity): + fp = file_generator.next() + print >> sys.stderr, 'sending file with size %dB' % fp.size + key = Key(bucket) + key.key = name_generator.next() + key.set_contents_from_file(fp) + url = key.generate_url(3600) #valid for 1 hour + if checksum: + url += ' %s' % key.md5 + urls.append(url) + + return urls + + +def main(): + (options, args) = parse_opts(); + + #SETUP + random.seed(options.seed if options.seed else None) + if options.outfile: + OUTFILE = open(options.outfile, 'w') + else: + OUTFILE = sys.stdout + + conn = connect_s3(options.host, options.access_key, options.secret_key) + bucket = get_bucket(conn, options.bucket) + urls = [] + + print >> OUTFILE, 'bucket: %s' % bucket.name + print >> sys.stderr, 'setup complete, generating files' + for profile in parse_config(args): + seed = random.random() + urls += generate_objects(bucket, profile[0], profile[1], profile[2], seed, options.checksum) + print >> sys.stderr, 'finished sending files. 
Saving urls to S3'
+
+    url_string = '\n'.join(urls)
+    url_key = Key(bucket)
+    url_key.key = 'urls'
+    url_key.set_contents_from_string(url_string)
+    print >> OUTFILE, url_string
+    print >> sys.stderr, 'done'
+
+
+if __name__ == '__main__':
+    main()
+
diff --git a/realistic.py b/realistic.py
new file mode 100644
index 0000000..58a7e1a
--- /dev/null
+++ b/realistic.py
@@ -0,0 +1,93 @@
+import hashlib
+import random
+import string
+
+class RandomContentFile(object):
+    def __init__(self, size, seed):
+        self.seed = seed
+        self.random = random.Random(self.seed)
+        self.offset = 0
+        self.size = size
+        self.hash = hashlib.md5()
+        self.digest_size = self.hash.digest_size
+        self.digest = None
+
+    def seek(self, offset):
+        assert offset == 0
+        self.random.seed(self.seed)
+        self.offset = offset
+
+    def tell(self):
+        return self.offset
+
+    def read(self, size=-1):
+        if size < 0:
+            size = self.size - self.offset
+
+        r = []
+
+        random_count = min(size, self.size - self.offset - self.digest_size)
+        if random_count > 0:
+            self.offset += random_count
+            size -= random_count
+            data = ''.join(chr(self.random.getrandbits(8)) for _ in xrange(random_count))
+            if self.hash is not None:
+                self.hash.update(data)
+            r.append(data)
+
+        digest_count = min(size, self.size - self.offset)
+        if digest_count > 0:
+            if self.digest is None:
+                self.digest = self.hash.digest()
+                self.hash = None
+            self.offset += digest_count
+            size -= digest_count
+            data = self.digest[:digest_count]
+            r.append(data)
+
+        return ''.join(r)
+
+def files(mean, stddev, seed=None):
+    """
+    Yields file-like objects with effectively random contents, where
+    the size of each file follows the normal distribution with `mean`
+    and `stddev`.
+
+    Beware, the file-likeness is very shallow. You can use boto's
+    `key.set_contents_from_file` to send these to S3, but they are not
+    full file objects.
+
+    The last 128 bits are the MD5 digest of the previous bytes, for
+    verifying round-trip data integrity. For example, if you
+    re-download the object and place the contents into a file called
+    ``foo``, the following should print two identical lines:
+
+      python -c 'import sys, hashlib; data=sys.stdin.read(); print hashlib.md5(data[:-16]).hexdigest(); print "".join("%02x" % ord(c) for c in data[-16:])' <foo
+
+    """
+    rand = random.Random(seed)
+    while True:
+        while True:
+            size = int(rand.normalvariate(mean, stddev))
+            if size >= 0:
+                break
+        yield RandomContentFile(size=size, seed=rand.getrandbits(32))
+
+def names(mean, stddev, charset=None, seed=None):
+    """
+    Yields strings that are somewhat plausible as file names, where
+    the length of each filename follows the normal distribution with
+    `mean` and `stddev`.
+    """
+    if charset is None:
+        charset = string.ascii_lowercase
+    rand = random.Random(seed)
+    while True:
+        while True:
+            length = int(rand.normalvariate(mean, stddev))
+            if length >= 0:
+                break
+        name = ''.join(rand.choice(charset) for _ in xrange(length))
+        yield name

From 951dc0fcdb85a0e4601bc27fa7998fed02afe697 Mon Sep 17 00:00:00 2001
From: Kyle Marsh
Date: Wed, 6 Jul 2011 12:53:11 -0700
Subject: [PATCH 05/10] dho-qa: Add siege config file and document running
 siege

Adds siege.conf file for siege configuration options

Adds docstring to main function in generate_objects.py describing how
to run the static content load test.
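
For reference, the full command from the new docstring (key values and
seed are placeholders):

    ./generate_objects.py -a S3_ACCESS_KEY -s S3_SECRET_KEY -O urls.txt --seed 1234 generate_objects.conf && siege -rc ./siege.conf -r 5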
--- generate_objects.conf | 3 +- generate_objects.py | 12 ++ siege.conf | 382 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 395 insertions(+), 2 deletions(-) create mode 100644 siege.conf diff --git a/generate_objects.conf b/generate_objects.conf index 2be00e3..b38a150 100644 --- a/generate_objects.conf +++ b/generate_objects.conf @@ -1,3 +1,2 @@ -- [3, 20, 5] -- [3, 30, 2] +- [10, 2000, 200] diff --git a/generate_objects.py b/generate_objects.py index e0c94c1..4cf6d22 100755 --- a/generate_objects.py +++ b/generate_objects.py @@ -91,6 +91,18 @@ def generate_objects(bucket, quantity, mean, stddev, seed, checksum=False): def main(): + '''To run the static content load test: + ./generate_objects.py -a S3_ACCESS_KEY -s S3_SECRET_KEY -O urls.txt --seed 1234 generate_objects.conf && siege -rc ./siege.conf -r 5 + + This creates a bucket with your S3 credentials and fills it with + garbage objects as described in generate_objects.conf. It writes a + list of URLS to those objects to ./urls.txt. siege then reads the + ./siege.conf config file which tells it to read from ./urls.txt and + log to ./siege.log and hammers each url in urls.txt 5 times (-r flag). + + Results are printed to the terminal and written in CSV format to + ./siege.log + ''' (options, args) = parse_opts(); #SETUP diff --git a/siege.conf b/siege.conf new file mode 100644 index 0000000..c40b334 --- /dev/null +++ b/siege.conf @@ -0,0 +1,382 @@ +# Updated by Siege 2.69, May-24-2010 +# Copyright 2000-2007 by Jeffrey Fulmer, et al. +# +# Siege configuration file -- edit as necessary +# For more information about configuring and running +# this program, visit: http://www.joedog.org/ + +# +# Variable declarations. You can set variables here +# for use in the directives below. Example: +# PROXY = proxy.joedog.org +# Reference variables inside ${} or $(), example: +# proxy-host = ${PROXY} +# You can also reference ENVIRONMENT variables without +# actually declaring them, example: +# logfile = $(HOME)/var/siege.log + +# +# Signify verbose mode, true turns on verbose output +# ex: verbose = true|false +# +verbose = true + +# +# CSV Verbose format: with this option, you can choose +# to format verbose output in traditional siege format +# or comma separated format. The latter will allow you +# to redirect output to a file for import into a spread +# sheet, i.e., siege > file.csv +# ex: csv = true|false (default false) +# +csv = true + +# +# Full URL verbose format: By default siege displays +# the URL path and not the full URL. With this option, +# you # can instruct siege to show the complete URL. +# ex: fullurl = true|false (default false) +# +# fullurl = true + +# +# Display id: in verbose mode, display the siege user +# id associated with the HTTP transaction information +# ex: display-id = true|false +# +# display-id = + +# +# Show logfile location. By default, siege displays the +# logfile location at the end of every run when logging +# You can turn this message off with this directive. +# ex: show-logfile = false +# +show-logfile = true + +# +# Default logging status, true turns logging on. +# ex: logging = true|false +# +logging = true + +# +# Logfile, the default siege logfile is $PREFIX/var/siege.log +# This directive allows you to choose an alternative log file. +# Environment variables may be used as shown in the examples: +# ex: logfile = /home/jeff/var/log/siege.log +# logfile = ${HOME}/var/log/siege.log +# logfile = ${LOGFILE} +# +logfile = ./siege.log + +# +# HTTP protocol. Options HTTP/1.1 and HTTP/1.0. 
+# Some webservers have broken implementation of the +# 1.1 protocol which skews throughput evaluations. +# If you notice some siege clients hanging for +# extended periods of time, change this to HTTP/1.0 +# ex: protocol = HTTP/1.1 +# protocol = HTTP/1.0 +# +protocol = HTTP/1.1 + +# +# Chunked encoding is required by HTTP/1.1 protocol +# but siege allows you to turn it off as desired. +# +# ex: chunked = true +# +chunked = true + +# +# Cache revalidation. +# Siege supports cache revalidation for both ETag and +# Last-modified headers. If a copy is still fresh, the +# server responds with 304. +# HTTP/1.1 200 0.00 secs: 2326 bytes ==> /apache_pb.gif +# HTTP/1.1 304 0.00 secs: 0 bytes ==> /apache_pb.gif +# HTTP/1.1 304 0.00 secs: 0 bytes ==> /apache_pb.gif +# +# ex: cache = true +# +cache = false + +# +# Connection directive. Options "close" and "keep-alive" +# Starting with release 2.57b3, siege implements persistent +# connections in accordance to RFC 2068 using both chunked +# encoding and content-length directives to determine the +# page size. To run siege with persistent connections set +# the connection directive to keep-alive. (Default close) +# CAUTION: use the keep-alive directive with care. +# DOUBLE CAUTION: this directive does not work well on HPUX +# TRIPLE CAUTION: don't use keep-alives until further notice +# ex: connection = close +# connection = keep-alive +# +connection = close + +# +# Default number of simulated concurrent users +# ex: concurrent = 25 +# +concurrent = 15 + +# +# Default duration of the siege. The right hand argument has +# a modifier which specifies the time units, H=hours, M=minutes, +# and S=seconds. If a modifier is not specified, then minutes +# are assumed. +# ex: time = 50M +# +# time = + +# +# Repetitions. The length of siege may be specified in client +# reps rather then a time duration. Instead of specifying a time +# span, you can tell each siege instance to hit the server X number +# of times. So if you chose 'reps = 20' and you've selected 10 +# concurrent users, then siege will hit the server 200 times. +# ex: reps = 20 +# +# reps = + +# +# Default URLs file, set at configuration time, the default +# file is PREFIX/etc/urls.txt. So if you configured siege +# with --prefix=/usr/local then the urls.txt file is installed +# int /usr/local/etc/urls.txt. Use the "file = " directive to +# configure an alternative URLs file. You may use environment +# variables as shown in the examples below: +# ex: file = /export/home/jdfulmer/MYURLS.txt +# file = $HOME/etc/urls.txt +# file = $URLSFILE +# +file = ./urls.txt + +# +# Default URL, this is a single URL that you want to test. This +# is usually set at the command line with the -u option. When +# used, this option overrides the urls.txt (-f FILE/--file=FILE) +# option. You will HAVE to comment this out for in order to use +# the urls.txt file option. +# ex: url = https://shemp.whoohoo.com/docs/index.jsp +# +# url = + +# +# Default delay value, see the siege(1) man page. +# This value is used for load testing, it is not used +# for benchmarking. +# ex: delay = 3 +# +delay = 1 + +# +# Connection timeout value. Set the value in seconds for +# socket connection timeouts. The default value is 30 seconds. +# ex: timeout = 30 +# +# timeout = + +# +# Session expiration: This directive allows you to delete all +# cookies after you pass through the URLs. This means siege will +# grab a new session with each run through its URLs. The default +# value is false. 
+# ex: expire-session = true +# +# expire-session = + +# +# Failures: This is the number of total connection failures allowed +# before siege aborts. Connection failures (timeouts, socket failures, +# etc.) are combined with 400 and 500 level errors in the final stats, +# but those errors do not count against the abort total. If you set +# this total to 10, then siege will abort after ten socket timeouts, +# but it will NOT abort after ten 404s. This is designed to prevent +# a run-away mess on an unattended siege. The default value is 1024 +# ex: failures = 50 +# +# failures = + +# +# Internet simulation. If true, siege clients will hit +# the URLs in the urls.txt file randomly, thereby simulating +# internet usage. If false, siege will run through the +# urls.txt file in order from first to last and back again. +# ex: internet = true +# +internet = false + +# +# Default benchmarking value, If true, there is NO delay +# between server requests, siege runs as fast as the web +# server and the network will let it. Set this to false +# for load testing. +# ex: benchmark = true +# +benchmark = false + +# +# Set the siege User-Agent to identify yourself at the +# host, the default is: JoeDog/1.00 [en] (X11; I; Siege #.##) +# But that wreaks of corporate techno speak. Feel free +# to make it more interesting :-) Since Limey is recovering +# from minor surgery as I write this, I'll dedicate the +# example to him... +# ex: user-agent = Limey The Bulldog +# +# user-agent = + +# +# Accept-encoding. This option allows you to specify +# acceptable encodings returned by the server. Use this +# directive to turn on compression. By default we accept +# gzip compression. +# +# ex: accept-encoding = * +# accept-encoding = gzip +# accept-encoding = compress;q=0.5;gzip;q=1 +accept-encoding = gzip + +# +# TURN OFF THAT ANNOYING SPINNER! +# Siege spawns a thread and runs a spinner to entertain you +# as it collects and computes its stats. If you don't like +# this feature, you may turn it off here. +# ex: spinner = false +# +spinner = true + +# +# WWW-Authenticate login. When siege hits a webpage +# that requires basic authentication, it will search its +# logins for authentication which matches the specific realm +# requested by the server. If it finds a match, it will send +# that login information. If it fails to match the realm, it +# will send the default login information. (Default is "all"). +# You may configure siege with several logins as long as no +# two realms match. The format for logins is: +# username:password[:realm] where "realm" is optional. +# If you do not supply a realm, then it will default to "all" +# ex: login = jdfulmer:topsecret:Admin +# login = jeff:supersecret +# +# login = + +# +# WWW-Authenticate username and password. When siege +# hits a webpage that requires authentication, it will +# send this user name and password to the server. Note +# this is NOT form based authentication. You will have +# to construct URLs for that. +# ex: username = jdfulmer +# password = whoohoo +# +# username = +# password = + +# +# ssl-cert +# This optional feature allows you to specify a path to a client +# certificate. It is not neccessary to specify a certificate in +# order to use https. If you don't know why you would want one, +# then you probably don't need this feature. 
Use openssl to +# generate a certificate and key with the following command: +# $ openssl req -nodes -new -days 365 -newkey rsa:1024 \ +# -keyout key.pem -out cert.pem +# Specify a path to cert.pem as follows: +# ex: ssl-cert = /home/jeff/.certs/cert.pem +# +# ssl-cert = + +# +# ssl-key +# Use this option to specify the key you generated with the command +# above. ex: ssl-key = /home/jeff/.certs/key.pem +# You may actually skip this option and combine both your cert and +# your key in a single file: +# $ cat key.pem > client.pem +# $ cat cert.pem >> client.pem +# Now set the path for ssl-cert: +# ex: ssl-cert = /home/jeff/.certs/client.pem +# (in this scenario, you comment out ssl-key) +# +# ssl-key = + +# +# ssl-timeout +# This option sets a connection timeout for the ssl library +# ex: ssl-timeout = 30 +# +# ssl-timeout = + +# +# ssl-ciphers +# You can use this feature to select a specific ssl cipher +# for HTTPs. To view the ones available with your library run +# the following command: openssl ciphers +# ex: ssl-ciphers = EXP-RC4-MD5 +# +# ssl-ciphers = + +# +# Login URL. This is the first URL to be hit by every siege +# client. This feature was designed to allow you to login to +# a server and establish a session. It will only be hit once +# so if you need to hit this URL more then once, make sure it +# also appears in your urls.txt file. +# +# ex: login-url = http://eos.haha.com/login.jsp POST name=jeff&pass=foo +# +# login-url = + +# +# Proxy protocol. This option allows you to select a proxy +# server stress testing. The proxy will request the URL(s) +# specified by -u"my.url.org" OR from the urls.txt file. +# +# ex: proxy-host = proxy.whoohoo.org +# proxy-port = 8080 +# +# proxy-host = +# proxy-port = + +# +# Proxy-Authenticate. When scout hits a proxy server which +# requires username and password authentication, it will this +# username and password to the server. The format is username, +# password and optional realm each separated by a colon. You +# may enter more than one proxy-login as long as each one has +# a different realm. If you do not enter a realm, then scout +# will send that login information to all proxy challenges. If +# you have more than one proxy-login, then scout will attempt +# to match the login to the realm. +# ex: proxy-login: jeff:secret:corporate +# proxy-login: jeff:whoohoo +# +# proxy-login = + +# +# Redirection support. This option allows to to control +# whether a Location: hint will be followed. Most users +# will want to follow redirection information, but sometimes +# it's desired to just get the Location information. +# +# ex: follow-location = false +# +# follow-location = + +# Zero-length data. siege can be configured to disregard +# results in which zero bytes are read after the headers. +# Alternatively, such results can be counted in the final +# tally of outcomes. +# +# ex: zero-data-ok = false +# +# zero-data-ok = + +# +# end of siegerc From a4e5be5f4128139d7ed5dc1b55c1c7ce79101aca Mon Sep 17 00:00:00 2001 From: Kyle Marsh Date: Fri, 8 Jul 2011 11:18:52 -0700 Subject: [PATCH 06/10] dho-qa: static file generator updated to use common Updated generate_objects.py to use Wes's common setup and configuration stuff. Can still override config on the command line. 
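
For example, both of these now work (key and host values are
placeholders; anything not given on the command line comes from the
config file named by S3TEST_CONF):

    S3TEST_CONF=config.yml ./generate_objects.py -O urls.txt --seed 1234
    S3TEST_CONF=config.yml ./generate_objects.py -a MY_KEY -s MY_SECRET --host localhost -O urls.txt --seed 1234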
--- generate_objects.py | 60 ++++++++++++++++++++++----------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/generate_objects.py b/generate_objects.py index 4cf6d22..b7ac4ae 100755 --- a/generate_objects.py +++ b/generate_objects.py @@ -6,12 +6,13 @@ from boto.s3.key import Key from optparse import OptionParser from realistic import RandomContentFile import realistic +import traceback import random +import common import yaml import boto import sys -DHO_HOST = 'objects.dreamhost.com' def parse_opts(): parser = OptionParser(); @@ -23,30 +24,9 @@ def parse_opts(): parser.add_option('--host', dest='host', help='use S3 gateway at HOST', metavar='HOST') parser.add_option('--seed', dest='seed', help='optional seed for the random number generator') - parser.set_defaults(host=DHO_HOST) - return parser.parse_args() -def parse_config(config_files): - configurations = [] - for file in config_files: - FILE = open(file, 'r') - configurations = configurations + yaml.load(FILE.read()) - FILE.close() - return configurations - - -def get_bucket(conn, existing_bucket): - if existing_bucket: - return conn.get_bucket(existing_bucket) - else: - goop = '%x' % random.getrandbits(64) - bucket = conn.create_bucket(goop) - bucket.set_acl('public-read') - return bucket - - def connect_s3(host, access_key, secret_key): conn = S3Connection( calling_format = OrdinaryCallingFormat(), @@ -82,7 +62,7 @@ def generate_objects(bucket, quantity, mean, stddev, seed, checksum=False): key = Key(bucket) key.key = name_generator.next() key.set_contents_from_file(fp) - url = key.generate_url(3600) #valid for 1 hour + url = key.generate_url(30758400) #valid for 1 year if checksum: url += ' %s' % key.md5 urls.append(url) @@ -91,8 +71,9 @@ def generate_objects(bucket, quantity, mean, stddev, seed, checksum=False): def main(): - '''To run the static content load test: - ./generate_objects.py -a S3_ACCESS_KEY -s S3_SECRET_KEY -O urls.txt --seed 1234 generate_objects.conf && siege -rc ./siege.conf -r 5 + '''To run the static content load test, make sure you've bootstrapped your + test environment and set up your config.yml file, then run the following: + S3TEST_CONF=config.yml virtualenv/bin/python generate_objects.py -a S3_ACCESS_KEY -s S3_SECRET_KEY -O urls.txt --seed 1234 && siege -rc ./siege.conf -r 5 This creates a bucket with your S3 credentials and fills it with garbage objects as described in generate_objects.conf. 
It writes a @@ -102,6 +83,9 @@ def main(): Results are printed to the terminal and written in CSV format to ./siege.log + + S3 credentials and output file may also be specified in config.yml + under s3.main and file_generation.url_file ''' (options, args) = parse_opts(); @@ -109,19 +93,30 @@ def main(): random.seed(options.seed if options.seed else None) if options.outfile: OUTFILE = open(options.outfile, 'w') + elif common.config.file_generation.url_file: + OUTFILE = open(common.config.file_generation.url_file, 'w') else: OUTFILE = sys.stdout - conn = connect_s3(options.host, options.access_key, options.secret_key) - bucket = get_bucket(conn, options.bucket) + if options.access_key and options.secret_key: + host = options.host if options.host else common.config.s3.defaults.host + conn = connect_s3(host, options.access_key, options.secret_key) + else: + conn = common.s3.main + + if options.bucket: + bucket = get_bucket(conn, options.bucket) + else: + bucket = common.get_new_bucket() + urls = [] print >> OUTFILE, 'bucket: %s' % bucket.name print >> sys.stderr, 'setup complete, generating files' - for profile in parse_config(args): + for profile in common.config.file_generation.groups: seed = random.random() urls += generate_objects(bucket, profile[0], profile[1], profile[2], seed, options.checksum) - print >> sys.stderr, 'finished sending files. Saving urls to S3' + print >> sys.stderr, 'finished sending files. generating urls and sending to S3' url_string = '\n'.join(urls) url_key = Key(bucket) @@ -132,5 +127,10 @@ def main(): if __name__ == '__main__': - main() + common.setup() + try: + main() + except Exception as e: + traceback.print_exc() + common.teardown() From a1e5c50dc65950743826e89981923872ea398c92 Mon Sep 17 00:00:00 2001 From: Steven Berler Date: Fri, 8 Jul 2011 13:00:09 -0700 Subject: [PATCH 07/10] dho qa: rand_readwrite Adds the rand_readwrite utility. Updates realistic.py with a file verifier class. Updates generate_objects.py to allow the filename seed to be set. --- generate_objects.py | 8 +- rand_readwrite.py | 188 ++++++++++++++++++++++++++++++++++++++++++++ realistic.py | 20 +++++ 3 files changed, 214 insertions(+), 2 deletions(-) create mode 100755 rand_readwrite.py diff --git a/generate_objects.py b/generate_objects.py index b7ac4ae..d4b9448 100755 --- a/generate_objects.py +++ b/generate_objects.py @@ -38,7 +38,7 @@ def connect_s3(host, access_key, secret_key): return conn -def generate_objects(bucket, quantity, mean, stddev, seed, checksum=False): +def generate_objects(bucket, quantity, mean, stddev, seed, checksum=False, name_seed=None): """Generate random objects with sizes across a normal distribution specified by mean and standard deviation and write them to bucket. IN: @@ -48,14 +48,18 @@ def generate_objects(bucket, quantity, mean, stddev, seed, checksum=False): standard deviation from mean file size seed for RNG flag to tell the method to append md5 checksums to the output + seed to use for the file names. defaults to use the other seed OUT: list of urls (strings) to objects valid for 1 hour. If "checksum" is true, each output string consists of the url followed by the md5 checksum. 
""" + if name_seed == None: + name_seed = seed + urls = [] file_generator = realistic.files(mean, stddev, seed) - name_generator = realistic.names(15, 4,seed=seed) + name_generator = realistic.names(15, 4,seed=name_seed) for _ in xrange(quantity): fp = file_generator.next() print >> sys.stderr, 'sending file with size %dB' % fp.size diff --git a/rand_readwrite.py b/rand_readwrite.py new file mode 100755 index 0000000..e75cba7 --- /dev/null +++ b/rand_readwrite.py @@ -0,0 +1,188 @@ +#!/usr/bin/python + +import gevent +import gevent.queue +import gevent.monkey; gevent.monkey.patch_all() +import optparse +import time +import random + +import generate_objects +import realistic +import common + +class Result: + TYPE_NONE = 0 + TYPE_READER = 1 + TYPE_WRITER = 2 + def __init__(self, name, type=TYPE_NONE, time=0, success=True, size=0, details=''): + self.name = name + self.type = type + self.time = time + self.success = success + self.size = size + self.details = details + + def __repr__(self): + type_dict = {Result.TYPE_NONE : 'None', Result.TYPE_READER : 'Reader', Result.TYPE_WRITER : 'Writer'} + type_s = type_dict[self.type] + if self.success: + status = 'Success' + else: + status = 'FAILURE' + + return "".format( + success=status, + type=type_s, + name=self.name, + size=self.size, + time=self.time, + mbps=(self.size/self.time/1024.0), + details=self.details + ) + +def reader(seconds, bucket, name=None, queue=None): + with gevent.Timeout(seconds, False): + while (1): + count = 0 + for key in bucket.list(): + fp = realistic.FileVerifier() + start = time.clock() + key.get_contents_to_file(fp) + end = time.clock() + elapsed = end - start + if queue: + queue.put(Result(name, + type=Result.TYPE_READER, + time=elapsed, + success=fp.valid(), + size=(fp.size/1024) + ) + ) + count += 1 + if count == 0: + gevent.sleep(1) + +def writer(seconds, bucket, name=None, queue=None, quantity=1, file_size=1, file_stddev=0, file_name_seed=None): + with gevent.Timeout(seconds, False): + while (1): + r = random.randint(0, 65535) + start = time.clock() + generate_objects.generate_objects(bucket, quantity, 1024*file_size, 1024*file_stddev, r, + name_seed=file_name_seed + ) + end = time.clock() + elapsed = end - start + if queue: + queue.put(Result(name, + type=Result.TYPE_WRITER, + time=elapsed, + size=file_size*quantity, + details="stddev={stddev}".format(stddev=file_stddev) + ) + ) + +def parse_options(): + parser = optparse.OptionParser() + parser.add_option("-t", "--time", dest="duration", type="float", + help="duration to run tests (seconds)", default=5, metavar="SECS") + parser.add_option("-r", "--read", dest="num_readers", type="int", + help="number of reader threads", default=0, metavar="NUM") + parser.add_option("-w", "--write", dest="num_writers", type="int", + help="number of writer threads", default=2, metavar="NUM") + parser.add_option("-s", "--size", dest="file_size", type="float", + help="file size to use, in kb", default=1024, metavar="KB") + parser.add_option("-q", "--quantity", dest="quantity", type="int", + help="number of files per batch", default=1, metavar="NUM") + parser.add_option("-d", "--stddev", dest="stddev", type="float", + help="stddev of file size", default=0, metavar="KB") + parser.add_option("-W", "--rewrite", dest="rewrite", action="store_true", + help="rewrite the same files (total=quantity)") + parser.add_option("--no-cleanup", dest="cleanup", action="store_false", + help="skip cleaning up all created buckets", default=True) + + return parser.parse_args() + +def main(): + # parse 
+    (options, args) = parse_options()
+
+    try:
+        # setup
+        common.setup()
+        bucket = common.get_new_bucket()
+        print "Created bucket: {name}".format(name=bucket.name)
+        r = None
+        if (options.rewrite):
+            r = random.randint(0, 65535)
+        q = gevent.queue.Queue()
+
+        # main work
+        print "Using file size: {size} +- {stddev}".format(size=options.file_size, stddev=options.stddev)
+        print "Spawning {r} readers and {w} writers...".format(r=options.num_readers, w=options.num_writers)
+        greenlets = []
+        greenlets += [gevent.spawn(writer, options.duration, bucket,
+            name=x,
+            queue=q,
+            file_size=options.file_size,
+            file_stddev=options.stddev,
+            quantity=options.quantity,
+            file_name_seed=r
+            ) for x in xrange(options.num_writers)]
+        greenlets += [gevent.spawn(reader, options.duration, bucket,
+            name=x,
+            queue=q
+            ) for x in xrange(options.num_readers)]
+        gevent.spawn_later(options.duration, lambda: q.put(StopIteration))
+
+        total_read = 0
+        total_write = 0
+        read_success = 0
+        read_failure = 0
+        write_success = 0
+        write_failure = 0
+        for item in q:
+            print item
+            if item.type == Result.TYPE_READER:
+                if item.success:
+                    read_success += 1
+                    total_read += item.size
+                else:
+                    read_failure += 1
+            elif item.type == Result.TYPE_WRITER:
+                if item.success:
+                    write_success += 1
+                    total_write += item.size
+                else:
+                    write_failure += 1
+
+        # overall stats
+        print "--- Stats ---"
+        print "Total Read: {read} MB ({mbps} MB/s)".format(
+            read=(total_read/1024.0),
+            mbps=(total_read/1024.0/options.duration)
+            )
+        print "Total Write: {write} MB ({mbps} MB/s)".format(
+            write=(total_write/1024.0),
+            mbps=(total_write/1024.0/options.duration)
+            )
+        print "Read failures: {num} ({percent}%)".format(
+            num=read_failure,
+            percent=(100.0*read_failure/max(read_failure+read_success, 1))
+            )
+        print "Write failures: {num} ({percent}%)".format(
+            num=write_failure,
+            percent=(100.0*write_failure/max(write_failure+write_success, 1))
+            )
+
+        gevent.joinall(greenlets, timeout=1)
+    except Exception as e:
+        print e
+    finally:
+        # cleanup
+        if options.cleanup:
+            common.teardown()
+
+if __name__ == "__main__":
+    main()
+
diff --git a/realistic.py b/realistic.py
index 58a7e1a..1d55147 100644
--- a/realistic.py
+++ b/realistic.py
@@ -47,6 +47,26 @@ class RandomContentFile(object):
 
         return ''.join(r)
 
+class FileVerifier(object):
+    def __init__(self):
+        self.size = 0
+        self.hash = hashlib.md5()
+        self.buf = ''
+
+    def write(self, data):
+        self.size += len(data)
+        self.buf += data
+        digsz = -1*self.hash.digest_size
+        new_data, self.buf = self.buf[0:digsz], self.buf[digsz:]
+        self.hash.update(new_data)
+
+    def valid(self):
+        """
+        Returns True if this file looks valid. The file is valid if the end
+        of the file has the md5 digest for the first part of the file.
+        """
+        return self.buf == self.hash.digest()
+
 def files(mean, stddev, seed=None):
     """
     Yields file-like objects with effectively random contents, where

From e5f97830534233542ea5aea185191f9f52bd6797 Mon Sep 17 00:00:00 2001
From: Kyle Marsh
Date: Fri, 8 Jul 2011 14:38:12 -0700
Subject: [PATCH 08/10] dho-qa: disentangle file generation from uploading

Static load test script now provides separate functions for generating
a list of random-file pointers and uploading those files to an S3
store. When run as a script it still does both, but you can call each
function individually from a different script after loading the module.
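
A minimal sketch of driving the two pieces from another script (assumes
a config.yml reachable via S3TEST_CONF, like the other tools here; the
counts and sizes are made up):

    import random

    import common
    import generate_objects

    common.setup()
    bucket = common.get_new_bucket()
    seed = random.random()
    # 10 files, mean 2048 bytes, stddev 200 bytes
    files = generate_objects.get_random_files(10, 2048, 200, seed)
    keys = generate_objects.upload_objects(bucket, files, seed)
    for key in keys:
        print key.generate_url(3600)  # valid for 1 hour
    common.teardown()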
--- generate_objects.py | 100 ++++++++++++++++++++------------------------ 1 file changed, 46 insertions(+), 54 deletions(-) diff --git a/generate_objects.py b/generate_objects.py index d4b9448..2c2dab0 100755 --- a/generate_objects.py +++ b/generate_objects.py @@ -17,11 +17,7 @@ import sys def parse_opts(): parser = OptionParser(); parser.add_option('-O' , '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE') - parser.add_option('-a' , '--access-key', dest='access_key', help='use S3 access key KEY', metavar='KEY') - parser.add_option('-s' , '--secret-key', dest='secret_key', help='use S3 secret key KEY', metavar='KEY') parser.add_option('-b' , '--bucket', dest='bucket', help='push objects to BUCKET', metavar='BUCKET') - parser.add_option('--checksum', dest='checksum', action='store_true', help='include the md5 checksum with the object urls') - parser.add_option('--host', dest='host', help='use S3 gateway at HOST', metavar='HOST') parser.add_option('--seed', dest='seed', help='optional seed for the random number generator') return parser.parse_args() @@ -38,63 +34,67 @@ def connect_s3(host, access_key, secret_key): return conn -def generate_objects(bucket, quantity, mean, stddev, seed, checksum=False, name_seed=None): - """Generate random objects with sizes across a normal distribution - specified by mean and standard deviation and write them to bucket. +def get_random_files(quantity, mean, stddev, seed): + """Create file-like objects with pseudorandom contents. + IN: + number of files to create + mean file size in bytes + standard deviation from mean file size + seed for PRNG + OUT: + list of file handles + """ + file_generator = realistic.files(mean, stddev, seed) + return [file_generator.next() for _ in xrange(quantity)] + + +def upload_objects(bucket, files, seed): + """Upload a bunch of files to an S3 bucket IN: boto S3 bucket object - Number of files - mean file size in bytes - standard deviation from mean file size - seed for RNG - flag to tell the method to append md5 checksums to the output - seed to use for the file names. defaults to use the other seed + list of file handles to upload + seed for PRNG OUT: - list of urls (strings) to objects valid for 1 hour. - If "checksum" is true, each output string consists of the url - followed by the md5 checksum. + list of boto S3 key objects """ - if name_seed == None: - name_seed = seed + keys = [] + name_generator = realistic.names(15, 4,seed=seed) - urls = [] - file_generator = realistic.files(mean, stddev, seed) - name_generator = realistic.names(15, 4,seed=name_seed) - for _ in xrange(quantity): - fp = file_generator.next() + for fp in files: print >> sys.stderr, 'sending file with size %dB' % fp.size key = Key(bucket) key.key = name_generator.next() key.set_contents_from_file(fp) - url = key.generate_url(30758400) #valid for 1 year - if checksum: - url += ' %s' % key.md5 - urls.append(url) + keys.append(key) - return urls + return keys def main(): '''To run the static content load test, make sure you've bootstrapped your test environment and set up your config.yml file, then run the following: - S3TEST_CONF=config.yml virtualenv/bin/python generate_objects.py -a S3_ACCESS_KEY -s S3_SECRET_KEY -O urls.txt --seed 1234 && siege -rc ./siege.conf -r 5 + S3TEST_CONF=config.yml virtualenv/bin/python generate_objects.py -O urls.txt --seed 1234 + + This creates a bucket with your S3 credentials (from config.yml) and + fills it with garbage objects as described in generate_objects.conf. 
+    It writes a list of URLS to those objects to ./urls.txt.
+
+    Once you have objects in your bucket, run the siege benchmarking program:
+    siege -rc ./siege.conf -r 5
+
+    This tells siege to read the ./siege.conf config file which tells it to
+    use the urls in ./urls.txt and log to ./siege.log. It hits each url in
+    urls.txt 5 times (-r flag).
+
     Results are printed to the terminal and written in CSV format to
     ./siege.log
-
-    S3 credentials and output file may also be specified in config.yml
-    under s3.main and file_generation.url_file
     '''
     (options, args) = parse_opts();
 
     #SETUP
     random.seed(options.seed if options.seed else None)
+    conn = common.s3.main
+
     if options.outfile:
         OUTFILE = open(options.outfile, 'w')
     elif common.config.file_generation.url_file:
         OUTFILE = open(common.config.file_generation.url_file, 'w')
     else:
         OUTFILE = sys.stdout
 
-    if options.access_key and options.secret_key:
-        host = options.host if options.host else common.config.s3.defaults.host
-        conn = connect_s3(host, options.access_key, options.secret_key)
-    else:
-        conn = common.s3.main
-
     if options.bucket:
-        bucket = get_bucket(conn, options.bucket)
+        bucket = conn.create_bucket(options.bucket)
     else:
         bucket = common.get_new_bucket()
 
-    urls = []
-
+    keys = []
     print >> OUTFILE, 'bucket: %s' % bucket.name
     print >> sys.stderr, 'setup complete, generating files'
     for profile in common.config.file_generation.groups:
         seed = random.random()
-        urls += generate_objects(bucket, profile[0], profile[1], profile[2], seed, options.checksum)
-    print >> sys.stderr, 'finished sending files. generating urls and sending to S3'
+        files = get_random_files(profile[0], profile[1], profile[2], seed)
+        keys += upload_objects(bucket, files, seed)
+
+    print >> sys.stderr, 'finished sending files. generating urls'
+    for key in keys:
+        print >> OUTFILE, key.generate_url(30758400) #valid for 1 year
 
-    url_string = '\n'.join(urls)
-    url_key = Key(bucket)
-    url_key.key = 'urls'
-    url_key.set_contents_from_string(url_string)
-    print >> OUTFILE, url_string
     print >> sys.stderr, 'done'

From 99ef831a0d922c536e0f00067655491fd1b0be53 Mon Sep 17 00:00:00 2001
From: Steven Berler
Date: Fri, 8 Jul 2011 14:52:22 -0700
Subject: [PATCH 09/10] dho qa: update rand readwrite for file generator

Now works correctly again after the changes to the random file
generator. Also now gets the true size of files when generating using
a stddev != 0 (rather than just assuming all files were the mean size).
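
The gist of the size fix, in KB (writer() previously assumed every file
was exactly the mean size):

    # before
    size=file_size*quantity,
    # after
    size=sum([(file.size/1024) for file in files]),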
--- rand_readwrite.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/rand_readwrite.py b/rand_readwrite.py index e75cba7..eb72e65 100755 --- a/rand_readwrite.py +++ b/rand_readwrite.py @@ -67,18 +67,22 @@ def writer(seconds, bucket, name=None, queue=None, quantity=1, file_size=1, file with gevent.Timeout(seconds, False): while (1): r = random.randint(0, 65535) + r2 = r + if file_name_seed != None: + r2 = file_name_seed + + files = generate_objects.get_random_files(quantity, 1024*file_size, 1024*file_stddev, r) + start = time.clock() - generate_objects.generate_objects(bucket, quantity, 1024*file_size, 1024*file_stddev, r, - name_seed=file_name_seed - ) + keys = generate_objects.upload_objects(bucket, files, r2) end = time.clock() elapsed = end - start + if queue: queue.put(Result(name, type=Result.TYPE_WRITER, time=elapsed, - size=file_size*quantity, - details="stddev={stddev}".format(stddev=file_stddev) + size=sum([(file.size/1024) for file in files]), ) ) From 90d0f065229422811f02f0a4a9adeb543c0ec1b4 Mon Sep 17 00:00:00 2001 From: Wesley Spikes Date: Fri, 8 Jul 2011 15:04:22 -0700 Subject: [PATCH 10/10] Removing dead code (generate_objects:connect_s3) --- generate_objects.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/generate_objects.py b/generate_objects.py index 2c2dab0..5671837 100755 --- a/generate_objects.py +++ b/generate_objects.py @@ -23,17 +23,6 @@ def parse_opts(): return parser.parse_args() -def connect_s3(host, access_key, secret_key): - conn = S3Connection( - calling_format = OrdinaryCallingFormat(), - is_secure = False, - host = host, - aws_access_key_id = access_key, - aws_secret_access_key = secret_key) - - return conn - - def get_random_files(quantity, mean, stddev, seed): """Create file-like objects with pseudorandom contents. IN: