forked from TrueCloudLab/s3-tests
Whitespace cleanup.
parent e91a3aea66
commit 3fe595abd4
6 changed files with 58 additions and 52 deletions
@@ -1,5 +1,5 @@
 from boto.auth_handler import AuthHandler
 
 class AnonymousAuthHandler(AuthHandler):
-    def add_auth(self, http_request, **kwargs):
-        return # Nothing to do for anonymous access!
+    def add_auth(self, http_request, **kwargs):
+        return # Nothing to do for anonymous access!
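The removed and added lines above differ only in trailing whitespace, so behavior is unchanged: boto calls add_auth() on every outgoing request, and this handler deliberately leaves the request unsigned. A minimal sketch of exercising it directly (the FakeRequest stand-in and the AuthHandler(host, config, provider) constructor are assumptions about boto 2.x, not part of this commit):

    class FakeRequest(object):
        # Hypothetical stand-in for boto's HTTP request; only headers matter here.
        headers = {}

    handler = AnonymousAuthHandler(host=None, config=None, provider=None)
    req = FakeRequest()
    handler.add_auth(req)
    assert req.headers == {}  # no Authorization header: the request stays anonymous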
common.py (43 changes)
@@ -15,7 +15,7 @@ bucket_counter = itertools.count(1)
 def choose_bucket_prefix(template, max_len=30):
     """
     Choose a prefix for our test buckets, so they're easy to identify.
-
+
     Use template and feed it more and more random filler, until it's
     as long as possible but still below max_len.
     """
@@ -23,13 +23,13 @@ def choose_bucket_prefix(template, max_len=30):
         random.choice(string.ascii_lowercase + string.digits)
         for c in range(255)
         )
-
+
     while rand:
         s = template.format(random=rand)
         if len(s) <= max_len:
             return s
         rand = rand[:-1]
-
+
     raise RuntimeError(
         'Bucket prefix template is impossible to fulfill: {template!r}'.format(
             template=template,
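Illustrative usage of the function above (the example values are hypothetical): the template must contain a {random} placeholder, and the result is the longest fill that still fits under max_len.

    prefix = choose_bucket_prefix(template='test-{random}-', max_len=30)
    assert len(prefix) <= 30
    print prefix  # e.g. 'test-a1b2c3d4e5f6g7h8i9j0k1l2-' (filler trimmed to fit)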
@@ -64,14 +64,14 @@ def nuke_prefixed_buckets():
                        raise
                    # seems like we're not the owner of the bucket; ignore
                    pass
-
+
    print 'Done with cleanup of test buckets.'
-
+
 def setup():
    global s3, config, prefix
    s3.clear()
    config.clear()
-
+
    try:
        path = os.environ['S3TEST_CONF']
    except KeyError:
@@ -83,7 +83,7 @@ def setup():
        g = yaml.safe_load_all(f)
        for new in g:
            config.update(bunch.bunchify(new))
-
+
    # These 3 should always be present.
    if not config.has_key('s3'):
        raise RuntimeError('Your config file is missing the s3 section!');
@@ -91,7 +91,7 @@ def setup():
        raise RuntimeError('Your config file is missing the s3.defaults section!');
    if not config.has_key('fixtures'):
        raise RuntimeError('Your config file is missing the fixtures section!');
-
+
    if config.fixtures.has_key('bucket prefix'):
        template = config.fixtures['bucket prefix']
    else:
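For orientation, here is a hypothetical S3TEST_CONF fragment satisfying the three required sections checked above (all values are placeholders), loaded the same way setup() loads the real file:

    import bunch
    import yaml

    sample = """
    fixtures:
      bucket prefix: test-{random}-
    s3:
      defaults:
        host: s3.example.com
        port: 80
        is_secure: false
        access_key: AKIAEXAMPLEKEY
        secret_key: example-secret
      main: {}
    """
    config = bunch.bunchify(yaml.safe_load(sample))
    assert config.has_key('s3') and config.has_key('fixtures')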
@@ -99,40 +99,40 @@ def setup():
    prefix = choose_bucket_prefix(template=template)
    if prefix == '':
        raise RuntimeError, "Empty Prefix! Aborting!"
-
+
    defaults = config.s3.defaults
    for section in config.s3.keys():
        if section == 'defaults':
            continue
        section_config = config.s3[section]
-
+
        kwargs = bunch.Bunch()
        conn_args = bunch.Bunch(
-            port = 'port',
-            host = 'host',
-            is_secure = 'is_secure',
-            access_key = 'aws_access_key_id',
-            secret_key = 'aws_secret_access_key',
+            port='port',
+            host='host',
+            is_secure='is_secure',
+            access_key='aws_access_key_id',
+            secret_key='aws_secret_access_key',
            )
        for cfg_key in conn_args.keys():
            conn_key = conn_args[cfg_key]
-
+
            if section_config.has_key(cfg_key):
                kwargs[conn_key] = section_config[cfg_key]
            elif defaults.has_key(cfg_key):
-                kwargs[conn_key] = defaults[cfg_key]
+                kwargs[conn_key] = defaults[cfg_key]
 
        conn = boto.s3.connection.S3Connection(
            # TODO support & test all variations
            calling_format=boto.s3.connection.OrdinaryCallingFormat(),
            **kwargs
            )
        s3[section] = conn
-
+
    # WARNING! we actively delete all buckets we see with the prefix
    # we've chosen! Choose your prefix with care, and don't reuse
    # credentials!
-
+
    # We also assume nobody else is going to use buckets with that
    # prefix. This is racy but given enough randomness, should not
    # really fail.
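The loop above maps config keys onto S3Connection keyword names, with per-section values taking precedence over the defaults section. A standalone sketch of that precedence (hypothetical helper name, plain dicts instead of Bunch):

    def merge_connection_kwargs(section_config, defaults, conn_args):
        # Section-specific values win; fall back to the defaults section.
        kwargs = {}
        for cfg_key, conn_key in conn_args.items():
            if cfg_key in section_config:
                kwargs[conn_key] = section_config[cfg_key]
            elif cfg_key in defaults:
                kwargs[conn_key] = defaults[cfg_key]
        return kwargs

    # e.g. a section overriding only the port:
    print merge_connection_kwargs(
        {'port': 8080},
        {'host': 's3.example.com', 'port': 80},
        {'port': 'port', 'host': 'host'},
    )
    # -> {'port': 8080, 'host': 's3.example.com'}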
@@ -141,7 +141,7 @@ def setup():
 def get_new_bucket(connection=None):
     """
     Get a bucket that exists and is empty.
-
+
     Always recreates a bucket from scratch. This is useful to also
     reset ACLs and such.
     """
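Illustrative usage (the bucket name shown is hypothetical): each call recreates the bucket from scratch, so tests always start from an empty bucket with default ACLs.

    bucket = get_new_bucket()
    print bucket.name                 # e.g. 'test-a1b2c3d4-1'
    assert list(bucket.list()) == []  # freshly created, hence empty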
@@ -158,5 +158,4 @@ def get_new_bucket(connection=None):
    return bucket
 
 def teardown():
-    nuke_prefixed_buckets()
-
+    nuke_prefixed_buckets()
@@ -11,8 +11,8 @@ import sys
 
 def parse_opts():
    parser = OptionParser();
-    parser.add_option('-O' , '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE')
-    parser.add_option('-b' , '--bucket', dest='bucket', help='push objects to BUCKET', metavar='BUCKET')
+    parser.add_option('-O', '--outfile', help='write output to FILE. Defaults to STDOUT', metavar='FILE')
+    parser.add_option('-b', '--bucket', dest='bucket', help='push objects to BUCKET', metavar='BUCKET')
    parser.add_option('--seed', dest='seed', help='optional seed for the random number generator')
 
    return parser.parse_args()
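Based on the options defined above, a typical invocation would look like the following (the script and bucket names are assumptions; this file's name is not shown in the extract):

    python generate_objects.py -O objects.txt -b test-bucket --seed 42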
@@ -42,7 +42,7 @@ def upload_objects(bucket, files, seed):
    list of boto S3 key objects
    """
    keys = []
-    name_generator = realistic.names(15, 4,seed=seed)
+    name_generator = realistic.names(15, 4, seed=seed)
 
    for fp in files:
        print >> sys.stderr, 'sending file with size %dB' % fp.size
@@ -113,4 +113,3 @@ if __name__ == '__main__':
    except Exception as e:
        traceback.print_exc()
        common.teardown()
-
@@ -12,9 +12,10 @@ import realistic
 import common
 
 class Result:
-    TYPE_NONE = 0
+    TYPE_NONE = 0
     TYPE_READER = 1
     TYPE_WRITER = 2
+
     def __init__(self, name, type=TYPE_NONE, time=0, success=True, size=0, details=''):
         self.name = name
         self.type = type
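Illustrative, not part of the diff: reader and writer greenlets report into a shared gevent queue as Result objects, which the main loop can then drain and print via __repr__.

    import gevent.queue

    q = gevent.queue.Queue()
    q.put(Result('worker-1', type=Result.TYPE_READER, time=2.0, size=1024))
    print q.get()  # rendered via Result.__repr__ (name, size, time, MB/s, details)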
@@ -24,7 +25,7 @@ class Result:
        self.details = details
 
    def __repr__(self):
-        type_dict = {Result.TYPE_NONE : 'None', Result.TYPE_READER : 'Reader', Result.TYPE_WRITER : 'Writer'}
+        type_dict = {Result.TYPE_NONE: 'None', Result.TYPE_READER: 'Reader', Result.TYPE_WRITER: 'Writer'}
        type_s = type_dict[self.type]
        if self.success:
            status = 'Success'
@@ -37,7 +38,7 @@ class Result:
            name=self.name,
            size=self.size,
            time=self.time,
-            mbps=(self.size/self.time/1024.0),
+            mbps=self.size / self.time / 1024.0,
            details=self.details
            )
 
@@ -52,13 +53,15 @@ def reader(seconds, bucket, name=None, queue=None):
        end = time.clock()
        elapsed = end - start
        if queue:
-            queue.put(Result(name,
-                type=Result.TYPE_READER,
-                time=elapsed,
-                success=fp.valid(),
-                size=(fp.size/1024)
-                )
-                )
+            queue.put(
+                Result(
+                    name,
+                    type=Result.TYPE_READER,
+                    time=elapsed,
+                    success=fp.valid(),
+                    size=fp.size / 1024,
+                    ),
+                )
        count += 1
    if count == 0:
        gevent.sleep(1)
@@ -71,7 +74,12 @@ def writer(seconds, bucket, name=None, queue=None, quantity=1, file_size=1, file
    if file_name_seed != None:
        r2 = file_name_seed
 
-    files = generate_objects.get_random_files(quantity, 1024*file_size, 1024*file_stddev, r)
+    files = generate_objects.get_random_files(
+        quantity=quantity,
+        mean=1024 * file_size,
+        stddev=1024 * file_stddev,
+        seed=r,
+        )
 
    start = time.clock()
    generate_objects.upload_objects(bucket, files, r2)
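Passing the arguments by keyword, as the new code does, makes the units explicit. An illustrative call with concrete values (the signature is inferred from the call above; sizes end up in bytes after the 1024 scaling):

    files = generate_objects.get_random_files(
        quantity=10,      # ten synthetic files
        mean=1024 * 5,    # ~5 KiB average size
        stddev=1024,      # ~1 KiB spread
        seed=42,          # reproducible contents
    )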
@@ -79,7 +87,7 @@ def writer(seconds, bucket, name=None, queue=None, quantity=1, file_size=1, file
    elapsed = end - start
 
    if queue:
-        queue.put(Result(name,
+        queue.put(Result(name,
            type=Result.TYPE_WRITER,
            time=elapsed,
            size=sum([(file.size/1024) for file in files]),
@@ -108,16 +116,16 @@ def parse_options():
    return parser.parse_args()
 
 def main():
-    # parse options
+    # parse options
    (options, args) = parse_options()
 
    try:
        # setup
        common.setup()
        bucket = common.get_new_bucket()
        print "Created bucket: {name}".format(name=bucket.name)
        r = None
-        if (options.rewrite):
+        if (options.rewrite):
            r = random.randint(0, 65535)
        q = gevent.queue.Queue()
 
@@ -189,4 +197,3 @@ def main():
 
 if __name__ == "__main__":
    main()
-
@@ -95,7 +95,7 @@ def files(mean, stddev, seed=None):
    re-download the object and place the contents into a file called
    ``foo``, the following should print two identical lines:
 
-        python -c 'import sys, hashlib; data=sys.stdin.read(); print hashlib.md5(data[:-16]).hexdigest(); print "".join("%02x" % ord(c) for c in data[-16:])' <foo
+        python -c 'import sys, hashlib; data=sys.stdin.read(); print hashlib.md5(data[:-16]).hexdigest(); print "".join("%02x" % ord(c) for c in data[-16:])' <foo
 
    Except for objects shorter than 16 bytes, where the second line
    will be proportionally shorter.
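The docstring describes objects laid out as a payload followed by the 16-byte MD5 digest of that payload. The same check as the shell one-liner, as a Python sketch (the helper name is illustrative):

    import hashlib

    def verify_tail_digest(data):
        # The last 16 bytes should be the raw MD5 digest of everything before them.
        payload, tail = data[:-16], data[-16:]
        return hashlib.md5(payload).digest() == tail

    with open('foo', 'rb') as f:  # 'foo' as in the docstring example
        assert verify_tail_digest(f.read())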
test_s3.py (17 changes)
@@ -200,7 +200,7 @@ def check_grants(got, want):
    eq(len(got), len(want))
    got = sorted(got, key=operator.attrgetter('id'))
    want = sorted(want, key=operator.itemgetter('id'))
-    for g,w in zip(got, want):
+    for g, w in zip(got, want):
        w = dict(w)
        eq(g.permission, w.pop('permission'))
        eq(g.id, w.pop('id'))
@@ -944,7 +944,7 @@ def generate_random(mb_size):
        s = ''
        left = mb_size - x
        this_part_size = min(left, part_size_mb)
-        for y in range(this_part_size * mb / chunk):
+        for y in range(this_part_size * mb / chunk):
            s = s + strpart
        yield s
        if (x == mb_size):
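generate_random is a generator, so callers can stream a large synthetic payload part by part instead of building it all in memory. Illustrative usage (the consumer function is hypothetical):

    total = 0
    for part in generate_random(7):  # ~7 MB, yielded in several parts
        total += len(part)
        upload_part(part)            # hypothetical consumer of each part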
@@ -989,9 +989,10 @@ def test_list_multipart_upload():
 
 def _simple_http_req_100_cont(host, port, method, resource):
    req = '{method} {resource} HTTP/1.1\r\nHost: {host}\r\nAccept-Encoding: identity\r\nContent-Length: 123\r\nExpect: 100-continue\r\n\r\n'.format(
-        method = method,
-        resource = resource,
-        host = host)
+        method=method,
+        resource=resource,
+        host=host,
+        )
 
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(5)
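After the headers go out, the helper waits for the server's interim reply before (or instead of) sending the advertised 123-byte body. A hedged sketch of reading that status line over the raw socket (not the function's actual tail, which lies outside this hunk):

    s.connect((host, port))
    s.send(req)
    data = s.recv(1024)
    # 'HTTP/1.1 100 Continue'  -> '100' (server will accept the body)
    # 'HTTP/1.1 403 Forbidden' -> '403' (denied before any body is sent)
    status = data.split(' ')[1]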
@@ -1014,7 +1015,7 @@ def _simple_http_req_100_cont(host, port, method, resource):
 def test_100_continue():
    bucket = get_new_bucket()
    objname = 'testobj'
-    resource = '/{bucket}/{obj}'.format(bucket = bucket.name, obj = objname)
+    resource = '/{bucket}/{obj}'.format(bucket=bucket.name, obj=objname)
 
    status = _simple_http_req_100_cont(s3.main.host, s3.main.port, 'PUT', resource)
    eq(status, '403')
@@ -1075,7 +1076,7 @@ def _test_atomic_write(file_size):
    # create <file_size> file of A's
    fp_a = FakeFile(file_size, 'A')
    key.set_contents_from_file(fp_a)
-
+
    # verify A's
    _verify_atomic_key_data(key, file_size, 'A')
 
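FakeFile is defined elsewhere in test_s3.py; from its use here it is a file-like object yielding file_size bytes of a single repeated character. An assumed minimal shape, for orientation only (not the real implementation):

    class FakeFile(object):
        # Assumed sketch: serves `size` bytes of `char` through the
        # read() interface that boto's set_contents_from_file consumes.
        def __init__(self, size, char):
            self.size = size
            self.char = char
            self.offset = 0

        def read(self, n=-1):
            if n < 0 or self.offset + n > self.size:
                n = self.size - self.offset
            self.offset += n
            return self.char * n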
@@ -1102,7 +1103,7 @@ def _test_atomic_dual_write(file_size):
    bucket = get_new_bucket()
    objname = 'testobj'
    key = bucket.new_key(objname)
-
+
    # get a second key object (for the same key)
    # so both can be writing without interfering
    key2 = bucket.new_key(objname)