forked from TrueCloudLab/s3-tests
Compare commits
4 commits
- b518fd2022
- f26be5eff7
- d9da7172c1
- 53e6f52d86
5 changed files with 85 additions and 15 deletions

README.rst:

@@ -41,5 +41,4 @@ virtualenv --no-site-packages --distribute virtualenv
 # easy_install, and we really wanted pip; next line will fail if pip
 # requirements.txt does not match setup.py requirements -- sucky but
 # good enough for now
-./virtualenv/bin/python setup.py develop \
-    --allow-hosts None
+./virtualenv/bin/python setup.py develop

s3tests/functional/__init__.py:

@@ -26,6 +26,9 @@ def get_prefix():
     assert prefix is not None
     return prefix
 
+def is_slow_backend():
+    return slow_backend
+
 def choose_bucket_prefix(template, max_len=30):
     """
     Choose a prefix for our test buckets, so they're easy to identify.

@@ -237,6 +240,7 @@ def setup():
 
     global prefix
     global targets
+    global slow_backend
 
     try:
         template = cfg.get('fixtures', 'bucket prefix')

@@ -244,6 +248,11 @@ def setup():
         template = 'test-{random}-'
     prefix = choose_bucket_prefix(template=template)
 
+    try:
+        slow_backend = cfg.getboolean('fixtures', 'slow backend')
+    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+        slow_backend = False
+
     # pull the default_region out, if it exists
     try:
         default_region = cfg.get('fixtures', 'default_region')
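
The new "slow backend" switch is read from the [fixtures] section of the
suite's configuration file (the one named by S3TEST_CONF) and defaults to
False when absent. A minimal standalone sketch of the same lookup; the file
name here is hypothetical:

    import ConfigParser

    cfg = ConfigParser.RawConfigParser()
    cfg.read('s3tests.conf')  # hypothetical path; the suite uses S3TEST_CONF

    # [fixtures]
    # slow backend = yes
    try:
        slow_backend = cfg.getboolean('fixtures', 'slow backend')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        slow_backend = False  # absent means a fast backend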

s3tests/functional/test_s3.py:

@@ -47,6 +47,7 @@ from . import (
     targets,
     config,
     get_prefix,
+    is_slow_backend,
     )
 
 
@@ -4806,6 +4807,7 @@ def test_region_bucket_create_master_access_remove_secondary():
     e = assert_raises(boto.exception.S3ResponseError, master_conn.get_bucket, bucket.name)
     eq(e.status, 404)
 
+
 @attr(resource='object')
 @attr(method='copy')
 @attr(operation='copy object between regions, verify')

@@ -4821,8 +4823,12 @@ def test_region_copy_object():
     print 'created new dest bucket ', dest_bucket.name
     region_sync_meta(targets.main, dest)
 
-    for file_size in (1024, 1024 * 1024, 10 * 1024 * 1024,
-                      100 * 1024 * 1024):
+    if is_slow_backend():
+        sizes = (1024, 10 * 1024 * 1024)
+    else:
+        sizes = (1024, 10 * 1024 * 1024, 100 * 1024 * 1024)
+
+    for file_size in sizes:
         for (k2, r) in targets.main.iteritems():
             if r == dest_conn:
                 continue
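
test_region_copy_object now sizes its test objects by backend speed: slow
backends copy only 1 KiB and 10 MiB objects, while fast backends also get
the 100 MiB case (the old standalone 1 MiB size is dropped). The selection
restated as a tiny self-contained sketch; pick_sizes is illustrative, not
part of the suite:

    def pick_sizes(slow):
        # 1 KiB and 10 MiB everywhere; 100 MiB only on fast backends
        if slow:
            return (1024, 10 * 1024 * 1024)
        return (1024, 10 * 1024 * 1024, 100 * 1024 * 1024)

    assert pick_sizes(True) == (1024, 10485760)
    assert pick_sizes(False) == (1024, 10485760, 104857600)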

s3tests/readwrite.py:

@@ -21,7 +21,7 @@ def reader(bucket, worker_id, file_names, queue, rand):
         objname = rand.choice(file_names)
         key = bucket.new_key(objname)
 
-        fp = realistic.FileVerifier()
+        fp = realistic.FileValidator()
         result = dict(
             type='r',
             bucket=bucket.name,

@@ -31,7 +31,7 @@ def reader(bucket, worker_id, file_names, queue, rand):
 
         start = time.time()
         try:
-            key.get_contents_to_file(fp)
+            key.get_contents_to_file(fp._file)
         except gevent.GreenletExit:
             raise
         except Exception as e:
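
The reader now downloads into fp._file rather than fp itself. That matters
because FileValidator.write() (added in realistic.py below) seeks back to
offset 0 after every write, so streaming a multi-chunk download through the
validator would keep overwriting the first chunk; handing boto the raw
SpooledTemporaryFile keeps normal sequential-write semantics. The intended
read path, sketched under the assumption that key is a boto Key as in the
surrounding code:

    fp = realistic.FileValidator()
    key.get_contents_to_file(fp._file)  # boto writes chunks sequentially
    if not fp.valid():                  # rewind, re-hash, compare trailer
        print 'object failed sha1 trailer check'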

@@ -50,7 +50,7 @@ def reader(bucket, worker_id, file_names, queue, rand):
         end = time.time()
 
         if not fp.valid():
-            m='md5sum check failed start={s} ({se}) end={e} size={sz} obj={o}'.format(s=time.ctime(start), se=start, e=end, sz=fp.size, o=objname)
+            m='md5sum check failed start={s} ({se}) end={e} size={sz} obj={o}'.format(s=time.ctime(start), se=start, e=end, sz=fp._file.tell(), o=objname)
             result.update(
                 error=dict(
                     msg=m,

@@ -63,13 +63,13 @@ def reader(bucket, worker_id, file_names, queue, rand):
         result.update(
             start=start,
             duration=int(round(elapsed * NANOSECOND)),
-            chunks=fp.chunks,
             )
         queue.put(result)
 
 def writer(bucket, worker_id, file_names, files, queue, rand):
     while True:
         fp = next(files)
+        fp.seek(0)
         objname = rand.choice(file_names)
         key = bucket.new_key(objname)
 
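
Two small behavioral notes. The reader drops chunks=fp.chunks because
FileValidator, unlike the old FileVerifier, records no per-chunk timings.
The writer gains fp.seek(0) because files2() (changed below) now re-yields
the same SpooledTemporaryFile objects in a loop, so each one must be
rewound before it is uploaded again. Roughly, with the upload hedged as
boto's usual set_contents_from_file (not visible in this hunk):

    fp = next(files)   # may be a file that was already uploaded once
    fp.seek(0)         # rewind, or the next upload would send 0 bytes
    key.set_contents_from_file(fp)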

@@ -104,7 +104,6 @@ def writer(bucket, worker_id, file_names, files, queue, rand):
         result.update(
             start=start,
             duration=int(round(elapsed * NANOSECOND)),
-            chunks=fp.last_chunks,
             )
 
         queue.put(result)

@@ -189,7 +188,7 @@ def main():
         )
     q = gevent.queue.Queue()
 
-
+
     # warmup - get initial set of files uploaded if there are any writers specified
     if config.readwrite.writers > 0:
         print "Uploading initial set of {num} files".format(num=config.readwrite.files.num)

s3tests/realistic.py:

@@ -12,6 +12,62 @@ import os
 NANOSECOND = int(1e9)
 
 
+def generate_file_contents(size):
+    """
+    A helper function to generate binary contents for a given size, and
+    append the sha1 hex digest of those contents to the end of the
+    blob.
+    The hexdigest is 40 chars long, so stripping the last 40 chars
+    from a generated blob recovers the original hash
+    and binary, allowing validity to be verified.
+    """
+    size = int(size)
+    contents = os.urandom(size)
+    content_hash = hashlib.sha1(contents).hexdigest()
+    return contents + content_hash
+
+
+class FileValidator(object):
+
+    def __init__(self, f=None):
+        self._file = tempfile.SpooledTemporaryFile()
+        self.original_hash = None
+        self.new_hash = None
+        if f:
+            f.seek(0)
+            shutil.copyfileobj(f, self._file)
+
+    def valid(self):
+        """
+        Returns True if this file looks valid. The file is valid if the end
+        of the file has the sha1 hex digest of the first part of the file.
+        """
+        self._file.seek(0)
+        contents = self._file.read()
+        self.original_hash, binary = contents[-40:], contents[:-40]
+        self.new_hash = hashlib.sha1(binary).hexdigest()
+        if not self.new_hash == self.original_hash:
+            print 'original hash: ', self.original_hash
+            print 'new hash: ', self.new_hash
+            print 'size: ', self._file.tell()
+            return False
+        return True
+
+    # XXX not sure if we need all of these
+    def seek(self, offset, whence=os.SEEK_SET):
+        self._file.seek(offset, whence)
+
+    def tell(self):
+        return self._file.tell()
+
+    def read(self, size=-1):
+        return self._file.read(size)
+
+    def write(self, data):
+        self._file.write(data)
+        self._file.seek(0)
+
+
 class RandomContentFile(object):
     def __init__(self, size, seed):
         self.size = size
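
A quick local check of how the two new pieces compose, with no S3 involved
(the 512-byte size is arbitrary): generate_file_contents() appends a
40-char sha1 trailer, and FileValidator copies a given file and verifies
that trailer.

    blob = generate_file_contents(512)
    assert len(blob) == 512 + 40
    assert hashlib.sha1(blob[:-40]).hexdigest() == blob[-40:]

    buf = tempfile.SpooledTemporaryFile()
    buf.write(blob)
    assert FileValidator(f=buf).valid()  # __init__ rewinds and copies buf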

@@ -99,7 +155,7 @@ class PrecomputedContentFile(object):
         self._file = tempfile.SpooledTemporaryFile()
         f.seek(0)
         shutil.copyfileobj(f, self._file)
 
-
+
         self.last_chunks = self.chunks = None
         self.seek(0)

@@ -182,6 +238,7 @@ def files(mean, stddev, seed=None):
             break
         yield RandomContentFile(size=size, seed=rand.getrandbits(32))
 
+
 def files2(mean, stddev, seed=None, numfiles=10):
     """
     Yields file objects with effectively random contents, where the

@@ -192,17 +249,17 @@ def files2(mean, stddev, seed=None, numfiles=10):
     stores `numfiles` files and yields them in a loop.
     """
     # pre-compute all the files (and save with TemporaryFiles)
-    rand_files = files(mean, stddev, seed)
     fs = []
     for _ in xrange(numfiles):
-        f = next(rand_files)
         t = tempfile.SpooledTemporaryFile()
-        shutil.copyfileobj(f, t)
+        t.write(generate_file_contents(random.normalvariate(mean, stddev)))
+        t.seek(0)
         fs.append(t)
 
     while True:
         for f in fs:
-            yield PrecomputedContentFile(f)
+            yield f
 
+
 def names(mean, stddev, charset=None, seed=None):
     """
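
With this change files2() bypasses RandomContentFile and
PrecomputedContentFile entirely: it pre-builds numfiles spooled temp files
whose contents carry their own sha1 trailer, then yields those same file
objects forever, which is exactly why the writer above rewinds with
fp.seek(0) and the reader verifies with FileValidator. A usage sketch
(mean/stddev/numfiles values are arbitrary):

    gen = files2(mean=1024, stddev=128, numfiles=2)
    f = next(gen)
    f.seek(0)           # callers rewind before every use
    data = f.read()
    assert data[-40:] == hashlib.sha1(data[:-40]).hexdigest()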