mirror of https://github.com/ceph/s3-tests.git
synced 2024-12-24 19:25:23 +00:00
Adding common, and a sample config.yml

Introduces a new dependency on PyYAML
parent d81e2d40e6
commit 82693cea22

4 changed files with 224 additions and 0 deletions
.gitignore (vendored) | 2 ++

@@ -9,3 +9,5 @@
 *.pyo
 
 /virtualenv
+
+config.yml
common.py (new file) | 162 ++++++++++++++++++++++++++

@@ -0,0 +1,162 @@
+import boto.s3.connection
+import bunch
+import itertools
+import os
+import random
+import string
+import yaml
+
+s3 = bunch.Bunch()
+config = bunch.Bunch()
+prefix = ''
+
+bucket_counter = itertools.count(1)
+
+def choose_bucket_prefix(template, max_len=30):
+    """
+    Choose a prefix for our test buckets, so they're easy to identify.
+
+    Use template and feed it more and more random filler, until it's
+    as long as possible but still below max_len.
+    """
+    rand = ''.join(
+        random.choice(string.ascii_lowercase + string.digits)
+        for c in range(255)
+        )
+
+    while rand:
+        s = template.format(random=rand)
+        if len(s) <= max_len:
+            return s
+        rand = rand[:-1]
+
+    raise RuntimeError(
+        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
+            template=template,
+            ),
+        )
+
+def nuke_prefixed_buckets():
+    for name, conn in s3.items():
+        print 'Cleaning buckets from connection {name}'.format(name=name)
+        for bucket in conn.get_all_buckets():
+            if bucket.name.startswith(prefix):
+                print 'Cleaning bucket {bucket}'.format(bucket=bucket)
+                try:
+                    bucket.set_canned_acl('private')
+                    for key in bucket.list():
+                        print 'Cleaning bucket {bucket} key {key}'.format(
+                            bucket=bucket,
+                            key=key,
+                            )
+                        key.set_canned_acl('private')
+                        key.delete()
+                    bucket.delete()
+                except boto.exception.S3ResponseError as e:
+                    # TODO workaround for buggy rgw that fails to send
+                    # error_code, remove
+                    if (e.status == 403
+                        and e.error_code is None
+                        and e.body == ''):
+                        e.error_code = 'AccessDenied'
+                    if e.error_code != 'AccessDenied':
+                        print 'GOT UNWANTED ERROR', e.error_code
+                        raise
+                    # seems like we're not the owner of the bucket; ignore
+                    pass
+
+    print 'Done with cleanup of test buckets.'
+
+def setup():
+    global s3, config, prefix
+    s3.clear()
+    config.clear()
+
+    try:
+        path = os.environ['S3TEST_CONF']
+    except KeyError:
+        raise RuntimeError(
+            'To run tests, point environment '
+            + 'variable S3TEST_CONF to a config file.',
+            )
+    with file(path) as f:
+        g = yaml.safe_load_all(f)
+        for new in g:
+            config.update(bunch.bunchify(new))
+
+    # These 3 should always be present.
+    if not config.has_key('s3'):
+        raise RuntimeError('Your config file is missing the s3 section!');
+    if not config.s3.has_key('defaults'):
+        raise RuntimeError('Your config file is missing the s3.defaults section!');
+    if not config.has_key('fixtures'):
+        raise RuntimeError('Your config file is missing the fixtures section!');
+
+    if config.fixtures.has_key('bucket prefix'):
+        template = config.fixtures['bucket prefix']
+    else:
+        template = 'test-{random}-'
+    prefix = choose_bucket_prefix(template=template)
+    if prefix == '':
+        raise RuntimeError, "Empty Prefix! Aborting!"
+
+    defaults = config.s3.defaults
+    for section in config.s3.keys():
+        if section == 'defaults':
+            continue
+        section_config = config.s3[section]
+
+        kwargs = bunch.Bunch()
+        conn_args = bunch.Bunch(
+            port = 'port',
+            host = 'host',
+            is_secure = 'is_secure',
+            access_key = 'aws_access_key_id',
+            secret_key = 'aws_secret_access_key',
+            )
+        for cfg_key in conn_args.keys():
+            conn_key = conn_args[cfg_key]
+
+            if section_config.has_key(cfg_key):
+                kwargs[conn_key] = section_config[cfg_key]
+            elif defaults.has_key(cfg_key):
+                kwargs[conn_key] = defaults[cfg_key]
+
+        conn = boto.s3.connection.S3Connection(
+            # TODO support & test all variations
+            calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+            **kwargs
+            )
+        s3[section] = conn
+
+    # WARNING! we actively delete all buckets we see with the prefix
+    # we've chosen! Choose your prefix with care, and don't reuse
+    # credentials!
+
+    # We also assume nobody else is going to use buckets with that
+    # prefix. This is racy but given enough randomness, should not
+    # really fail.
+    nuke_prefixed_buckets()
+
+def get_new_bucket(connection=None):
+    """
+    Get a bucket that exists and is empty.
+
+    Always recreates a bucket from scratch. This is useful to also
+    reset ACLs and such.
+    """
+    if connection is None:
+        connection = s3.main
+    name = '{prefix}{num}'.format(
+        prefix=prefix,
+        num=next(bucket_counter),
+        )
+    # the only way for this to fail with a pre-existing bucket is if
+    # someone raced us between setup nuke_prefixed_buckets and here;
+    # ignore that as astronomically unlikely
+    bucket = connection.create_bucket(name)
+    return bucket
+
+
+if __name__ == '__main__':
+    setup()
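Taken together, common.py is the shared fixture layer: setup() reads the YAML file named by the S3TEST_CONF environment variable, builds one boto S3Connection per configured account, and deletes any leftover buckets that carry the chosen prefix. A minimal sketch of a nose test module built on top of it (the module name, test name, and assertion are illustrative, not part of this commit):

# Sketch of a nose test module using common.py; illustrative only.
import common

def setup_module():
    # nose runs this once per module; it reads S3TEST_CONF, builds
    # the configured connections, and nukes old prefixed buckets.
    common.setup()

def test_new_bucket_is_empty():
    # get_new_bucket() defaults to the "main" connection from the config.
    bucket = common.get_new_bucket()
    assert list(bucket.list()) == []

nose discovers setup_module() automatically and runs it once before any test in the module.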
config.yml.SAMPLE (new file) | 59 +++++++++++++++++++++++

@@ -0,0 +1,59 @@
+fixtures:
+    ## All the buckets created will start with this prefix;
+    ## {random} will be filled with random characters to pad
+    ## the prefix to 30 characters long, and avoid collisions
+    bucket prefix: YOURNAMEHERE-{random}-
+
+file_generation:
+    groups:
+        ## File generation works by creating N groups of files. Each group of
+        ## files is defined by three elements: number of files, avg(filesize),
+        ## and stddev(filesize) -- in that order.
+        - [1, 2, 3]
+        - [4, 5, 6]
+
+s3:
+    ## This section contains all the connection information
+
+    defaults:
+        ## This section contains the defaults for all of the other connections
+        ## below. You can also place these variables directly there.
+
+        ## Replace with e.g. "localhost" to run against local software
+        host: s3.amazonaws.com
+
+        ## Uncomment the port to use something other than 80
+        # port: 8080
+
+        ## Say "no" to disable TLS.
+        is_secure: yes
+
+    ## The tests assume two accounts are defined, "main" and "alt". You
+    ## may add other connections to be instantiated as well, however
+    ## any additional ones will not be used unless your tests use them.
+
+    main:
+
+        ## The User ID that the S3 provider gives you. For AWS, this is
+        ## typically a 64-char hexstring.
+        user_id: AWS_USER_ID
+
+        ## Display name typically looks more like a unix login, "jdoe" etc
+        display_name: AWS_DISPLAY_NAME
+
+        ## The email for this account.
+        email: AWS_EMAIL
+
+        ## Replace these with your access keys.
+        access_key: AWS_ACCESS_KEY
+        secret_key: AWS_SECRET_KEY
+
+    alt:
+        ## Another user account, used for ACL-related tests.
+
+        user_id: AWS_USER_ID
+        display_name: AWS_DISPLAY_NAME
+        email: AWS_EMAIL
+        access_key: AWS_ACCESS_KEY
+        secret_key: AWS_SECRET_KEY
+
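The "bucket prefix" fixture above feeds choose_bucket_prefix() in common.py, which pads {random} until the whole prefix reaches max_len (30 by default). A worked example of that arithmetic, illustrative rather than part of the commit: the fixed part of YOURNAMEHERE-{random}- is 14 characters, leaving exactly 16 characters of random filler:

# Illustrative only: how the sample template is padded to max_len=30.
import common

p = common.choose_bucket_prefix('YOURNAMEHERE-{random}-')
# 14 fixed characters + 16 random lowercase letters and digits == 30.
assert len(p) == 30
assert p.startswith('YOURNAMEHERE-') and p.endswith('-')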
requirements.txt | 1 +

@@ -1,3 +1,4 @@
+PyYAML
 nose >=1.0.0
 boto >=2.0b4
 bunch >=1.0.0
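The new PyYAML dependency is what turns the sample file's bare yes/no values into real booleans by the time setup() sees them, since safe_load_all() applies YAML 1.1 scalar resolution. A quick check of that behavior, illustrative and not part of the commit:

# PyYAML implements YAML 1.1, which resolves yes/no to booleans.
import yaml

assert yaml.safe_load('is_secure: yes') == {'is_secure': True}
assert yaml.safe_load('is_secure: no') == {'is_secure': False}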