s3tests: some more multiregion changes

Now creating a connection per region for each user, so tests can
access the master and the secondary regions, and a default region
is set per user. No longer using a single specific region per user,
as that doesn't make sense.

Signed-off-by: Yehuda Sadeh <yehuda@inktank.com>
Author: Yehuda Sadeh <yehuda@inktank.com>
Date:   2013-07-26 10:46:56 -07:00
parent  99d3b4928d
commit  0fb067de30
2 changed files with 56 additions and 33 deletions
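
For orientation, a hedged sketch (not part of the commit itself) of what the new
layout gives callers once setup() has populated the module globals `targets` and
`s3` in the functional test package:

    # Each user entry in `targets` is now a RegionsConn, not a single
    # TargetConnection, so a test can address any region for that user.
    conn = targets.main.default.connection      # boto connection, default region
    master = targets.main.master                # TargetConnection for the master region
    for secondary in targets.main.secondaries:  # all non-master regions
        print secondary.conf.host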

s3tests/functional/__init__.py

@@ -144,10 +144,32 @@ class RegionsInfo:
         else:
             self.secondaries.append(region_config)
     def get(self, name):
-        return self.m[name];
+        return self.m[name]
+    def get(self):
+        return self.m
+    def iteritems(self):
+        return self.m.iteritems()

 regions = RegionsInfo()

+class RegionsConn:
+    def __init__(self):
+        self.m = bunch.Bunch()
+        self.default = None
+        self.master = None
+        self.secondaries = []
+
+    def add(self, name, conn):
+        self.m[name] = conn
+        if not self.default:
+            self.default = conn
+        if (conn.conf.is_master):
+            self.master = conn
+        else:
+            self.secondaries.append(conn)
+
 # nosetests --processes=N with N>1 is safe
 _multiprocess_can_split_ = True
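
A minimal sketch of RegionsConn's bookkeeping (the two stub classes below are
invented purely for illustration; only the conf.is_master attribute mirrors the
real TargetConnection/TargetConfig pair): the first connection added becomes the
default, and each one is filed under master or secondaries by its config flag.

    class _StubConf(object):              # stand-in for TargetConfig
        def __init__(self, is_master):
            self.is_master = is_master

    class _StubConn(object):              # stand-in for TargetConnection
        def __init__(self, is_master):
            self.conf = _StubConf(is_master)

    rc = RegionsConn()
    rc.add('region-a', _StubConn(False))  # first add becomes rc.default
    rc.add('region-b', _StubConn(True))   # is_master routes to rc.master
    assert rc.default is rc.m['region-a']
    assert rc.master is rc.m['region-b']
    assert rc.secondaries == [rc.m['region-a']]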
@@ -193,11 +215,8 @@ def setup():
         if type_ != 's3':
             continue

-        try:
-            region_name = cfg.get(section, 'region')
-            region_config = regions.get(region_name)
-        except ConfigParser.NoOptionError:
-            region_config = TargetConfig(cfg, section)
+        if len(regions.get()) == 0:
+            regions.add("default", TargetConfig(cfg, section))

         config[name] = bunch.Bunch()
         for var in [
@@ -209,17 +228,21 @@ def setup():
                 config[name][var] = cfg.get(section, var)
             except ConfigParser.NoOptionError:
                 pass

-        conn = boto.s3.connection.S3Connection(
-            aws_access_key_id=cfg.get(section, 'access_key'),
-            aws_secret_access_key=cfg.get(section, 'secret_key'),
-            is_secure=region_config.is_secure,
-            port=region_config.port,
-            host=region_config.host,
-            # TODO test vhost calling format
-            calling_format=region_config.calling_format,
-            )
-        s3[name] = conn
-        targets[name] = TargetConnection(region_config, conn)
+        targets[name] = RegionsConn()
+
+        for (k, conf) in regions.iteritems():
+            conn = boto.s3.connection.S3Connection(
+                aws_access_key_id=cfg.get(section, 'access_key'),
+                aws_secret_access_key=cfg.get(section, 'secret_key'),
+                is_secure=conf.is_secure,
+                port=conf.port,
+                host=conf.host,
+                # TODO test vhost calling format
+                calling_format=conf.calling_format,
+                )
+            targets[name].add(k, TargetConnection(conf, conn))
+
+        s3[name] = targets[name].default.connection

 # WARNING! we actively delete all buckets we see with the prefix
 # we've chosen! Choose your prefix with care, and don't reuse
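
To make the new wiring concrete, a sketch assuming setup() has run with a user
named 'main': `targets[name]` now carries one TargetConnection per configured
region, while `s3[name]` stays backward compatible by pointing at the default
region's raw boto connection.

    rconn = targets['main']                    # a RegionsConn
    assert s3['main'] is rconn.default.connection
    for region_name, tconn in rconn.m.iteritems():
        print region_name, tconn.conf.host     # each region configured for 'main'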
@@ -262,7 +285,7 @@ def get_new_bucket(target=None, name=None, headers=None):
     reset ACLs and such.
     """
     if target is None:
-        target = targets.main
+        target = targets.main.default
     connection = target.connection
     if name is None:
         name = get_new_bucket_name()
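
Since get_new_bucket() now falls back to targets.main.default, existing
single-region callers keep working unchanged, while multiregion tests can pass
an explicit target; a hedged usage sketch:

    bucket = get_new_bucket()                       # default region of 'main'
    bucket2 = get_new_bucket(targets.main.master)   # pin to the master region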

s3tests/functional/test_s3.py

@@ -753,7 +753,7 @@ def test_object_write_to_nonexist_bucket():
 def test_bucket_create_delete():
     name = '{prefix}foo'.format(prefix=get_prefix())
     print 'Trying bucket {name!r}'.format(name=name)
-    bucket = get_new_bucket(targets.main, name)
+    bucket = get_new_bucket(targets.main.default, name)

     # make sure it's actually there
     s3.main.get_bucket(bucket.name)
     bucket.delete()
@@ -2312,7 +2312,7 @@ def check_bad_bucket_name(name):
     Attempt to create a bucket with a specified name, and confirm
     that the request fails because of an invalid bucket name.
     """
-    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main, name)
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, name)
     eq(e.status, 400)
     eq(e.reason, 'Bad Request')
     eq(e.error_code, 'InvalidBucketName')
@@ -2338,7 +2338,7 @@ def test_bucket_create_naming_bad_starts_nonalpha():
 def test_bucket_create_naming_bad_short_empty():
     # bucket creates where name is empty look like PUTs to the parent
     # resource (with slash), hence their error response is different
-    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main, '')
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, '')
     eq(e.status, 405)
     eq(e.reason, 'Method Not Allowed')
     eq(e.error_code, 'MethodNotAllowed')
@@ -2385,7 +2385,7 @@ def check_good_bucket_name(name, _prefix=None):
     # should be very rare
     if _prefix is None:
         _prefix = get_prefix()
-    get_new_bucket(targets.main, '{prefix}{name}'.format(
+    get_new_bucket(targets.main.default, '{prefix}{name}'.format(
             prefix=_prefix,
             name=name,
             ))
@@ -2399,7 +2399,7 @@ def _test_bucket_create_naming_good_long(length):
     prefix = get_prefix()
     assert len(prefix) < 255
     num = length - len(prefix)
-    get_new_bucket(targets.main, '{prefix}{name}'.format(
+    get_new_bucket(targets.main.default, '{prefix}{name}'.format(
             prefix=prefix,
             name=num*'a',
             ))
@@ -2474,7 +2474,7 @@ def test_bucket_list_long_name():
     prefix = get_prefix()
     length = 251
     num = length - len(prefix)
-    bucket = get_new_bucket(targets.main, '{prefix}{name}'.format(
+    bucket = get_new_bucket(targets.main.default, '{prefix}{name}'.format(
             prefix=prefix,
             name=num*'a',
             ))
@@ -2572,9 +2572,9 @@ def test_bucket_create_naming_dns_dash_dot():
 @attr(operation='re-create')
 @attr(assertion='idempotent success')
 def test_bucket_create_exists():
-    bucket = get_new_bucket(targets.main)
+    bucket = get_new_bucket(targets.main.default)
     # REST idempotency means this should be a nop
-    get_new_bucket(targets.main, bucket.name)
+    get_new_bucket(targets.main.default, bucket.name)

 @attr(resource='bucket')
@@ -2585,7 +2585,7 @@ def test_bucket_create_exists_nonowner():
     # Names are shared across a global namespace. As such, no two
     # users can create a bucket with that same name.
     bucket = get_new_bucket()
-    e = assert_raises(boto.exception.S3CreateError, get_new_bucket, targets.alt, bucket.name)
+    e = assert_raises(boto.exception.S3CreateError, get_new_bucket, targets.alt.default, bucket.name)
     eq(e.status, 409)
     eq(e.reason, 'Conflict')
     eq(e.error_code, 'BucketAlreadyExists')
@@ -2908,7 +2908,7 @@ def test_object_acl_canned_authenticatedread():
 @attr(operation='acl bucket-owner-read')
 @attr(assertion='read back expected values')
 def test_object_acl_canned_bucketownerread():
-    bucket = get_new_bucket(targets.main)
+    bucket = get_new_bucket(targets.main.default)
     bucket.set_acl('public-read-write')

     key = s3.alt.get_bucket(bucket.name).new_key('foo')
@@ -2952,7 +2952,7 @@ def test_object_acl_canned_bucketownerread():
 @attr(operation='acl bucket-owner-read')
 @attr(assertion='read back expected values')
 def test_object_acl_canned_bucketownerfullcontrol():
-    bucket = get_new_bucket(targets.main)
+    bucket = get_new_bucket(targets.main.default)
     bucket.set_acl('public-read-write')

     key = s3.alt.get_bucket(bucket.name).new_key('foo')
@@ -3461,7 +3461,7 @@ def test_object_header_acl_grants():
 @attr('fails_on_dho')
 def test_bucket_header_acl_grants():
     headers = _get_acl_header()
-    bucket = get_new_bucket(targets.main, get_prefix(), headers)
+    bucket = get_new_bucket(targets.main.default, get_prefix(), headers)

     policy = bucket.get_acl()
     check_grants(
@@ -3596,7 +3596,7 @@ def test_bucket_acl_revoke_all():
 @attr('fails_on_rgw')
 def test_logging_toggle():
     bucket = get_new_bucket()
-    log_bucket = get_new_bucket(targets.main, bucket.name + '-log')
+    log_bucket = get_new_bucket(targets.main.default, bucket.name + '-log')
     log_bucket.set_as_logging_target()
     bucket.enable_logging(target_bucket=log_bucket, target_prefix=bucket.name)
     bucket.disable_logging()
@@ -3908,7 +3908,7 @@ def test_bucket_recreate_not_overriding():
     names = [e.name for e in list(li)]
     eq(names, key_names)

-    bucket2 = get_new_bucket(targets.main, bucket.name)
+    bucket2 = get_new_bucket(targets.main.default, bucket.name)

     li = bucket.list()
@@ -4001,7 +4001,7 @@ def test_object_copy_diff_bucket():
 @attr(operation='copy from an inaccessible bucket')
 @attr(assertion='fails w/AttributeError')
 def test_object_copy_not_owned_bucket():
-    buckets = [get_new_bucket(), get_new_bucket(targets.alt)]
+    buckets = [get_new_bucket(), get_new_bucket(targets.alt.default)]
     print repr(buckets[1])
     key = buckets[0].new_key('foo123bar')
     key.set_contents_from_string('foo')