From bccf36587de0af0972afe673082d2de977c854fa Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Wed, 24 Jul 2013 11:51:26 -0700 Subject: [PATCH 01/16] requirements.txt: work around pip 1.4 issue Signed-off-by: Yehuda Sadeh --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3a7d63e..f81903b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,5 +6,5 @@ bunch >=1.0.0 gevent ==0.13.6 isodate >=0.4.4 requests ==0.14.0 -pytz +pytz >=2011k ordereddict From c40b1cd3484e8dc055f9493196437ebf3e24a902 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Wed, 24 Jul 2013 13:23:24 -0700 Subject: [PATCH 02/16] support region configuration Signed-off-by: Yehuda Sadeh --- s3tests/functional/__init__.py | 13 ++++++++++--- s3tests/functional/test_s3.py | 24 ++++++++++++------------ 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/s3tests/functional/__init__.py b/s3tests/functional/__init__.py index 1e92c09..ce8ca59 100644 --- a/s3tests/functional/__init__.py +++ b/s3tests/functional/__init__.py @@ -88,12 +88,18 @@ def setup(): cfg.readfp(f) global prefix + global location try: template = cfg.get('fixtures', 'bucket prefix') except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): template = 'test-{random}-' prefix = choose_bucket_prefix(template=template) + try: + location = cfg.get('region main', 'name') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + location = '' + s3.clear() config.clear() calling_formats = dict( @@ -179,7 +185,7 @@ def get_new_bucket_name(): return name -def get_new_bucket(connection=None): +def get_new_bucket(connection=None, name=None, headers=None): """ Get a bucket that exists and is empty. 
@@ -188,9 +194,10 @@ def get_new_bucket(connection=None): """ if connection is None: connection = s3.main - name = get_new_bucket_name() + if name is None: + name = get_new_bucket_name() # the only way for this to fail with a pre-existing bucket is if # someone raced us between setup nuke_prefixed_buckets and here; # ignore that as astronomically unlikely - bucket = connection.create_bucket(name) + bucket = connection.create_bucket(name, location=location, headers=headers) return bucket diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py index 7a8cd60..cf80f05 100644 --- a/s3tests/functional/test_s3.py +++ b/s3tests/functional/test_s3.py @@ -752,7 +752,7 @@ def test_object_write_to_nonexist_bucket(): def test_bucket_create_delete(): name = '{prefix}foo'.format(prefix=get_prefix()) print 'Trying bucket {name!r}'.format(name=name) - bucket = s3.main.create_bucket(name) + bucket = get_new_bucket(s3.main, name) # make sure it's actually there s3.main.get_bucket(bucket.name) bucket.delete() @@ -2311,7 +2311,7 @@ def check_bad_bucket_name(name): Attempt to create a bucket with a specified name, and confirm that the request fails because of an invalid bucket name. 
""" - e = assert_raises(boto.exception.S3ResponseError, s3.main.create_bucket, name) + e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, s3.main, name) eq(e.status, 400) eq(e.reason, 'Bad Request') eq(e.error_code, 'InvalidBucketName') @@ -2337,7 +2337,7 @@ def test_bucket_create_naming_bad_starts_nonalpha(): def test_bucket_create_naming_bad_short_empty(): # bucket creates where name is empty look like PUTs to the parent # resource (with slash), hence their error response is different - e = assert_raises(boto.exception.S3ResponseError, s3.main.create_bucket, '') + e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, s3.main, '') eq(e.status, 405) eq(e.reason, 'Method Not Allowed') eq(e.error_code, 'MethodNotAllowed') @@ -2384,7 +2384,7 @@ def check_good_bucket_name(name, _prefix=None): # should be very rare if _prefix is None: _prefix = get_prefix() - s3.main.create_bucket('{prefix}{name}'.format( + get_new_bucket(s3.main, '{prefix}{name}'.format( prefix=_prefix, name=name, )) @@ -2398,7 +2398,7 @@ def _test_bucket_create_naming_good_long(length): prefix = get_prefix() assert len(prefix) < 255 num = length - len(prefix) - s3.main.create_bucket('{prefix}{name}'.format( + get_new_bucket(s3.main, '{prefix}{name}'.format( prefix=prefix, name=num*'a', )) @@ -2473,7 +2473,7 @@ def test_bucket_list_long_name(): prefix = get_prefix() length = 251 num = length - len(prefix) - bucket = s3.main.create_bucket('{prefix}{name}'.format( + bucket = get_new_bucket(s3.main, '{prefix}{name}'.format( prefix=prefix, name=num*'a', )) @@ -2571,9 +2571,9 @@ def test_bucket_create_naming_dns_dash_dot(): @attr(operation='re-create') @attr(assertion='idempotent success') def test_bucket_create_exists(): - bucket = get_new_bucket() + bucket = get_new_bucket(s3.main) # REST idempotency means this should be a nop - s3.main.create_bucket(bucket.name) + get_new_bucket(s3.main, bucket.name) @attr(resource='bucket') @@ -2584,7 +2584,7 @@ def 
test_bucket_create_exists_nonowner(): # Names are shared across a global namespace. As such, no two # users can create a bucket with that same name. bucket = get_new_bucket() - e = assert_raises(boto.exception.S3CreateError, s3.alt.create_bucket, bucket.name) + e = assert_raises(boto.exception.S3CreateError, get_new_bucket, s3.alt, bucket.name) eq(e.status, 409) eq(e.reason, 'Conflict') eq(e.error_code, 'BucketAlreadyExists') @@ -3460,7 +3460,7 @@ def test_object_header_acl_grants(): @attr('fails_on_dho') def test_bucket_header_acl_grants(): headers = _get_acl_header() - bucket = s3.main.create_bucket(get_prefix(), headers=headers) + bucket = get_new_bucket(s3.main, get_prefix(), headers) policy = bucket.get_acl() check_grants( @@ -3595,7 +3595,7 @@ def test_bucket_acl_revoke_all(): @attr('fails_on_rgw') def test_logging_toggle(): bucket = get_new_bucket() - log_bucket = s3.main.create_bucket(bucket.name + '-log') + log_bucket = get_new_bucket(s3.main, bucket.name + '-log') log_bucket.set_as_logging_target() bucket.enable_logging(target_bucket=log_bucket, target_prefix=bucket.name) bucket.disable_logging() @@ -3907,7 +3907,7 @@ def test_bucket_recreate_not_overriding(): names = [e.name for e in list(li)] eq(names, key_names) - bucket2 = s3.main.create_bucket(bucket.name) + bucket2 = get_new_bucket(s3.main, bucket.name) li = bucket.list() From 232dd358177fafbd69d9c7b4847d8edccf2d6410 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Thu, 25 Jul 2013 14:13:34 -0700 Subject: [PATCH 03/16] add flexible multi-region configuration can now create a region-specific configuration: [region foo] api_name = ... host = ... port = ... 
and set that region to be used for specific connection: [s3 main] region = foo Signed-off-by: Yehuda Sadeh --- s3tests/functional/__init__.py | 109 +++++++++++++++++++++++---------- s3tests/functional/test_s3.py | 31 +++++----- 2 files changed, 93 insertions(+), 47 deletions(-) diff --git a/s3tests/functional/__init__.py b/s3tests/functional/__init__.py index ce8ca59..9dcbf07 100644 --- a/s3tests/functional/__init__.py +++ b/s3tests/functional/__init__.py @@ -9,10 +9,18 @@ import string s3 = bunch.Bunch() config = bunch.Bunch() +regions = bunch.Bunch() +targets = bunch.Bunch() # this will be assigned by setup() prefix = None +calling_formats = dict( + ordinary=boto.s3.connection.OrdinaryCallingFormat(), + subdomain=boto.s3.connection.SubdomainCallingFormat(), + vhost=boto.s3.connection.VHostCallingFormat(), + ) + def get_prefix(): assert prefix is not None return prefix @@ -71,6 +79,48 @@ def nuke_prefixed_buckets(prefix): print 'Done with cleanup of test buckets.' +class TargetConfig: + def __init__(self, cfg, section): + self.port = None + self.api_name = '' + self.is_master = False + self.is_secure = False + try: + self.api_name = cfg.get(section, 'api_name') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + pass + try: + self.port = cfg.getint(section, 'port') + except ConfigParser.NoOptionError: + pass + try: + self.host=cfg.get(section, 'host') + except ConfigParser.NoOptionError: + raise RuntimeError( + 'host not specified for section {s}'.format(s=section) + ) + try: + self.is_secure=cfg.getboolean(section, 'is_secure') + except ConfigParser.NoOptionError: + pass + + try: + raw_calling_format = cfg.get(section, 'calling_format') + except ConfigParser.NoOptionError: + raw_calling_format = 'ordinary' + + try: + self.calling_format = calling_formats[raw_calling_format] + except KeyError: + raise RuntimeError( + 'calling_format unknown: %r' % raw_calling_format + ) + +class TargetConnection: + def __init__(self, conf, conn): + self.conf = 
conf + self.connection = conn + # nosetests --processes=N with N>1 is safe _multiprocess_can_split_ = True @@ -88,25 +138,28 @@ def setup(): cfg.readfp(f) global prefix - global location + global targets + try: template = cfg.get('fixtures', 'bucket prefix') except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): template = 'test-{random}-' prefix = choose_bucket_prefix(template=template) - try: - location = cfg.get('region main', 'name') - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): - location = '' - s3.clear() config.clear() - calling_formats = dict( - ordinary=boto.s3.connection.OrdinaryCallingFormat(), - subdomain=boto.s3.connection.SubdomainCallingFormat(), - vhost=boto.s3.connection.VHostCallingFormat(), - ) + regions.clear() + + for section in cfg.sections(): + try: + (type_, name) = section.split(None, 1) + except ValueError: + continue + if type_ != 'region': + continue + region_conf = TargetConfig(cfg, section) + regions[name] = region_conf + for section in cfg.sections(): try: (type_, name) = section.split(None, 1) @@ -114,22 +167,12 @@ def setup(): continue if type_ != 's3': continue - try: - port = cfg.getint(section, 'port') - except ConfigParser.NoOptionError: - port = None try: - raw_calling_format = cfg.get(section, 'calling_format') + region_name = cfg.get(section, 'region') + region_config = regions[region_name] except ConfigParser.NoOptionError: - raw_calling_format = 'ordinary' - - try: - calling_format = calling_formats[raw_calling_format] - except KeyError: - raise RuntimeError( - 'calling_format unknown: %r' % raw_calling_format - ) + region_config = TargetConfig(cfg, section) config[name] = bunch.Bunch() for var in [ @@ -144,13 +187,14 @@ def setup(): conn = boto.s3.connection.S3Connection( aws_access_key_id=cfg.get(section, 'access_key'), aws_secret_access_key=cfg.get(section, 'secret_key'), - is_secure=cfg.getboolean(section, 'is_secure'), - port=port, - host=cfg.get(section, 'host'), + 
is_secure=region_config.is_secure, + port=region_config.port, + host=region_config.host, # TODO test vhost calling format - calling_format=calling_format, + calling_format=region_config.calling_format, ) s3[name] = conn + targets[name] = TargetConnection(region_config, conn) # WARNING! we actively delete all buckets we see with the prefix # we've chosen! Choose your prefix with care, and don't reuse @@ -185,19 +229,20 @@ def get_new_bucket_name(): return name -def get_new_bucket(connection=None, name=None, headers=None): +def get_new_bucket(target=None, name=None, headers=None): """ Get a bucket that exists and is empty. Always recreates a bucket from scratch. This is useful to also reset ACLs and such. """ - if connection is None: - connection = s3.main + if target is None: + target = targets.main + connection = target.connection if name is None: name = get_new_bucket_name() # the only way for this to fail with a pre-existing bucket is if # someone raced us between setup nuke_prefixed_buckets and here; # ignore that as astronomically unlikely - bucket = connection.create_bucket(name, location=location, headers=headers) + bucket = connection.create_bucket(name, location=target.conf.api_name, headers=headers) return bucket diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py index cf80f05..641e415 100644 --- a/s3tests/functional/test_s3.py +++ b/s3tests/functional/test_s3.py @@ -40,6 +40,7 @@ from . 
import ( get_new_bucket, get_new_bucket_name, s3, + targets, config, get_prefix, ) @@ -752,7 +753,7 @@ def test_object_write_to_nonexist_bucket(): def test_bucket_create_delete(): name = '{prefix}foo'.format(prefix=get_prefix()) print 'Trying bucket {name!r}'.format(name=name) - bucket = get_new_bucket(s3.main, name) + bucket = get_new_bucket(targets.main, name) # make sure it's actually there s3.main.get_bucket(bucket.name) bucket.delete() @@ -2311,7 +2312,7 @@ def check_bad_bucket_name(name): Attempt to create a bucket with a specified name, and confirm that the request fails because of an invalid bucket name. """ - e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, s3.main, name) + e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main, name) eq(e.status, 400) eq(e.reason, 'Bad Request') eq(e.error_code, 'InvalidBucketName') @@ -2337,7 +2338,7 @@ def test_bucket_create_naming_bad_starts_nonalpha(): def test_bucket_create_naming_bad_short_empty(): # bucket creates where name is empty look like PUTs to the parent # resource (with slash), hence their error response is different - e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, s3.main, '') + e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main, '') eq(e.status, 405) eq(e.reason, 'Method Not Allowed') eq(e.error_code, 'MethodNotAllowed') @@ -2384,7 +2385,7 @@ def check_good_bucket_name(name, _prefix=None): # should be very rare if _prefix is None: _prefix = get_prefix() - get_new_bucket(s3.main, '{prefix}{name}'.format( + get_new_bucket(targets.main, '{prefix}{name}'.format( prefix=_prefix, name=name, )) @@ -2398,7 +2399,7 @@ def _test_bucket_create_naming_good_long(length): prefix = get_prefix() assert len(prefix) < 255 num = length - len(prefix) - get_new_bucket(s3.main, '{prefix}{name}'.format( + get_new_bucket(targets.main, '{prefix}{name}'.format( prefix=prefix, name=num*'a', )) @@ -2473,7 +2474,7 @@ def 
test_bucket_list_long_name(): prefix = get_prefix() length = 251 num = length - len(prefix) - bucket = get_new_bucket(s3.main, '{prefix}{name}'.format( + bucket = get_new_bucket(targets.main, '{prefix}{name}'.format( prefix=prefix, name=num*'a', )) @@ -2571,9 +2572,9 @@ def test_bucket_create_naming_dns_dash_dot(): @attr(operation='re-create') @attr(assertion='idempotent success') def test_bucket_create_exists(): - bucket = get_new_bucket(s3.main) + bucket = get_new_bucket(targets.main) # REST idempotency means this should be a nop - get_new_bucket(s3.main, bucket.name) + get_new_bucket(targets.main, bucket.name) @attr(resource='bucket') @@ -2584,7 +2585,7 @@ def test_bucket_create_exists_nonowner(): # Names are shared across a global namespace. As such, no two # users can create a bucket with that same name. bucket = get_new_bucket() - e = assert_raises(boto.exception.S3CreateError, get_new_bucket, s3.alt, bucket.name) + e = assert_raises(boto.exception.S3CreateError, get_new_bucket, targets.alt, bucket.name) eq(e.status, 409) eq(e.reason, 'Conflict') eq(e.error_code, 'BucketAlreadyExists') @@ -2907,7 +2908,7 @@ def test_object_acl_canned_authenticatedread(): @attr(operation='acl bucket-owner-read') @attr(assertion='read back expected values') def test_object_acl_canned_bucketownerread(): - bucket = get_new_bucket(s3.main) + bucket = get_new_bucket(targets.main) bucket.set_acl('public-read-write') key = s3.alt.get_bucket(bucket.name).new_key('foo') @@ -2951,7 +2952,7 @@ def test_object_acl_canned_bucketownerread(): @attr(operation='acl bucket-owner-read') @attr(assertion='read back expected values') def test_object_acl_canned_bucketownerfullcontrol(): - bucket = get_new_bucket(s3.main) + bucket = get_new_bucket(targets.main) bucket.set_acl('public-read-write') key = s3.alt.get_bucket(bucket.name).new_key('foo') @@ -3460,7 +3461,7 @@ def test_object_header_acl_grants(): @attr('fails_on_dho') def test_bucket_header_acl_grants(): headers = _get_acl_header() - bucket 
= get_new_bucket(s3.main, get_prefix(), headers) + bucket = get_new_bucket(targets.main, get_prefix(), headers) policy = bucket.get_acl() check_grants( @@ -3595,7 +3596,7 @@ def test_bucket_acl_revoke_all(): @attr('fails_on_rgw') def test_logging_toggle(): bucket = get_new_bucket() - log_bucket = get_new_bucket(s3.main, bucket.name + '-log') + log_bucket = get_new_bucket(targets.main, bucket.name + '-log') log_bucket.set_as_logging_target() bucket.enable_logging(target_bucket=log_bucket, target_prefix=bucket.name) bucket.disable_logging() @@ -3907,7 +3908,7 @@ def test_bucket_recreate_not_overriding(): names = [e.name for e in list(li)] eq(names, key_names) - bucket2 = get_new_bucket(s3.main, bucket.name) + bucket2 = get_new_bucket(targets.main, bucket.name) li = bucket.list() @@ -4000,7 +4001,7 @@ def test_object_copy_diff_bucket(): @attr(operation='copy from an inaccessible bucket') @attr(assertion='fails w/AttributeError') def test_object_copy_not_owned_bucket(): - buckets = [get_new_bucket(), get_new_bucket(s3.alt)] + buckets = [get_new_bucket(), get_new_bucket(targets.alt)] print repr(buckets[1]) key = buckets[0].new_key('foo123bar') key.set_contents_from_string('foo') From 1c6b1ba1f6a97a509baaa04886052e198b255e5f Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Thu, 25 Jul 2013 16:43:19 -0700 Subject: [PATCH 04/16] rearrange regions info container Now able to easily get the master and secondaries Signed-off-by: Yehuda Sadeh --- s3tests/functional/__init__.py | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/s3tests/functional/__init__.py b/s3tests/functional/__init__.py index 9dcbf07..745be6c 100644 --- a/s3tests/functional/__init__.py +++ b/s3tests/functional/__init__.py @@ -9,7 +9,6 @@ import string s3 = bunch.Bunch() config = bunch.Bunch() -regions = bunch.Bunch() targets = bunch.Bunch() # this will be assigned by setup() @@ -121,6 +120,29 @@ class TargetConnection: self.conf = conf self.connection = conn + 
+ +class RegionsInfo: + def __init__(self): + self.m = bunch.Bunch() + self.master = None + self.secondaries = [] + + def add(self, name, region_config): + self.m[name] = region_config + if (region_config.is_master): + if not self.master is None: + raise RuntimeError( + 'multiple regions defined as master' + ) + self.master = region_config + else: + self.secondaries.append(region_config) + def get(self, name): + return self.m[name]; + +regions = RegionsInfo() + # nosetests --processes=N with N>1 is safe _multiprocess_can_split_ = True @@ -148,7 +170,6 @@ def setup(): s3.clear() config.clear() - regions.clear() for section in cfg.sections(): try: @@ -157,8 +178,7 @@ def setup(): continue if type_ != 'region': continue - region_conf = TargetConfig(cfg, section) - regions[name] = region_conf + regions.add(name, TargetConfig(cfg, section)) for section in cfg.sections(): try: @@ -170,7 +190,7 @@ def setup(): try: region_name = cfg.get(section, 'region') - region_config = regions[region_name] + region_config = regions.get(region_name) except ConfigParser.NoOptionError: region_config = TargetConfig(cfg, section) From 99d3b4928d76ea78085b37d561038a0bc23dccf9 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Thu, 25 Jul 2013 16:44:41 -0700 Subject: [PATCH 05/16] set the region is_master field Signed-off-by: Yehuda Sadeh --- s3tests/functional/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/s3tests/functional/__init__.py b/s3tests/functional/__init__.py index 745be6c..f4644bb 100644 --- a/s3tests/functional/__init__.py +++ b/s3tests/functional/__init__.py @@ -98,6 +98,11 @@ class TargetConfig: raise RuntimeError( 'host not specified for section {s}'.format(s=section) ) + try: + self.is_master=cfg.getboolean(section, 'is_master') + except ConfigParser.NoOptionError: + pass + try: self.is_secure=cfg.getboolean(section, 'is_secure') except ConfigParser.NoOptionError: From 0fb067de30dc2f92bc3499e776c3cc9d674b2a54 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh 
Date: Fri, 26 Jul 2013 10:46:56 -0700 Subject: [PATCH 06/16] s3tests: some more multiregion changes Now creating a connection per region for each user, can access master and secondaries, and set a default region. No longer using a specific region per user, as it doesn't make sense. Signed-off-by: Yehuda Sadeh --- s3tests/functional/__init__.py | 59 +++++++++++++++++++++++----------- s3tests/functional/test_s3.py | 30 ++++++++--------- 2 files changed, 56 insertions(+), 33 deletions(-) diff --git a/s3tests/functional/__init__.py b/s3tests/functional/__init__.py index f4644bb..69bc1be 100644 --- a/s3tests/functional/__init__.py +++ b/s3tests/functional/__init__.py @@ -144,10 +144,32 @@ class RegionsInfo: else: self.secondaries.append(region_config) def get(self, name): - return self.m[name]; + return self.m[name] + def get(self): + return self.m + def iteritems(self): + return self.m.iteritems() regions = RegionsInfo() + +class RegionsConn: + def __init__(self): + self.m = bunch.Bunch() + self.default = None + self.master = None + self.secondaries = [] + + def add(self, name, conn): + self.m[name] = conn + if not self.default: + self.default = conn + if (conn.conf.is_master): + self.master = conn + else: + self.secondaries.append(conn) + + # nosetests --processes=N with N>1 is safe _multiprocess_can_split_ = True @@ -193,11 +215,8 @@ def setup(): if type_ != 's3': continue - try: - region_name = cfg.get(section, 'region') - region_config = regions.get(region_name) - except ConfigParser.NoOptionError: - region_config = TargetConfig(cfg, section) + if len(regions.get()) == 0: + regions.add("default", TargetConfig(cfg, section)) config[name] = bunch.Bunch() for var in [ @@ -209,17 +228,21 @@ def setup(): config[name][var] = cfg.get(section, var) except ConfigParser.NoOptionError: pass - conn = boto.s3.connection.S3Connection( - aws_access_key_id=cfg.get(section, 'access_key'), - aws_secret_access_key=cfg.get(section, 'secret_key'), - is_secure=region_config.is_secure, - 
port=region_config.port, - host=region_config.host, - # TODO test vhost calling format - calling_format=region_config.calling_format, - ) - s3[name] = conn - targets[name] = TargetConnection(region_config, conn) + + targets[name] = RegionsConn() + + for (k, conf) in regions.iteritems(): + conn = boto.s3.connection.S3Connection( + aws_access_key_id=cfg.get(section, 'access_key'), + aws_secret_access_key=cfg.get(section, 'secret_key'), + is_secure=conf.is_secure, + port=conf.port, + host=conf.host, + # TODO test vhost calling format + calling_format=conf.calling_format, + ) + targets[name].add(k, TargetConnection(conf, conn)) + s3[name] = targets[name].default.connection # WARNING! we actively delete all buckets we see with the prefix # we've chosen! Choose your prefix with care, and don't reuse @@ -262,7 +285,7 @@ def get_new_bucket(target=None, name=None, headers=None): reset ACLs and such. """ if target is None: - target = targets.main + target = targets.main.default connection = target.connection if name is None: name = get_new_bucket_name() diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py index 641e415..09e4cef 100644 --- a/s3tests/functional/test_s3.py +++ b/s3tests/functional/test_s3.py @@ -753,7 +753,7 @@ def test_object_write_to_nonexist_bucket(): def test_bucket_create_delete(): name = '{prefix}foo'.format(prefix=get_prefix()) print 'Trying bucket {name!r}'.format(name=name) - bucket = get_new_bucket(targets.main, name) + bucket = get_new_bucket(targets.main.default, name) # make sure it's actually there s3.main.get_bucket(bucket.name) bucket.delete() @@ -2312,7 +2312,7 @@ def check_bad_bucket_name(name): Attempt to create a bucket with a specified name, and confirm that the request fails because of an invalid bucket name. 
""" - e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main, name) + e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, name) eq(e.status, 400) eq(e.reason, 'Bad Request') eq(e.error_code, 'InvalidBucketName') @@ -2338,7 +2338,7 @@ def test_bucket_create_naming_bad_starts_nonalpha(): def test_bucket_create_naming_bad_short_empty(): # bucket creates where name is empty look like PUTs to the parent # resource (with slash), hence their error response is different - e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main, '') + e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, '') eq(e.status, 405) eq(e.reason, 'Method Not Allowed') eq(e.error_code, 'MethodNotAllowed') @@ -2385,7 +2385,7 @@ def check_good_bucket_name(name, _prefix=None): # should be very rare if _prefix is None: _prefix = get_prefix() - get_new_bucket(targets.main, '{prefix}{name}'.format( + get_new_bucket(targets.main.default, '{prefix}{name}'.format( prefix=_prefix, name=name, )) @@ -2399,7 +2399,7 @@ def _test_bucket_create_naming_good_long(length): prefix = get_prefix() assert len(prefix) < 255 num = length - len(prefix) - get_new_bucket(targets.main, '{prefix}{name}'.format( + get_new_bucket(targets.main.default, '{prefix}{name}'.format( prefix=prefix, name=num*'a', )) @@ -2474,7 +2474,7 @@ def test_bucket_list_long_name(): prefix = get_prefix() length = 251 num = length - len(prefix) - bucket = get_new_bucket(targets.main, '{prefix}{name}'.format( + bucket = get_new_bucket(targets.main.default, '{prefix}{name}'.format( prefix=prefix, name=num*'a', )) @@ -2572,9 +2572,9 @@ def test_bucket_create_naming_dns_dash_dot(): @attr(operation='re-create') @attr(assertion='idempotent success') def test_bucket_create_exists(): - bucket = get_new_bucket(targets.main) + bucket = get_new_bucket(targets.main.default) # REST idempotency means this should be a nop - 
get_new_bucket(targets.main, bucket.name) + get_new_bucket(targets.main.default, bucket.name) @attr(resource='bucket') @@ -2585,7 +2585,7 @@ def test_bucket_create_exists_nonowner(): # Names are shared across a global namespace. As such, no two # users can create a bucket with that same name. bucket = get_new_bucket() - e = assert_raises(boto.exception.S3CreateError, get_new_bucket, targets.alt, bucket.name) + e = assert_raises(boto.exception.S3CreateError, get_new_bucket, targets.alt.default, bucket.name) eq(e.status, 409) eq(e.reason, 'Conflict') eq(e.error_code, 'BucketAlreadyExists') @@ -2908,7 +2908,7 @@ def test_object_acl_canned_authenticatedread(): @attr(operation='acl bucket-owner-read') @attr(assertion='read back expected values') def test_object_acl_canned_bucketownerread(): - bucket = get_new_bucket(targets.main) + bucket = get_new_bucket(targets.main.default) bucket.set_acl('public-read-write') key = s3.alt.get_bucket(bucket.name).new_key('foo') @@ -2952,7 +2952,7 @@ def test_object_acl_canned_bucketownerread(): @attr(operation='acl bucket-owner-read') @attr(assertion='read back expected values') def test_object_acl_canned_bucketownerfullcontrol(): - bucket = get_new_bucket(targets.main) + bucket = get_new_bucket(targets.main.default) bucket.set_acl('public-read-write') key = s3.alt.get_bucket(bucket.name).new_key('foo') @@ -3461,7 +3461,7 @@ def test_object_header_acl_grants(): @attr('fails_on_dho') def test_bucket_header_acl_grants(): headers = _get_acl_header() - bucket = get_new_bucket(targets.main, get_prefix(), headers) + bucket = get_new_bucket(targets.main.default, get_prefix(), headers) policy = bucket.get_acl() check_grants( @@ -3596,7 +3596,7 @@ def test_bucket_acl_revoke_all(): @attr('fails_on_rgw') def test_logging_toggle(): bucket = get_new_bucket() - log_bucket = get_new_bucket(targets.main, bucket.name + '-log') + log_bucket = get_new_bucket(targets.main.default, bucket.name + '-log') log_bucket.set_as_logging_target() 
bucket.enable_logging(target_bucket=log_bucket, target_prefix=bucket.name) bucket.disable_logging() @@ -3908,7 +3908,7 @@ def test_bucket_recreate_not_overriding(): names = [e.name for e in list(li)] eq(names, key_names) - bucket2 = get_new_bucket(targets.main, bucket.name) + bucket2 = get_new_bucket(targets.main.default, bucket.name) li = bucket.list() @@ -4001,7 +4001,7 @@ def test_object_copy_diff_bucket(): @attr(operation='copy from an inaccessible bucket') @attr(assertion='fails w/AttributeError') def test_object_copy_not_owned_bucket(): - buckets = [get_new_bucket(), get_new_bucket(targets.alt)] + buckets = [get_new_bucket(), get_new_bucket(targets.alt.default)] print repr(buckets[1]) key = buckets[0].new_key('foo123bar') key.set_contents_from_string('foo') From 5f4910fad54ecee751f8ccde8fe32f266828291d Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Fri, 26 Jul 2013 11:16:36 -0700 Subject: [PATCH 07/16] s3tests: test_region_bucket_create_secondary_access_master first multi-region test Signed-off-by: Yehuda Sadeh --- s3tests/functional/test_s3.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py index 09e4cef..f6a95a5 100644 --- a/s3tests/functional/test_s3.py +++ b/s3tests/functional/test_s3.py @@ -48,6 +48,8 @@ from . 
import ( NONEXISTENT_EMAIL = 'doesnotexist@dreamhost.com.invalid' +def not_eq(a, b): + assert a != b, "%r == %r" % (a, b) def check_access_denied(fn, *args, **kwargs): e = assert_raises(boto.exception.S3ResponseError, fn, *args, **kwargs) @@ -4530,3 +4532,29 @@ def test_ranged_request_response_code(): eq(fetched_content, content[4:8]) eq(status, 206) +def assert_can_test_multiregion(): + not_eq(targets.main.master, None) + not_eq(len(targets.main.secondaries), 0) + +@attr(resource='bucket') +@attr(method='get') +@attr(operation='create on one region, access in another') +@attr(assertion='can\'t access in other region') +@attr('multiregion') +def test_region_bucket_create_secondary_access_master(): + assert_can_test_multiregion() + + master_conn = targets.main.master.connection + + for r in targets.main.secondaries: + conn = r.connection + bucket = get_new_bucket(r) + + e = assert_raises(boto.exception.S3ResponseError, master_conn.get_bucket, bucket.name) + eq(e.status, 301) + + e = assert_raises(boto.exception.S3ResponseError, master_conn.delete_bucket, bucket.name) + eq(e.status, 301) + + + conn.delete_bucket(bucket) From 04d46c59f08a8f7a3b0dc5750b232efe226a252b Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Fri, 26 Jul 2013 18:07:52 -0700 Subject: [PATCH 08/16] s3tests: basic test to copy object between regions Signed-off-by: Yehuda Sadeh --- s3tests/functional/test_s3.py | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py index f6a95a5..98ba8b4 100644 --- a/s3tests/functional/test_s3.py +++ b/s3tests/functional/test_s3.py @@ -4541,7 +4541,7 @@ def assert_can_test_multiregion(): @attr(operation='create on one region, access in another') @attr(assertion='can\'t access in other region') @attr('multiregion') -def test_region_bucket_create_secondary_access_master(): +def test_region_bucket_create_secondary_access_remove_master(): assert_can_test_multiregion() 
master_conn = targets.main.master.connection @@ -4558,3 +4558,30 @@ def test_region_bucket_create_secondary_access_master(): conn.delete_bucket(bucket) + +@attr(resource='object') +@attr(method='copy') +@attr(operation='cread object in one region, read in another') +@attr(assertion='can read object') +@attr('multiregion') +def test_region_copy_object(): + assert_can_test_multiregion() + + master = targets.main.master + + master_conn = master.connection + + master_bucket = get_new_bucket(master) + for r in targets.main.secondaries: + conn = r.connection + bucket = get_new_bucket(r) + + content = 'testcontent' + + key = bucket.new_key('testobj') + key.set_contents_from_string(content) + + master_bucket.copy_key('testobj-dest', bucket.name, key.name) + + bucket.delete_key(key.name) + conn.delete_bucket(bucket) From b4441e3057585c5266f6eaf084e347d707cdece8 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Fri, 26 Jul 2013 20:33:48 -0700 Subject: [PATCH 09/16] s3tests: improve cross region copy, sync meta Can now configure sync agent rest address in order to force a sync operation. Another option is to set a waiting time for meta sync. 
Signed-off-by: Yehuda Sadeh --- s3tests/functional/__init__.py | 19 +++++++++++ s3tests/functional/test_s3.py | 61 ++++++++++++++++++++++++++++------ 2 files changed, 69 insertions(+), 11 deletions(-) diff --git a/s3tests/functional/__init__.py b/s3tests/functional/__init__.py index 69bc1be..88b17e8 100644 --- a/s3tests/functional/__init__.py +++ b/s3tests/functional/__init__.py @@ -84,6 +84,9 @@ class TargetConfig: self.api_name = '' self.is_master = False self.is_secure = False + self.sync_agent_addr = None + self.sync_agent_port = 0 + self.sync_meta_wait = 0 try: self.api_name = cfg.get(section, 'api_name') except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): @@ -113,6 +116,22 @@ class TargetConfig: except ConfigParser.NoOptionError: raw_calling_format = 'ordinary' + try: + self.sync_agent_addr = cfg.get(section, 'sync_agent_addr') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + pass + + try: + self.sync_agent_port = cfg.getint(section, 'sync_agent_port') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + pass + + try: + self.sync_meta_wait = cfg.getint(section, 'sync_meta_wait') + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + pass + + try: self.calling_format = calling_formats[raw_calling_format] except KeyError: diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py index 98ba8b4..483bb16 100644 --- a/s3tests/functional/test_s3.py +++ b/s3tests/functional/test_s3.py @@ -4,6 +4,7 @@ import boto.s3.connection import boto.s3.acl import bunch import datetime +import time import email.utils import isodate import nose @@ -4559,29 +4560,67 @@ def test_region_bucket_create_secondary_access_remove_master(): conn.delete_bucket(bucket) +def region_sync_meta(conf): + if conf.sync_agent_addr: + ret = requests.post('http://{addr}:{port}/metadata/partial'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port)) + eq(ret.status_code, 200) + if conf.sync_meta_wait: + 
time.sleep(conf.sync_meta_wait) + +@attr(resource='bucket') +@attr(method='get') +@attr(operation='create on one region, access in another') +@attr(assertion='can\'t access in other region') +@attr('multiregion') +def test_region_bucket_create_master_access_remove_secondary(): + assert_can_test_multiregion() + + master = targets.main.master + master_conn = master.connection + + for r in targets.main.secondaries: + conn = r.connection + bucket = get_new_bucket(master) + + region_sync_meta(r.conf) + + e = assert_raises(boto.exception.S3ResponseError, conn.get_bucket, bucket.name) + eq(e.status, 301) + + e = assert_raises(boto.exception.S3ResponseError, conn.delete_bucket, bucket.name) + eq(e.status, 301) + + + master_conn.delete_bucket(bucket) + @attr(resource='object') @attr(method='copy') -@attr(operation='cread object in one region, read in another') +@attr(operation='copy object between regions, verify') @attr(assertion='can read object') @attr('multiregion') def test_region_copy_object(): assert_can_test_multiregion() master = targets.main.master - master_conn = master.connection master_bucket = get_new_bucket(master) - for r in targets.main.secondaries: - conn = r.connection - bucket = get_new_bucket(r) + for file_size in (1024, 1024 * 1024, 10 * 1024 * 1024, + 100 * 1024 * 1024): + for r in targets.main.secondaries: + conn = r.connection + bucket = get_new_bucket(r) - content = 'testcontent' + content = 'testcontent' - key = bucket.new_key('testobj') - key.set_contents_from_string(content) + key = bucket.new_key('testobj') + fp_a = FakeWriteFile(file_size, 'A') + key.set_contents_from_file(fp_a) - master_bucket.copy_key('testobj-dest', bucket.name, key.name) + dest_key = master_bucket.copy_key('testobj-dest', bucket.name, key.name) - bucket.delete_key(key.name) - conn.delete_bucket(bucket) + # verify dest + _verify_atomic_key_data(dest_key, file_size, 'A') + + bucket.delete_key(key.name) + conn.delete_bucket(bucket) From 
3b733245c5d4d4eddfd9d22f6c984479be3d307b Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Fri, 26 Jul 2013 22:19:36 -0700 Subject: [PATCH 10/16] s3tests: modify cross region copy, sync triggering Signed-off-by: Yehuda Sadeh --- s3tests/functional/__init__.py | 3 ++ s3tests/functional/test_s3.py | 68 +++++++++++++++++++++------------- 2 files changed, 46 insertions(+), 25 deletions(-) diff --git a/s3tests/functional/__init__.py b/s3tests/functional/__init__.py index 88b17e8..a69a175 100644 --- a/s3tests/functional/__init__.py +++ b/s3tests/functional/__init__.py @@ -179,6 +179,9 @@ class RegionsConn: self.master = None self.secondaries = [] + def iteritems(self): + return self.m.iteritems() + def add(self, name, conn): self.m[name] = conn if not self.default: diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py index 483bb16..ea93b54 100644 --- a/s3tests/functional/test_s3.py +++ b/s3tests/functional/test_s3.py @@ -4560,12 +4560,18 @@ def test_region_bucket_create_secondary_access_remove_master(): conn.delete_bucket(bucket) -def region_sync_meta(conf): - if conf.sync_agent_addr: - ret = requests.post('http://{addr}:{port}/metadata/partial'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port)) - eq(ret.status_code, 200) - if conf.sync_meta_wait: - time.sleep(conf.sync_meta_wait) +# syncs all the regions except for the one passed in +def region_sync_meta(targets, region): + + for (k, r) in targets.iteritems(): + if r == region: + continue + conf = r.conf + if conf.sync_agent_addr: + ret = requests.post('http://{addr}:{port}/metadata/partial'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port)) + eq(ret.status_code, 200) + if conf.sync_meta_wait: + time.sleep(conf.sync_meta_wait) @attr(resource='bucket') @attr(method='get') @@ -4582,7 +4588,7 @@ def test_region_bucket_create_master_access_remove_secondary(): conn = r.connection bucket = get_new_bucket(master) - region_sync_meta(r.conf) + region_sync_meta(targets.main, 
master) e = assert_raises(boto.exception.S3ResponseError, conn.get_bucket, bucket.name) eq(e.status, 301) @@ -4590,7 +4596,6 @@ def test_region_bucket_create_master_access_remove_secondary(): e = assert_raises(boto.exception.S3ResponseError, conn.delete_bucket, bucket.name) eq(e.status, 301) - master_conn.delete_bucket(bucket) @attr(resource='object') @@ -4601,26 +4606,39 @@ def test_region_bucket_create_master_access_remove_secondary(): def test_region_copy_object(): assert_can_test_multiregion() - master = targets.main.master - master_conn = master.connection + for (k, dest) in targets.main.iteritems(): + dest_conn = dest.connection - master_bucket = get_new_bucket(master) - for file_size in (1024, 1024 * 1024, 10 * 1024 * 1024, - 100 * 1024 * 1024): - for r in targets.main.secondaries: - conn = r.connection - bucket = get_new_bucket(r) + dest_bucket = get_new_bucket(dest) + print 'created new dest bucket ', dest_bucket.name + region_sync_meta(targets.main, dest) - content = 'testcontent' + for file_size in (1024, 1024 * 1024, 10 * 1024 * 1024, + 100 * 1024 * 1024): + for (k2, r) in targets.main.iteritems(): + if r == dest_conn: + continue + conn = r.connection - key = bucket.new_key('testobj') - fp_a = FakeWriteFile(file_size, 'A') - key.set_contents_from_file(fp_a) + bucket = get_new_bucket(r) + print 'created bucket', bucket.name + region_sync_meta(targets.main, r) - dest_key = master_bucket.copy_key('testobj-dest', bucket.name, key.name) + content = 'testcontent' - # verify dest - _verify_atomic_key_data(dest_key, file_size, 'A') + key = bucket.new_key('testobj') + fp_a = FakeWriteFile(file_size, 'A') + key.set_contents_from_file(fp_a) - bucket.delete_key(key.name) - conn.delete_bucket(bucket) + dest_key = dest_bucket.copy_key('testobj-dest', bucket.name, key.name) + + # verify dest + _verify_atomic_key_data(dest_key, file_size, 'A') + + bucket.delete_key(key.name) + print 'removing bucket', bucket.name + conn.delete_bucket(bucket) + + 
dest_bucket.delete_key(dest_key.name) + + dest_conn.delete_bucket(dest_bucket) From 9954bc4470d6c9a4fe51016850ba4ffe5c5572b1 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Mon, 29 Jul 2013 13:17:46 -0700 Subject: [PATCH 11/16] s3tests: only run multiregion tests if configured Signed-off-by: Yehuda Sadeh --- s3tests/functional/test_s3.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py index ea93b54..a70f391 100644 --- a/s3tests/functional/test_s3.py +++ b/s3tests/functional/test_s3.py @@ -28,6 +28,7 @@ from urlparse import urlparse from nose.tools import eq_ as eq from nose.plugins.attrib import attr +from nose.plugins.skip import SkipTest from .utils import assert_raises import AnonymousAuth @@ -4533,9 +4534,9 @@ def test_ranged_request_response_code(): eq(fetched_content, content[4:8]) eq(status, 206) -def assert_can_test_multiregion(): - not_eq(targets.main.master, None) - not_eq(len(targets.main.secondaries), 0) +def check_can_test_multiregion(): + if not targets.main.master or len(targets.main.secondaries) == 0: + raise SkipTest @attr(resource='bucket') @attr(method='get') @@ -4543,7 +4544,7 @@ @attr(assertion='can\'t access in other region') @attr('multiregion') def test_region_bucket_create_secondary_access_remove_master(): - assert_can_test_multiregion() + check_can_test_multiregion() master_conn = targets.main.master.connection @@ -4579,7 +4580,7 @@ def region_sync_meta(targets, region): @attr(assertion='can\'t access in other region') @attr('multiregion') def test_region_bucket_create_master_access_remove_secondary(): - assert_can_test_multiregion() + check_can_test_multiregion() master = targets.main.master master_conn = master.connection @@ -4604,7 +4605,7 @@ def test_region_bucket_create_master_access_remove_secondary(): @attr(assertion='can read object') @attr('multiregion') def test_region_copy_object(): -
assert_can_test_multiregion() + check_can_test_multiregion() for (k, dest) in targets.main.iteritems(): dest_conn = dest.connection From 7bdf4b897a5c093923c721c0eaa2303180f88dd3 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Thu, 6 Jun 2013 11:19:05 -0700 Subject: [PATCH 12/16] test_s3: basic cors test related to issue #5261 Signed-off-by: Yehuda Sadeh --- requirements.txt | 2 +- s3tests/functional/test_s3.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f81903b..81e93e9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ PyYAML nose >=1.0.0 -boto ==2.4.1 +boto >=2.6.0 bunch >=1.0.0 # 0.14 switches to libev, that means bootstrap needs to change too gevent ==0.13.6 diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py index a70f391..708815c 100644 --- a/s3tests/functional/test_s3.py +++ b/s3tests/functional/test_s3.py @@ -36,6 +36,7 @@ import AnonymousAuth from email.header import decode_header from ordereddict import OrderedDict +from boto.s3.cors import CORSConfiguration from . 
import ( nuke_prefixed_buckets, @@ -4252,6 +4253,21 @@ def test_stress_bucket_acls_changes(): for i in xrange(10): _test_bucket_acls_changes_persistent(bucket); +@attr(resource='bucket') +@attr(method='put') +@attr(operation='set cors') +@attr(assertion='succeeds') +def test_set_cors(): + bucket = get_new_bucket() + cfg = CORSConfiguration() + cfg.add_rule('GET', '*') + + e = assert_raises(boto.exception.S3ResponseError, bucket.get_cors) + eq(e.status, 404) + + bucket.set_cors(cfg) + new_cfg = bucket.get_cors() + class FakeFile(object): """ file that simulates seek, tell, and current character From ef471ec2b96b40ec267f91493f3042707572ae18 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Thu, 1 Aug 2013 13:24:28 -0700 Subject: [PATCH 13/16] s3tests: improve cors test to cover more functionality Signed-off-by: Yehuda Sadeh --- s3tests/functional/test_s3.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py index 708815c..e858736 100644 --- a/s3tests/functional/test_s3.py +++ b/s3tests/functional/test_s3.py @@ -4260,7 +4260,8 @@ def test_stress_bucket_acls_changes(): def test_set_cors(): bucket = get_new_bucket() cfg = CORSConfiguration() - cfg.add_rule('GET', '*') + cfg.add_rule('GET', '*.get') + cfg.add_rule('PUT', '*.put') e = assert_raises(boto.exception.S3ResponseError, bucket.get_cors) eq(e.status, 404) @@ -4268,6 +4269,25 @@ def test_set_cors(): bucket.set_cors(cfg) new_cfg = bucket.get_cors() + eq(len(new_cfg), 2) + + result = bunch.Bunch() + + for c in new_cfg: + eq(len(c.allowed_method), 1) + eq(len(c.allowed_origin), 1) + result[c.allowed_method[0]] = c.allowed_origin[0] + + + eq(result['GET'], '*.get') + eq(result['PUT'], '*.put') + + bucket.delete_cors() + + e = assert_raises(boto.exception.S3ResponseError, bucket.get_cors) + eq(e.status, 404) + + class FakeFile(object): """ file that simulates seek, tell, and current character From 
f20c6e250e8d29bc2f8f89a01485c6f46b37e50e Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Mon, 5 Aug 2013 13:55:22 -0700 Subject: [PATCH 14/16] rename 'partial' metadata sync to 'incremental' Signed-off-by: Yehuda Sadeh --- s3tests/functional/test_s3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py index e858736..4668587 100644 --- a/s3tests/functional/test_s3.py +++ b/s3tests/functional/test_s3.py @@ -4605,7 +4605,7 @@ def region_sync_meta(targets, region): continue conf = r.conf if conf.sync_agent_addr: - ret = requests.post('http://{addr}:{port}/metadata/partial'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port)) + ret = requests.post('http://{addr}:{port}/metadata/incremental'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port)) eq(ret.status_code, 200) if conf.sync_meta_wait: time.sleep(conf.sync_meta_wait) From 34a06133eb0deb317739c67b10ca17d010cd3358 Mon Sep 17 00:00:00 2001 From: Joe Buck Date: Fri, 2 Aug 2013 16:49:20 -0700 Subject: [PATCH 15/16] readwrite.py: adding parameters Add an optional parameter to trigger deterministic file name creation (for separate write/read tasks). Also, change the behavior when zero writers are specified to actually generate no data. 
Signed-off-by: Joe Buck Reviewed-by: Josh Durgin --- s3tests/readwrite.py | 80 +++++++++++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 30 deletions(-) diff --git a/s3tests/readwrite.py b/s3tests/readwrite.py index 3298a64..d3b680e 100644 --- a/s3tests/readwrite.py +++ b/s3tests/readwrite.py @@ -161,13 +161,23 @@ def main(): bucket_name = common.choose_bucket_prefix(config.readwrite.bucket, max_len=30) bucket = conn.create_bucket(bucket_name) print "Created bucket: {name}".format(name=bucket.name) - file_names = realistic.names( - mean=15, - stddev=4, - seed=seeds['names'], - ) - file_names = itertools.islice(file_names, config.readwrite.files.num) - file_names = list(file_names) + + # check flag for deterministic file name creation + if not config.readwrite.get('deterministic_file_names'): + print 'Creating random file names' + file_names = realistic.names( + mean=15, + stddev=4, + seed=seeds['names'], + ) + file_names = itertools.islice(file_names, config.readwrite.files.num) + file_names = list(file_names) + else: + print 'Creating file names that are deterministic' + file_names = [] + for x in xrange(config.readwrite.files.num): + file_names.append('test_file_{num}'.format(num=x)) + files = realistic.files2( mean=1024 * config.readwrite.files.size, stddev=1024 * config.readwrite.files.stddev, @@ -175,18 +185,20 @@ def main(): ) q = gevent.queue.Queue() - # warmup - get initial set of files uploaded - print "Uploading initial set of {num} files".format(num=config.readwrite.files.num) - warmup_pool = gevent.pool.Pool(size=100) - for file_name in file_names: - fp = next(files) - warmup_pool.spawn_link_exception( - write_file, - bucket=bucket, - file_name=file_name, - fp=fp, - ) - warmup_pool.join() + + # warmup - get initial set of files uploaded if there are any writers specified + if config.readwrite.writers > 0: + print "Uploading initial set of {num} files".format(num=config.readwrite.files.num) + warmup_pool = gevent.pool.Pool(size=100) 
+ for file_name in file_names: + fp = next(files) + warmup_pool.spawn_link_exception( + write_file, + bucket=bucket, + file_name=file_name, + fp=fp, + ) + warmup_pool.join() # main work print "Starting main worker loop." @@ -194,17 +206,25 @@ print "Spawning {w} writers and {r} readers...".format(w=config.readwrite.writers, r=config.readwrite.readers) group = gevent.pool.Group() rand_writer = random.Random(seeds['writer']) - for x in xrange(config.readwrite.writers): - this_rand = random.Random(rand_writer.randrange(2**32)) - group.spawn_link_exception( - writer, - bucket=bucket, - worker_id=x, - file_names=file_names, - files=files, - queue=q, - rand=this_rand, - ) + + # Don't create random files if deterministic_file_names is set and true + if not config.readwrite.get('deterministic_file_names'): + for x in xrange(config.readwrite.writers): + this_rand = random.Random(rand_writer.randrange(2**32)) + group.spawn_link_exception( + writer, + bucket=bucket, + worker_id=x, + file_names=file_names, + files=files, + queue=q, + rand=this_rand, + ) + + # Since the loop generating readers already uses config.readwrite.readers + # and the file names are already generated (randomly or deterministically), + # this loop needs no additional qualifiers. If zero readers are specified, + # it will behave as expected (no data is read) rand_reader = random.Random(seeds['reader']) for x in xrange(config.readwrite.readers): this_rand = random.Random(rand_reader.randrange(2**32)) From 9799858e90d9055f387f7c991cfcbf50f48cf42d Mon Sep 17 00:00:00 2001 From: Joe Buck Date: Thu, 8 Aug 2013 21:47:34 -0700 Subject: [PATCH 16/16] readwrite: error propagation code This is my attempt at enabling errors in the readwrite.py to propagate up to the calling teuthology task.
Signed-off-by: Joe Buck --- s3tests/readwrite.py | 41 +++++++++++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/s3tests/readwrite.py b/s3tests/readwrite.py index d3b680e..cdfcf17 100644 --- a/s3tests/readwrite.py +++ b/s3tests/readwrite.py @@ -55,13 +55,13 @@ def reader(bucket, worker_id, file_names, queue, rand): msg='md5sum check failed', ), ) - - elapsed = end - start - result.update( - start=start, - duration=int(round(elapsed * NANOSECOND)), - chunks=fp.chunks, - ) + else: + elapsed = end - start + result.update( + start=start, + duration=int(round(elapsed * NANOSECOND)), + chunks=fp.chunks, + ) queue.put(result) def writer(bucket, worker_id, file_names, files, queue, rand): @@ -97,12 +97,13 @@ def writer(bucket, worker_id, file_names, files, queue, rand): else: end = time.time() - elapsed = end - start - result.update( - start=start, - duration=int(round(elapsed * NANOSECOND)), - chunks=fp.last_chunks, - ) + elapsed = end - start + result.update( + start=start, + duration=int(round(elapsed * NANOSECOND)), + chunks=fp.last_chunks, + ) + queue.put(result) def parse_options(): @@ -241,7 +242,19 @@ def main(): q.put(StopIteration) gevent.spawn_later(config.readwrite.duration, stop) - yaml.safe_dump_all(q, stream=real_stdout) + # wait for all the tests to finish + group.join() + print 'post-join, queue size {size}'.format(size=q.qsize()) + + if q.qsize() > 0: + for temp_dict in q: + if 'error' in temp_dict: + raise Exception('exception:\n\t{msg}\n\t{trace}'.format( + msg=temp_dict['error']['msg'], + trace=temp_dict['error']['traceback']) + ) + else: + yaml.safe_dump(temp_dict, stream=real_stdout) finally: # cleanup