s3tests: improve cross region copy, sync meta

Can now configure the sync agent's REST address in order to force
a sync operation. Alternatively, a waiting time for metadata sync
can be configured.

Signed-off-by: Yehuda Sadeh <yehuda@inktank.com>
This commit is contained in:
Yehuda Sadeh 2013-07-26 20:33:48 -07:00
parent 04d46c59f0
commit b4441e3057
2 changed files with 69 additions and 11 deletions

View file

@ -84,6 +84,9 @@ class TargetConfig:
self.api_name = '' self.api_name = ''
self.is_master = False self.is_master = False
self.is_secure = False self.is_secure = False
self.sync_agent_addr = None
self.sync_agent_port = 0
self.sync_meta_wait = 0
try: try:
self.api_name = cfg.get(section, 'api_name') self.api_name = cfg.get(section, 'api_name')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
@ -113,6 +116,22 @@ class TargetConfig:
except ConfigParser.NoOptionError: except ConfigParser.NoOptionError:
raw_calling_format = 'ordinary' raw_calling_format = 'ordinary'
try:
self.sync_agent_addr = cfg.get(section, 'sync_agent_addr')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
pass
try:
self.sync_agent_port = cfg.getint(section, 'sync_agent_port')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
pass
try:
self.sync_meta_wait = cfg.getint(section, 'sync_meta_wait')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
pass
try: try:
self.calling_format = calling_formats[raw_calling_format] self.calling_format = calling_formats[raw_calling_format]
except KeyError: except KeyError:

View file

@ -4,6 +4,7 @@ import boto.s3.connection
import boto.s3.acl import boto.s3.acl
import bunch import bunch
import datetime import datetime
import time
import email.utils import email.utils
import isodate import isodate
import nose import nose
@ -4559,29 +4560,67 @@ def test_region_bucket_create_secondary_access_remove_master():
conn.delete_bucket(bucket) conn.delete_bucket(bucket)
def region_sync_meta(conf):
    """Propagate metadata to the region described by *conf*.

    When a sync agent address is configured, POST to its partial
    metadata-sync endpoint and require an HTTP 200 reply.  When a
    metadata wait time is configured, additionally sleep that many
    seconds to let the sync settle.
    """
    agent_addr = conf.sync_agent_addr
    if agent_addr:
        url = 'http://{addr}:{port}/metadata/partial'.format(
            addr=agent_addr, port=conf.sync_agent_port)
        response = requests.post(url)
        eq(response.status_code, 200)
    wait_secs = conf.sync_meta_wait
    if wait_secs:
        time.sleep(wait_secs)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='create on one region, access in another')
@attr(assertion='can\'t access in other region')
@attr('multiregion')
def test_region_bucket_create_master_access_remove_secondary():
    """A bucket created on the master region must not be readable or
    deletable through a secondary region: both operations must redirect
    (HTTP 301), while the master connection can still delete it."""
    assert_can_test_multiregion()
    master = targets.main.master
    master_conn = master.connection
    for secondary in targets.main.secondaries:
        secondary_conn = secondary.connection
        # Create on the master, then force/await metadata sync so the
        # secondary knows the bucket exists.
        bucket = get_new_bucket(master)
        region_sync_meta(secondary.conf)
        # Reads through the secondary must redirect to the master.
        err = assert_raises(boto.exception.S3ResponseError,
                            secondary_conn.get_bucket, bucket.name)
        eq(err.status, 301)
        # Deletes through the secondary must redirect as well.
        err = assert_raises(boto.exception.S3ResponseError,
                            secondary_conn.delete_bucket, bucket.name)
        eq(err.status, 301)
        # Cleanup still succeeds via the master connection.
        master_conn.delete_bucket(bucket)
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy object between regions, verify')
@attr(assertion='can read object')
@attr('multiregion')
def test_region_copy_object():
    """Upload objects of several sizes to a secondary-region bucket,
    copy each into a master-region bucket, and verify the copied data
    arrives intact."""
    assert_can_test_multiregion()
    master = targets.main.master
    master_conn = master.connection
    master_bucket = get_new_bucket(master)
    for file_size in (1024, 1024 * 1024, 10 * 1024 * 1024,
                      100 * 1024 * 1024):
        for r in targets.main.secondaries:
            conn = r.connection
            bucket = get_new_bucket(r)
            content = 'testcontent'

            # Fill the source object with file_size bytes of 'A'.
            key = bucket.new_key('testobj')
            fp_a = FakeWriteFile(file_size, 'A')
            key.set_contents_from_file(fp_a)

            # Copy cross-region into the master bucket.
            dest_key = master_bucket.copy_key('testobj-dest', bucket.name,
                                              key.name)

            # verify dest
            _verify_atomic_key_data(dest_key, file_size, 'A')

            bucket.delete_key(key.name)
            conn.delete_bucket(bucket)