forked from TrueCloudLab/s3-tests
Compare commits: master...wip-buck-n (2 commits)

Author | SHA1 | Date
---|---|---
 | 5c81151247 |
 | d0ad2aad20 |

3 changed files with 63 additions and 36 deletions
@@ -7,6 +7,8 @@ import os
 import random
 import string
 
+from .utils import region_sync_meta
+
 s3 = bunch.Bunch()
 config = bunch.Bunch()
 targets = bunch.Bunch()
@@ -49,31 +51,49 @@ def choose_bucket_prefix(template, max_len=30):
         )
 
+def nuke_prefixed_buckets_on_conn(prefix, name, conn):
+    print 'Cleaning buckets from connection {name} prefix {prefix!r}.'.format(
+        name=name,
+        prefix=prefix,
+        )
+    for bucket in conn.get_all_buckets():
+        if bucket.name.startswith(prefix):
+            print 'Cleaning bucket {bucket}'.format(bucket=bucket)
+            try:
+                bucket.set_canned_acl('private')
+                for key in bucket.list():
+                    print 'Cleaning bucket {bucket} key {key}'.format(
+                        bucket=bucket,
+                        key=key,
+                        )
+                    key.set_canned_acl('private')
+                    key.delete()
+                bucket.delete()
+            except boto.exception.S3ResponseError as e:
+                if e.error_code != 'AccessDenied':
+                    print 'GOT UNWANTED ERROR', e.error_code
+                    raise
+                # seems like we're not the owner of the bucket; ignore
+                pass
+
 def nuke_prefixed_buckets(prefix):
+    # First, delete all buckets on the master connection
     for name, conn in s3.items():
-        print 'Cleaning buckets from connection {name} prefix {prefix!r}.'.format(
-            name=name,
-            prefix=prefix,
-            )
-        for bucket in conn.get_all_buckets():
-            if bucket.name.startswith(prefix):
-                print 'Cleaning bucket {bucket}'.format(bucket=bucket)
-                try:
-                    bucket.set_canned_acl('private')
-                    for key in bucket.list():
-                        print 'Cleaning bucket {bucket} key {key}'.format(
-                            bucket=bucket,
-                            key=key,
-                            )
-                        key.set_canned_acl('private')
-                        key.delete()
-                    bucket.delete()
-                except boto.exception.S3ResponseError as e:
-                    if e.error_code != 'AccessDenied':
-                        print 'GOT UNWANTED ERROR', e.error_code
-                        raise
-                    # seems like we're not the owner of the bucket; ignore
-                    pass
+        #if conn == s3[targets.main.master]:
+        if conn == targets.main.master.connection:
+            print 'Deleting buckets on {name} (master)'.format(name=name)
+            nuke_prefixed_buckets_on_conn(prefix, name, conn)
+
+    # Then sync to propagate deletes to secondaries
+    region_sync_meta(targets.main, targets.main.master.connection)
+    print 'region-sync in nuke_prefixed_buckets'
+
+    # Now delete remaining buckets on any other connection
+    for name, conn in s3.items():
+        #if conn != s3[targets.main.master]:
+        if conn != targets.main.master.connection:
+            print 'Deleting buckets on {name} (non-master)'.format(name=name)
+            nuke_prefixed_buckets_on_conn(prefix, name, conn)
 
     print 'Done with cleanup of test buckets.'
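
Review note: the cleanup body moves verbatim into the new nuke_prefixed_buckets_on_conn() helper, and nuke_prefixed_buckets() now sequences it for multi-region runs: delete the prefixed buckets on the master connection first, sync metadata out, then delete whatever the other connections still list. The sync step (region_sync_meta, imported from .utils in the first hunk) amounts to one HTTP POST per remaining region against its sync agent; a minimal sketch of one such round, with the literal address, port, and wait as illustrative stand-ins for the per-region conf values read in utils.py below:

    import requests
    import time

    # One incremental metadata-sync round against a secondary's sync agent.
    # 127.0.0.1:8080 and the 5-second settle stand in for conf.sync_agent_addr,
    # conf.sync_agent_port and conf.sync_meta_wait from the region's config.
    ret = requests.post('http://{addr}:{port}/metadata/incremental'.format(
        addr='127.0.0.1', port=8080))
    assert ret.status_code == 200
    time.sleep(5)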
@@ -31,6 +31,7 @@ from nose.plugins.attrib import attr
 from nose.plugins.skip import SkipTest
 
 from .utils import assert_raises
+from .utils import region_sync_meta
 import AnonymousAuth
 
 from email.header import decode_header
@@ -4611,19 +4612,6 @@ def test_region_bucket_create_secondary_access_remove_master():
 
     conn.delete_bucket(bucket)
 
-# syncs all the regions except for the one passed in
-def region_sync_meta(targets, region):
-
-    for (k, r) in targets.iteritems():
-        if r == region:
-            continue
-        conf = r.conf
-        if conf.sync_agent_addr:
-            ret = requests.post('http://{addr}:{port}/metadata/incremental'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port))
-            eq(ret.status_code, 200)
-        if conf.sync_meta_wait:
-            time.sleep(conf.sync_meta_wait)
-
 @attr(resource='bucket')
 @attr(method='get')
 @attr(operation='create on one region, access in another')
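
Review note: the 13 lines removed here reappear unchanged in utils.py (last file below), so this is a pure move; pulling the helper out of test_s3.py lets both the tests and the harness's nuke_prefixed_buckets() import it. After the move, a cross-region test would call it roughly like this (a sketch: get_new_bucket() and the secondaries attribute are assumed harness pieces, not shown in this diff):

    # Create a bucket through the master target, push metadata to the other
    # regions, then check the bucket is visible through another connection.
    bucket = get_new_bucket(targets.main.master)            # assumed helper
    region_sync_meta(targets.main, targets.main.master.connection)
    secondary = targets.main.secondaries[0]                 # assumed attribute
    secondary.connection.get_bucket(bucket.name)            # should succeed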
@@ -1,3 +1,8 @@
+import requests
+import time
+
+from nose.tools import eq_ as eq
+
 def assert_raises(excClass, callableObj, *args, **kwargs):
     """
     Like unittest.TestCase.assertRaises, but returns the exception.
@@ -12,3 +17,17 @@ def assert_raises(excClass, callableObj, *args, **kwargs):
     else:
         excName = str(excClass)
         raise AssertionError("%s not raised" % excName)
+
+# syncs all the regions except for the one passed in
+def region_sync_meta(targets, region):
+
+    for (k, r) in targets.iteritems():
+        if r == region:
+            continue
+        conf = r.conf
+        if conf.sync_agent_addr:
+            ret = requests.post('http://{addr}:{port}/metadata/incremental'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port))
+            eq(ret.status_code, 200)
+        if conf.sync_meta_wait:
+            time.sleep(conf.sync_meta_wait)
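
Review note: utils.py gains only stdlib and third-party imports (requests, time, nose.tools), so importing it from the package __init__ cannot introduce an import cycle. For each region target other than the one passed in, the helper reads three conf attributes; sketched as the bunch the code expects (attribute names come from the code above, values are illustrative):

    import bunch

    # What region_sync_meta() reads off each region target's conf; a region
    # without a sync agent leaves sync_agent_addr unset and is skipped.
    conf = bunch.Bunch(
        sync_agent_addr='127.0.0.1',  # illustrative
        sync_agent_port=8080,         # illustrative
        sync_meta_wait=5,             # seconds to sleep after the POST
        )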