Compare commits

...

8 commits

Author SHA1 Message Date
Yuan Zhou
3b1b4638f4 gevent: bump to >=1.0
There are some DNS resolution issues on 0.13.6.
Bumping to >=1.0 fixes the issues for me.

Signed-off-by: Yuan Zhou <yuan.zhou@intel.com>
2016-04-06 13:26:35 +02:00
Abhishek Lekshmanan
7d26245b3a connection: add a debug option to debug boto calls
Basically, this sets the debug level via the conf file and attaches a
stream logger prefixed with 'boto' for more detailed debugging.

Signed-off-by: Abhishek Lekshmanan <abhishek@suse.com>
2016-04-06 13:26:35 +02:00
Dan Mick
dc26c0d696 Remove "--allow-hosts None" because it magically makes things work
Fixes: 12302
Signed-off-by: Dan Mick <dan.mick@redhat.com>
(cherry picked from commit 5f34b358fc)
2015-07-14 14:05:39 -07:00
Yehuda Sadeh
34530164b1 s3tests: fix for python 2.6
Signed-off-by: Yehuda Sadeh <yehuda@redhat.com>
2015-06-30 11:27:39 -07:00
Yehuda Sadeh
5d72a43ffa s3tests: cleanup, set bucket acls if permission denied
Retry bucket removal after setting the bucket permissions if we're
getting permission denied. This will make sure the bucket gets removed.

Signed-off-by: Yehuda Sadeh <yehuda@redhat.com>
(cherry picked from commit c41ebab9cf)
2015-05-13 17:14:35 -07:00
Yehuda Sadeh
366fdee269 s3tests: add tests of acl settings by alt user
verify ownership

Signed-off-by: Yehuda Sadeh <yehuda@redhat.com>
2015-05-13 17:14:35 -07:00
Sage Weil
f041a73153 Merge pull request #41 from ceph/wip-10770
rgw: concurrent non-versioned object creation, deletion

Reviewed-by: Sage Weil <sage@redhat.com>
2015-02-05 11:32:23 -08:00
Yehuda Sadeh
4342e14114 rgw: concurrent non-versioned object creation, deletion
Tests issue #10770: bad bucket index entry left by a cancelled bucket
index operation.

Signed-off-by: Yehuda Sadeh <yehuda@inktank.com>
2015-02-05 10:02:45 -08:00
8 changed files with 105 additions and 34 deletions

View file

@@ -41,5 +41,4 @@ virtualenv --no-site-packages --distribute virtualenv
 # easy_install, and we really wanted pip; next line will fail if pip
 # requirements.txt does not match setup.py requirements -- sucky but
 # good enough for now
-./virtualenv/bin/python setup.py develop \
-    --allow-hosts None
+./virtualenv/bin/python setup.py develop

View file

@@ -3,7 +3,7 @@ nose >=1.0.0
 boto >=2.6.0
 bunch >=1.0.0
 # 0.14 switches to libev, that means bootstrap needs to change too
-gevent ==0.13.6
+gevent >=1.0
 isodate >=0.4.4
 requests ==0.14.0
 pytz >=2011k
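
Note: the DNS problems called out in the "gevent: bump to >=1.0" commit stem from gevent 0.13's libevent-dns based resolver; gevent >=1.0 ships its own resolvers on top of libev (matching the comment above about bootstrap needing to change). As a minimal, purely illustrative sketch that is not part of this change set, one can check which resolver a monkey-patched process ends up using:

import gevent
import gevent.monkey
gevent.monkey.patch_all()   # route blocking socket calls through gevent

import socket

# On gevent >=1.0 name lookups go through the hub's resolver
# (threadpool-based by default, c-ares optionally).
print(gevent.get_hub().resolver)
print(socket.getaddrinfo('example.com', 80))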

View file

@@ -108,6 +108,9 @@ def connect(conf):
         raise RuntimeError(
             'calling_format unknown: %r' % raw_calling_format
             )
+    if conf.has_key('debug'):
+        kwargs['debug']=conf['debug']
+        boto.set_stream_logger('boto')
     # TODO test vhost calling format
     conn = boto.s3.connection.S3Connection(**kwargs)
     return conn
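
The new conf-driven debug switch maps directly onto boto's own logging knobs. A rough usage sketch, with assumed placeholder credentials and an arbitrary call that is not part of this patch:

import boto
import boto.s3.connection

boto.set_stream_logger('boto')        # same logger name the patch attaches
conn = boto.s3.connection.S3Connection(
    aws_access_key_id='KEY',          # placeholder credentials
    aws_secret_access_key='SECRET',
    debug=2,                          # 0 = off; higher values are more verbose
)
conn.get_all_buckets()                # request/response details now reach the 'boto' logger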

View file

@@ -64,22 +64,31 @@ def nuke_prefixed_buckets_on_conn(prefix, name, conn):
         print 'prefix=',prefix
         if bucket.name.startswith(prefix):
             print 'Cleaning bucket {bucket}'.format(bucket=bucket)
-            try:
-                # bucket.set_canned_acl('private')
-                for key in bucket.list_versions():
-                    print 'Cleaning bucket {bucket} key {key}'.format(
-                        bucket=bucket,
-                        key=key,
-                        )
-                    # key.set_canned_acl('private')
-                    bucket.delete_key(key.name, version_id = key.version_id)
-                bucket.delete()
-            except boto.exception.S3ResponseError as e:
-                if e.error_code != 'AccessDenied':
-                    print 'GOT UNWANTED ERROR', e.error_code
-                    raise
-                # seems like we're not the owner of the bucket; ignore
-                pass
+
+            success = False
+            for i in xrange(2):
+                try:
+                    for key in bucket.list_versions():
+                        print 'Cleaning bucket {bucket} key {key}'.format(
+                            bucket=bucket,
+                            key=key,
+                            )
+                        # key.set_canned_acl('private')
+                        bucket.delete_key(key.name, version_id = key.version_id)
+                    bucket.delete()
+                    success = True
+                except boto.exception.S3ResponseError as e:
+                    if e.error_code != 'AccessDenied':
+                        print 'GOT UNWANTED ERROR', e.error_code
+                        raise
+                    # seems like we don't have permissions set appropriately, we'll
+                    # modify permissions and retry
+                    pass
+
+                if success:
+                    return
+
+                bucket.set_canned_acl('private')

 def nuke_prefixed_buckets(prefix):
     # If no regions are specified, use the simple method
@@ -106,7 +115,6 @@ def nuke_prefixed_buckets(prefix):

     print 'Done with cleanup of test buckets.'

-
 class TargetConfig:
     def __init__(self, cfg, section):
         self.port = None

View file

@@ -3261,6 +3261,27 @@ def test_object_acl_canned_bucketownerfullcontrol():
     key.delete()
     bucket.delete()

+@attr(resource='object.acls')
+@attr(method='put')
+@attr(operation='set write-acp')
+@attr(assertion='does not modify owner')
+def test_object_acl_full_control_verify_owner():
+    bucket = get_new_bucket(targets.main.default)
+    bucket.set_acl('public-read-write')
+
+    key = bucket.new_key('foo')
+    key.set_contents_from_string('bar')
+
+    key.add_user_grant(permission='FULL_CONTROL', user_id=config.alt.user_id)
+
+    k2 = s3.alt.get_bucket(bucket.name).get_key('foo')
+    k2.add_user_grant(permission='READ_ACP', user_id=config.alt.user_id)
+
+    policy = k2.get_acl()
+
+    eq(policy.owner.id, config.main.user_id)
+
 @attr(resource='bucket')
 @attr(method='ACLs')
 @attr(operation='set acl private')
@@ -3532,6 +3553,12 @@ def test_bucket_acl_grant_userid_fullcontrol():

     # can write acl
     _check_bucket_acl_grant_can_writeacp(bucket)
+
+    # verify owner did not change
+    bucket2 = s3.main.get_bucket(bucket.name)
+    policy = bucket2.get_acl()
+    eq(policy.owner.id, config.main.user_id)
+    eq(policy.owner.display_name, config.main.display_name)

 @attr(resource='bucket')
 @attr(method='ACLs')
@@ -5762,7 +5789,7 @@ def test_versioning_multi_object_delete_with_marker_create():

     keyname = 'key'

-    rmkeys = { bucket.new_key(keyname) }
+    rmkeys = [ bucket.new_key(keyname) ]

     eq(_count_bucket_versioned_objs(bucket), 0)
@@ -5852,17 +5879,18 @@ def test_versioned_object_acl():

     check_grants(k.get_acl().acl.grants, default_policy)

-def _do_create_object(bucket, objname, i):
+def _do_create_object(bucket, objname, i, obj_size):
     k = bucket.new_key(objname)
-    k.set_contents_from_string('data {i}'.format(i=i))
+    s = 'x' * obj_size
+    k.set_contents_from_string(s)

 def _do_remove_ver(bucket, obj):
     bucket.delete_key(obj.name, version_id = obj.version_id)

-def _do_create_versioned_obj_concurrent(bucket, objname, num):
+def _do_create_obj_concurrent(bucket, objname, num, obj_size=0):
     t = []
     for i in range(num):
-        thr = threading.Thread(target = _do_create_object, args=(bucket, objname, i))
+        thr = threading.Thread(target = _do_create_object, args=(bucket, objname, i, obj_size))
         thr.start()
         t.append(thr)
     return t
@@ -5894,7 +5922,7 @@ def test_versioned_concurrent_object_create_concurrent_remove():

     num_objs = 5
     for i in xrange(5):
-        t = _do_create_versioned_obj_concurrent(bucket, keyname, num_objs)
+        t = _do_create_obj_concurrent(bucket, keyname, num_objs)
         _do_wait_completion(t)

     eq(_count_bucket_versioned_objs(bucket), num_objs)
@@ -5923,7 +5951,7 @@ def test_versioned_concurrent_object_create_and_remove():
     all_threads = []

     for i in xrange(3):
-        t = _do_create_versioned_obj_concurrent(bucket, keyname, num_objs)
+        t = _do_create_obj_concurrent(bucket, keyname, num_objs)
         all_threads.append(t)

         t = _do_clear_versioned_bucket_concurrent(bucket)
@@ -5938,3 +5966,36 @@ def test_versioned_concurrent_object_create_and_remove():

     eq(_count_bucket_versioned_objs(bucket), 0)
     eq(len(bucket.get_all_keys()), 0)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='concurrent creation of objects, concurrent removal')
+@attr(assertion='works')
+def test_non_versioned_concurrent_object_create_concurrent_remove():
+    bucket = get_new_bucket()
+
+    # not configuring versioning here! this test is non-versioned
+    keyname = 'myobj'
+
+    num_objs = 5
+
+    obj_size = 10 # non-zero
+
+    for i in xrange(5):
+        t = _do_create_obj_concurrent(bucket, keyname, num_objs, obj_size)
+        _do_wait_completion(t)
+
+    keys = []
+    for k in bucket.get_all_keys():
+        keys.append(k)
+
+    eq(len(keys), 1)
+    eq(keys[0].size, obj_size)
+
+    t = _do_clear_versioned_bucket_concurrent(bucket)
+    _do_wait_completion(t)
+
+    eq(_count_bucket_versioned_objs(bucket), 0)
+    eq(len(bucket.get_all_keys()), 0)

View file

@@ -195,7 +195,7 @@ def main():
     warmup_pool = gevent.pool.Pool(size=100)
     for file_name in file_names:
         fp = next(files)
-        warmup_pool.spawn_link_exception(
+        warmup_pool.spawn(
             write_file,
             bucket=bucket,
             file_name=file_name,
@@ -214,7 +214,7 @@ def main():
     if not config.readwrite.get('deterministic_file_names'):
         for x in xrange(config.readwrite.writers):
             this_rand = random.Random(rand_writer.randrange(2**32))
-            group.spawn_link_exception(
+            group.spawn(
                 writer,
                 bucket=bucket,
                 worker_id=x,
@@ -231,7 +231,7 @@ def main():
     rand_reader = random.Random(seeds['reader'])
     for x in xrange(config.readwrite.readers):
         this_rand = random.Random(rand_reader.randrange(2**32))
-        group.spawn_link_exception(
+        group.spawn(
             reader,
             bucket=bucket,
             worker_id=x,

View file

@@ -161,7 +161,7 @@ def main():
         )

     q = gevent.queue.Queue()
-    logger_g = gevent.spawn_link_exception(yaml.safe_dump_all, q, stream=real_stdout)
+    logger_g = gevent.spawn(yaml.safe_dump_all, q, stream=real_stdout)

     print "Writing {num} objects with {w} workers...".format(
         num=config.roundtrip.files.num,
@@ -171,7 +171,7 @@ def main():
     start = time.time()
     for objname in objnames:
         fp = next(files)
-        pool.spawn_link_exception(
+        pool.spawn(
             writer,
             bucket=bucket,
             objname=objname,
@@ -195,7 +195,7 @@ def main():
     pool = gevent.pool.Pool(size=config.roundtrip.readers)
     start = time.time()
     for objname in objnames:
-        pool.spawn_link_exception(
+        pool.spawn(
             reader,
             bucket=bucket,
             objname=objname,
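
For context on the spawn_link_exception -> spawn changes in these two tools: gevent >=1.0 dropped the *_link_exception helpers, so the plain spawn() calls above no longer propagate worker failures to the caller automatically. A hedged sketch of one way to surface such failures with the 1.x API (worker() and its arguments are invented for illustration):

import gevent
import gevent.pool

def worker(n):
    # stand-in for the writer/reader greenlets spawned above
    return n * n

pool = gevent.pool.Pool(size=10)
greenlets = [pool.spawn(worker, n) for n in range(5)]

# raise_error=True re-raises the first worker exception in the caller,
# roughly the fail-fast behaviour spawn_link_exception provided on 0.13.
gevent.joinall(greenlets, raise_error=True)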

View file

@@ -16,7 +16,7 @@ setup(
         'boto >=2.0b4',
         'PyYAML',
         'bunch >=1.0.0',
-        'gevent ==0.13.6',
+        'gevent >=1.0',
         'isodate >=0.4.4',
         ],