diff --git a/README.rst b/README.rst index 42ad7d5..f2b9818 100644 --- a/README.rst +++ b/README.rst @@ -6,14 +6,10 @@ This is a set of unofficial Amazon AWS S3 compatibility tests that can be useful to people implementing software that exposes an S3-like API. The tests use the Boto2 and Boto3 libraries. -The tests use the Nose test framework. To get started, ensure you have -the ``virtualenv`` software installed; e.g. on Debian/Ubuntu:: +The tests use the Tox tool. To get started, ensure you have the ``tox`` +software installed; e.g. on Debian/Ubuntu:: - sudo apt-get install python-virtualenv - -and then run:: - - ./bootstrap + sudo apt-get install tox You will need to create a configuration file with the location of the service and two different credentials. A sample configuration file named @@ -22,29 +18,25 @@ used to run the s3 tests on a Ceph cluster started with vstart. Once you have that file copied and edited, you can run the tests with:: - S3TEST_CONF=your.conf ./virtualenv/bin/nosetests + S3TEST_CONF=your.conf tox You can specify which directory of tests to run:: - S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional + S3TEST_CONF=your.conf tox -- s3tests_boto3/functional You can specify which file of tests to run:: - S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional.test_s3 + S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_s3.py You can specify which test to run:: - S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional.test_s3:test_bucket_list_empty - -To gather a list of tests being run, use the flags:: - - -v --collect-only + S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_s3.py::test_bucket_list_empty Some tests have attributes set based on their current reliability and things like AWS not enforcing their spec strictly. You can filter tests based on their attributes:: - S3TEST_CONF=aws.conf ./virtualenv/bin/nosetests -a '!fails_on_aws' + S3TEST_CONF=aws.conf tox -- -m 'not fails_on_aws' Most of the tests have both Boto3 and Boto2 versions. Tests written in Boto2 are in the ``s3tests`` directory. Tests written in Boto3 are @@ -52,7 +44,7 @@ located in the ``s3tests_boto3`` directory. You can run only the boto3 tests with:: - S3TEST_CONF=your.conf ./virtualenv/bin/nosetests -v -s -A 'not fails_on_rgw' s3tests_boto3.functional + S3TEST_CONF=your.conf tox -- -m 'not fails_on_aws' s3tests_boto3/functional ======================== STS compatibility tests ======================== @@ -62,11 +54,11 @@ This section contains some basic tests for the AssumeRole, GetSessionToken and A You can run only the sts tests (all three API's) with:: - S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional.test_sts + S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_sts.py You can filter tests based on their attributes. There is an attribute named ``test_of_sts`` to run AssumeRole and GetSessionToken tests and ``webidentity_test`` to run the AssumeRoleWithWebIdentity tests. If you want to execute only ``test_of_sts`` tests, you can apply that filter as below:: - S3TEST_CONF=your.conf ./virtualenv/bin/nosetests -v -s -A 'test_of_sts' s3tests_boto3.functional.test_sts + S3TEST_CONF=your.conf tox -- -m test_of_sts s3tests_boto3/functional/test_sts.py For running ``webidentity_test`` you'll need to have Keycloak running. @@ -88,14 +80,14 @@ Adding the above capabilities to the "iam" user is also taken care of by vstart (if the Ceph cluster is started with vstart). To run these tests, create a configuration file with the sections "iam" and "s3 alt"; refer to ``s3tests.conf.SAMPLE``.
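For orientation, a minimal sketch of those two sections with placeholder values (the key names follow the conventions of ``s3tests.conf.SAMPLE``; consult the sample file for the complete set)::

    [iam]
    display_name = youruser
    user_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
    email = s3@example.com
    access_key = ABCDEFGHIJKLMNOPQRST
    secret_key = abcdefghijklmnopqrstuvwxyzabcdefghijklmn

    [s3 alt]
    display_name = john.doe
    email = john.doe@example.com
    user_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123
    access_key = NOPQRSTUVWXYZABCDEFG
    secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm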
Once you have that configuration file copied and edited, you can run all the tests with:: - S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional.test_iam + S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_iam.py You can also specify a specific test to run:: - S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional.test_iam:test_put_user_policy + S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_iam.py::test_put_user_policy Some tests have attributes set such as "fails_on_rgw". You can filter tests based on their attributes:: - S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional.test_iam -a '!fails_on_rgw' + S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_iam.py -m 'not fails_on_rgw' diff --git a/bootstrap b/bootstrap deleted file mode 100755 index 0bba312..0000000 --- a/bootstrap +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -set -e - -virtualenv="virtualenv" -declare -a packages -source /etc/os-release - -case "$ID" in - debian|ubuntu|devuan) - packages=(debianutils python3-pip python3-virtualenv python3-dev libevent-dev libffi-dev libxml2-dev libxslt-dev zlib1g-dev) - for package in ${packages[@]}; do - if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then - # add a space after old values - missing="${missing:+$missing }$package" - fi - done - - if [ -n "$missing" ]; then - echo "$0: missing required DEB packages. Installing via sudo." 1>&2 - sudo apt-get -y install $missing - fi - ;; - centos|fedora|rhel|rocky|ol|virtuozzo) - - packages=(which python3-virtualenv python36-devel libevent-devel libffi-devel libxml2-devel libxslt-devel zlib-devel) - for package in ${packages[@]}; do - # When the package is python36-devel we change it to python3-devel on Fedora - if [[ ${package} == "python36-devel" && -f /etc/fedora-release ]]; then - package=python36 - fi - if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then - missing="${missing:+$missing }$package" - fi - done - - if [ -n "$missing" ]; then - echo "$0: Missing required RPM packages: ${missing}." 1>&2 - sudo yum -y install $missing - fi - ;; - opensuse*|suse|sles) - - packages=(which python3-virtualenv python3-devel libev-devel libffi-devel libxml2-devel libxslt-devel zlib-devel) - for package in ${packages[@]}; do - if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then - missing="${missing:+$missing }$package" - fi - if [ -n "$missing" ]; then - echo "$0: Missing required RPM packages: ${missing}." 1>&2 - sudo zypper --non-interactive install --no-recommends $missing - fi - done - - ;; - *) - echo "Bootstrap script does not support this distro yet, consider adding the packages" - exit 1 -esac - - -# s3-tests only works on python 3.6 not newer versions of python3 -${virtualenv} --python=$(which python3.6) virtualenv - -# avoid pip bugs -./virtualenv/bin/pip3 install --upgrade pip - -# latest setuptools supporting python 2.7 -./virtualenv/bin/pip install setuptools==44.1.0 - -./virtualenv/bin/pip3 install -r requirements.txt - -# forbid setuptools from using the network because it'll try to use -# easy_install, and we really wanted pip; next line will fail if pip -# requirements.txt does not match setup.py requirements -- sucky but -# good enough for now -./virtualenv/bin/python3 setup.py develop
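With ``bootstrap`` gone, tox builds the test environment itself from ``requirements.txt``. The repository's ``tox.ini`` is not shown in this diff; a minimal sketch that would support the invocations above (the section contents are an assumption, not the committed file)::

    [tox]
    envlist = py

    [testenv]
    # install the test dependencies into the tox-managed environment
    deps = -rrequirements.txt
    # environment variables the tests read at runtime
    passenv = S3TEST_CONF S3_USE_SIGV4
    # everything after "--" on the tox command line lands in {posargs}
    commands = pytest {posargs}

The ``--`` separator in the commands above is what makes tox stop parsing its own options and forward test paths and ``-m`` marker expressions to pytest via ``{posargs}``.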
diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..0e3bcba --- /dev/null +++ b/pytest.ini @@ -0,0 +1,42 @@ +[pytest] +markers = + abac_test + appendobject + auth_aws2 + auth_aws4 + auth_common + bucket_policy + bucket_encryption + cloud_transition + encryption + fails_on_aws + fails_on_dbstore + fails_on_dho + fails_on_mod_proxy_fcgi + fails_on_rgw + fails_on_s3 + fails_with_subdomain + lifecycle + lifecycle_expiration + lifecycle_transition + list_objects_v2 + object_lock + session_policy + s3select + s3website + s3website_routing_rules + s3website_redirect_location + sse_s3 + storage_class + tagging + test_of_iam + test_of_sts + token_claims_trust_policy_test + token_principal_tag_role_policy_test + token_request_tag_trust_policy_test + token_resource_tags_test + token_role_tags_test + token_tag_keys_test + user_policy + versioning + webidentity_test diff --git a/requirements.txt b/requirements.txt index 0e625a1..ac1d18f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,4 @@ PyYAML -nose >=1.0.0 boto >=2.6.0 boto3 >=1.0.0 # botocore-1.28 broke v2 signatures, see https://tracker.ceph.com/issues/58059 @@ -12,3 +11,4 @@ requests >=2.23.0 pytz >=2011k httplib2 lxml +pytest diff --git a/s3tests/functional/__init__.py b/s3tests/functional/__init__.py index 8911e02..a3890d4 100644 --- a/s3tests/functional/__init__.py +++ b/s3tests/functional/__init__.py @@ -7,6 +7,7 @@ import itertools import os import random import string +import pytest from http.client import HTTPConnection, HTTPSConnection from urllib.parse import urlparse @@ -370,6 +371,15 @@ def teardown(): # remove our buckets here also, to avoid littering nuke_prefixed_buckets(prefix=prefix) +@pytest.fixture(scope="package") +def configfile(): + setup() + yield config +@pytest.fixture(autouse=True) +def setup_teardown(configfile): + yield + teardown() bucket_counter = itertools.count(1) diff --git a/s3tests/functional/test_headers.py b/s3tests/functional/test_headers.py index 63f0c56..b3eb576 100644 --- a/s3tests/functional/test_headers.py +++ b/s3tests/functional/test_headers.py @@ -4,7 +4,7 @@ import boto.exception import boto.s3.connection import boto.s3.acl import boto.utils -import nose +import pytest import operator import random import string @@ -18,15 +18,13 @@ from urllib.parse import urlparse from boto.s3.connection import S3Connection -from nose.tools import eq_ as eq -from nose.plugins.attrib import attr -from nose.plugins.skip import SkipTest - from .utils import assert_raises from email.header import decode_header from .
import ( + configfile, + setup_teardown, _make_raw_request, nuke_prefixed_buckets, get_new_bucket, @@ -144,6 +142,10 @@ def _clear_custom_headers(): _custom_headers = {} _remove_headers = [] +@pytest.fixture(autouse=True) +def clear_custom_headers(setup_teardown): + yield + _clear_custom_headers() # clear headers before teardown() def _add_custom_headers(headers=None, remove=None): """ Define header customizations (additions, replacements, removals) @@ -166,40 +168,23 @@ def _setup_bad_object(headers=None, remove=None): _add_custom_headers(headers=headers, remove=remove) return bucket.new_key('foo') -def tag(*tags): - def wrap(func): - for tag in tags: - setattr(func, tag, True) - return func - return wrap - # # common tests # -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no content length') -@attr(assertion='fails 411') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_common +@pytest.mark.fails_on_dbstore def test_object_create_bad_contentlength_none(): key = _setup_bad_object(remove=('Content-Length',)) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 411) - eq(e.reason, 'Length Required') - eq(e.error_code,'MissingContentLength') + assert e.status == 411 + assert e.reason == 'Length Required' + assert e.error_code == 'MissingContentLength' -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/content length too long') -@attr(assertion='fails 400') -@nose.with_setup(teardown=_clear_custom_headers) -@attr('fails_on_rgw') +@pytest.mark.auth_common +@pytest.mark.fails_on_rgw def test_object_create_bad_contentlength_mismatch_above(): content = 'bar' length = len(content) + 1 @@ -212,45 +197,30 @@ def test_object_create_bad_contentlength_mismatch_above(): key.should_retry = no_retry e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, content) - eq(e.status, 400) - eq(e.reason.lower(), 'bad request') # some proxies vary the case - eq(e.error_code, 'RequestTimeout') + assert e.status == 400 + assert e.reason.lower() == 'bad request' # some proxies vary the case + assert e.error_code == 'RequestTimeout' -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/empty authorization') -@attr(assertion='fails 403') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_common +@pytest.mark.fails_on_dbstore def test_object_create_bad_authorization_empty(): key = _setup_bad_object({'Authorization': ''}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') - eq(e.error_code, 'AccessDenied') + assert e.status == 403 + assert e.reason == 'Forbidden' + assert e.error_code == 'AccessDenied' -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/date and x-amz-date') -@attr(assertion='succeeds') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_common +@pytest.mark.fails_on_dbstore def test_object_create_date_and_amz_date(): date = formatdate(usegmt=True) key = _setup_bad_object({'Date': date, 'X-Amz-Date': date}) key.set_contents_from_string('bar') -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/x-amz-date and no date') -@attr(assertion='succeeds') -@attr('fails_on_dbstore') 
-@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_common +@pytest.mark.fails_on_dbstore def test_object_create_amz_date_and_no_date(): date = formatdate(usegmt=True) key = _setup_bad_object({'X-Amz-Date': date}, ('Date',)) @@ -258,41 +228,26 @@ def test_object_create_amz_date_and_no_date(): # the teardown is really messed up here. check it out -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no authorization') -@attr(assertion='fails 403') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_common +@pytest.mark.fails_on_dbstore def test_object_create_bad_authorization_none(): key = _setup_bad_object(remove=('Authorization',)) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') - eq(e.error_code, 'AccessDenied') + assert e.status == 403 + assert e.reason == 'Forbidden' + assert e.error_code == 'AccessDenied' -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/no content length') -@attr(assertion='succeeds') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_common +@pytest.mark.fails_on_dbstore def test_bucket_create_contentlength_none(): _add_custom_headers(remove=('Content-Length',)) get_new_bucket() -@tag('auth_common') -@attr(resource='bucket') -@attr(method='acls') -@attr(operation='set w/no content length') -@attr(assertion='succeeds') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_common +@pytest.mark.fails_on_dbstore def test_object_acl_create_contentlength_none(): bucket = get_new_bucket() key = bucket.new_key('foo') @@ -315,160 +270,110 @@ def _create_new_connection(): ) return TargetConnection(targets.main.default.conf, conn) -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/empty content length') -@attr(assertion='fails 400') -@nose.with_setup(teardown=_clear_custom_headers) -@attr('fails_on_rgw') +@pytest.mark.auth_common +@pytest.mark.fails_on_rgw def test_bucket_create_bad_contentlength_empty(): conn = _create_new_connection() _add_custom_headers({'Content-Length': ''}) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, conn) - eq(e.status, 400) - eq(e.reason.lower(), 'bad request') # some proxies vary the case + assert e.status == 400 + assert e.reason.lower() == 'bad request' # some proxies vary the case -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/no content length') -@attr(assertion='succeeds') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_common +@pytest.mark.fails_on_dbstore def test_bucket_create_bad_contentlength_none(): _add_custom_headers(remove=('Content-Length',)) bucket = get_new_bucket() -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/empty authorization') -@attr(assertion='fails 403') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_common +@pytest.mark.fails_on_dbstore def test_bucket_create_bad_authorization_empty(): _add_custom_headers({'Authorization': ''}) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 403) - eq(e.reason, 'Forbidden') - eq(e.error_code, 'AccessDenied') + assert e.status == 403 + assert e.reason == 'Forbidden' + assert e.error_code == 
'AccessDenied' # the teardown is really messed up here. check it out -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/no authorization') -@attr(assertion='fails 403') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_common +@pytest.mark.fails_on_dbstore def test_bucket_create_bad_authorization_none(): _add_custom_headers(remove=('Authorization',)) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 403) - eq(e.reason, 'Forbidden') - eq(e.error_code, 'AccessDenied') + assert e.status == 403 + assert e.reason == 'Forbidden' + assert e.error_code == 'AccessDenied' # # AWS2 specific tests # -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/content length too short') -@attr(assertion='fails 400') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws2 +@pytest.mark.fails_on_dbstore def test_object_create_bad_contentlength_mismatch_below_aws2(): check_aws2_support() content = 'bar' length = len(content) - 1 key = _setup_bad_object({'Content-Length': length}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, content) - eq(e.status, 400) - eq(e.reason.lower(), 'bad request') # some proxies vary the case - eq(e.error_code, 'BadDigest') + assert e.status == 400 + assert e.reason.lower() == 'bad request' # some proxies vary the case + assert e.error_code == 'BadDigest' -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/incorrect authorization') -@attr(assertion='fails 403') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws2 +@pytest.mark.fails_on_dbstore def test_object_create_bad_authorization_incorrect_aws2(): check_aws2_support() key = _setup_bad_object({'Authorization': 'AWS AKIAIGR7ZNNBHC5BKSUB:FWeDfwojDSdS2Ztmpfeubhd9isU='}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch', 'InvalidAccessKeyId') -@tag('auth_aws2') -@nose.with_setup(teardown=_clear_custom_headers) -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/invalid authorization') -@attr(assertion='fails 400') -@attr('fails_on_dbstore') +@pytest.mark.auth_aws2 +@pytest.mark.fails_on_dbstore def test_object_create_bad_authorization_invalid_aws2(): check_aws2_support() key = _setup_bad_object({'Authorization': 'AWS HAHAHA'}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 400) - eq(e.reason.lower(), 'bad request') # some proxies vary the case - eq(e.error_code, 'InvalidArgument') + assert e.status == 400 + assert e.reason.lower() == 'bad request' # some proxies vary the case + assert e.error_code == 'InvalidArgument' -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no date') -@attr(assertion='fails 403') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws2 +@pytest.mark.fails_on_dbstore def test_object_create_bad_date_none_aws2(): check_aws2_support() key = _setup_bad_object(remove=('Date',)) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') - eq(e.error_code, 
'AccessDenied') + assert e.status == 403 + assert e.reason == 'Forbidden' + assert e.error_code == 'AccessDenied' -@tag('auth_aws2') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/invalid authorization') -@attr(assertion='fails 400') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws2 def test_bucket_create_bad_authorization_invalid_aws2(): check_aws2_support() _add_custom_headers({'Authorization': 'AWS HAHAHA'}) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 400) - eq(e.reason.lower(), 'bad request') # some proxies vary the case - eq(e.error_code, 'InvalidArgument') + assert e.status == 400 + assert e.reason.lower() == 'bad request' # some proxies vary the case + assert e.error_code == 'InvalidArgument' -@tag('auth_aws2') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/no date') -@attr(assertion='fails 403') -@attr('fails_on_dbstore') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws2 +@pytest.mark.fails_on_dbstore def test_bucket_create_bad_date_none_aws2(): check_aws2_support() _add_custom_headers(remove=('Date',)) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 403) - eq(e.reason, 'Forbidden') - eq(e.error_code, 'AccessDenied') + assert e.status == 403 + assert e.reason == 'Forbidden' + assert e.error_code == 'AccessDenied' # # AWS4 specific tests @@ -476,35 +381,25 @@ def test_bucket_create_bad_date_none_aws2(): def check_aws4_support(): if 'S3_USE_SIGV4' not in os.environ: - raise SkipTest + pytest.skip('sigv4 tests not enabled by S3_USE_SIGV4') def check_aws2_support(): if 'S3_USE_SIGV4' in os.environ: - raise SkipTest + pytest.skip('sigv2 tests disabled by S3_USE_SIGV4') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/invalid MD5') -@attr(assertion='fails 400') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_md5_invalid_garbage_aws4(): check_aws4_support() key = _setup_bad_object({'Content-MD5':'AWS4 HAHAHA'}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 400) - eq(e.reason.lower(), 'bad request') # some proxies vary the case - eq(e.error_code, 'InvalidDigest') + assert e.status == 400 + assert e.reason.lower() == 'bad request' # some proxies vary the case + assert e.error_code == 'InvalidDigest' -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/content length too short') -@attr(assertion='fails 400') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_contentlength_mismatch_below_aws4(): check_aws4_support() content = 'bar' @@ -512,277 +407,182 @@ def test_object_create_bad_contentlength_mismatch_below_aws4(): key = _setup_bad_object({'Content-Length': length}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, content) - eq(e.status, 400) - eq(e.reason.lower(), 'bad request') # some proxies vary the case - eq(e.error_code, 'XAmzContentSHA256Mismatch') + assert e.status == 400 + assert e.reason.lower() == 'bad request' # some proxies vary the case + assert e.error_code == 'XAmzContentSHA256Mismatch' -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/incorrect authorization') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def 
test_object_create_bad_authorization_incorrect_aws4(): check_aws4_support() key = _setup_bad_object({'Authorization': 'AWS4-HMAC-SHA256 Credential=AKIAIGR7ZNNBHC5BKSUB/20150930/us-east-1/s3/aws4_request,SignedHeaders=host;user-agent,Signature=FWeDfwojDSdS2Ztmpfeubhd9isU='}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch', 'InvalidAccessKeyId') -@tag('auth_aws4') -@nose.with_setup(teardown=_clear_custom_headers) -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/invalid authorization') -@attr(assertion='fails 400') +@pytest.mark.auth_aws4 def test_object_create_bad_authorization_invalid_aws4(): check_aws4_support() key = _setup_bad_object({'Authorization': 'AWS4-HMAC-SHA256 Credential=HAHAHA'}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 400) - eq(e.reason.lower(), 'bad request') # some proxies vary the case + assert e.status == 400 + assert e.reason.lower() == 'bad request' # some proxies vary the case assert e.error_code in ('AuthorizationHeaderMalformed', 'InvalidArgument') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/empty user agent') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_ua_empty_aws4(): check_aws4_support() key = _setup_bad_object({'User-Agent': ''}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') - eq(e.error_code, 'SignatureDoesNotMatch') + assert e.status == 403 + assert e.reason == 'Forbidden' + assert e.error_code == 'SignatureDoesNotMatch' -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no user agent') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_ua_none_aws4(): check_aws4_support() key = _setup_bad_object(remove=('User-Agent',)) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') - eq(e.error_code, 'SignatureDoesNotMatch') + assert e.status == 403 + assert e.reason == 'Forbidden' + assert e.error_code == 'SignatureDoesNotMatch' -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/invalid date') -@attr(assertion='succeeds') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_date_invalid_aws4(): check_aws4_support() key = _setup_bad_object({'Date': 'Bad Date'}) key.set_contents_from_string('bar') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/invalid x-amz-date') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_amz_date_invalid_aws4(): check_aws4_support() key = _setup_bad_object({'X-Amz-Date': 'Bad Date'}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') 
-@attr(operation='create w/empty date') -@attr(assertion='succeeds') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_date_empty_aws4(): check_aws4_support() key = _setup_bad_object({'Date': ''}) key.set_contents_from_string('bar') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/empty x-amz-date') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_amz_date_empty_aws4(): check_aws4_support() key = _setup_bad_object({'X-Amz-Date': ''}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no date') -@attr(assertion='succeeds') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_date_none_aws4(): check_aws4_support() key = _setup_bad_object(remove=('Date',)) key.set_contents_from_string('bar') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no x-amz-date') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_amz_date_none_aws4(): check_aws4_support() key = _setup_bad_object(remove=('X-Amz-Date',)) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/date in past') -@attr(assertion='succeeds') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_date_before_today_aws4(): check_aws4_support() key = _setup_bad_object({'Date': 'Tue, 07 Jul 2010 21:53:04 GMT'}) key.set_contents_from_string('bar') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/x-amz-date in past') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_amz_date_before_today_aws4(): check_aws4_support() key = _setup_bad_object({'X-Amz-Date': '20100707T215304Z'}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/date in future') -@attr(assertion='succeeds') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_date_after_today_aws4(): check_aws4_support() key = _setup_bad_object({'Date': 'Tue, 07 Jul 2030 21:53:04 GMT'}) key.set_contents_from_string('bar') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/x-amz-date in future') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_amz_date_after_today_aws4(): check_aws4_support() key = _setup_bad_object({'X-Amz-Date': 
'20300707T215304Z'}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/date before epoch') -@attr(assertion='succeeds') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_date_before_epoch_aws4(): check_aws4_support() key = _setup_bad_object({'Date': 'Tue, 07 Jul 1950 21:53:04 GMT'}) key.set_contents_from_string('bar') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/x-amz-date before epoch') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_amz_date_before_epoch_aws4(): check_aws4_support() key = _setup_bad_object({'X-Amz-Date': '19500707T215304Z'}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/date after 9999') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_date_after_end_aws4(): check_aws4_support() key = _setup_bad_object({'Date': 'Tue, 07 Jul 9999 21:53:04 GMT'}) key.set_contents_from_string('bar') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/x-amz-date after 9999') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_bad_amz_date_after_end_aws4(): check_aws4_support() key = _setup_bad_object({'X-Amz-Date': '99990707T215304Z'}) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar') - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch') -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(operation='create with missing signed custom header') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_missing_signed_custom_header_aws4(): check_aws4_support() method='PUT' @@ -804,16 +604,11 @@ def test_object_create_missing_signed_custom_header_aws4(): res =_make_raw_request(host=s3.main.host, port=s3.main.port, method=method, path=path, body=body, request_headers=request_headers, secure=s3.main.is_secure) - eq(res.status, 403) - eq(res.reason, 'Forbidden') + assert res.status == 403 + assert res.reason == 'Forbidden' -@tag('auth_aws4') -@attr(resource='object') -@attr(method='put') -@attr(opearation='create with missing signed header') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_object_create_missing_signed_header_aws4(): check_aws4_support() method='PUT' @@ -836,219 +631,144 @@ def test_object_create_missing_signed_header_aws4(): res =_make_raw_request(host=s3.main.host, port=s3.main.port, method=method, path=path, body=body, request_headers=request_headers, secure=s3.main.is_secure) - eq(res.status, 
403) - eq(res.reason, 'Forbidden') + assert res.status == 403 + assert res.reason == 'Forbidden' -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/invalid authorization') -@attr(assertion='fails 400') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_authorization_invalid_aws4(): check_aws4_support() _add_custom_headers({'Authorization': 'AWS4 HAHAHA'}) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 400) - eq(e.reason.lower(), 'bad request') # some proxies vary the case - eq(e.error_code, 'InvalidArgument') + assert e.status == 400 + assert e.reason.lower() == 'bad request' # some proxies vary the case + assert e.error_code == 'InvalidArgument' -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/empty user agent') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_ua_empty_aws4(): check_aws4_support() _add_custom_headers({'User-Agent': ''}) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 403) - eq(e.reason, 'Forbidden') - eq(e.error_code, 'SignatureDoesNotMatch') + assert e.status == 403 + assert e.reason == 'Forbidden' + assert e.error_code == 'SignatureDoesNotMatch' -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/no user agent') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_ua_none_aws4(): check_aws4_support() _add_custom_headers(remove=('User-Agent',)) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 403) - eq(e.reason, 'Forbidden') - eq(e.error_code, 'SignatureDoesNotMatch') + assert e.status == 403 + assert e.reason == 'Forbidden' + assert e.error_code == 'SignatureDoesNotMatch' -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/invalid date') -@attr(assertion='succeeds') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_date_invalid_aws4(): check_aws4_support() _add_custom_headers({'Date': 'Bad Date'}) get_new_bucket() -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/invalid x-amz-date') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_amz_date_invalid_aws4(): check_aws4_support() _add_custom_headers({'X-Amz-Date': 'Bad Date'}) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch') -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/empty date') -@attr(assertion='succeeds') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_date_empty_aws4(): check_aws4_support() _add_custom_headers({'Date': ''}) get_new_bucket() -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/empty x-amz-date') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_amz_date_empty_aws4(): check_aws4_support() _add_custom_headers({'X-Amz-Date': ''}) e = 
assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch') -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/no date') -@attr(assertion='succeeds') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_date_none_aws4(): check_aws4_support() _add_custom_headers(remove=('Date',)) get_new_bucket() -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/no x-amz-date') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_amz_date_none_aws4(): check_aws4_support() _add_custom_headers(remove=('X-Amz-Date',)) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch') -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/date in past') -@attr(assertion='succeeds') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_date_before_today_aws4(): check_aws4_support() _add_custom_headers({'Date': 'Tue, 07 Jul 2010 21:53:04 GMT'}) get_new_bucket() -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/x-amz-date in past') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_amz_date_before_today_aws4(): check_aws4_support() _add_custom_headers({'X-Amz-Date': '20100707T215304Z'}) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch') -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/date in future') -@attr(assertion='succeeds') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_date_after_today_aws4(): check_aws4_support() _add_custom_headers({'Date': 'Tue, 07 Jul 2030 21:53:04 GMT'}) get_new_bucket() -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/x-amz-date in future') -@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_amz_date_after_today_aws4(): check_aws4_support() _add_custom_headers({'X-Amz-Date': '20300707T215304Z'}) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch') -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/date before epoch') -@attr(assertion='succeeds') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_date_before_epoch_aws4(): check_aws4_support() _add_custom_headers({'Date': 'Tue, 07 Jul 1950 21:53:04 GMT'}) get_new_bucket() -@tag('auth_aws4') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/x-amz-date before epoch') 
-@attr(assertion='fails 403') -@nose.with_setup(teardown=_clear_custom_headers) +@pytest.mark.auth_aws4 def test_bucket_create_bad_amz_date_before_epoch_aws4(): check_aws4_support() _add_custom_headers({'X-Amz-Date': '19500707T215304Z'}) e = assert_raises(boto.exception.S3ResponseError, get_new_bucket) - eq(e.status, 403) - eq(e.reason, 'Forbidden') + assert e.status == 403 + assert e.reason == 'Forbidden' assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch') diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py index 808f9fa..cc70c24 100644 --- a/s3tests/functional/test_s3.py +++ b/s3tests/functional/test_s3.py @@ -7,7 +7,7 @@ import datetime import time import email.utils import isodate -import nose +import pytest import operator import socket import ssl @@ -27,16 +27,14 @@ import re from collections import defaultdict from urllib.parse import urlparse -from nose.tools import eq_ as eq -from nose.plugins.attrib import attr -from nose.plugins.skip import SkipTest - from . import utils from .utils import assert_raises from .policy import Policy, Statement, make_json_policy from . import ( + configfile, + setup_teardown, nuke_prefixed_buckets, get_new_bucket, get_new_bucket_name, @@ -53,9 +51,9 @@ from . import ( def check_access_denied(fn, *args, **kwargs): e = assert_raises(boto.exception.S3ResponseError, fn, *args, **kwargs) - eq(e.status, 403) - eq(e.reason, 'Forbidden') - eq(e.error_code, 'AccessDenied') + assert e.status == 403 + assert e.reason == 'Forbidden' + assert e.error_code == 'AccessDenied' def check_bad_bucket_name(name): """ @@ -63,9 +61,9 @@ def check_bad_bucket_name(name): that the request fails because of an invalid bucket name. """ e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, name) - eq(e.status, 400) - eq(e.reason.lower(), 'bad request') # some proxies vary the case - eq(e.error_code, 'InvalidBucketName') + assert e.status == 400 + assert e.reason.lower() == 'bad request' # some proxies vary the case + assert e.error_code == 'InvalidBucketName' def _create_keys(bucket=None, keys=[]): """ @@ -94,20 +92,16 @@ def _get_alt_connection(): # Breaks DNS with SubdomainCallingFormat -@attr('fails_with_subdomain') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/! 
in name') -@attr(assertion='fails with subdomain') +@pytest.mark.fails_with_subdomain def test_bucket_create_naming_bad_punctuation(): # characters other than [a-zA-Z0-9._-] check_bad_bucket_name('alpha!soup') def check_versioning(bucket, status): try: - eq(bucket.get_versioning_status()['Versioning'], status) + assert bucket.get_versioning_status()['Versioning'] == status except KeyError: - eq(status, None) + assert status == None # amazon is eventual consistent, retry a bit if failed def check_configure_versioning_retry(bucket, status, expected_string): @@ -126,14 +120,10 @@ def check_configure_versioning_retry(bucket, status, expected_string): time.sleep(1) - eq(expected_string, read_status) + assert expected_string == read_status -@attr(resource='object') -@attr(method='create') -@attr(operation='create versioned object, read not exist null version') -@attr(assertion='read null version behaves correctly') -@attr('versioning') -@attr('fails_on_dbstore') +@pytest.mark.versioning +@pytest.mark.fails_on_dbstore def test_versioning_obj_read_not_exist_null(): bucket = get_new_bucket() check_versioning(bucket, None) @@ -147,16 +137,12 @@ def test_versioning_obj_read_not_exist_null(): key.set_contents_from_string(content) key = bucket.get_key(objname, version_id='null') - eq(key, None) + assert key == None -@attr(resource='object') -@attr(method='put') -@attr(operation='append object') -@attr(assertion='success') -@attr('fails_on_aws') -@attr('fails_with_subdomain') -@attr('appendobject') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_aws +@pytest.mark.fails_with_subdomain +@pytest.mark.appendobject +@pytest.mark.fails_on_dbstore def test_append_object(): bucket = get_new_bucket() key = bucket.new_key('foo') @@ -168,20 +154,16 @@ def test_append_object(): res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path1, body='abc', secure=s3.main.is_secure) path2 = path + '&append&position=3' res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path2, body='abc', secure=s3.main.is_secure) - eq(res.status, 200) - eq(res.reason, 'OK') + assert res.status == 200 + assert res.reason == 'OK' key = bucket.get_key('foo') - eq(key.size, 6) + assert key.size == 6 -@attr(resource='object') -@attr(method='put') -@attr(operation='append to normal object') -@attr(assertion='fails 409') -@attr('fails_on_aws') -@attr('fails_with_subdomain') -@attr('appendobject') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_aws +@pytest.mark.fails_with_subdomain +@pytest.mark.appendobject +@pytest.mark.fails_on_dbstore def test_append_normal_object(): bucket = get_new_bucket() key = bucket.new_key('foo') @@ -192,17 +174,13 @@ def test_append_normal_object(): path = o.path + '?' 
+ o.query path = path + '&append&position=3' res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path, body='abc', secure=s3.main.is_secure) - eq(res.status, 409) + assert res.status == 409 -@attr(resource='object') -@attr(method='put') -@attr(operation='append position not right') -@attr(assertion='fails 409') -@attr('fails_on_aws') -@attr('fails_with_subdomain') -@attr('appendobject') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_aws +@pytest.mark.fails_with_subdomain +@pytest.mark.appendobject +@pytest.mark.fails_on_dbstore def test_append_object_position_wrong(): bucket = get_new_bucket() key = bucket.new_key('foo') @@ -214,17 +192,13 @@ def test_append_object_position_wrong(): res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path1, body='abc', secure=s3.main.is_secure) path2 = path + '&append&position=9' res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path2, body='abc', secure=s3.main.is_secure) - eq(res.status, 409) - eq(int(res.getheader('x-rgw-next-append-position')), 3) + assert res.status == 409 + assert int(res.getheader('x-rgw-next-append-position')) == 3 # TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden # http://tracker.newdream.net/issues/984 -@attr(resource='bucket.log') -@attr(method='put') -@attr(operation='set/enable/disable logging target') -@attr(assertion='operations succeed') -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_logging_toggle(): bucket = get_new_bucket() log_bucket = get_new_bucket(targets.main.default, bucket.name + '-log') @@ -311,13 +285,13 @@ def gen_rand_string(size, chars=string.ascii_uppercase + string.digits): def verify_object(bucket, k, data=None, storage_class=None): if storage_class: - eq(k.storage_class, storage_class) + assert k.storage_class == storage_class if data: read_data = k.get_contents_as_string() equal = data == read_data # avoid spamming log if data not equal - eq(equal, True) + assert equal == True def copy_object_storage_class(src_bucket, src_key, dest_bucket, dest_key, storage_class): query_args=None @@ -333,7 +307,7 @@ def copy_object_storage_class(src_bucket, src_key, dest_bucket, dest_key, storag res = dest_bucket.connection.make_request('PUT', dest_bucket.name, dest_key.name, query_args=query_args, headers=headers) - eq(res.status, 200) + assert res.status == 200 def _populate_multipart_key(bucket, kname, size, storage_class=None): (upload, data) = _multipart_upload(bucket, kname, size, storage_class=storage_class) @@ -401,16 +375,13 @@ def lc_transitions(transitions=None): return result -@attr(resource='object') -@attr(method='put') -@attr(operation='test create object with storage class') -@attr('storage_class') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.storage_class +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_object_storage_class(): sc = configured_storage_classes() if len(sc) < 2: - raise SkipTest + pytest.skip('requires multiple storage classes') bucket = get_new_bucket() @@ -420,16 +391,13 @@ def test_object_storage_class(): verify_object(bucket, k, data, storage_class) -@attr(resource='object') -@attr(method='put') -@attr(operation='test create multipart object with storage class') -@attr('storage_class') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.storage_class +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_object_storage_class_multipart(): sc = configured_storage_classes() if len(sc) < 2: - raise SkipTest + 
pytest.skip('requires multiple storage classes') bucket = get_new_bucket() size = 11 * 1024 * 1024 @@ -439,13 +407,13 @@ def test_object_storage_class_multipart(): (upload, data) = _multipart_upload(bucket, key, size, storage_class=storage_class) upload.complete_upload() key2 = bucket.get_key(key) - eq(key2.size, size) - eq(key2.storage_class, storage_class) + assert key2.size == size + assert key2.storage_class == storage_class def _do_test_object_modify_storage_class(obj_write_func, size): sc = configured_storage_classes() if len(sc) < 2: - raise SkipTest + pytest.skip('requires multiple storage classes') bucket = get_new_bucket() @@ -462,29 +430,23 @@ def _do_test_object_modify_storage_class(obj_write_func, size): copy_object_storage_class(bucket, k, bucket, k, new_storage_class) verify_object(bucket, k, data, storage_class) -@attr(resource='object') -@attr(method='put') -@attr(operation='test changing objects storage class') -@attr('storage_class') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.storage_class +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_object_modify_storage_class(): _do_test_object_modify_storage_class(_populate_key, size=9*1024*1024) -@attr(resource='object') -@attr(method='put') -@attr(operation='test changing objects storage class') -@attr('storage_class') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.storage_class +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_object_modify_storage_class_multipart(): _do_test_object_modify_storage_class(_populate_multipart_key, size=11*1024*1024) def _do_test_object_storage_class_copy(obj_write_func, size): sc = configured_storage_classes() if len(sc) < 2: - raise SkipTest + pytest.skip('requires multiple storage classes') src_bucket = get_new_bucket() dest_bucket = get_new_bucket() @@ -502,21 +464,15 @@ def _do_test_object_storage_class_copy(obj_write_func, size): copy_object_storage_class(src_bucket, src_key, dest_bucket, dest_key, new_storage_class) verify_object(dest_bucket, dest_key, data, new_storage_class) -@attr(resource='object') -@attr(method='copy') -@attr(operation='test copy object to object with different storage class') -@attr('storage_class') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.storage_class +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_object_storage_class_copy(): _do_test_object_storage_class_copy(_populate_key, size=9*1024*1024) -@attr(resource='object') -@attr(method='copy') -@attr(operation='test changing objects storage class') -@attr('storage_class') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.storage_class +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_object_storage_class_copy_multipart(): _do_test_object_storage_class_copy(_populate_multipart_key, size=9*1024*1024) @@ -573,7 +529,7 @@ class FakeFileVerifier(object): if self.char == None: self.char = data[0] self.size += size - eq(data.decode(), self.char*size) + assert data.decode() == self.char*size def _verify_atomic_key_data(key, size=-1, char=None): """ @@ -582,7 +538,7 @@ def _verify_atomic_key_data(key, size=-1, char=None): fp_verify = FakeFileVerifier(char) key.get_contents_to_file(fp_verify) if size >= 0: - eq(fp_verify.size, size) + assert fp_verify.size == size def _test_atomic_dual_conditional_write(file_size): """ @@ -611,28 +567,20 @@ def _test_atomic_dual_conditional_write(file_size): # key.set_contents_from_file(fp_c, headers={'If-Match': etag_fp_a}) e = 
assert_raises(boto.exception.S3ResponseError, key.set_contents_from_file, fp_c, headers={'If-Match': etag_fp_a}) - eq(e.status, 412) - eq(e.reason, 'Precondition Failed') - eq(e.error_code, 'PreconditionFailed') + assert e.status == 412 + assert e.reason == 'Precondition Failed' + assert e.error_code == 'PreconditionFailed' # verify the file _verify_atomic_key_data(key, file_size, 'B') -@attr(resource='object') -@attr(method='put') -@attr(operation='write one or the other') -@attr(assertion='1MB successful') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_atomic_dual_conditional_write_1mb(): _test_atomic_dual_conditional_write(1024*1024) -@attr(resource='object') -@attr(method='put') -@attr(operation='write file in deleted bucket') -@attr(assertion='fail 404') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_atomic_write_bucket_gone(): bucket = get_new_bucket() @@ -644,9 +592,9 @@ def test_atomic_write_bucket_gone(): key = bucket.new_key('foo') fp_a = FakeWriteFile(1024*1024, 'A', remove_bucket) e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_file, fp_a) - eq(e.status, 404) - eq(e.reason, 'Not Found') - eq(e.error_code, 'NoSuchBucket') + assert e.status == 404 + assert e.reason == 'Not Found' + assert e.error_code == 'NoSuchBucket' def _multipart_upload_enc(bucket, s3_key_name, size, part_size=5*1024*1024, do_list=None, init_headers=None, part_headers=None, @@ -672,12 +620,8 @@ def _multipart_upload_enc(bucket, s3_key_name, size, part_size=5*1024*1024, -@attr(resource='object') -@attr(method='put') -@attr(operation='multipart upload with bad key for uploading chunks') -@attr(assertion='successful') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_encryption_sse_c_multipart_invalid_chunks_1(): bucket = get_new_bucket() key = "multipart_enc" @@ -698,14 +642,10 @@ def test_encryption_sse_c_multipart_invalid_chunks_1(): _multipart_upload_enc, bucket, key, objlen, init_headers=init_headers, part_headers=part_headers, metadata={'foo': 'bar'}) - eq(e.status, 400) + assert e.status == 400 -@attr(resource='object') -@attr(method='put') -@attr(operation='multipart upload with bad md5 for chunks') -@attr(assertion='successful') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_encryption_sse_c_multipart_invalid_chunks_2(): bucket = get_new_bucket() key = "multipart_enc" @@ -726,15 +666,11 @@ def test_encryption_sse_c_multipart_invalid_chunks_2(): _multipart_upload_enc, bucket, key, objlen, init_headers=init_headers, part_headers=part_headers, metadata={'foo': 'bar'}) - eq(e.status, 400) + assert e.status == 400 -@attr(resource='bucket') -@attr(method='get') -@attr(operation='Test Bucket Policy for a user belonging to a different tenant') -@attr(assertion='succeeds') -@attr('fails_with_subdomain') -@attr('bucket-policy') -@attr('fails_on_dbstore') +@pytest.mark.fails_with_subdomain +@pytest.mark.bucket_policy +@pytest.mark.fails_on_dbstore def test_bucket_policy_different_tenant(): bucket = get_new_bucket() key = bucket.new_key('asdf') @@ -769,11 +705,8 @@ def test_bucket_policy_different_tenant(): b = new_conn.get_bucket(bucket_name) b.get_all_keys() -@attr(resource='bucket') -@attr(method='put') -@attr(operation='Test put condition operator end with ifExists') -@attr('bucket-policy') -@attr('fails_on_dbstore') 
+@pytest.mark.bucket_policy +@pytest.mark.fails_on_dbstore def test_bucket_policy_set_condition_operator_end_with_IfExists(): bucket = _create_keys(keys=['foo']) policy = '''{ @@ -792,29 +725,25 @@ def test_bucket_policy_set_condition_operator_end_with_IfExists(): } ] }''' % bucket.name - eq(bucket.set_policy(policy), True) + assert bucket.set_policy(policy) == True res = _make_request('GET', bucket.name, bucket.get_key("foo"), request_headers={'referer': 'http://www.example.com/'}) - eq(res.status, 200) + assert res.status == 200 res = _make_request('GET', bucket.name, bucket.get_key("foo"), request_headers={'referer': 'http://www.example.com/index.html'}) - eq(res.status, 200) + assert res.status == 200 res = _make_request('GET', bucket.name, bucket.get_key("foo")) - eq(res.status, 200) + assert res.status == 200 res = _make_request('GET', bucket.name, bucket.get_key("foo"), request_headers={'referer': 'http://example.com'}) - eq(res.status, 403) + assert res.status == 403 def _make_arn_resource(path="*"): return "arn:aws:s3:::{}".format(path) -@attr(resource='object') -@attr(method='put') -@attr(operation='put obj with RequestObjectTag') -@attr(assertion='success') -@attr('tagging') -@attr('bucket-policy') -@attr('fails_on_dbstore') +@pytest.mark.tagging +@pytest.mark.bucket_policy +@pytest.mark.fails_on_dbstore def test_bucket_policy_put_obj_request_obj_tag(): bucket = get_new_bucket() diff --git a/s3tests/functional/test_s3_website.py b/s3tests/functional/test_s3_website.py index 3696004..8cc04ed 100644 --- a/s3tests/functional/test_s3_website.py +++ b/s3tests/functional/test_s3_website.py @@ -1,7 +1,6 @@ - import sys import collections -import nose +import pytest import string import random from pprint import pprint @@ -11,14 +10,11 @@ import socket from urllib.parse import urlparse -from nose.tools import eq_ as eq, ok_ as ok -from nose.plugins.attrib import attr -from nose.tools import timed -from nose.plugins.skip import SkipTest - from .. import common from . import ( + configfile, + setup_teardown, get_new_bucket, get_new_bucket_name, s3, @@ -43,38 +39,26 @@ ERRORDOC_TEMPLATE = '

<html><h1>ErrorDoc</h1><body>{random}</body></html>' CAN_WEBSITE = None -@attr('fails_on_dbstore') +@pytest.fixture(autouse=True, scope="module") def check_can_test_website(): - global CAN_WEBSITE - # This is a bit expensive, so we cache this - if CAN_WEBSITE is None: - bucket = get_new_bucket() - try: - wsconf = bucket.get_website_configuration() - CAN_WEBSITE = True - except boto.exception.S3ResponseError as e: - if e.status == 404 and e.reason == 'Not Found' and e.error_code in ['NoSuchWebsiteConfiguration', 'NoSuchKey']: - CAN_WEBSITE = True - elif e.status == 405 and e.reason == 'Method Not Allowed' and e.error_code == 'MethodNotAllowed': - # rgw_enable_static_website is false - CAN_WEBSITE = False - elif e.status == 403 and e.reason == 'SignatureDoesNotMatch' and e.error_code == 'Forbidden': - # This is older versions that do not support the website code - CAN_WEBSITE = False - elif e.status == 501 and e.error_code == 'NotImplemented': - CAN_WEBSITE = False - else: - raise RuntimeError("Unknown response in checking if WebsiteConf is supported", e) - finally: - bucket.delete() - - if CAN_WEBSITE is True: + bucket = get_new_bucket() + try: + wsconf = bucket.get_website_configuration() return True - elif CAN_WEBSITE is False: - raise SkipTest - else: - raise RuntimeError("Unknown cached response in checking if WebsiteConf is supported") - + except boto.exception.S3ResponseError as e: + if e.status == 404 and e.reason == 'Not Found' and e.error_code in ['NoSuchWebsiteConfiguration', 'NoSuchKey']: + return True + elif e.status == 405 and e.reason == 'Method Not Allowed' and e.error_code == 'MethodNotAllowed': + pytest.skip('rgw_enable_static_website is false') + elif e.status == 403 and e.reason == 'SignatureDoesNotMatch' and e.error_code == 'Forbidden': + # This is older versions that do not support the website code + pytest.skip('static website is not implemented') + elif e.status == 501 and e.error_code == 'NotImplemented': + pytest.skip('static website is not implemented') + else: + raise RuntimeError("Unknown response in checking if WebsiteConf is supported", e) + finally: + bucket.delete() def make_website_config(xml_fragment): """ @@ -170,7 +154,7 @@ def _test_website_prep(bucket, xml_template, hardcoded_fields = {}, expect_fail= # Cleanup for our validation common.assert_xml_equal(config_xmlcmp, config_xmlnew) #print("config_xmlcmp\n", config_xmlcmp) - #eq (config_xmlnew, config_xmlcmp) + #assert config_xmlnew == config_xmlcmp f['WebsiteConfiguration'] = config_xmlcmp return f @@ -181,9 +165,9 @@ def __website_expected_reponse_status(res, status, reason): reason = set([reason]) if status is not IGNORE_FIELD: - ok(res.status in status, 'HTTP code was %s should be %s' % (res.status, status)) + assert res.status in status, 'HTTP code was %s should be %s' % (res.status, status) if reason is not IGNORE_FIELD: - ok(res.reason in reason, 'HTTP reason was was %s should be %s' % (res.reason, reason)) + assert res.reason in reason, 'HTTP reason was %s should be %s' % (res.reason, reason) def _website_expected_default_html(**kwargs): fields = [] @@ -213,22 +197,22 @@ def _website_expected_error_response(res, bucket_name, status, reason, code, con errorcode = res.getheader('x-amz-error-code', None) if errorcode is not None: if code is not IGNORE_FIELD: - eq(errorcode, code) + assert errorcode == code if not isinstance(content, collections.Container): content = set([content]) for f in content: if f is not IGNORE_FIELD and f is not None: f = bytes(f, 'utf-8') - ok(f in body, 'HTML should contain "%s"' % (f, )) + assert f in body,
'HTML should contain "%s"' % (f, ) def _website_expected_redirect_response(res, status, reason, new_url): body = res.read() print(body) __website_expected_reponse_status(res, status, reason) loc = res.getheader('Location', None) - eq(loc, new_url, 'Location header should be set "%s" != "%s"' % (loc,new_url,)) - ok(len(body) == 0, 'Body of a redirect should be empty') + assert loc == new_url, 'Location header should be set "%s" != "%s"' % (loc,new_url,) + assert len(body) == 0, 'Body of a redirect should be empty' def _website_request(bucket_name, path, connect_hostname=None, method='GET', timeout=None): url = get_website_url(proto='http', bucket=bucket_name, path=path) @@ -247,27 +231,16 @@ def _website_request(bucket_name, path, connect_hostname=None, method='GET', tim return res # ---------- Non-existant buckets via the website endpoint -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='non-existant bucket via website endpoint should give NoSuchBucket, exposing security risk') -@attr('s3website') -@attr('fails_on_rgw') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_rgw def test_website_nonexistant_bucket_s3(): bucket_name = get_new_bucket_name() res = _website_request(bucket_name, '') _website_expected_error_response(res, bucket_name, 404, 'Not Found', 'NoSuchBucket', content=_website_expected_default_html(Code='NoSuchBucket')) -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -#@attr(assertion='non-existant bucket via website endpoint should give Forbidden, keeping bucket identity secure') -@attr(assertion='non-existant bucket via website endpoint should give NoSuchBucket') -@attr('s3website') -@attr('fails_on_s3') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_s3 +@pytest.mark.fails_on_dbstore def test_website_nonexistant_bucket_rgw(): bucket_name = get_new_bucket_name() res = _website_request(bucket_name, '') @@ -275,14 +248,9 @@ def test_website_nonexistant_bucket_rgw(): _website_expected_error_response(res, bucket_name, 404, 'Not Found', 'NoSuchBucket', content=_website_expected_default_html(Code='NoSuchBucket')) #------------- IndexDocument only, successes -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='non-empty public buckets via s3website return page for /, where page is public') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) -@timed(10) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore +@pytest.mark.timeout(10) def test_website_public_bucket_list_public_index(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc']) @@ -299,18 +267,13 @@ def test_website_public_bucket_list_public_index(): body = res.read() print(body) indexstring = bytes(indexstring, 'utf-8') - eq(body, indexstring) # default content should match index.html set content + assert body == indexstring # default content should match index.html set content __website_expected_reponse_status(res, 200, 'OK') indexhtml.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='non-empty private buckets via s3website return page for /, where page is private') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) 
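The nose ``@timed(10)`` decorator dropped above is replaced by ``@pytest.mark.timeout(10)``. That mark is not part of core pytest; it comes from the ``pytest-timeout`` plugin, which this conversion presumably adds to the test requirements. A minimal sketch of the behaviour, assuming the plugin is installed::

    import time

    import pytest

    @pytest.mark.timeout(10)       # fail (rather than hang) after 10 seconds
    def test_finishes_within_budget():
        time.sleep(1)              # comfortably under the limit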
+@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_private_bucket_list_public_index(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc']) @@ -329,19 +292,14 @@ def test_website_private_bucket_list_public_index(): body = res.read() print(body) indexstring = bytes(indexstring, 'utf-8') - eq(body, indexstring, 'default content should match index.html set content') + assert body == indexstring, 'default content should match index.html set content' indexhtml.delete() bucket.delete() # ---------- IndexDocument only, failures -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='empty private buckets via s3website return a 403 for /') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_private_bucket_list_empty(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc']) @@ -352,13 +310,8 @@ def test_website_private_bucket_list_empty(): _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied')) bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='empty public buckets via s3website return a 404 for /') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_public_bucket_list_empty(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc']) @@ -368,13 +321,8 @@ def test_website_public_bucket_list_empty(): _website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchKey', content=_website_expected_default_html(Code='NoSuchKey')) bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='non-empty public buckets via s3website return page for /, where page is private') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_public_bucket_list_private_index(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc']) @@ -394,13 +342,8 @@ def test_website_public_bucket_list_private_index(): indexhtml.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='non-empty private buckets via s3website return page for /, where page is private') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_private_bucket_list_private_index(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc']) @@ -421,13 +364,8 @@ def test_website_private_bucket_list_private_index(): bucket.delete() # ---------- IndexDocument & ErrorDocument, failures due to errordoc assigned but missing -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='empty private buckets via s3website return a 403 for /, missing errordoc') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) 
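Note how every ``@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)`` line in these hunks is simply deleted: ``check_can_test_website`` became a module-scoped autouse fixture earlier in this file, so it runs once per module and a ``pytest.skip()`` raised inside it skips the module's tests. A minimal sketch of that pattern, with hypothetical names::

    import pytest

    def feature_available():
        # hypothetical probe; the real fixture inspects the server's response
        return True

    @pytest.fixture(autouse=True, scope="module")
    def requires_feature():
        # applied to every test in the module without being named explicitly
        if not feature_available():
            pytest.skip('feature is not enabled on this endpoint')

    def test_uses_feature():
        assert True   # reached only when the fixture did not skip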
+@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_private_bucket_list_empty_missingerrordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -438,13 +376,8 @@ def test_website_private_bucket_list_empty_missingerrordoc(): bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='empty public buckets via s3website return a 404 for /, missing errordoc') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_public_bucket_list_empty_missingerrordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -454,13 +387,8 @@ def test_website_public_bucket_list_empty_missingerrordoc(): _website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchKey') bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='non-empty public buckets via s3website return page for /, where page is private, missing errordoc') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_public_bucket_list_private_index_missingerrordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -479,13 +407,8 @@ def test_website_public_bucket_list_private_index_missingerrordoc(): indexhtml.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='non-empty private buckets via s3website return page for /, where page is private, missing errordoc') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_private_bucket_list_private_index_missingerrordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -505,13 +428,8 @@ def test_website_private_bucket_list_private_index_missingerrordoc(): bucket.delete() # ---------- IndexDocument & ErrorDocument, failures due to errordoc assigned but not accessible -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='empty private buckets via s3website return a 403 for /, blocked errordoc') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_private_bucket_list_empty_blockederrordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -529,18 +447,13 @@ def test_website_private_bucket_list_empty_blockederrordoc(): print(body) _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body) errorstring = bytes(errorstring, 'utf-8') - ok(errorstring not in body, 'error content should NOT match error.html set content') + assert errorstring not in body, 'error content should NOT match error.html set content' errorhtml.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='check if there is an invalid payload after 
serving error doc') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_public_bucket_list_pubilc_errordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -581,18 +494,13 @@ def test_website_public_bucket_list_pubilc_errordoc(): except socket.timeout: print('no invalid payload') - ok(resp_len == 0, 'invalid payload') + assert resp_len == 0, 'invalid payload' errorhtml.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='empty public buckets via s3website return a 404 for /, blocked errordoc') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_public_bucket_list_empty_blockederrordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -609,18 +517,13 @@ def test_website_public_bucket_list_empty_blockederrordoc(): print(body) _website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchKey', content=_website_expected_default_html(Code='NoSuchKey'), body=body) errorstring = bytes(errorstring, 'utf-8') - ok(errorstring not in body, 'error content should match error.html set content') + assert errorstring not in body, 'error content should NOT match error.html set content' errorhtml.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='non-empty public buckets via s3website return page for /, where page is private, blocked errordoc') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_public_bucket_list_private_index_blockederrordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -642,19 +545,14 @@ def test_website_public_bucket_list_private_index_blockederrordoc(): print(body) _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body) errorstring = bytes(errorstring, 'utf-8') - ok(errorstring not in body, 'error content should match error.html set content') + assert errorstring not in body, 'error content should NOT match error.html set content' indexhtml.delete() errorhtml.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='non-empty private buckets via s3website return page for /, where page is private, blocked errordoc') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_private_bucket_list_private_index_blockederrordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -676,20 +574,15 @@ def test_website_private_bucket_list_private_index_blockederrordoc(): print(body) _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body) errorstring = bytes(errorstring, 'utf-8') - ok(errorstring not in body, 'error content should match
error.html set content') + assert errorstring not in body, 'error content should NOT match error.html set content' indexhtml.delete() errorhtml.delete() bucket.delete() # ---------- IndexDocument & ErrorDocument, failures with errordoc available -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='empty private buckets via s3website return a 403 for /, good errordoc') -@attr('s3website') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) -@attr('fails_on_dbstore') +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_private_bucket_list_empty_gooderrordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -707,13 +600,8 @@ def test_website_private_bucket_list_empty_gooderrordoc(): errorhtml.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='empty public buckets via s3website return a 404 for /, good errordoc') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_public_bucket_list_empty_gooderrordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -732,13 +620,8 @@ def test_website_public_bucket_list_empty_gooderrordoc(): errorhtml.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='non-empty public buckets via s3website return page for /, where page is private') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_public_bucket_list_private_index_gooderrordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -762,13 +645,8 @@ def test_website_public_bucket_list_private_index_gooderrordoc(): errorhtml.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='non-empty private buckets via s3website return page for /, where page is private') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_private_bucket_list_private_index_gooderrordoc(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc']) @@ -793,13 +671,8 @@ def test_website_private_bucket_list_private_index_gooderrordoc(): bucket.delete() # ------ RedirectAll tests -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='RedirectAllRequestsTo without protocol should TODO') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_bucket_private_redirectall_base(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['RedirectAll']) @@ -811,13 +684,8 @@ def test_website_bucket_private_redirectall_base(): bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='RedirectAllRequestsTo without protocol should TODO') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website,
teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_bucket_private_redirectall_path(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['RedirectAll']) @@ -831,13 +699,8 @@ def test_website_bucket_private_redirectall_path(): bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='RedirectAllRequestsTo without protocol should TODO') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_website_bucket_private_redirectall_path_upgrade(): bucket = get_new_bucket() x = string.Template(WEBSITE_CONFIGS_XMLFRAG['RedirectAll+Protocol']).safe_substitute(RedirectAllRequestsTo_Protocol='https') @@ -853,14 +716,9 @@ def test_website_bucket_private_redirectall_path_upgrade(): bucket.delete() # ------ x-amz redirect tests -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='x-amz-website-redirect-location should not fire without websiteconf') -@attr('s3website') -@attr('x-amz-website-redirect-location') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.s3website_redirect_location +@pytest.mark.fails_on_dbstore def test_website_xredirect_nonwebsite(): bucket = get_new_bucket() #f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['RedirectAll']) @@ -872,7 +730,7 @@ def test_website_xredirect_nonwebsite(): headers = {'x-amz-website-redirect-location': redirect_dest} k.set_contents_from_string(content, headers=headers, policy='public-read') redirect = k.get_redirect() - eq(k.get_redirect(), redirect_dest) + assert k.get_redirect() == redirect_dest res = _website_request(bucket.name, '/page') body = res.read() @@ -886,14 +744,9 @@ def test_website_xredirect_nonwebsite(): k.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='x-amz-website-redirect-location should fire websiteconf, relative path, public key') -@attr('s3website') -@attr('x-amz-website-redirect-location') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.s3website_redirect_location +@pytest.mark.fails_on_dbstore def test_website_xredirect_public_relative(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc']) @@ -905,7 +758,7 @@ def test_website_xredirect_public_relative(): headers = {'x-amz-website-redirect-location': redirect_dest} k.set_contents_from_string(content, headers=headers, policy='public-read') redirect = k.get_redirect() - eq(k.get_redirect(), redirect_dest) + assert k.get_redirect() == redirect_dest res = _website_request(bucket.name, '/page') #new_url = get_website_url(bucket_name=bucket.name, path=redirect_dest) @@ -914,14 +767,9 @@ def test_website_xredirect_public_relative(): k.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='x-amz-website-redirect-location should fire websiteconf, absolute, public key') -@attr('s3website') -@attr('x-amz-website-redirect-location') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.s3website_redirect_location +@pytest.mark.fails_on_dbstore def 
test_website_xredirect_public_abs(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc']) @@ -933,7 +781,7 @@ def test_website_xredirect_public_abs(): headers = {'x-amz-website-redirect-location': redirect_dest} k.set_contents_from_string(content, headers=headers, policy='public-read') redirect = k.get_redirect() - eq(k.get_redirect(), redirect_dest) + assert k.get_redirect() == redirect_dest res = _website_request(bucket.name, '/page') new_url = get_website_url(proto='http', hostname='example.com', path='/foo') @@ -942,14 +790,9 @@ def test_website_xredirect_public_abs(): k.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='x-amz-website-redirect-location should fire websiteconf, relative path, private key') -@attr('s3website') -@attr('x-amz-website-redirect-location') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.s3website_redirect_location +@pytest.mark.fails_on_dbstore def test_website_xredirect_private_relative(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc']) @@ -961,7 +804,7 @@ def test_website_xredirect_private_relative(): headers = {'x-amz-website-redirect-location': redirect_dest} k.set_contents_from_string(content, headers=headers, policy='private') redirect = k.get_redirect() - eq(k.get_redirect(), redirect_dest) + assert k.get_redirect() == redirect_dest res = _website_request(bucket.name, '/page') # We get a 403 because the page is private @@ -970,14 +813,9 @@ def test_website_xredirect_private_relative(): k.delete() bucket.delete() -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='x-amz-website-redirect-location should fire websiteconf, absolute, private key') -@attr('s3website') -@attr('x-amz-website-redirect-location') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website +@pytest.mark.s3website_redirect_location +@pytest.mark.fails_on_dbstore def test_website_xredirect_private_abs(): bucket = get_new_bucket() f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc']) @@ -989,7 +827,7 @@ def test_website_xredirect_private_abs(): headers = {'x-amz-website-redirect-location': redirect_dest} k.set_contents_from_string(content, headers=headers, policy='private') redirect = k.get_redirect() - eq(k.get_redirect(), redirect_dest) + assert k.get_redirect() == redirect_dest res = _website_request(bucket.name, '/page') new_url = get_website_url(proto='http', hostname='example.com', path='/foo') @@ -1202,8 +1040,6 @@ def routing_teardown(**kwargs): print('Deleting', str(o)) o.delete() -@common.with_setup_kwargs(setup=routing_setup, teardown=routing_teardown) -#@timed(10) def routing_check(*args, **kwargs): bucket = kwargs['bucket'] args=args[0] @@ -1229,8 +1065,8 @@ def routing_check(*args, **kwargs): if args['code'] >= 200 and args['code'] < 300: #body = res.read() #print(body) - #eq(body, args['content'], 'default content should match index.html set content') - ok(int(res.getheader('Content-Length', -1)) > 0) + #assert body == args['content'], 'default content should match index.html set content' + assert int(res.getheader('Content-Length', -1)) > 0 elif args['code'] >= 300 and args['code'] < 400: _website_expected_redirect_response(res, args['code'], IGNORE_FIELD, new_url) elif args['code'] >= 400: @@ -1238,10 +1074,9 @@ 
def routing_check(*args, **kwargs): else: assert(False) -@attr('s3website_RoutingRules') -@attr('s3website') -@attr('fails_on_dbstore') -@nose.with_setup(setup=check_can_test_website, teardown=common.teardown) +@pytest.mark.s3website_routing_rules +@pytest.mark.s3website +@pytest.mark.fails_on_dbstore def test_routing_generator(): for t in ROUTING_RULES_TESTS: if 'xml' in t and 'RoutingRules' in t['xml'] and len(t['xml']['RoutingRules']) > 0: diff --git a/s3tests/functional/test_utils.py b/s3tests/functional/test_utils.py index 59c3c74..c0dd398 100644 --- a/s3tests/functional/test_utils.py +++ b/s3tests/functional/test_utils.py @@ -1,11 +1,9 @@ -from nose.tools import eq_ as eq - from . import utils def test_generate(): FIVE_MB = 5 * 1024 * 1024 - eq(len(''.join(utils.generate_random(0))), 0) - eq(len(''.join(utils.generate_random(1))), 1) - eq(len(''.join(utils.generate_random(FIVE_MB - 1))), FIVE_MB - 1) - eq(len(''.join(utils.generate_random(FIVE_MB))), FIVE_MB) - eq(len(''.join(utils.generate_random(FIVE_MB + 1))), FIVE_MB + 1) + assert len(''.join(utils.generate_random(0))) == 0 + assert len(''.join(utils.generate_random(1))) == 1 + assert len(''.join(utils.generate_random(FIVE_MB - 1))) == FIVE_MB - 1 + assert len(''.join(utils.generate_random(FIVE_MB))) == FIVE_MB + assert len(''.join(utils.generate_random(FIVE_MB + 1))) == FIVE_MB + 1 diff --git a/s3tests/functional/utils.py b/s3tests/functional/utils.py index 85bcaf7..3083415 100644 --- a/s3tests/functional/utils.py +++ b/s3tests/functional/utils.py @@ -3,8 +3,6 @@ import requests import string import time -from nose.tools import eq_ as eq - def assert_raises(excClass, callableObj, *args, **kwargs): """ Like unittest.TestCase.assertRaises, but returns the exception. @@ -48,7 +46,7 @@ def region_sync_meta(targets, region): conf = r.conf if conf.sync_agent_addr: ret = requests.post('http://{addr}:{port}/metadata/incremental'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port)) - eq(ret.status_code, 200) + assert ret.status_code == 200 if conf.sync_meta_wait: time.sleep(conf.sync_meta_wait) diff --git a/s3tests_boto3/functional/__init__.py b/s3tests_boto3/functional/__init__.py index 4d03d65..22b136c 100644 --- a/s3tests_boto3/functional/__init__.py +++ b/s3tests_boto3/functional/__init__.py @@ -1,3 +1,4 @@ +import pytest import boto3 from botocore import UNSIGNED from botocore.client import Config @@ -308,6 +309,16 @@ def teardown(): except: pass +@pytest.fixture(scope="package") +def configfile(): + setup() + return config + +@pytest.fixture(autouse=True) +def setup_teardown(configfile): + yield + teardown() + def check_webidentity(): cfg = configparser.RawConfigParser() try: diff --git a/s3tests_boto3/functional/test_headers.py b/s3tests_boto3/functional/test_headers.py index 04b2757..66cabe5 100644 --- a/s3tests_boto3/functional/test_headers.py +++ b/s3tests_boto3/functional/test_headers.py @@ -1,7 +1,5 @@ import boto3 -from nose.tools import eq_ as eq -from nose.plugins.attrib import attr -import nose +import pytest from botocore.exceptions import ClientError from email.utils import formatdate @@ -10,6 +8,8 @@ from .utils import _get_status_and_error_code from .utils import _get_status from . 
import ( + configfile, + setup_teardown, get_client, get_v2_client, get_new_bucket, @@ -149,156 +149,97 @@ def _remove_header_create_bad_bucket(remove, client=None): return e -def tag(*tags): - def wrap(func): - for tag in tags: - setattr(func, tag, True) - return func - return wrap - # # common tests # -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/invalid MD5') -@attr(assertion='fails 400') +@pytest.mark.auth_common def test_object_create_bad_md5_invalid_short(): e = _add_header_create_bad_object({'Content-MD5':'YWJyYWNhZGFicmE='}) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidDigest') + assert status == 400 + assert error_code == 'InvalidDigest' -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/mismatched MD5') -@attr(assertion='fails 400') +@pytest.mark.auth_common def test_object_create_bad_md5_bad(): e = _add_header_create_bad_object({'Content-MD5':'rL0Y20xC+Fzt72VPzMSk2A=='}) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'BadDigest') + assert status == 400 + assert error_code == 'BadDigest' -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/empty MD5') -@attr(assertion='fails 400') +@pytest.mark.auth_common def test_object_create_bad_md5_empty(): e = _add_header_create_bad_object({'Content-MD5':''}) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidDigest') + assert status == 400 + assert error_code == 'InvalidDigest' -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no MD5 header') -@attr(assertion='succeeds') +@pytest.mark.auth_common def test_object_create_bad_md5_none(): bucket_name, key_name = _remove_header_create_object('Content-MD5') client = get_client() client.put_object(Bucket=bucket_name, Key=key_name, Body='bar') -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/Expect 200') -@attr(assertion='garbage, but S3 succeeds!') +@pytest.mark.auth_common def test_object_create_bad_expect_mismatch(): bucket_name, key_name = _add_header_create_object({'Expect': 200}) client = get_client() client.put_object(Bucket=bucket_name, Key=key_name, Body='bar') -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/empty expect') -@attr(assertion='succeeds ... 
should it?') +@pytest.mark.auth_common def test_object_create_bad_expect_empty(): bucket_name, key_name = _add_header_create_object({'Expect': ''}) client = get_client() client.put_object(Bucket=bucket_name, Key=key_name, Body='bar') -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no expect') -@attr(assertion='succeeds') +@pytest.mark.auth_common def test_object_create_bad_expect_none(): bucket_name, key_name = _remove_header_create_object('Expect') client = get_client() client.put_object(Bucket=bucket_name, Key=key_name, Body='bar') -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/empty content length') -@attr(assertion='fails 400') +@pytest.mark.auth_common # TODO: remove 'fails_on_rgw' and once we have learned how to remove the content-length header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_object_create_bad_contentlength_empty(): e = _add_header_create_bad_object({'Content-Length':''}) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/negative content length') -@attr(assertion='fails 400') -@attr('fails_on_mod_proxy_fcgi') +@pytest.mark.auth_common +@pytest.mark.fails_on_mod_proxy_fcgi def test_object_create_bad_contentlength_negative(): client = get_client() bucket_name = get_new_bucket() key_name = 'foo' e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, ContentLength=-1) status = _get_status(e.response) - eq(status, 400) + assert status == 400 -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no content length') -@attr(assertion='fails 411') +@pytest.mark.auth_common # TODO: remove 'fails_on_rgw' and once we have learned how to remove the content-length header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_object_create_bad_contentlength_none(): remove = 'Content-Length' e = _remove_header_create_bad_object('Content-Length') status, error_code = _get_status_and_error_code(e.response) - eq(status, 411) - eq(error_code, 'MissingContentLength') + assert status == 411 + assert error_code == 'MissingContentLength' -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/content type text/plain') -@attr(assertion='succeeds') +@pytest.mark.auth_common def test_object_create_bad_contenttype_invalid(): bucket_name, key_name = _add_header_create_object({'Content-Type': 'text/plain'}) client = get_client() client.put_object(Bucket=bucket_name, Key=key_name, Body='bar') -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/empty content type') -@attr(assertion='succeeds') +@pytest.mark.auth_common def test_object_create_bad_contenttype_empty(): client = get_client() key_name = 'foo' bucket_name = get_new_bucket() client.put_object(Bucket=bucket_name, Key=key_name, Body='bar', ContentType='') -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no content type') -@attr(assertion='succeeds') +@pytest.mark.auth_common def test_object_create_bad_contenttype_none(): bucket_name = get_new_bucket() key_name = 'foo' @@ -307,38 +248,26 @@ def test_object_create_bad_contenttype_none(): client.put_object(Bucket=bucket_name, Key=key_name, Body='bar') -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create 
w/empty authorization') -@attr(assertion='fails 403') +@pytest.mark.auth_common # TODO: remove 'fails_on_rgw' and once we have learned how to remove the authorization header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_object_create_bad_authorization_empty(): e = _add_header_create_bad_object({'Authorization': ''}) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) + assert status == 403 -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/date and x-amz-date') -@attr(assertion='succeeds') +@pytest.mark.auth_common # TODO: remove 'fails_on_rgw' and once we have learned how to pass both the 'Date' and 'X-Amz-Date' header during signing and not 'X-Amz-Date' before -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_object_create_date_and_amz_date(): date = formatdate(usegmt=True) bucket_name, key_name = _add_header_create_object({'Date': date, 'X-Amz-Date': date}) client = get_client() client.put_object(Bucket=bucket_name, Key=key_name, Body='bar') -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/x-amz-date and no date') -@attr(assertion='succeeds') +@pytest.mark.auth_common # TODO: remove 'fails_on_rgw' and once we have learned how to pass both the 'Date' and 'X-Amz-Date' header during signing and not 'X-Amz-Date' before -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_object_create_amz_date_and_no_date(): date = formatdate(usegmt=True) bucket_name, key_name = _add_header_create_object({'Date': '', 'X-Amz-Date': date}) @@ -346,36 +275,24 @@ def test_object_create_amz_date_and_no_date(): client.put_object(Bucket=bucket_name, Key=key_name, Body='bar') # the teardown is really messed up here. check it out -@tag('auth_common') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no authorization') -@attr(assertion='fails 403') +@pytest.mark.auth_common # TODO: remove 'fails_on_rgw' and once we have learned how to remove the authorization header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_object_create_bad_authorization_none(): e = _remove_header_create_bad_object('Authorization') status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) + assert status == 403 -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/no content length') -@attr(assertion='succeeds') +@pytest.mark.auth_common # TODO: remove 'fails_on_rgw' and once we have learned how to remove the content-length header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_bucket_create_contentlength_none(): remove = 'Content-Length' _remove_header_create_bucket(remove) -@tag('auth_common') -@attr(resource='bucket') -@attr(method='acls') -@attr(operation='set w/no content length') -@attr(assertion='succeeds') +@pytest.mark.auth_common # TODO: remove 'fails_on_rgw' and once we have learned how to remove the content-length header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_object_acl_create_contentlength_none(): bucket_name = get_new_bucket() client = get_client() @@ -389,11 +306,7 @@ def test_object_acl_create_contentlength_none(): client.meta.events.register('before-call.s3.PutObjectAcl', remove_header) client.put_object_acl(Bucket=bucket_name, Key='foo', ACL='public-read') -@tag('auth_common') -@attr(resource='bucket') -@attr(method='acls') -@attr(operation='set w/invalid permission') -@attr(assertion='fails 400') +@pytest.mark.auth_common def test_bucket_put_bad_canned_acl(): 
bucket_name = get_new_bucket() client = get_client() @@ -404,13 +317,9 @@ def test_bucket_put_bad_canned_acl(): e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, ACL='public-read') status = _get_status(e.response) - eq(status, 400) + assert status == 400 -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/expect 200') -@attr(assertion='garbage, but S3 succeeds!') +@pytest.mark.auth_common def test_bucket_create_bad_expect_mismatch(): bucket_name = get_new_bucket_name() client = get_client() @@ -420,99 +329,67 @@ def test_bucket_create_bad_expect_mismatch(): client.meta.events.register('before-call.s3.CreateBucket', add_headers) client.create_bucket(Bucket=bucket_name) -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/expect empty') -@attr(assertion='garbage, but S3 succeeds!') +@pytest.mark.auth_common def test_bucket_create_bad_expect_empty(): headers = {'Expect': ''} _add_header_create_bucket(headers) -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/empty content length') -@attr(assertion='fails 400') +@pytest.mark.auth_common # TODO: The request isn't even making it to the RGW past the frontend # This test had 'fails_on_rgw' before the move to boto3 -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_bucket_create_bad_contentlength_empty(): headers = {'Content-Length': ''} e = _add_header_create_bad_bucket(headers) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/negative content length') -@attr(assertion='fails 400') -@attr('fails_on_mod_proxy_fcgi') +@pytest.mark.auth_common +@pytest.mark.fails_on_mod_proxy_fcgi def test_bucket_create_bad_contentlength_negative(): headers = {'Content-Length': '-1'} e = _add_header_create_bad_bucket(headers) status = _get_status(e.response) - eq(status, 400) + assert status == 400 -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/no content length') -@attr(assertion='succeeds') +@pytest.mark.auth_common # TODO: remove 'fails_on_rgw' and once we have learned how to remove the content-length header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_bucket_create_bad_contentlength_none(): remove = 'Content-Length' _remove_header_create_bucket(remove) -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/empty authorization') -@attr(assertion='fails 403') +@pytest.mark.auth_common # TODO: remove 'fails_on_rgw' and once we have learned how to manipulate the authorization header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_bucket_create_bad_authorization_empty(): headers = {'Authorization': ''} e = _add_header_create_bad_bucket(headers) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@tag('auth_common') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/no authorization') -@attr(assertion='fails 403') +@pytest.mark.auth_common # TODO: remove 'fails_on_rgw' and once we have learned how to manipulate the authorization header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_bucket_create_bad_authorization_none(): e = _remove_header_create_bad_bucket('Authorization') status, error_code = 
_get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/invalid MD5') -@attr(assertion='fails 400') +@pytest.mark.auth_aws2 def test_object_create_bad_md5_invalid_garbage_aws2(): v2_client = get_v2_client() headers = {'Content-MD5': 'AWS HAHAHA'} e = _add_header_create_bad_object(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidDigest') + assert status == 400 + assert error_code == 'InvalidDigest' -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/content length too short') -@attr(assertion='fails 400') +@pytest.mark.auth_aws2 # TODO: remove 'fails_on_rgw' and once we have learned how to manipulate the Content-Length header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_object_create_bad_contentlength_mismatch_below_aws2(): v2_client = get_v2_client() content = 'bar' @@ -520,252 +397,176 @@ def test_object_create_bad_contentlength_mismatch_below_aws2(): headers = {'Content-Length': str(length)} e = _add_header_create_bad_object(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'BadDigest') + assert status == 400 + assert error_code == 'BadDigest' -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/incorrect authorization') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 # TODO: remove 'fails_on_rgw' and once we have learned how to manipulate the authorization header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_object_create_bad_authorization_incorrect_aws2(): v2_client = get_v2_client() headers = {'Authorization': 'AWS AKIAIGR7ZNNBHC5BKSUB:FWeDfwojDSdS2Ztmpfeubhd9isU='} e = _add_header_create_bad_object(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'InvalidDigest') + assert status == 403 + assert error_code == 'InvalidDigest' -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/invalid authorization') -@attr(assertion='fails 400') +@pytest.mark.auth_aws2 # TODO: remove 'fails_on_rgw' and once we have learned how to manipulate the authorization header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_object_create_bad_authorization_invalid_aws2(): v2_client = get_v2_client() headers = {'Authorization': 'AWS HAHAHA'} e = _add_header_create_bad_object(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidArgument') + assert status == 400 + assert error_code == 'InvalidArgument' -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/empty user agent') -@attr(assertion='succeeds') +@pytest.mark.auth_aws2 def test_object_create_bad_ua_empty_aws2(): v2_client = get_v2_client() headers = {'User-Agent': ''} bucket_name, key_name = _add_header_create_object(headers, v2_client) v2_client.put_object(Bucket=bucket_name, Key=key_name, Body='bar') -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no user agent') -@attr(assertion='succeeds') +@pytest.mark.auth_aws2 def test_object_create_bad_ua_none_aws2(): v2_client = get_v2_client() remove = 'User-Agent' bucket_name, key_name = 
_remove_header_create_object(remove, v2_client) v2_client.put_object(Bucket=bucket_name, Key=key_name, Body='bar') -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/invalid date') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 def test_object_create_bad_date_invalid_aws2(): v2_client = get_v2_client() headers = {'x-amz-date': 'Bad Date'} e = _add_header_create_bad_object(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/empty date') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 def test_object_create_bad_date_empty_aws2(): v2_client = get_v2_client() headers = {'x-amz-date': ''} e = _add_header_create_bad_object(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/no date') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 # TODO: remove 'fails_on_rgw' and once we have learned how to remove the date header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_object_create_bad_date_none_aws2(): v2_client = get_v2_client() remove = 'x-amz-date' e = _remove_header_create_bad_object(remove, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/date in past') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 def test_object_create_bad_date_before_today_aws2(): v2_client = get_v2_client() headers = {'x-amz-date': 'Tue, 07 Jul 2010 21:53:04 GMT'} e = _add_header_create_bad_object(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'RequestTimeTooSkewed') + assert status == 403 + assert error_code == 'RequestTimeTooSkewed' -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/date before epoch') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 def test_object_create_bad_date_before_epoch_aws2(): v2_client = get_v2_client() headers = {'x-amz-date': 'Tue, 07 Jul 1950 21:53:04 GMT'} e = _add_header_create_bad_object(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@tag('auth_aws2') -@attr(resource='object') -@attr(method='put') -@attr(operation='create w/date after 9999') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 def test_object_create_bad_date_after_end_aws2(): v2_client = get_v2_client() headers = {'x-amz-date': 'Tue, 07 Jul 9999 21:53:04 GMT'} e = _add_header_create_bad_object(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'RequestTimeTooSkewed') + assert status == 403 + assert error_code == 'RequestTimeTooSkewed' -@tag('auth_aws2') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/invalid authorization') -@attr(assertion='fails 400') +@pytest.mark.auth_aws2 # TODO: remove 'fails_on_rgw' and once we 
have learned how to remove the date header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_bucket_create_bad_authorization_invalid_aws2(): v2_client = get_v2_client() headers = {'Authorization': 'AWS HAHAHA'} e = _add_header_create_bad_bucket(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidArgument') + assert status == 400 + assert error_code == 'InvalidArgument' -@tag('auth_aws2') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/empty user agent') -@attr(assertion='succeeds') +@pytest.mark.auth_aws2 def test_bucket_create_bad_ua_empty_aws2(): v2_client = get_v2_client() headers = {'User-Agent': ''} _add_header_create_bucket(headers, v2_client) -@tag('auth_aws2') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/no user agent') -@attr(assertion='succeeds') +@pytest.mark.auth_aws2 def test_bucket_create_bad_ua_none_aws2(): v2_client = get_v2_client() remove = 'User-Agent' _remove_header_create_bucket(remove, v2_client) -@tag('auth_aws2') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/invalid date') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 def test_bucket_create_bad_date_invalid_aws2(): v2_client = get_v2_client() headers = {'x-amz-date': 'Bad Date'} e = _add_header_create_bad_bucket(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@tag('auth_aws2') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/empty date') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 def test_bucket_create_bad_date_empty_aws2(): v2_client = get_v2_client() headers = {'x-amz-date': ''} e = _add_header_create_bad_bucket(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@tag('auth_aws2') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/no date') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 # TODO: remove 'fails_on_rgw' and once we have learned how to remove the date header -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_bucket_create_bad_date_none_aws2(): v2_client = get_v2_client() remove = 'x-amz-date' e = _remove_header_create_bad_bucket(remove, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@tag('auth_aws2') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/date in past') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 def test_bucket_create_bad_date_before_today_aws2(): v2_client = get_v2_client() headers = {'x-amz-date': 'Tue, 07 Jul 2010 21:53:04 GMT'} e = _add_header_create_bad_bucket(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'RequestTimeTooSkewed') + assert status == 403 + assert error_code == 'RequestTimeTooSkewed' -@tag('auth_aws2') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/date in future') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 def test_bucket_create_bad_date_after_today_aws2(): v2_client = get_v2_client() headers = {'x-amz-date': 'Tue, 07 Jul 2030 21:53:04 GMT'} e = 
_add_header_create_bad_bucket(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'RequestTimeTooSkewed') + assert status == 403 + assert error_code == 'RequestTimeTooSkewed' -@tag('auth_aws2') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/date before epoch') -@attr(assertion='fails 403') +@pytest.mark.auth_aws2 def test_bucket_create_bad_date_before_epoch_aws2(): v2_client = get_v2_client() headers = {'x-amz-date': 'Tue, 07 Jul 1950 21:53:04 GMT'} e = _add_header_create_bad_bucket(headers, v2_client) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' diff --git a/s3tests_boto3/functional/test_iam.py b/s3tests_boto3/functional/test_iam.py index bf7236f..0995f97 100644 --- a/s3tests_boto3/functional/test_iam.py +++ b/s3tests_boto3/functional/test_iam.py @@ -1,12 +1,13 @@ import json from botocore.exceptions import ClientError -from nose.plugins.attrib import attr -from nose.tools import eq_ as eq +import pytest from s3tests_boto3.functional.utils import assert_raises from s3tests_boto3.functional.test_s3 import _multipart_upload from . import ( + configfile, + setup_teardown, get_alt_client, get_iam_client, get_new_bucket, @@ -17,12 +18,8 @@ from . import ( from .utils import _get_status, _get_status_and_error_code -@attr(resource='user-policy') -@attr(method='put') -@attr(operation='Verify Put User Policy') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_put_user_policy(): client = get_iam_client() @@ -35,18 +32,14 @@ def test_put_user_policy(): ) response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='put') -@attr(operation='Verify Put User Policy with invalid user') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_put_user_policy_invalid_user(): client = get_iam_client() @@ -60,15 +53,11 @@ def test_put_user_policy_invalid_user(): e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document, PolicyName='AllAccessPolicy', UserName="some-non-existing-user-id") status = _get_status(e.response) - eq(status, 404) + assert status == 404 -@attr(resource='user-policy') -@attr(method='put') -@attr(operation='Verify Put User Policy using parameter value outside limit') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_put_user_policy_parameter_limit(): client = get_iam_client() @@ -83,16 +72,12 @@ def test_put_user_policy_parameter_limit(): e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document, PolicyName='AllAccessPolicy' * 10, UserName=get_alt_user_id()) status = _get_status(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='user-policy') -@attr(method='put') -@attr(operation='Verify Put User Policy using invalid policy 
document elements') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') -@attr('fails_on_rgw') +@pytest.mark.user_policy +@pytest.mark.test_of_iam +@pytest.mark.fails_on_rgw def test_put_user_policy_invalid_element(): client = get_iam_client() @@ -108,7 +93,7 @@ def test_put_user_policy_invalid_element(): e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document, PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) status = _get_status(e.response) - eq(status, 400) + assert status == 400 # With no Statement policy_document = json.dumps( @@ -119,7 +104,7 @@ def test_put_user_policy_invalid_element(): e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document, PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) status = _get_status(e.response) - eq(status, 400) + assert status == 400 # with same Sid for 2 statements policy_document = json.dumps( @@ -138,7 +123,7 @@ def test_put_user_policy_invalid_element(): e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document, PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) status = _get_status(e.response) - eq(status, 400) + assert status == 400 # with Principal policy_document = json.dumps( @@ -153,15 +138,11 @@ def test_put_user_policy_invalid_element(): e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document, PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) status = _get_status(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='user-policy') -@attr(method='put') -@attr(operation='Verify Put a policy that already exists') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_put_existing_user_policy(): client = get_iam_client() @@ -175,18 +156,14 @@ def test_put_existing_user_policy(): ) response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) -@attr(resource='user-policy') -@attr(method='put') -@attr(operation='Verify List User policies') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_list_user_policy(): client = get_iam_client() @@ -200,31 +177,23 @@ def test_list_user_policy(): ) response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.list_user_policies(UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) -@attr(resource='user-policy') -@attr(method='put') -@attr(operation='Verify List User policies with invalid user') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_list_user_policy_invalid_user(): client = get_iam_client() e = assert_raises(ClientError, 
client.list_user_policies, UserName="some-non-existing-user-id") status = _get_status(e.response) - eq(status, 404) + assert status == 404 -@attr(resource='user-policy') -@attr(method='get') -@attr(operation='Verify Get User policy') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_get_user_policy(): client = get_iam_client() @@ -237,21 +206,17 @@ def test_get_user_policy(): ) response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.get_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='get') -@attr(operation='Verify Get User Policy with invalid user') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_get_user_policy_invalid_user(): client = get_iam_client() @@ -264,21 +229,17 @@ def test_get_user_policy_invalid_user(): ) response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 e = assert_raises(ClientError, client.get_user_policy, PolicyName='AllAccessPolicy', UserName="some-non-existing-user-id") status = _get_status(e.response) - eq(status, 404) + assert status == 404 client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) -@attr(resource='user-policy') -@attr(method='get') -@attr(operation='Verify Get User Policy with invalid policy name') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') -@attr('fails_on_rgw') +@pytest.mark.user_policy +@pytest.mark.test_of_iam +@pytest.mark.fails_on_rgw def test_get_user_policy_invalid_policy_name(): client = get_iam_client() @@ -294,17 +255,13 @@ def test_get_user_policy_invalid_policy_name(): e = assert_raises(ClientError, client.get_user_policy, PolicyName='non-existing-policy-name', UserName=get_alt_user_id()) status = _get_status(e.response) - eq(status, 404) + assert status == 404 client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) -@attr(resource='user-policy') -@attr(method='get') -@attr(operation='Verify Get Deleted User Policy') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') -@attr('fails_on_rgw') +@pytest.mark.user_policy +@pytest.mark.test_of_iam +@pytest.mark.fails_on_rgw def test_get_deleted_user_policy(): client = get_iam_client() @@ -321,15 +278,11 @@ def test_get_deleted_user_policy(): e = assert_raises(ClientError, client.get_user_policy, PolicyName='AllAccessPolicy', UserName=get_alt_user_id()) status = _get_status(e.response) - eq(status, 404) + assert status == 404 -@attr(resource='user-policy') -@attr(method='get') -@attr(operation='Verify Get a policy from multiple policies for a user') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') 
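For reference, the user-policy cases above all reduce to the same round trip against the IAM endpoint: put a named policy document on a user, read it back, and delete it, expecting HTTP 200 at each step. A minimal standalone sketch of that pattern; the endpoint, credentials, and user name below are placeholders rather than values taken from this suite::

    import json
    import boto3

    # Placeholder connection details; the real tests read theirs from S3TEST_CONF.
    iam = boto3.client('iam',
                       endpoint_url='http://localhost:8000',
                       aws_access_key_id='ACCESS_KEY',
                       aws_secret_access_key='SECRET_KEY',
                       region_name='us-east-1')

    policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow", "Action": "*", "Resource": "*"}],
    })

    # put -> get -> delete, checking the status code at each step as the tests do
    resp = iam.put_user_policy(UserName='alt-user', PolicyName='AllAccessPolicy',
                               PolicyDocument=policy)
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200

    resp = iam.get_user_policy(UserName='alt-user', PolicyName='AllAccessPolicy')
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200

    resp = iam.delete_user_policy(UserName='alt-user', PolicyName='AllAccessPolicy')
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200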
+@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_get_user_policy_from_multiple_policies(): client = get_iam_client() @@ -344,29 +297,25 @@ def test_get_user_policy_from_multiple_policies(): response = client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy1', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy2', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.get_user_policy(PolicyName='AllowAccessPolicy2', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.delete_user_policy(PolicyName='AllowAccessPolicy1', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.delete_user_policy(PolicyName='AllowAccessPolicy2', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='delete') -@attr(operation='Verify Delete User Policy') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_delete_user_policy(): client = get_iam_client() @@ -381,18 +330,14 @@ def test_delete_user_policy(): response = client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.delete_user_policy(PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='delete') -@attr(operation='Verify Delete User Policy with invalid user') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_delete_user_policy_invalid_user(): client = get_iam_client() @@ -407,22 +352,18 @@ def test_delete_user_policy_invalid_user(): response = client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 e = assert_raises(ClientError, client.delete_user_policy, PolicyName='AllAccessPolicy', UserName="some-non-existing-user-id") status = _get_status(e.response) - eq(status, 404) + assert status == 404 response = client.delete_user_policy(PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='delete') -@attr(operation='Verify Delete User Policy with invalid policy name') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_delete_user_policy_invalid_policy_name(): client = get_iam_client() @@ 
-437,22 +378,18 @@ def test_delete_user_policy_invalid_policy_name(): response = client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 e = assert_raises(ClientError, client.delete_user_policy, PolicyName='non-existing-policy-name', UserName=get_alt_user_id()) status = _get_status(e.response) - eq(status, 404) + assert status == 404 response = client.delete_user_policy(PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='delete') -@attr(operation='Verify Delete multiple User policies for a user') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_delete_user_policy_from_multiple_policies(): client = get_iam_client() @@ -467,36 +404,32 @@ def test_delete_user_policy_from_multiple_policies(): response = client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy1', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy2', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy3', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.delete_user_policy(PolicyName='AllowAccessPolicy1', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.delete_user_policy(PolicyName='AllowAccessPolicy2', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.get_user_policy(PolicyName='AllowAccessPolicy3', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.delete_user_policy(PolicyName='AllowAccessPolicy3', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='s3 Actions') -@attr(operation='Verify Allow Bucket Actions in user Policy') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_allow_bucket_actions_in_user_policy(): client = get_iam_client() s3_client_alt = get_alt_client() @@ -515,7 +448,7 @@ def test_allow_bucket_actions_in_user_policy(): response = client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = s3_client_alt.list_objects(Bucket=bucket) object_found = False @@ -527,10 +460,10 @@ def 
test_allow_bucket_actions_in_user_policy(): raise AssertionError("Object is not listed") response = s3_client_iam.delete_object(Bucket=bucket, Key='foo') - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response = s3_client_alt.delete_bucket(Bucket=bucket) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response = s3_client_iam.list_buckets() for bucket in response['Buckets']: @@ -539,16 +472,11 @@ def test_allow_bucket_actions_in_user_policy(): response = client.delete_user_policy(PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='s3 Actions') -@attr(operation='Verify Deny Bucket Actions in user Policy') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') -@attr('fails_on_dbstore') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_deny_bucket_actions_in_user_policy(): client = get_iam_client() s3_client = get_alt_client() @@ -565,29 +493,25 @@ def test_deny_bucket_actions_in_user_policy(): response = client.put_user_policy(PolicyDocument=policy_document_deny, PolicyName='DenyAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 e = assert_raises(ClientError, s3_client.list_buckets, Bucket=bucket) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' e = assert_raises(ClientError, s3_client.delete_bucket, Bucket=bucket) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' response = client.delete_user_policy(PolicyName='DenyAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = s3_client.delete_bucket(Bucket=bucket) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 -@attr(resource='user-policy') -@attr(method='s3 Actions') -@attr(operation='Verify Allow Object Actions in user Policy') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_allow_object_actions_in_user_policy(): client = get_iam_client() s3_client_alt = get_alt_client() @@ -603,35 +527,30 @@ def test_allow_object_actions_in_user_policy(): ) response = client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client_alt.put_object(Bucket=bucket, Key='foo', Body='bar') response = s3_client_alt.get_object(Bucket=bucket, Key='foo') body = response['Body'].read() if type(body) is bytes: body = body.decode() - eq(body, "bar") + assert body == "bar" response = s3_client_alt.delete_object(Bucket=bucket, Key='foo') - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 e = assert_raises(ClientError, s3_client_iam.get_object, 
Bucket=bucket, Key='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchKey') + assert status == 404 + assert error_code == 'NoSuchKey' response = s3_client_iam.delete_bucket(Bucket=bucket) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response = client.delete_user_policy(PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='s3 Actions') -@attr(operation='Verify Deny Object Actions in user Policy') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') -@attr('fails_on_dbstore') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_deny_object_actions_in_user_policy(): client = get_iam_client() s3_client_alt = get_alt_client() @@ -653,28 +572,24 @@ def test_deny_object_actions_in_user_policy(): e = assert_raises(ClientError, s3_client_alt.put_object, Bucket=bucket, Key='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' e = assert_raises(ClientError, s3_client_alt.get_object, Bucket=bucket, Key='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' e = assert_raises(ClientError, s3_client_alt.delete_object, Bucket=bucket, Key='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' response = client.delete_user_policy(PolicyName='DenyAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='s3 Actions') -@attr(operation='Verify Allow Multipart Actions in user Policy') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_allow_multipart_actions_in_user_policy(): client = get_iam_client() s3_client_alt = get_alt_client() @@ -690,31 +605,26 @@ def test_allow_multipart_actions_in_user_policy(): ) response = client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 key = "mymultipart" mb = 1024 * 1024 (upload_id, _, _) = _multipart_upload(client=s3_client_iam, bucket_name=bucket, key=key, size=5 * mb) response = s3_client_alt.list_multipart_uploads(Bucket=bucket) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = s3_client_alt.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response = s3_client_iam.delete_bucket(Bucket=bucket) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response = client.delete_user_policy(PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - 
eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='s3 Actions') -@attr(operation='Verify Deny Multipart Actions in user Policy') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') -@attr('fails_on_dbstore') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_deny_multipart_actions_in_user_policy(): client = get_iam_client() s3_client = get_alt_client() @@ -730,7 +640,7 @@ def test_deny_multipart_actions_in_user_policy(): response = client.put_user_policy(PolicyDocument=policy_document_deny, PolicyName='DenyAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 key = "mymultipart" mb = 1024 * 1024 @@ -739,29 +649,24 @@ def test_deny_multipart_actions_in_user_policy(): e = assert_raises(ClientError, s3_client.list_multipart_uploads, Bucket=bucket) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' e = assert_raises(ClientError, s3_client.abort_multipart_upload, Bucket=bucket, Key=key, UploadId=upload_id) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' response = s3_client.delete_bucket(Bucket=bucket) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response = client.delete_user_policy(PolicyName='DenyAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='s3 Actions') -@attr(operation='Verify Allow Tagging Actions in user Policy') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') -@attr('fails_on_dbstore') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_allow_tagging_actions_in_user_policy(): client = get_iam_client() s3_client_alt = get_alt_client() @@ -781,37 +686,32 @@ def test_allow_tagging_actions_in_user_policy(): tags = {'TagSet': [{'Key': 'Hello', 'Value': 'World'}, ]} response = s3_client_alt.put_bucket_tagging(Bucket=bucket, Tagging=tags) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = s3_client_alt.get_bucket_tagging(Bucket=bucket) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) - eq(response['TagSet'][0]['Key'], 'Hello') - eq(response['TagSet'][0]['Value'], 'World') + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + assert response['TagSet'][0]['Key'] == 'Hello' + assert response['TagSet'][0]['Value'] == 'World' obj_key = 'obj' response = s3_client_iam.put_object(Bucket=bucket, Key=obj_key, Body='obj_body') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = s3_client_alt.put_object_tagging(Bucket=bucket, Key=obj_key, Tagging=tags) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = s3_client_alt.get_object_tagging(Bucket=bucket, Key=obj_key) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) - eq(response['TagSet'], tags['TagSet']) + assert 
response['ResponseMetadata']['HTTPStatusCode'] == 200 + assert response['TagSet'] == tags['TagSet'] response = s3_client_iam.delete_object(Bucket=bucket, Key=obj_key) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response = s3_client_iam.delete_bucket(Bucket=bucket) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response = client.delete_user_policy(PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='s3 Actions') -@attr(operation='Verify Deny Tagging Actions in user Policy') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') -@attr('fails_on_dbstore') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_deny_tagging_actions_in_user_policy(): client = get_iam_client() s3_client = get_alt_client() @@ -831,43 +731,38 @@ def test_deny_tagging_actions_in_user_policy(): e = assert_raises(ClientError, s3_client.put_bucket_tagging, Bucket=bucket, Tagging=tags) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' e = assert_raises(ClientError, s3_client.get_bucket_tagging, Bucket=bucket) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' obj_key = 'obj' response = s3_client.put_object(Bucket=bucket, Key=obj_key, Body='obj_body') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 e = assert_raises(ClientError, s3_client.put_object_tagging, Bucket=bucket, Key=obj_key, Tagging=tags) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' e = assert_raises(ClientError, s3_client.delete_object_tagging, Bucket=bucket, Key=obj_key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' response = s3_client.delete_object(Bucket=bucket, Key=obj_key) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response = s3_client.delete_bucket(Bucket=bucket) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response = client.delete_user_policy(PolicyName='DenyAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='put') -@attr(operation='Verify conflicting user policy statements') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') -@attr('fails_on_dbstore') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_verify_conflicting_user_policy_statements(): s3client = get_alt_client() bucket = get_new_bucket(client=s3client) @@ -887,23 +782,18 @@ def test_verify_conflicting_user_policy_statements(): client = get_iam_client() response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='DenyAccessPolicy', 
UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 e = assert_raises(ClientError, s3client.list_objects, Bucket=bucket) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' response = client.delete_user_policy(PolicyName='DenyAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(method='put') -@attr(operation='Verify conflicting user policies') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') -@attr('fails_on_dbstore') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_verify_conflicting_user_policies(): s3client = get_alt_client() bucket = get_new_bucket(client=s3client) @@ -924,27 +814,24 @@ def test_verify_conflicting_user_policies(): client = get_iam_client() response = client.put_user_policy(PolicyDocument=policy_allow, PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.put_user_policy(PolicyDocument=policy_deny, PolicyName='DenyAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 e = assert_raises(ClientError, s3client.list_objects, Bucket=bucket) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' response = client.delete_user_policy(PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.delete_user_policy(PolicyName='DenyAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='user-policy') -@attr(operation='Verify Allow Actions for IAM user policies') -@attr(assertion='succeeds') -@attr('user-policy') -@attr('test_of_iam') +@pytest.mark.user_policy +@pytest.mark.test_of_iam def test_verify_allow_iam_actions(): policy1 = json.dumps( {"Version": "2012-10-17", @@ -959,12 +846,12 @@ def test_verify_allow_iam_actions(): response = client1.put_user_policy(PolicyDocument=policy1, PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = iam_client_alt.get_user_policy(PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = iam_client_alt.list_user_policies(UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = iam_client_alt.delete_user_policy(PolicyName='AllowAccessPolicy', UserName=get_alt_user_id()) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 diff --git a/s3tests_boto3/functional/test_s3.py 
b/s3tests_boto3/functional/test_s3.py index 20ae4f1..5b77e2f 100644 --- a/s3tests_boto3/functional/test_s3.py +++ b/s3tests_boto3/functional/test_s3.py @@ -2,9 +2,6 @@ import boto3 import botocore.session from botocore.exceptions import ClientError from botocore.exceptions import ParamValidationError -from nose.tools import eq_ as eq -from nose.plugins.attrib import attr -from nose.plugins.skip import SkipTest import isodate import email.utils import datetime @@ -20,7 +17,7 @@ import hashlib import xml.etree.ElementTree as ET import time import operator -import nose +import pytest import os import string import random @@ -41,6 +38,8 @@ from .utils import _get_status from .policy import Policy, Statement, make_json_policy from . import ( + configfile, + setup_teardown, get_client, get_prefix, get_unauthenticated_client, @@ -93,26 +92,18 @@ def _bucket_is_empty(bucket): break return is_empty -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='empty buckets return no contents') def test_bucket_list_empty(): bucket = get_new_bucket_resource() is_empty = _bucket_is_empty(bucket) - eq(is_empty, True) + assert is_empty == True -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='distinct buckets have different contents') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_list_distinct(): bucket1 = get_new_bucket_resource() bucket2 = get_new_bucket_resource() obj = bucket1.put_object(Body='str', Key='asdf') is_empty = _bucket_is_empty(bucket2) - eq(is_empty, True) + assert is_empty == True def _create_objects(bucket=None, bucket_name=None, keys=[]): """ @@ -149,54 +140,42 @@ def _get_prefixes(response): prefixes = [prefix['Prefix'] for prefix in prefix_list] return prefixes -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys') -@attr(assertion='pagination w/max_keys=2, no marker') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_bucket_list_many(): bucket_name = _create_objects(keys=['foo', 'bar', 'baz']) client = get_client() response = client.list_objects(Bucket=bucket_name, MaxKeys=2) keys = _get_keys(response) - eq(len(keys), 2) - eq(keys, ['bar', 'baz']) - eq(response['IsTruncated'], True) + assert len(keys) == 2 + assert keys == ['bar', 'baz'] + assert response['IsTruncated'] == True response = client.list_objects(Bucket=bucket_name, Marker='baz',MaxKeys=2) keys = _get_keys(response) - eq(len(keys), 1) - eq(response['IsTruncated'], False) - eq(keys, ['foo']) + assert len(keys) == 1 + assert response['IsTruncated'] == False + assert keys == ['foo'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys') -@attr(assertion='pagination w/max_keys=2, no marker') -@attr('list-objects-v2') -@attr('fails_on_dbstore') +@pytest.mark.list_objects_v2 +@pytest.mark.fails_on_dbstore def test_bucket_listv2_many(): bucket_name = _create_objects(keys=['foo', 'bar', 'baz']) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=2) keys = _get_keys(response) - eq(len(keys), 2) - eq(keys, ['bar', 'baz']) - eq(response['IsTruncated'], True) + assert len(keys) == 2 + assert keys == ['bar', 'baz'] + assert response['IsTruncated'] == True response = client.list_objects_v2(Bucket=bucket_name, StartAfter='baz',MaxKeys=2) keys = _get_keys(response) - eq(len(keys), 1) - eq(response['IsTruncated'], False) - eq(keys, ['foo']) + assert len(keys) == 1 + assert response['IsTruncated'] == False + assert keys == ['foo'] 
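A note on the v1/v2 pairs that follow: the two listing APIs paginate differently. ListObjects resumes from Marker (a key name), while ListObjectsV2 accepts StartAfter for the first page and an opaque ContinuationToken thereafter. A small sketch of a complete v2 listing loop, assuming an already-configured boto3 client such as the one returned by the suite's get_client()::

    def list_all_keys_v2(client, bucket_name, page_size=2):
        """Drain a ListObjectsV2 listing by following continuation tokens."""
        keys = []
        params = {'Bucket': bucket_name, 'MaxKeys': page_size}
        while True:
            response = client.list_objects_v2(**params)
            keys += [obj['Key'] for obj in response.get('Contents', [])]
            if not response['IsTruncated']:
                return keys
            # Unlike v1's Marker, the continuation token is opaque and is not
            # itself a key name.
            params['ContinuationToken'] = response['NextContinuationToken']

Against the three-key bucket above this returns ['bar', 'baz', 'foo'] across two pages.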
-@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='keycount in listobjectsv2') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_basic_key_count(): client = get_client() bucket_names = [] @@ -205,80 +184,63 @@ def test_basic_key_count(): for j in range(5): client.put_object(Bucket=bucket_name, Key=str(j)) response1 = client.list_objects_v2(Bucket=bucket_name) - eq(response1['KeyCount'], 5) + assert response1['KeyCount'] == 5 -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='prefixes in multi-component object names') def test_bucket_list_delimiter_basic(): bucket_name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf']) client = get_client() response = client.list_objects(Bucket=bucket_name, Delimiter='/') - eq(response['Delimiter'], '/') + assert response['Delimiter'] == '/' keys = _get_keys(response) - eq(keys, ['asdf']) + assert keys == ['asdf'] prefixes = _get_prefixes(response) - eq(len(prefixes), 2) - eq(prefixes, ['foo/', 'quux/']) + assert len(prefixes) == 2 + assert prefixes == ['foo/', 'quux/'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='prefixes in multi-component object names') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_delimiter_basic(): bucket_name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf']) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/') - eq(response['Delimiter'], '/') + assert response['Delimiter'] == '/' keys = _get_keys(response) - eq(keys, ['asdf']) + assert keys == ['asdf'] prefixes = _get_prefixes(response) - eq(len(prefixes), 2) - eq(prefixes, ['foo/', 'quux/']) - eq(response['KeyCount'], len(prefixes) + len(keys)) + assert len(prefixes) == 2 + assert prefixes == ['foo/', 'quux/'] + assert response['KeyCount'] == len(prefixes) + len(keys) -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='test url encoding') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_encoding_basic(): bucket_name = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b']) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/', EncodingType='url') - eq(response['Delimiter'], '/') + assert response['Delimiter'] == '/' keys = _get_keys(response) - eq(keys, ['asdf%2Bb']) + assert keys == ['asdf%2Bb'] prefixes = _get_prefixes(response) - eq(len(prefixes), 3) - eq(prefixes, ['foo%2B1/', 'foo/', 'quux%20ab/']) + assert len(prefixes) == 3 + assert prefixes == ['foo%2B1/', 'foo/', 'quux%20ab/'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='test url encoding') -@attr('list-objects') def test_bucket_list_encoding_basic(): bucket_name = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b']) client = get_client() response = client.list_objects(Bucket=bucket_name, Delimiter='/', EncodingType='url') - eq(response['Delimiter'], '/') + assert response['Delimiter'] == '/' keys = _get_keys(response) - eq(keys, ['asdf%2Bb']) + assert keys == ['asdf%2Bb'] prefixes = _get_prefixes(response) - eq(len(prefixes), 3) - eq(prefixes, ['foo%2B1/', 'foo/', 'quux%20ab/']) + assert len(prefixes) == 3 + assert prefixes == ['foo%2B1/', 'foo/', 'quux%20ab/'] def validate_bucket_list(bucket_name, prefix, delimiter, marker, max_keys, @@ -286,18 +248,18 @@ def 
validate_bucket_list(bucket_name, prefix, delimiter, marker, max_keys, client = get_client() response = client.list_objects(Bucket=bucket_name, Delimiter=delimiter, Marker=marker, MaxKeys=max_keys, Prefix=prefix) - eq(response['IsTruncated'], is_truncated) + assert response['IsTruncated'] == is_truncated if 'NextMarker' not in response: response['NextMarker'] = None - eq(response['NextMarker'], next_marker) + assert response['NextMarker'] == next_marker keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(len(keys), len(check_objs)) - eq(len(prefixes), len(check_prefixes)) - eq(keys, check_objs) - eq(prefixes, check_prefixes) + assert len(keys) == len(check_objs) + assert len(prefixes) == len(check_prefixes) + assert keys == check_objs + assert prefixes == check_prefixes return response['NextMarker'] @@ -311,28 +273,24 @@ def validate_bucket_listv2(bucket_name, prefix, delimiter, continuation_token, m else: params['StartAfter'] = '' response = client.list_objects_v2(**params) - eq(response['IsTruncated'], is_truncated) + assert response['IsTruncated'] == is_truncated if 'NextContinuationToken' not in response: response['NextContinuationToken'] = None if last: - eq(response['NextContinuationToken'], None) + assert response['NextContinuationToken'] == None keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(len(keys), len(check_objs)) - eq(len(prefixes), len(check_prefixes)) - eq(keys, check_objs) - eq(prefixes, check_prefixes) + assert len(keys) == len(check_objs) + assert len(prefixes) == len(check_prefixes) + assert keys == check_objs + assert prefixes == check_prefixes return response['NextContinuationToken'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='prefixes in multi-component object names') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_bucket_list_delimiter_prefix(): bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla']) @@ -354,12 +312,8 @@ def test_bucket_list_delimiter_prefix(): marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, False, ['boo/bar'], ['boo/baz/'], None) -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='prefixes in multi-component object names') -@attr('list-objects-v2') -@attr('fails_on_dbstore') +@pytest.mark.list_objects_v2 +@pytest.mark.fails_on_dbstore def test_bucket_listv2_delimiter_prefix(): bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla']) @@ -382,68 +336,49 @@ def test_bucket_listv2_delimiter_prefix(): continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, False, ['boo/bar'], ['boo/baz/'], last=True) -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='prefix and delimiter handling when object ends with delimiter') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_delimiter_prefix_ends_with_delimiter(): bucket_name = _create_objects(keys=['asdf/']) validate_bucket_listv2(bucket_name, 'asdf/', '/', None, 1000, False, ['asdf/'], [], last=True) -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='prefix and delimiter handling when object ends with delimiter') def test_bucket_list_delimiter_prefix_ends_with_delimiter(): bucket_name = _create_objects(keys=['asdf/']) validate_bucket_list(bucket_name, 'asdf/', '/', '', 1000, False, ['asdf/'], [], None) -@attr(resource='bucket') 
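Every delimiter test below exercises one rule: a key that still contains the delimiter after the requested prefix is rolled up into CommonPrefixes (truncated just past the first delimiter occurrence), and only the remaining keys appear in Contents. A sketch of that split, assuming an already-configured client::

    def split_listing(client, bucket_name, delimiter='/'):
        """Return (keys, prefixes) the way _get_keys/_get_prefixes do."""
        response = client.list_objects(Bucket=bucket_name, Delimiter=delimiter)
        keys = [obj['Key'] for obj in response.get('Contents', [])]
        prefixes = [p['Prefix'] for p in response.get('CommonPrefixes', [])]
        return keys, prefixes

    # With keys ['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'] and the '/'
    # delimiter this yields (['asdf'], ['foo/', 'quux/']).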
-@attr(method='get') -@attr(operation='list') -@attr(assertion='non-slash delimiter characters') def test_bucket_list_delimiter_alt(): bucket_name = _create_objects(keys=['bar', 'baz', 'cab', 'foo']) client = get_client() response = client.list_objects(Bucket=bucket_name, Delimiter='a') - eq(response['Delimiter'], 'a') + assert response['Delimiter'] == 'a' keys = _get_keys(response) # foo contains no 'a' and so is a complete key - eq(keys, ['foo']) + assert keys == ['foo'] # bar, baz, and cab should be broken up by the 'a' delimiters prefixes = _get_prefixes(response) - eq(len(prefixes), 2) - eq(prefixes, ['ba', 'ca']) + assert len(prefixes) == 2 + assert prefixes == ['ba', 'ca'] -@attr(resource='bucket') -@attr(method='get') -@attr(assertion='non-slash delimiter characters') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_delimiter_alt(): bucket_name = _create_objects(keys=['bar', 'baz', 'cab', 'foo']) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a') - eq(response['Delimiter'], 'a') + assert response['Delimiter'] == 'a' keys = _get_keys(response) # foo contains no 'a' and so is a complete key - eq(keys, ['foo']) + assert keys == ['foo'] # bar, baz, and cab should be broken up by the 'a' delimiters prefixes = _get_prefixes(response) - eq(len(prefixes), 2) - eq(prefixes, ['ba', 'ca']) + assert len(prefixes) == 2 + assert prefixes == ['ba', 'ca'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='prefixes starting with underscore') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_bucket_list_delimiter_prefix_underscore(): bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla']) @@ -464,12 +399,8 @@ def test_bucket_list_delimiter_prefix_underscore(): marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, False, ['_under1/bar'], ['_under1/baz/'], None) -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='prefixes starting with underscore') -@attr('list-objects-v2') -@attr('fails_on_dbstore') +@pytest.mark.list_objects_v2 +@pytest.mark.fails_on_dbstore def test_bucket_listv2_delimiter_prefix_underscore(): bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla']) @@ -491,158 +422,126 @@ def test_bucket_listv2_delimiter_prefix_underscore(): continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, False, ['_under1/bar'], ['_under1/baz/'], last=True) -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='percentage delimiter characters') def test_bucket_list_delimiter_percentage(): bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo']) client = get_client() response = client.list_objects(Bucket=bucket_name, Delimiter='%') - eq(response['Delimiter'], '%') + assert response['Delimiter'] == '%' keys = _get_keys(response) # foo contains no '%' and so is a complete key - eq(keys, ['foo']) + assert keys == ['foo'] prefixes = _get_prefixes(response) - eq(len(prefixes), 2) + assert len(prefixes) == 2 # b%ar, b%az, and c%ab should be broken up by the '%' delimiter - eq(prefixes, ['b%', 'c%']) + assert prefixes == ['b%', 'c%'] -@attr(resource='bucket') -@attr(method='get') -@attr(assertion='percentage delimiter characters') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_delimiter_percentage(): bucket_name =
_create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo']) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Delimiter='%') - eq(response['Delimiter'], '%') + assert response['Delimiter'] == '%' keys = _get_keys(response) # foo contains no '%' and so is a complete key - eq(keys, ['foo']) + assert keys == ['foo'] prefixes = _get_prefixes(response) - eq(len(prefixes), 2) + assert len(prefixes) == 2 # b%ar, b%az, and c%ab should be broken up by the '%' delimiter - eq(prefixes, ['b%', 'c%']) + assert prefixes == ['b%', 'c%'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='whitespace delimiter characters') def test_bucket_list_delimiter_whitespace(): bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo']) client = get_client() response = client.list_objects(Bucket=bucket_name, Delimiter=' ') - eq(response['Delimiter'], ' ') + assert response['Delimiter'] == ' ' keys = _get_keys(response) # foo contains no whitespace and so is a complete key - eq(keys, ['foo']) + assert keys == ['foo'] prefixes = _get_prefixes(response) - eq(len(prefixes), 2) + assert len(prefixes) == 2 # 'b ar', 'b az', and 'c ab' should be broken up by the ' ' delimiter - eq(prefixes, ['b ', 'c ']) + assert prefixes == ['b ', 'c '] -@attr(resource='bucket') -@attr(method='get') -@attr(assertion='whitespace delimiter characters') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_delimiter_whitespace(): bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo']) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Delimiter=' ') - eq(response['Delimiter'], ' ') + assert response['Delimiter'] == ' ' keys = _get_keys(response) # foo contains no whitespace and so is a complete key - eq(keys, ['foo']) + assert keys == ['foo'] prefixes = _get_prefixes(response) - eq(len(prefixes), 2) + assert len(prefixes) == 2 # 'b ar', 'b az', and 'c ab' should be broken up by the ' ' delimiter - eq(prefixes, ['b ', 'c ']) + assert prefixes == ['b ', 'c '] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='dot delimiter characters') def test_bucket_list_delimiter_dot(): bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo']) client = get_client() response = client.list_objects(Bucket=bucket_name, Delimiter='.') - eq(response['Delimiter'], '.') + assert response['Delimiter'] == '.' keys = _get_keys(response) # foo contains no '.' and so is a complete key - eq(keys, ['foo']) + assert keys == ['foo'] prefixes = _get_prefixes(response) - eq(len(prefixes), 2) + assert len(prefixes) == 2 # b.ar, b.az, and c.ab should be broken up by the '.' delimiter - eq(prefixes, ['b.', 'c.']) + assert prefixes == ['b.', 'c.'] -@attr(resource='bucket') -@attr(method='get') -@attr(assertion='dot delimiter characters') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_delimiter_dot(): bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo']) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Delimiter='.') - eq(response['Delimiter'], '.') + assert response['Delimiter'] == '.'
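An aside on the decorators used throughout this conversion: pytest warns about (and, under --strict-markers, rejects) marks it does not recognize, so the mark names above are expected to be registered in the project configuration. An illustrative pytest.ini fragment; the suite's actual file and descriptions may differ::

    [pytest]
    markers =
        auth_aws2: AWS v2 signature tests
        fails_on_rgw: not expected to pass on RGW
        fails_on_dbstore: not expected to pass on dbstore
        list_objects_v2: ListObjectsV2 variants of the listing tests
        user_policy: IAM user-policy tests
        test_of_iam: IAM API tests

Filtering then works through expressions such as -m 'not fails_on_rgw', as in the invocations shown earlier.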
keys = _get_keys(response) # foo contains no '.' and so is a complete key - eq(keys, ['foo']) + assert keys == ['foo'] prefixes = _get_prefixes(response) - eq(len(prefixes), 2) + assert len(prefixes) == 2 # b.ar, b.az, and c.ab should be broken up by the '.' delimiter - eq(prefixes, ['b.', 'c.']) + assert prefixes == ['b.', 'c.'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='non-printable delimiter can be specified') def test_bucket_list_delimiter_unreadable(): key_names=['bar', 'baz', 'cab', 'foo'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, Delimiter='\x0a') - eq(response['Delimiter'], '\x0a') + assert response['Delimiter'] == '\x0a' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names) - eq(prefixes, []) + assert keys == key_names + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(assertion='non-printable delimiter can be specified') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_delimiter_unreadable(): key_names=['bar', 'baz', 'cab', 'foo'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Delimiter='\x0a') - eq(response['Delimiter'], '\x0a') + assert response['Delimiter'] == '\x0a' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names) - eq(prefixes, []) + assert keys == key_names + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='empty delimiter can be specified') def test_bucket_list_delimiter_empty(): key_names = ['bar', 'baz', 'cab', 'foo'] bucket_name = _create_objects(keys=key_names) @@ -650,17 +549,14 @@ def test_bucket_list_delimiter_empty(): response = client.list_objects(Bucket=bucket_name, Delimiter='') # putting an empty value into Delimiter will not return a value in the response - eq('Delimiter' in response, False) + assert not 'Delimiter' in response keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names) - eq(prefixes, []) + assert keys == key_names + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(assertion='empty delimiter can be specified') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_delimiter_empty(): key_names = ['bar', 'baz', 'cab', 'foo'] bucket_name = _create_objects(keys=key_names) @@ -668,17 +564,13 @@ def test_bucket_listv2_delimiter_empty(): response = client.list_objects_v2(Bucket=bucket_name, Delimiter='') # putting an empty value into Delimiter will not return a value in the response - eq('Delimiter' in response, False) + assert not 'Delimiter' in response keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names) - eq(prefixes, []) + assert keys == key_names + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='unspecified delimiter defaults to none') def test_bucket_list_delimiter_none(): key_names = ['bar', 'baz', 'cab', 'foo'] bucket_name = _create_objects(keys=key_names) @@ -686,17 +578,14 @@ def test_bucket_list_delimiter_none(): response = client.list_objects(Bucket=bucket_name) # when no Delimiter is specified, none is returned in the response - eq('Delimiter' in response, False) + assert not 'Delimiter' in response keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names)
- eq(prefixes, []) + assert keys == key_names + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(assertion='unspecified delimiter defaults to none') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_delimiter_none(): key_names = ['bar', 'baz', 'cab', 'foo'] bucket_name = _create_objects(keys=key_names) @@ -704,14 +593,14 @@ def test_bucket_listv2_delimiter_none(): response = client.list_objects_v2(Bucket=bucket_name) # when no Delimiter is specified, none is returned in the response - eq('Delimiter' in response, False) + assert not 'Delimiter' in response keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names) - eq(prefixes, []) + assert keys == key_names + assert prefixes == [] -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_fetchowner_notempty(): key_names = ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) @@ -719,9 +608,9 @@ def test_bucket_listv2_fetchowner_notempty(): response = client.list_objects_v2(Bucket=bucket_name, FetchOwner=True) objs_list = response['Contents'] - eq('Owner' in objs_list[0], True) + assert 'Owner' in objs_list[0] -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_fetchowner_defaultempty(): key_names = ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) @@ -729,9 +618,9 @@ def test_bucket_listv2_fetchowner_defaultempty(): response = client.list_objects_v2(Bucket=bucket_name) objs_list = response['Contents'] - eq('Owner' in objs_list[0], False) + assert not 'Owner' in objs_list[0] -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_fetchowner_empty(): key_names = ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) @@ -739,15 +628,8 @@ def test_bucket_listv2_fetchowner_empty(): response = client.list_objects_v2(Bucket=bucket_name, FetchOwner=False) objs_list = response['Contents'] - eq('Owner' in objs_list[0], False) + assert not 'Owner' in objs_list[0] - - - -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list') -@attr(assertion='unused delimiter is not found') def test_bucket_list_delimiter_not_exist(): key_names = ['bar', 'baz', 'cab', 'foo'] bucket_name = _create_objects(keys=key_names) @@ -755,17 +637,14 @@ def test_bucket_list_delimiter_not_exist(): response = client.list_objects(Bucket=bucket_name, Delimiter='/') # an unused delimiter is still returned in the response - eq(response['Delimiter'], '/') + assert response['Delimiter'] == '/' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names) - eq(prefixes, []) + assert keys == key_names + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(assertion='unused delimiter is not found') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_delimiter_not_exist(): key_names = ['bar', 'baz', 'cab', 'foo'] bucket_name = _create_objects(keys=key_names) @@ -773,19 +652,15 @@ def test_bucket_listv2_delimiter_not_exist(): response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/') # an unused delimiter is still returned in the response - eq(response['Delimiter'], '/') + assert response['Delimiter'] == '/' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names) - eq(prefixes, []) + assert keys == key_names + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get')
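The three FetchOwner tests above pin down a v2-specific default: ListObjectsV2 leaves the Owner field out of each Contents entry unless FetchOwner=True is passed, while plain ListObjects includes it. A compact probe of that behaviour, assuming a configured client::

    def first_entry_has_owner(client, bucket_name, fetch_owner):
        """Check whether Owner is present on the first listed object."""
        response = client.list_objects_v2(Bucket=bucket_name,
                                          FetchOwner=fetch_owner)
        return 'Owner' in response['Contents'][0]

    # first_entry_has_owner(client, bucket_name, True)   -> True
    # first_entry_has_owner(client, bucket_name, False)  -> False (the default)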
-@attr(operation='list') -@attr(assertion='list with delimiter not skip special keys') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_bucket_list_delimiter_not_skip_special(): key_names = ['0/'] + ['0/%s' % i for i in range(1000, 1999)] key_names2 = ['1999', '1999#', '1999+', '2000'] @@ -794,298 +669,234 @@ def test_bucket_list_delimiter_not_skip_special(): client = get_client() response = client.list_objects(Bucket=bucket_name, Delimiter='/') - eq(response['Delimiter'], '/') + assert response['Delimiter'] == '/' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names2) - eq(prefixes, ['0/']) + assert keys == key_names2 + assert prefixes == ['0/'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix') -@attr(assertion='returns only objects under prefix') def test_bucket_list_prefix_basic(): key_names = ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, Prefix='foo/') - eq(response['Prefix'], 'foo/') + assert response['Prefix'] == 'foo/' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, ['foo/bar', 'foo/baz']) - eq(prefixes, []) + assert keys == ['foo/bar', 'foo/baz'] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix with list-objects-v2') -@attr(assertion='returns only objects under prefix') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_prefix_basic(): key_names = ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Prefix='foo/') - eq(response['Prefix'], 'foo/') + assert response['Prefix'] == 'foo/' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, ['foo/bar', 'foo/baz']) - eq(prefixes, []) + assert keys == ['foo/bar', 'foo/baz'] + assert prefixes == [] # just testing that we can do the delimiter and prefix logic on non-slashes -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix') -@attr(assertion='prefixes w/o delimiters') def test_bucket_list_prefix_alt(): key_names = ['bar', 'baz', 'foo'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, Prefix='ba') - eq(response['Prefix'], 'ba') + assert response['Prefix'] == 'ba' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, ['bar', 'baz']) - eq(prefixes, []) + assert keys == ['bar', 'baz'] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix with list-objects-v2') -@attr(assertion='prefixes w/o delimiters') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_prefix_alt(): key_names = ['bar', 'baz', 'foo'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Prefix='ba') - eq(response['Prefix'], 'ba') + assert response['Prefix'] == 'ba' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, ['bar', 'baz']) - eq(prefixes, []) + assert keys == ['bar', 'baz'] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix') -@attr(assertion='empty prefix returns everything') def test_bucket_list_prefix_empty(): key_names = ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) client =
get_client() response = client.list_objects(Bucket=bucket_name, Prefix='') - eq(response['Prefix'], '') + assert response['Prefix'] == '' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names) - eq(prefixes, []) + assert keys == key_names + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix with list-objects-v2') -@attr(assertion='empty prefix returns everything') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_prefix_empty(): key_names = ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Prefix='') - eq(response['Prefix'], '') + assert response['Prefix'] == '' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names) - eq(prefixes, []) + assert keys == key_names + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix') -@attr(assertion='unspecified prefix returns everything') def test_bucket_list_prefix_none(): key_names = ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, Prefix='') - eq(response['Prefix'], '') + assert response['Prefix'] == '' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names) - eq(prefixes, []) + assert keys == key_names + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix with list-objects-v2') -@attr(assertion='unspecified prefix returns everything') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_prefix_none(): key_names = ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Prefix='') - eq(response['Prefix'], '') + assert response['Prefix'] == '' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, key_names) - eq(prefixes, []) + assert keys == key_names + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix') -@attr(assertion='nonexistent prefix returns nothing') def test_bucket_list_prefix_not_exist(): key_names = ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, Prefix='d') - eq(response['Prefix'], 'd') + assert response['Prefix'] == 'd' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, []) - eq(prefixes, []) + assert keys == [] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix with list-objects-v2') -@attr(assertion='nonexistent prefix returns nothing') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_prefix_not_exist(): key_names = ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Prefix='d') - eq(response['Prefix'], 'd') + assert response['Prefix'] == 'd' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, []) - eq(prefixes, []) + assert keys == [] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix') -@attr(assertion='non-printable prefix can be specified') def test_bucket_list_prefix_unreadable(): key_names 
= ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, Prefix='\x0a') - eq(response['Prefix'], '\x0a') + assert response['Prefix'] == '\x0a' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, []) - eq(prefixes, []) + assert keys == [] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix with list-objects-v2') -@attr(assertion='non-printable prefix can be specified') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_prefix_unreadable(): key_names = ['foo/bar', 'foo/baz', 'quux'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Prefix='\x0a') - eq(response['Prefix'], '\x0a') + assert response['Prefix'] == '\x0a' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, []) - eq(prefixes, []) + assert keys == [] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix w/delimiter') -@attr(assertion='returns only objects directly under prefix') def test_bucket_list_prefix_delimiter_basic(): key_names = ['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, Delimiter='/', Prefix='foo/') - eq(response['Prefix'], 'foo/') - eq(response['Delimiter'], '/') + assert response['Prefix'] == 'foo/' + assert response['Delimiter'] == '/' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, ['foo/bar']) - eq(prefixes, ['foo/baz/']) + assert keys == ['foo/bar'] + assert prefixes == ['foo/baz/'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list-objects-v2 under prefix w/delimiter') -@attr(assertion='returns only objects directly under prefix') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_prefix_delimiter_basic(): key_names = ['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/', Prefix='foo/') - eq(response['Prefix'], 'foo/') - eq(response['Delimiter'], '/') + assert response['Prefix'] == 'foo/' + assert response['Delimiter'] == '/' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, ['foo/bar']) - eq(prefixes, ['foo/baz/']) + assert keys == ['foo/bar'] + assert prefixes == ['foo/baz/'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix w/delimiter') -@attr(assertion='non-slash delimiters') def test_bucket_list_prefix_delimiter_alt(): key_names = ['bar', 'bazar', 'cab', 'foo'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, Delimiter='a', Prefix='ba') - eq(response['Prefix'], 'ba') - eq(response['Delimiter'], 'a') + assert response['Prefix'] == 'ba' + assert response['Delimiter'] == 'a' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, ['bar']) - eq(prefixes, ['baza']) + assert keys == ['bar'] + assert prefixes == ['baza'] -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_prefix_delimiter_alt(): key_names = ['bar', 'bazar', 'cab', 'foo'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a', Prefix='ba') - 
eq(response['Prefix'], 'ba') - eq(response['Delimiter'], 'a') + assert response['Prefix'] == 'ba' + assert response['Delimiter'] == 'a' keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, ['bar']) - eq(prefixes, ['baza']) + assert keys == ['bar'] + assert prefixes == ['baza'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix w/delimiter') -@attr(assertion='finds nothing w/unmatched prefix') def test_bucket_list_prefix_delimiter_prefix_not_exist(): key_names = ['b/a/r', 'b/a/c', 'b/a/g', 'g'] bucket_name = _create_objects(keys=key_names) @@ -1095,14 +906,10 @@ def test_bucket_list_prefix_delimiter_prefix_not_exist(): keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, []) - eq(prefixes, []) + assert keys == [] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list-objects-v2 under prefix w/delimiter') -@attr(assertion='finds nothing w/unmatched prefix') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_prefix_delimiter_prefix_not_exist(): key_names = ['b/a/r', 'b/a/c', 'b/a/g', 'g'] bucket_name = _create_objects(keys=key_names) @@ -1112,13 +919,9 @@ def test_bucket_listv2_prefix_delimiter_prefix_not_exist(): keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, []) - eq(prefixes, []) + assert keys == [] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix w/delimiter') -@attr(assertion='over-ridden slash ceases to be a delimiter') def test_bucket_list_prefix_delimiter_delimiter_not_exist(): key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g'] bucket_name = _create_objects(keys=key_names) @@ -1128,14 +931,10 @@ def test_bucket_list_prefix_delimiter_delimiter_not_exist(): keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, ['b/a/c', 'b/a/g', 'b/a/r']) - eq(prefixes, []) + assert keys == ['b/a/c', 'b/a/g', 'b/a/r'] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list-objects-v2 under prefix w/delimiter') -@attr(assertion='over-ridden slash ceases to be a delimiter') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_prefix_delimiter_delimiter_not_exist(): key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g'] bucket_name = _create_objects(keys=key_names) @@ -1145,13 +944,9 @@ def test_bucket_listv2_prefix_delimiter_delimiter_not_exist(): keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, ['b/a/c', 'b/a/g', 'b/a/r']) - eq(prefixes, []) + assert keys == ['b/a/c', 'b/a/g', 'b/a/r'] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list under prefix w/delimiter') -@attr(assertion='finds nothing w/unmatched prefix and delimiter') def test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist(): key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g'] bucket_name = _create_objects(keys=key_names) @@ -1161,14 +956,10 @@ def test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist(): keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, []) - eq(prefixes, []) + assert keys == [] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list-objects-v2 under prefix w/delimiter') -@attr(assertion='finds nothing w/unmatched prefix and delimiter') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist(): key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g'] 
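# The assertions in these hunks lean on two small helpers whose definitions
# sit outside the diff context. A sketch consistent with how every test above
# uses them (an assumed reconstruction, not copied from the repo):
def _get_keys(response):
    """Return the object key names from a ListObjects(V2) response."""
    return [obj['Key'] for obj in response.get('Contents', [])]

def _get_prefixes(response):
    """Return the CommonPrefixes entries as plain strings."""
    return [prefix['Prefix'] for prefix in response.get('CommonPrefixes', [])]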
bucket_name = _create_objects(keys=key_names) @@ -1178,58 +969,46 @@ def test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist(): keys = _get_keys(response) prefixes = _get_prefixes(response) - eq(keys, []) - eq(prefixes, []) + assert keys == [] + assert prefixes == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys') -@attr(assertion='pagination w/max_keys=1, marker') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_bucket_list_maxkeys_one(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, MaxKeys=1) - eq(response['IsTruncated'], True) + assert response['IsTruncated'] == True keys = _get_keys(response) - eq(keys, key_names[0:1]) + assert keys == key_names[0:1] response = client.list_objects(Bucket=bucket_name, Marker=key_names[0]) - eq(response['IsTruncated'], False) + assert response['IsTruncated'] == False keys = _get_keys(response) - eq(keys, key_names[1:]) + assert keys == key_names[1:] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys with list-objects-v2') -@attr(assertion='pagination w/max_keys=1, marker') -@attr('list-objects-v2') -@attr('fails_on_dbstore') +@pytest.mark.list_objects_v2 +@pytest.mark.fails_on_dbstore def test_bucket_listv2_maxkeys_one(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1) - eq(response['IsTruncated'], True) + assert response['IsTruncated'] == True keys = _get_keys(response) - eq(keys, key_names[0:1]) + assert keys == key_names[0:1] response = client.list_objects_v2(Bucket=bucket_name, StartAfter=key_names[0]) - eq(response['IsTruncated'], False) + assert response['IsTruncated'] == False keys = _get_keys(response) - eq(keys, key_names[1:]) + assert keys == key_names[1:] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys') -@attr(assertion='pagination w/max_keys=0') def test_bucket_list_maxkeys_zero(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) @@ -1237,15 +1016,11 @@ def test_bucket_list_maxkeys_zero(): response = client.list_objects(Bucket=bucket_name, MaxKeys=0) - eq(response['IsTruncated'], False) + assert response['IsTruncated'] == False keys = _get_keys(response) - eq(keys, []) + assert keys == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys with list-objects-v2') -@attr(assertion='pagination w/max_keys=0') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_maxkeys_zero(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) @@ -1253,40 +1028,32 @@ def test_bucket_listv2_maxkeys_zero(): response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=0) - eq(response['IsTruncated'], False) + assert response['IsTruncated'] == False keys = _get_keys(response) - eq(keys, []) + assert keys == [] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys') -@attr(assertion='pagination w/o max_keys') def test_bucket_list_maxkeys_none(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name) - eq(response['IsTruncated'], False) + assert response['IsTruncated'] == False keys = _get_keys(response) - eq(keys, key_names) - eq(response['MaxKeys'], 
1000) + assert keys == key_names + assert response['MaxKeys'] == 1000 -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys with list-objects-v2') -@attr(assertion='pagination w/o max_keys') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_maxkeys_none(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name) - eq(response['IsTruncated'], False) + assert response['IsTruncated'] == False keys = _get_keys(response) - eq(keys, key_names) - eq(response['MaxKeys'], 1000) + assert keys == key_names + assert response['MaxKeys'] == 1000 def get_http_response_body(**kwargs): global http_response_body @@ -1306,11 +1073,7 @@ def parseXmlToJson(xml): return response -@attr(resource='bucket') -@attr(method='get') -@attr(operation='get usage by client') -@attr(assertion='account usage api') -@attr('fails_on_aws') # allow-unordered is a non-standard extension +@pytest.mark.fails_on_aws def test_account_usage(): # boto3.set_stream_logger(name='botocore') client = get_client() @@ -1323,18 +1086,14 @@ def test_account_usage(): xml = ET.fromstring(http_response_body.decode('utf-8')) parsed = parseXmlToJson(xml) summary = parsed['Summary'] - eq(summary['QuotaMaxBytes'], '-1') - eq(summary['QuotaMaxBuckets'], '1000') - eq(summary['QuotaMaxObjCount'], '-1') - eq(summary['QuotaMaxBytesPerBucket'], '-1') - eq(summary['QuotaMaxObjCountPerBucket'], '-1') + assert summary['QuotaMaxBytes'] == '-1' + assert summary['QuotaMaxBuckets'] == '1000' + assert summary['QuotaMaxObjCount'] == '-1' + assert summary['QuotaMaxBytesPerBucket'] == '-1' + assert summary['QuotaMaxObjCountPerBucket'] == '-1' -@attr(resource='bucket') -@attr(method='head') -@attr(operation='get usage by client') -@attr(assertion='account usage by head bucket') -@attr('fails_on_aws') # allow-unordered is a non-standard extension -@attr('fails_on_dbstore') +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_head_bucket_usage(): # boto3.set_stream_logger(name='botocore') client = get_client() @@ -1343,20 +1102,16 @@ def test_head_bucket_usage(): client.meta.events.register('after-call.s3.HeadBucket', get_http_response) client.head_bucket(Bucket=bucket_name) hdrs = http_response['headers'] - eq(hdrs['X-RGW-Object-Count'], '1') - eq(hdrs['X-RGW-Bytes-Used'], '3') - eq(hdrs['X-RGW-Quota-User-Size'], '-1') - eq(hdrs['X-RGW-Quota-User-Objects'], '-1') - eq(hdrs['X-RGW-Quota-Max-Buckets'], '1000') - eq(hdrs['X-RGW-Quota-Bucket-Size'], '-1') - eq(hdrs['X-RGW-Quota-Bucket-Objects'], '-1') + assert hdrs['X-RGW-Object-Count'] == '1' + assert hdrs['X-RGW-Bytes-Used'] == '3' + assert hdrs['X-RGW-Quota-User-Size'] == '-1' + assert hdrs['X-RGW-Quota-User-Objects'] == '-1' + assert hdrs['X-RGW-Quota-Max-Buckets'] == '1000' + assert hdrs['X-RGW-Quota-Bucket-Size'] == '-1' + assert hdrs['X-RGW-Quota-Bucket-Objects'] == '-1' -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys') -@attr(assertion='bucket list unordered') -@attr('fails_on_aws') # allow-unordered is a non-standard extension -@attr('fails_on_dbstore') +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_bucket_list_unordered(): # boto3.set_stream_logger(name='botocore') keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex', @@ -1375,46 +1130,42 @@ def test_bucket_list_unordered(): # test simple retrieval response = client.list_objects(Bucket=bucket_name, MaxKeys=1000) 
unordered_keys_out = _get_keys(response) - eq(len(keys_in), len(unordered_keys_out)) - eq(keys_in.sort(), unordered_keys_out.sort()) + assert len(keys_in) == len(unordered_keys_out) + assert sorted(keys_in) == sorted(unordered_keys_out) # test retrieval with prefix response = client.list_objects(Bucket=bucket_name, MaxKeys=1000, Prefix="abc/") unordered_keys_out = _get_keys(response) - eq(5, len(unordered_keys_out)) + assert 5 == len(unordered_keys_out) # test incremental retrieval with marker response = client.list_objects(Bucket=bucket_name, MaxKeys=6) unordered_keys_out = _get_keys(response) - eq(6, len(unordered_keys_out)) + assert 6 == len(unordered_keys_out) # now get the next bunch response = client.list_objects(Bucket=bucket_name, MaxKeys=6, Marker=unordered_keys_out[-1]) unordered_keys_out2 = _get_keys(response) - eq(6, len(unordered_keys_out2)) + assert 6 == len(unordered_keys_out2) # make sure there's no overlap between the incremental retrievals intersect = set(unordered_keys_out).intersection(unordered_keys_out2) - eq(0, len(intersect)) + assert 0 == len(intersect) # verify that unordered used with delimiter results in error e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name, Delimiter="/") status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidArgument') + assert status == 400 + assert error_code == 'InvalidArgument' -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys with list-objects-v2') -@attr(assertion='bucket list unordered') -@attr('fails_on_aws') # allow-unordered is a non-standard extension -@attr('list-objects-v2') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_aws +@pytest.mark.list_objects_v2 +@pytest.mark.fails_on_dbstore def test_bucket_listv2_unordered(): # boto3.set_stream_logger(name='botocore') keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex', @@ -1433,44 +1184,40 @@ def test_bucket_listv2_unordered(): # test simple retrieval response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1000) unordered_keys_out = _get_keys(response) - eq(len(keys_in), len(unordered_keys_out)) - eq(keys_in.sort(), unordered_keys_out.sort()) + assert len(keys_in) == len(unordered_keys_out) + assert sorted(keys_in) == sorted(unordered_keys_out) # test retrieval with prefix response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1000, Prefix="abc/") unordered_keys_out = _get_keys(response) - eq(5, len(unordered_keys_out)) + assert 5 == len(unordered_keys_out) # test incremental retrieval with marker response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=6) unordered_keys_out = _get_keys(response) - eq(6, len(unordered_keys_out)) + assert 6 == len(unordered_keys_out) # now get the next bunch response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=6, StartAfter=unordered_keys_out[-1]) unordered_keys_out2 = _get_keys(response) - eq(6, len(unordered_keys_out2)) + assert 6 == len(unordered_keys_out2) # make sure there's no overlap between the incremental retrievals intersect = set(unordered_keys_out).intersection(unordered_keys_out2) - eq(0, len(intersect)) + assert 0 == len(intersect) # verify that unordered used with delimiter results in error e = assert_raises(ClientError, client.list_objects_v2, Bucket=bucket_name, Delimiter="/") status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidArgument') + assert status == 400 + assert error_code == 'InvalidArgument' -@attr(resource='bucket') -@attr(method='get')
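# The MaxKeys/Marker tests above page through a listing by hand. The same
# loop as a standalone sketch (helper name hypothetical): a v1 listing
# continues from the last key returned whenever IsTruncated is set.
def _list_all_keys(client, bucket, page_size=1):
    keys, marker = [], ''
    while True:
        response = client.list_objects(Bucket=bucket, MaxKeys=page_size,
                                       Marker=marker)
        keys += [obj['Key'] for obj in response.get('Contents', [])]
        if not response['IsTruncated']:
            return keys
        marker = keys[-1]  # next page starts after the last key seen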
-@attr(operation='list all keys') -@attr(assertion='invalid max_keys') def test_bucket_list_maxkeys_invalid(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) @@ -1484,60 +1231,44 @@ def test_bucket_list_maxkeys_invalid(): e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidArgument') + assert status == 400 + assert error_code == 'InvalidArgument' -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys') -@attr(assertion='no pagination, no marker') def test_bucket_list_marker_none(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name) - eq(response['Marker'], '') + assert response['Marker'] == '' -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys') -@attr(assertion='no pagination, empty marker') def test_bucket_list_marker_empty(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, Marker='') - eq(response['Marker'], '') - eq(response['IsTruncated'], False) + assert response['Marker'] == '' + assert response['IsTruncated'] == False keys = _get_keys(response) - eq(keys, key_names) + assert keys == key_names -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys with list-objects-v2') -@attr(assertion='no pagination, empty continuationtoken') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_continuationtoken_empty(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, ContinuationToken='') - eq(response['ContinuationToken'], '') - eq(response['IsTruncated'], False) + assert response['ContinuationToken'] == '' + assert response['IsTruncated'] == False keys = _get_keys(response) - eq(keys, key_names) + assert keys == key_names -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list keys with list-objects-v2') -@attr(assertion='no pagination, non-empty continuationtoken') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_continuationtoken(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) @@ -1547,18 +1278,14 @@ def test_bucket_listv2_continuationtoken(): next_continuation_token = response1['NextContinuationToken'] response2 = client.list_objects_v2(Bucket=bucket_name, ContinuationToken=next_continuation_token) - eq(response2['ContinuationToken'], next_continuation_token) - eq(response2['IsTruncated'], False) + assert response2['ContinuationToken'] == next_continuation_token + assert response2['IsTruncated'] == False key_names2 = ['baz', 'foo', 'quxx'] keys = _get_keys(response2) - eq(keys, key_names2) + assert keys == key_names2 -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list keys with list-objects-v2') -@attr(assertion='no pagination, non-empty continuationtoken and startafter') -@attr('list-objects-v2') -@attr('fails_on_dbstore') +@pytest.mark.list_objects_v2 +@pytest.mark.fails_on_dbstore def test_bucket_listv2_both_continuationtoken_startafter(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) @@ -1568,103 +1295,79 @@ def 
test_bucket_listv2_both_continuationtoken_startafter(): next_continuation_token = response1['NextContinuationToken'] response2 = client.list_objects_v2(Bucket=bucket_name, StartAfter='bar', ContinuationToken=next_continuation_token) - eq(response2['ContinuationToken'], next_continuation_token) - eq(response2['StartAfter'], 'bar') - eq(response2['IsTruncated'], False) + assert response2['ContinuationToken'] == next_continuation_token + assert response2['StartAfter'] == 'bar' + assert response2['IsTruncated'] == False key_names2 = ['foo', 'quxx'] keys = _get_keys(response2) - eq(keys, key_names2) + assert keys == key_names2 -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys') -@attr(assertion='non-printing marker') def test_bucket_list_marker_unreadable(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, Marker='\x0a') - eq(response['Marker'], '\x0a') - eq(response['IsTruncated'], False) + assert response['Marker'] == '\x0a' + assert response['IsTruncated'] == False keys = _get_keys(response) - eq(keys, key_names) + assert keys == key_names -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys with list-objects-v2') -@attr(assertion='non-printing startafter') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_startafter_unreadable(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, StartAfter='\x0a') - eq(response['StartAfter'], '\x0a') - eq(response['IsTruncated'], False) + assert response['StartAfter'] == '\x0a' + assert response['IsTruncated'] == False keys = _get_keys(response) - eq(keys, key_names) + assert keys == key_names -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys') -@attr(assertion='marker not-in-list') def test_bucket_list_marker_not_in_list(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, Marker='blah') - eq(response['Marker'], 'blah') + assert response['Marker'] == 'blah' keys = _get_keys(response) - eq(keys, [ 'foo','quxx']) + assert keys == [ 'foo','quxx'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys with list-objects-v2') -@attr(assertion='startafter not-in-list') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_startafter_not_in_list(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, StartAfter='blah') - eq(response['StartAfter'], 'blah') + assert response['StartAfter'] == 'blah' keys = _get_keys(response) - eq(keys, ['foo', 'quxx']) + assert keys == ['foo', 'quxx'] -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all keys') -@attr(assertion='marker after list') def test_bucket_list_marker_after_list(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name, Marker='zzz') - eq(response['Marker'], 'zzz') + assert response['Marker'] == 'zzz' keys = _get_keys(response) - eq(response['IsTruncated'], False) - eq(keys, []) + assert response['IsTruncated'] == False + assert keys == [] -@attr(resource='bucket') -@attr(method='get') 
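# The ContinuationToken/StartAfter handshake exercised above is exactly what
# boto3's built-in paginator automates for ListObjectsV2. An equivalent
# sketch (helper name hypothetical; the paginator API is standard boto3):
def _list_all_keys_v2(client, bucket):
    keys = []
    paginator = client.get_paginator('list_objects_v2')
    for page in paginator.paginate(Bucket=bucket):
        keys += [obj['Key'] for obj in page.get('Contents', [])]
    return keys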
-@attr(operation='list all keys with list-objects-v2') -@attr(assertion='startafter after list') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_startafter_after_list(): key_names = ['bar', 'baz', 'foo', 'quxx'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name, StartAfter='zzz') - eq(response['StartAfter'], 'zzz') + assert response['StartAfter'] == 'zzz' keys = _get_keys(response) - eq(response['IsTruncated'], False) - eq(keys, []) + assert response['IsTruncated'] == False + assert keys == [] def _compare_dates(datetime1, datetime2): """ @@ -1673,13 +1376,9 @@ def _compare_dates(datetime1, datetime2): # both times are in datetime format but datetime1 has # microseconds and datetime2 does not datetime1 = datetime1.replace(microsecond=0) - eq(datetime1, datetime2) + assert datetime1 == datetime2 -@attr(resource='object') -@attr(method='head') -@attr(operation='compare w/bucket list') -@attr(assertion='return same metadata') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_bucket_list_return_data(): key_names = ['bar', 'baz', 'foo'] bucket_name = _create_objects(keys=key_names) @@ -1704,19 +1403,14 @@ def test_bucket_list_return_data(): for obj in objs_list: key_name = obj['Key'] key_data = data[key_name] - eq(obj['ETag'],key_data['ETag']) - eq(obj['Size'],key_data['ContentLength']) - eq(obj['Owner']['DisplayName'],key_data['DisplayName']) - eq(obj['Owner']['ID'],key_data['ID']) + assert obj['ETag'] == key_data['ETag'] + assert obj['Size'] == key_data['ContentLength'] + assert obj['Owner']['DisplayName'] == key_data['DisplayName'] + assert obj['Owner']['ID'] == key_data['ID'] _compare_dates(obj['LastModified'],key_data['LastModified']) -@attr(resource='object') -@attr(method='head') -@attr(operation='compare w/bucket list when bucket versioning is configured') -@attr(assertion='return same metadata') -@attr('versioning') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_bucket_list_return_data_versioning(): bucket_name = get_new_bucket() check_configure_versioning_retry(bucket_name, "Enabled", "Enabled") @@ -1746,17 +1440,13 @@ def test_bucket_list_return_data_versioning(): for obj in objs_list: key_name = obj['Key'] key_data = data[key_name] - eq(obj['Owner']['DisplayName'],key_data['DisplayName']) - eq(obj['ETag'],key_data['ETag']) - eq(obj['Size'],key_data['ContentLength']) - eq(obj['Owner']['ID'],key_data['ID']) - eq(obj['VersionId'], key_data['VersionId']) + assert obj['Owner']['DisplayName'] == key_data['DisplayName'] + assert obj['ETag'] == key_data['ETag'] + assert obj['Size'] == key_data['ContentLength'] + assert obj['Owner']['ID'] == key_data['ID'] + assert obj['VersionId'] == key_data['VersionId'] _compare_dates(obj['LastModified'],key_data['LastModified']) -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all objects (anonymous)') -@attr(assertion='succeeds') def test_bucket_list_objects_anonymous(): bucket_name = get_new_bucket() client = get_client() @@ -1765,11 +1455,7 @@ def test_bucket_list_objects_anonymous(): unauthenticated_client = get_unauthenticated_client() unauthenticated_client.list_objects(Bucket=bucket_name) -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all objects (anonymous) with list-objects-v2') -@attr(assertion='succeeds') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_objects_anonymous(): bucket_name = get_new_bucket() client = get_client() @@ 
-1778,10 +1464,6 @@ def test_bucket_listv2_objects_anonymous(): unauthenticated_client = get_unauthenticated_client() unauthenticated_client.list_objects_v2(Bucket=bucket_name) -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all objects (anonymous)') -@attr(assertion='fails') def test_bucket_list_objects_anonymous_fail(): bucket_name = get_new_bucket() @@ -1789,14 +1471,10 @@ def test_bucket_list_objects_anonymous_fail(): e = assert_raises(ClientError, unauthenticated_client.list_objects, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list all objects (anonymous) with list-objects-v2') -@attr(assertion='fails') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucket_listv2_objects_anonymous_fail(): bucket_name = get_new_bucket() @@ -1804,13 +1482,9 @@ def test_bucket_listv2_objects_anonymous_fail(): e = assert_raises(ClientError, unauthenticated_client.list_objects_v2, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@attr(resource='bucket') -@attr(method='get') -@attr(operation='non-existant bucket') -@attr(assertion='fails 404') def test_bucket_notexist(): bucket_name = get_new_bucket_name() client = get_client() @@ -1818,14 +1492,10 @@ def test_bucket_notexist(): e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchBucket') + assert status == 404 + assert error_code == 'NoSuchBucket' -@attr(resource='bucket') -@attr(method='get') -@attr(operation='non-existant bucket with list-objects-v2') -@attr(assertion='fails 404') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_bucketv2_notexist(): bucket_name = get_new_bucket_name() client = get_client() @@ -1833,13 +1503,9 @@ def test_bucketv2_notexist(): e = assert_raises(ClientError, client.list_objects_v2, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchBucket') + assert status == 404 + assert error_code == 'NoSuchBucket' -@attr(resource='bucket') -@attr(method='delete') -@attr(operation='non-existant bucket') -@attr(assertion='fails 404') def test_bucket_delete_notexist(): bucket_name = get_new_bucket_name() client = get_client() @@ -1847,13 +1513,9 @@ def test_bucket_delete_notexist(): e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchBucket') + assert status == 404 + assert error_code == 'NoSuchBucket' -@attr(resource='bucket') -@attr(method='delete') -@attr(operation='non-empty bucket') -@attr(assertion='fails 409') def test_bucket_delete_nonempty(): key_names = ['foo'] bucket_name = _create_objects(keys=key_names) @@ -1862,8 +1524,8 @@ def test_bucket_delete_nonempty(): e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 409) - eq(error_code, 'BucketNotEmpty') + assert status == 409 + assert error_code == 'BucketNotEmpty' def _do_set_bucket_canned_acl(client, bucket_name, canned_acl, i, results): try: @@ -1884,10 +1546,6 @@ def 
_do_wait_completion(t): for thr in t: thr.join() -@attr(resource='bucket') -@attr(method='put') -@attr(operation='concurrent set of acls on a bucket') -@attr(assertion='works') def test_bucket_concurrent_set_canned_acl(): bucket_name = get_new_bucket() client = get_client() @@ -1901,12 +1559,8 @@ def test_bucket_concurrent_set_canned_acl(): _do_wait_completion(t) for r in results: - eq(r, True) + assert r == True -@attr(resource='object') -@attr(method='put') -@attr(operation='non-existant bucket') -@attr(assertion='fails 404') def test_object_write_to_nonexist_bucket(): key_names = ['foo'] bucket_name = 'whatchutalkinboutwillis' @@ -1915,14 +1569,10 @@ def test_object_write_to_nonexist_bucket(): e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchBucket') + assert status == 404 + assert error_code == 'NoSuchBucket' -@attr(resource='bucket') -@attr(method='del') -@attr(operation='deleted bucket') -@attr(assertion='fails 404') def test_bucket_create_delete(): bucket_name = get_new_bucket() client = get_client() @@ -1931,13 +1581,9 @@ def test_bucket_create_delete(): e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchBucket') + assert status == 404 + assert error_code == 'NoSuchBucket' -@attr(resource='object') -@attr(method='get') -@attr(operation='read contents that were never written') -@attr(assertion='fails 404') def test_object_read_not_exist(): bucket_name = get_new_bucket() client = get_client() @@ -1945,8 +1591,8 @@ def test_object_read_not_exist(): e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='bar') status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchKey') + assert status == 404 + assert error_code == 'NoSuchKey' http_response = None @@ -1954,11 +1600,7 @@ def get_http_response(**kwargs): global http_response http_response = kwargs['http_response'].__dict__ -@attr(resource='object') -@attr(method='get') -@attr(operation='read contents that were never written to raise one error response') -@attr(assertion='RequestId appears in the error response') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_object_requestid_matches_header_on_error(): bucket_name = get_new_bucket() client = get_client() @@ -1972,7 +1614,7 @@ def test_object_requestid_matches_header_on_error(): request_id = resp_body_xml.find('.//RequestId').text assert request_id is not None - eq(request_id, e.response['ResponseMetadata']['RequestId']) + assert request_id == e.response['ResponseMetadata']['RequestId'] def _make_objs_dict(key_names): objs_list = [] @@ -1982,61 +1624,49 @@ def _make_objs_dict(key_names): objs_dict = {'Objects': objs_list} return objs_dict -@attr(resource='object') -@attr(method='post') -@attr(operation='delete multiple objects') -@attr(assertion='deletes multiple objects with a single call') def test_multi_object_delete(): key_names = ['key0', 'key1', 'key2'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects(Bucket=bucket_name) - eq(len(response['Contents']), 3) + assert len(response['Contents']) == 3 objs_dict = _make_objs_dict(key_names=key_names) response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict) - eq(len(response['Deleted']), 3) + assert len(response['Deleted']) == 3 
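# DeleteObjects accepts at most 1000 keys per request -- the key-limit tests
# below expect a 400 for 1001 -- so bulk deletes are normally batched on the
# client side. A sketch (helper name hypothetical):
def _delete_keys(client, bucket, key_names, batch=1000):
    for i in range(0, len(key_names), batch):
        objs = [{'Key': name} for name in key_names[i:i + batch]]
        client.delete_objects(Bucket=bucket, Delete={'Objects': objs})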
assert 'Errors' not in response response = client.list_objects(Bucket=bucket_name) assert 'Contents' not in response response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict) - eq(len(response['Deleted']), 3) + assert len(response['Deleted']) == 3 assert 'Errors' not in response response = client.list_objects(Bucket=bucket_name) assert 'Contents' not in response -@attr(resource='object') -@attr(method='post') -@attr(operation='delete multiple objects with list-objects-v2') -@attr(assertion='deletes multiple objects with a single call') -@attr('list-objects-v2') +@pytest.mark.list_objects_v2 def test_multi_objectv2_delete(): key_names = ['key0', 'key1', 'key2'] bucket_name = _create_objects(keys=key_names) client = get_client() response = client.list_objects_v2(Bucket=bucket_name) - eq(len(response['Contents']), 3) + assert len(response['Contents']) == 3 objs_dict = _make_objs_dict(key_names=key_names) response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict) - eq(len(response['Deleted']), 3) + assert len(response['Deleted']) == 3 assert 'Errors' not in response response = client.list_objects_v2(Bucket=bucket_name) assert 'Contents' not in response response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict) - eq(len(response['Deleted']), 3) + assert len(response['Deleted']) == 3 assert 'Errors' not in response response = client.list_objects_v2(Bucket=bucket_name) assert 'Contents' not in response -@attr(resource='object') -@attr(method='post') -@attr(operation='delete multiple objects has upper limit of 1000 keys') -@attr(assertion='fails 400') def test_multi_object_delete_key_limit(): key_names = [f"key-{i}" for i in range(1001)] bucket_name = _create_objects(keys=key_names) @@ -2047,17 +1677,13 @@ def test_multi_object_delete_key_limit(): numKeys = 0 for page in pages: numKeys += len(page['Contents']) - eq(numKeys, 1001) + assert numKeys == 1001 objs_dict = _make_objs_dict(key_names=key_names) e = assert_raises(ClientError,client.delete_objects,Bucket=bucket_name,Delete=objs_dict) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='delete multiple objects has upper limit of 1000 keys with list-objects-v2') -@attr(assertion='fails 400') def test_multi_objectv2_delete_key_limit(): key_names = [f"key-{i}" for i in range(1001)] bucket_name = _create_objects(keys=key_names) @@ -2068,40 +1694,28 @@ def test_multi_objectv2_delete_key_limit(): numKeys = 0 for page in pages: numKeys += len(page['Contents']) - eq(numKeys, 1001) + assert numKeys == 1001 objs_dict = _make_objs_dict(key_names=key_names) e = assert_raises(ClientError,client.delete_objects,Bucket=bucket_name,Delete=objs_dict) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='object') -@attr(method='put') -@attr(operation='write zero-byte key') -@attr(assertion='correct content length') def test_object_head_zero_bytes(): bucket_name = get_new_bucket() client = get_client() client.put_object(Bucket=bucket_name, Key='foo', Body='') response = client.head_object(Bucket=bucket_name, Key='foo') - eq(response['ContentLength'], 0) + assert response['ContentLength'] == 0 -@attr(resource='object') -@attr(method='put') -@attr(operation='write key') -@attr(assertion='correct etag') def test_object_write_check_etag(): bucket_name = get_new_bucket() client = get_client() response = client.put_object(Bucket=bucket_name, Key='foo', 
Body='bar') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) - eq(response['ETag'], '"37b51d194a7513e45b56f6524f2d51f2"') + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + assert response['ETag'] == '"37b51d194a7513e45b56f6524f2d51f2"' -@attr(resource='object') -@attr(method='put') -@attr(operation='write key') -@attr(assertion='correct cache control header') def test_object_write_cache_control(): bucket_name = get_new_bucket() client = get_client() @@ -2109,12 +1723,8 @@ def test_object_write_cache_control(): client.put_object(Bucket=bucket_name, Key='foo', Body='bar', CacheControl=cache_control) response = client.head_object(Bucket=bucket_name, Key='foo') - eq(response['ResponseMetadata']['HTTPHeaders']['cache-control'], cache_control) + assert response['ResponseMetadata']['HTTPHeaders']['cache-control'] == cache_control -@attr(resource='object') -@attr(method='put') -@attr(operation='write key') -@attr(assertion='correct expires header') def test_object_write_expires(): bucket_name = get_new_bucket() client = get_client() @@ -2133,10 +1743,6 @@ def _get_body(response): got = got.decode() return got -@attr(resource='object') -@attr(method='all') -@attr(operation='complete object life cycle') -@attr(assertion='read back what we wrote and rewrote') def test_object_write_read_update_read_delete(): bucket_name = get_new_bucket() client = get_client() @@ -2146,13 +1752,13 @@ def test_object_write_read_update_read_delete(): # Read response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' # Update client.put_object(Bucket=bucket_name, Key='foo', Body='soup') # Read response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'soup') + assert body == 'soup' # Delete client.delete_object(Bucket=bucket_name, Key='foo') @@ -2173,39 +1779,23 @@ def _set_get_metadata(metadata, bucket_name=None): response = client.get_object(Bucket=bucket_name, Key='foo') return response['Metadata']['meta1'] -@attr(resource='object.metadata') -@attr(method='put') -@attr(operation='metadata write/re-read') -@attr(assertion='reread what we wrote') def test_object_set_get_metadata_none_to_good(): got = _set_get_metadata('mymeta') - eq(got, 'mymeta') + assert got == 'mymeta' -@attr(resource='object.metadata') -@attr(method='put') -@attr(operation='metadata write/re-read') -@attr(assertion='write empty value, returns empty value') def test_object_set_get_metadata_none_to_empty(): got = _set_get_metadata('') - eq(got, '') + assert got == '' -@attr(resource='object.metadata') -@attr(method='put') -@attr(operation='metadata write/re-write') -@attr(assertion='empty value replaces old') def test_object_set_get_metadata_overwrite_to_empty(): bucket_name = get_new_bucket() got = _set_get_metadata('oldmeta', bucket_name) - eq(got, 'oldmeta') + assert got == 'oldmeta' got = _set_get_metadata('', bucket_name) - eq(got, '') + assert got == '' -@attr(resource='object.metadata') -@attr(method='put') -@attr(operation='metadata write/re-write') -@attr(assertion='UTF-8 values passed through') # TODO: the decoding of this unicode metadata is not happening properly for unknown reasons -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_object_set_get_unicode_metadata(): bucket_name = get_new_bucket() client = get_client() @@ -2217,11 +1807,10 @@ def test_object_set_get_unicode_metadata(): client.put_object(Bucket=bucket_name, Key='foo', Body='bar') response = client.get_object(Bucket=bucket_name, 
Key='foo') - got = response['Metadata']['meta1'].decode('utf-8') got = response['Metadata']['meta1'] print(got) print(u"Hello World\xe9") - eq(got, u"Hello World\xe9") + assert got == u"Hello World\xe9" def _set_get_metadata_unreadable(metadata, bucket_name=None): """ @@ -2239,58 +1828,6 @@ def _set_get_metadata_unreadable(metadata, bucket_name=None): e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='bar', Metadata=metadata_dict) return e -@attr(resource='object.metadata') -@attr(method='put') -@attr(operation='metadata write/re-write') -@attr(assertion='non-UTF-8 values detected, but rejected by webserver') -@attr('fails_strict_rfc2616') -@attr(assertion='fails 400') -def test_object_set_get_non_utf8_metadata(): - metadata = '\x04mymeta' - e = _set_get_metadata_unreadable(metadata) - status, error_code = _get_status_and_error_code(e.response) - eq(status, 400 or 403) - -@attr(resource='object.metadata') -@attr(method='put') -@attr(operation='metadata write') -@attr(assertion='non-printing prefixes rejected by webserver') -@attr('fails_strict_rfc2616') -@attr(assertion='fails 400') -def test_object_set_get_metadata_empty_to_unreadable_prefix(): - metadata = '\x04w' - e = _set_get_metadata_unreadable(metadata) - status, error_code = _get_status_and_error_code(e.response) - eq(status, 400 or 403) - -@attr(resource='object.metadata') -@attr(method='put') -@attr(operation='metadata write') -@attr(assertion='non-printing suffixes rejected by webserver') -@attr('fails_strict_rfc2616') -@attr(assertion='fails 400') -def test_object_set_get_metadata_empty_to_unreadable_suffix(): - metadata = 'h\x04' - e = _set_get_metadata_unreadable(metadata) - status, error_code = _get_status_and_error_code(e.response) - eq(status, 400 or 403) - -@attr(resource='object.metadata') -@attr(method='put') -@attr(operation='metadata write') -@attr(assertion='non-priting in-fixes rejected by webserver') -@attr('fails_strict_rfc2616') -@attr(assertion='fails 400') -def test_object_set_get_metadata_empty_to_unreadable_infix(): - metadata = 'h\x04w' - e = _set_get_metadata_unreadable(metadata) - status, error_code = _get_status_and_error_code(e.response) - eq(status, 400 or 403) - -@attr(resource='object') -@attr(method='put') -@attr(operation='data re-write') -@attr(assertion='replaces previous metadata') def test_object_metadata_replaced_on_put(): bucket_name = get_new_bucket() client = get_client() @@ -2301,12 +1838,8 @@ def test_object_metadata_replaced_on_put(): response = client.get_object(Bucket=bucket_name, Key='foo') got = response['Metadata'] - eq(got, {}) + assert got == {} -@attr(resource='object') -@attr(method='put') -@attr(operation='data write from file (w/100-Continue)') -@attr(assertion='succeeds and returns written data') def test_object_write_file(): bucket_name = get_new_bucket() client = get_client() @@ -2315,16 +1848,12 @@ def test_object_write_file(): client.put_object(Bucket=bucket_name, Key='foo', Body=data) response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' def _get_post_url(bucket_name): endpoint = get_config_endpoint() return '{endpoint}/{bucket_name}'.format(endpoint=endpoint, bucket_name=bucket_name) -@attr(resource='object') -@attr(method='post') -@attr(operation='anonymous browser based upload via POST request') -@attr(assertion='succeeds and returns written data') def test_post_object_anonymous_request(): bucket_name = get_new_bucket_name() client = get_client() @@ -2334,15 +1863,11 @@ 
def test_post_object_anonymous_request(): client.create_bucket(ACL='public-read-write', Bucket=bucket_name) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 response = client.get_object(Bucket=bucket_name, Key='foo.txt') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='succeeds and returns written data') def test_post_object_authenticated_request(): bucket_name = get_new_bucket() client = get_client() @@ -2376,15 +1901,11 @@ def test_post_object_authenticated_request(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 response = client.get_object(Bucket=bucket_name, Key='foo.txt') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request, no content-type header') -@attr(assertion='succeeds and returns written data') def test_post_object_authenticated_no_content_type(): bucket_name = get_new_bucket_name() client = get_client() @@ -2417,15 +1938,11 @@ def test_post_object_authenticated_no_content_type(): ('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 response = client.get_object(Bucket=bucket_name, Key="foo.txt") body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request, bad access key') -@attr(assertion='fails') def test_post_object_authenticated_request_bad_access_key(): bucket_name = get_new_bucket_name() client = get_client() @@ -2459,12 +1976,8 @@ def test_post_object_authenticated_request_bad_access_key(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 403) + assert r.status_code == 403 -@attr(resource='object') -@attr(method='post') -@attr(operation='anonymous browser based upload via POST request') -@attr(assertion='succeeds with status 201') def test_post_object_set_success_code(): bucket_name = get_new_bucket_name() client = get_client() @@ -2476,14 +1989,10 @@ def test_post_object_set_success_code(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 201) + assert r.status_code == 201 message = ET.fromstring(r.content).find('Key') - eq(message.text,'foo.txt') + assert message.text == 'foo.txt' -@attr(resource='object') -@attr(method='post') -@attr(operation='anonymous browser based upload via POST request') -@attr(assertion='succeeds with status 204') def test_post_object_set_invalid_success_code(): bucket_name = get_new_bucket_name() client = get_client() @@ -2495,14 +2004,10 @@ def test_post_object_set_invalid_success_code(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 content = r.content.decode() - eq(content,'') + assert content == '' -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') 
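# The post_object tests build a browser-style multipart form whose 'policy'
# and 'signature' fields are computed in unchanged context lines this diff
# does not show. What they rely on is the standard AWS signature-v2 POST
# policy construction: base64-encode the JSON policy document, then HMAC-SHA1
# it with the secret key. A sketch (function name hypothetical):
import base64
import hmac
import json
from hashlib import sha1

def _sign_post_policy(secret_key, policy_document):
    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    signature = base64.b64encode(
        hmac.new(secret_key.encode('utf-8'), policy, sha1).digest())
    return policy, signature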
-@attr(assertion='succeeds and returns written data') def test_post_object_upload_larger_than_chunk(): bucket_name = get_new_bucket() client = get_client() @@ -2537,15 +2042,11 @@ def test_post_object_upload_larger_than_chunk(): ("Content-Type" , "text/plain"),('file', foo_string)]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 response = client.get_object(Bucket=bucket_name, Key='foo.txt') body = _get_body(response) - eq(body, foo_string) + assert body == foo_string -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='succeeds and returns written data') def test_post_object_set_key_from_filename(): bucket_name = get_new_bucket() client = get_client() @@ -2577,15 +2078,11 @@ def test_post_object_set_key_from_filename(): ("Content-Type" , "text/plain"),('file', ('foo.txt', 'bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 response = client.get_object(Bucket=bucket_name, Key='foo.txt') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='succeeds with status 204') def test_post_object_ignored_header(): bucket_name = get_new_bucket() client = get_client() @@ -2618,12 +2115,8 @@ def test_post_object_ignored_header(): ("Content-Type" , "text/plain"),("x-ignore-foo" , "bar"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='succeeds with status 204') def test_post_object_case_insensitive_condition_fields(): bucket_name = get_new_bucket() client = get_client() @@ -2657,12 +2150,8 @@ def test_post_object_case_insensitive_condition_fields(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='succeeds with escaped leading $ and returns written data') def test_post_object_escaped_field_values(): bucket_name = get_new_bucket() client = get_client() @@ -2694,15 +2183,11 @@ def test_post_object_escaped_field_values(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 response = client.get_object(Bucket=bucket_name, Key='\$foo.txt') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='succeeds and returns redirect url') def test_post_object_success_redirect_action(): bucket_name = get_new_bucket_name() client = get_client() @@ -2739,17 +2224,12 @@ def test_post_object_success_redirect_action(): ('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 200) + assert r.status_code == 200 url = r.url response = client.get_object(Bucket=bucket_name, Key='foo.txt') - eq(url, - 
'{rurl}?bucket={bucket}&key={key}&etag=%22{etag}%22'.format(rurl = redirect_url,\ - bucket = bucket_name, key = 'foo.txt', etag = response['ETag'].strip('"'))) + assert url == '{rurl}?bucket={bucket}&key={key}&etag=%22{etag}%22'.format(\ + rurl = redirect_url, bucket = bucket_name, key = 'foo.txt', etag = response['ETag'].strip('"')) -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with invalid signature error') def test_post_object_invalid_signature(): bucket_name = get_new_bucket() client = get_client() @@ -2781,12 +2261,8 @@ def test_post_object_invalid_signature(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 403) + assert r.status_code == 403 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with access key does not exist error') def test_post_object_invalid_access_key(): bucket_name = get_new_bucket() client = get_client() @@ -2818,12 +2294,8 @@ def test_post_object_invalid_access_key(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 403) + assert r.status_code == 403 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with invalid expiration error') def test_post_object_invalid_date_format(): bucket_name = get_new_bucket() client = get_client() @@ -2855,12 +2327,8 @@ def test_post_object_invalid_date_format(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 400) + assert r.status_code == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with missing key error') def test_post_object_no_key_specified(): bucket_name = get_new_bucket() client = get_client() @@ -2891,12 +2359,8 @@ def test_post_object_no_key_specified(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 400) + assert r.status_code == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with missing signature error') def test_post_object_missing_signature(): bucket_name = get_new_bucket() client = get_client() @@ -2928,12 +2392,8 @@ def test_post_object_missing_signature(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 400) + assert r.status_code == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with extra input fields policy error') def test_post_object_missing_policy_condition(): bucket_name = get_new_bucket() client = get_client() @@ -2964,12 +2424,8 @@ def test_post_object_missing_policy_condition(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 403) + assert r.status_code == 403 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST 
request') -@attr(assertion='succeeds using starts-with restriction on metadata header') def test_post_object_user_specified_header(): bucket_name = get_new_bucket() client = get_client() @@ -3002,14 +2458,10 @@ def test_post_object_user_specified_header(): ("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 response = client.get_object(Bucket=bucket_name, Key='foo.txt') - eq(response['Metadata']['foo'], 'barclamp') + assert response['Metadata']['foo'] == 'barclamp' -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with policy condition failed error due to missing field in POST request') def test_post_object_request_missing_policy_specified_field(): bucket_name = get_new_bucket() client = get_client() @@ -3042,12 +2494,8 @@ def test_post_object_request_missing_policy_specified_field(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 403) + assert r.status_code == 403 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with conditions must be list error') def test_post_object_condition_is_case_sensitive(): bucket_name = get_new_bucket() client = get_client() @@ -3079,12 +2527,8 @@ def test_post_object_condition_is_case_sensitive(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 400) + assert r.status_code == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with expiration must be string error') def test_post_object_expires_is_case_sensitive(): bucket_name = get_new_bucket() client = get_client() @@ -3116,12 +2560,8 @@ def test_post_object_expires_is_case_sensitive(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 400) + assert r.status_code == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with policy expired error') def test_post_object_expired_policy(): bucket_name = get_new_bucket() client = get_client() @@ -3153,12 +2593,8 @@ def test_post_object_expired_policy(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 403) + assert r.status_code == 403 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails using equality restriction on metadata header') def test_post_object_invalid_request_field_value(): bucket_name = get_new_bucket() client = get_client() @@ -3190,12 +2626,8 @@ def test_post_object_invalid_request_field_value(): ("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 403) + assert r.status_code == 403 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with policy missing 
expiration error') def test_post_object_missing_expires_condition(): bucket_name = get_new_bucket() client = get_client() @@ -3227,12 +2659,8 @@ def test_post_object_missing_expires_condition(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 400) + assert r.status_code == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with policy missing conditions error') def test_post_object_missing_conditions_list(): bucket_name = get_new_bucket() client = get_client() @@ -3256,12 +2684,8 @@ def test_post_object_missing_conditions_list(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 400) + assert r.status_code == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with allowable upload size exceeded error') def test_post_object_upload_size_limit_exceeded(): bucket_name = get_new_bucket() client = get_client() @@ -3293,12 +2717,8 @@ def test_post_object_upload_size_limit_exceeded(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 400) + assert r.status_code == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with invalid content length error') def test_post_object_missing_content_length_argument(): bucket_name = get_new_bucket() client = get_client() @@ -3330,12 +2750,8 @@ def test_post_object_missing_content_length_argument(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 400) + assert r.status_code == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with invalid JSON error') def test_post_object_invalid_content_length_argument(): bucket_name = get_new_bucket() client = get_client() @@ -3367,12 +2783,8 @@ def test_post_object_invalid_content_length_argument(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 400) + assert r.status_code == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='fails with upload size less than minimum allowable error') def test_post_object_upload_size_below_minimum(): bucket_name = get_new_bucket() client = get_client() @@ -3404,12 +2816,8 @@ def test_post_object_upload_size_below_minimum(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 400) + assert r.status_code == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='empty conditions return appropriate error response') def test_post_object_empty_conditions(): bucket_name = get_new_bucket() client = get_client() @@ -3437,12 +2845,8 @@ def test_post_object_empty_conditions(): ("Content-Type" , "text/plain"),('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - 
eq(r.status_code, 400) + assert r.status_code == 400 -@attr(resource='object') -@attr(method='get') -@attr(operation='get w/ If-Match: the latest ETag') -@attr(assertion='succeeds') def test_get_object_ifmatch_good(): bucket_name = get_new_bucket() client = get_client() @@ -3451,12 +2855,8 @@ def test_get_object_ifmatch_good(): response = client.get_object(Bucket=bucket_name, Key='foo', IfMatch=etag) body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='get') -@attr(operation='get w/ If-Match: bogus ETag') -@attr(assertion='fails 412') def test_get_object_ifmatch_failed(): bucket_name = get_new_bucket() client = get_client() @@ -3464,13 +2864,9 @@ def test_get_object_ifmatch_failed(): e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfMatch='"ABCORZ"') status, error_code = _get_status_and_error_code(e.response) - eq(status, 412) - eq(error_code, 'PreconditionFailed') + assert status == 412 + assert error_code == 'PreconditionFailed' -@attr(resource='object') -@attr(method='get') -@attr(operation='get w/ If-None-Match: the latest ETag') -@attr(assertion='fails 304') def test_get_object_ifnonematch_good(): bucket_name = get_new_bucket() client = get_client() @@ -3479,13 +2875,9 @@ def test_get_object_ifnonematch_good(): e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfNoneMatch=etag) status, error_code = _get_status_and_error_code(e.response) - eq(status, 304) - eq(e.response['Error']['Message'], 'Not Modified') + assert status == 304 + assert e.response['Error']['Message'] == 'Not Modified' -@attr(resource='object') -@attr(method='get') -@attr(operation='get w/ If-None-Match: bogus ETag') -@attr(assertion='succeeds') def test_get_object_ifnonematch_failed(): bucket_name = get_new_bucket() client = get_client() @@ -3493,12 +2885,8 @@ def test_get_object_ifnonematch_failed(): response = client.get_object(Bucket=bucket_name, Key='foo', IfNoneMatch='ABCORZ') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='get') -@attr(operation='get w/ If-Modified-Since: before') -@attr(assertion='succeeds') def test_get_object_ifmodifiedsince_good(): bucket_name = get_new_bucket() client = get_client() @@ -3506,13 +2894,9 @@ def test_get_object_ifmodifiedsince_good(): response = client.get_object(Bucket=bucket_name, Key='foo', IfModifiedSince='Sat, 29 Oct 1994 19:43:31 GMT') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='get') -@attr(operation='get w/ If-Modified-Since: after') -@attr(assertion='fails 304') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_get_object_ifmodifiedsince_failed(): bucket_name = get_new_bucket() client = get_client() @@ -3530,14 +2914,10 @@ def test_get_object_ifmodifiedsince_failed(): e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfModifiedSince=after_str) status, error_code = _get_status_and_error_code(e.response) - eq(status, 304) - eq(e.response['Error']['Message'], 'Not Modified') + assert status == 304 + assert e.response['Error']['Message'] == 'Not Modified' -@attr(resource='object') -@attr(method='get') -@attr(operation='get w/ If-Unmodified-Since: before') -@attr(assertion='fails 412') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_get_object_ifunmodifiedsince_good(): bucket_name = get_new_bucket() client = get_client() @@ -3545,13 +2925,9 @@ def 
test_get_object_ifunmodifiedsince_good(): e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfUnmodifiedSince='Sat, 29 Oct 1994 19:43:31 GMT') status, error_code = _get_status_and_error_code(e.response) - eq(status, 412) - eq(error_code, 'PreconditionFailed') + assert status == 412 + assert error_code == 'PreconditionFailed' -@attr(resource='object') -@attr(method='get') -@attr(operation='get w/ If-Unmodified-Since: after') -@attr(assertion='succeeds') def test_get_object_ifunmodifiedsince_failed(): bucket_name = get_new_bucket() client = get_client() @@ -3559,14 +2935,10 @@ def test_get_object_ifunmodifiedsince_failed(): response = client.get_object(Bucket=bucket_name, Key='foo', IfUnmodifiedSince='Sat, 29 Oct 2100 19:43:31 GMT') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='put') -@attr(operation='data re-write w/ If-Match: the latest ETag') -@attr(assertion='replaces previous data and metadata') -@attr('fails_on_aws') +@pytest.mark.fails_on_aws def test_put_object_ifmatch_good(): bucket_name = get_new_bucket() client = get_client() @@ -3574,7 +2946,7 @@ def test_put_object_ifmatch_good(): response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' etag = response['ETag'].replace('"', '') @@ -3585,20 +2957,16 @@ def test_put_object_ifmatch_good(): response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'zar') + assert body == 'zar' -@attr(resource='object') -@attr(method='get') -@attr(operation='get w/ If-Match: bogus ETag') -@attr(assertion='fails 412') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_put_object_ifmatch_failed(): bucket_name = get_new_bucket() client = get_client() client.put_object(Bucket=bucket_name, Key='foo', Body='bar') response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' # pass in custom header 'If-Match' before PutObject call lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '"ABCORZ"'})) @@ -3606,25 +2974,21 @@ def test_put_object_ifmatch_failed(): e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar') status, error_code = _get_status_and_error_code(e.response) - eq(status, 412) - eq(error_code, 'PreconditionFailed') + assert status == 412 + assert error_code == 'PreconditionFailed' response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='put') -@attr(operation='overwrite existing object w/ If-Match: *') -@attr(assertion='replaces previous data and metadata') -@attr('fails_on_aws') +@pytest.mark.fails_on_aws def test_put_object_ifmatch_overwrite_existed_good(): bucket_name = get_new_bucket() client = get_client() client.put_object(Bucket=bucket_name, Key='foo', Body='bar') response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'})) client.meta.events.register('before-call.s3.PutObject', lf) @@ -3632,14 +2996,10 @@ def test_put_object_ifmatch_overwrite_existed_good(): response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'zar') + assert body == 'zar' -@attr(resource='object') -@attr(method='put') 
-@attr(operation='overwrite non-existing object w/ If-Match: *') -@attr(assertion='fails 412') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_put_object_ifmatch_nonexisted_failed(): bucket_name = get_new_bucket() client = get_client() @@ -3648,26 +3008,22 @@ def test_put_object_ifmatch_nonexisted_failed(): client.meta.events.register('before-call.s3.PutObject', lf) e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='bar') status, error_code = _get_status_and_error_code(e.response) - eq(status, 412) - eq(error_code, 'PreconditionFailed') + assert status == 412 + assert error_code == 'PreconditionFailed' e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchKey') + assert status == 404 + assert error_code == 'NoSuchKey' -@attr(resource='object') -@attr(method='put') -@attr(operation='overwrite existing object w/ If-None-Match: outdated ETag') -@attr(assertion='replaces previous data and metadata') -@attr('fails_on_aws') +@pytest.mark.fails_on_aws def test_put_object_ifnonmatch_good(): bucket_name = get_new_bucket() client = get_client() client.put_object(Bucket=bucket_name, Key='foo', Body='bar') response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': 'ABCORZ'})) client.meta.events.register('before-call.s3.PutObject', lf) @@ -3675,14 +3031,10 @@ def test_put_object_ifnonmatch_good(): response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'zar') + assert body == 'zar' -@attr(resource='object') -@attr(method='put') -@attr(operation='overwrite existing object w/ If-None-Match: the latest ETag') -@attr(assertion='fails 412') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_put_object_ifnonmatch_failed(): bucket_name = get_new_bucket() client = get_client() @@ -3690,7 +3042,7 @@ def test_put_object_ifnonmatch_failed(): response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' etag = response['ETag'].replace('"', '') @@ -3699,18 +3051,14 @@ def test_put_object_ifnonmatch_failed(): e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar') status, error_code = _get_status_and_error_code(e.response) - eq(status, 412) - eq(error_code, 'PreconditionFailed') + assert status == 412 + assert error_code == 'PreconditionFailed' response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='put') -@attr(operation='overwrite non-existing object w/ If-None-Match: *') -@attr(assertion='succeeds') -@attr('fails_on_aws') +@pytest.mark.fails_on_aws def test_put_object_ifnonmatch_nonexisted_good(): bucket_name = get_new_bucket() client = get_client() @@ -3721,14 +3069,10 @@ def test_put_object_ifnonmatch_nonexisted_good(): response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='put') -@attr(operation='overwrite existing object w/ If-None-Match: *') -@attr(assertion='fails 412') -@attr('fails_on_aws') 
-@attr('fails_on_dbstore') +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_put_object_ifnonmatch_overwrite_existed_failed(): bucket_name = get_new_bucket() client = get_client() @@ -3736,19 +3080,19 @@ def test_put_object_ifnonmatch_overwrite_existed_failed(): response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'})) client.meta.events.register('before-call.s3.PutObject', lf) e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar') status, error_code = _get_status_and_error_code(e.response) - eq(status, 412) - eq(error_code, 'PreconditionFailed') + assert status == 412 + assert error_code == 'PreconditionFailed' response = client.get_object(Bucket=bucket_name, Key='foo') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' def _setup_bucket_object_acl(bucket_acl, object_acl): """ @@ -3772,21 +3116,13 @@ def _setup_bucket_acl(bucket_acl=None): return bucket_name -@attr(resource='object') -@attr(method='get') -@attr(operation='publically readable bucket') -@attr(assertion='bucket is readable') def test_object_raw_get(): bucket_name = _setup_bucket_object_acl('public-read', 'public-read') unauthenticated_client = get_unauthenticated_client() response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='object') -@attr(method='get') -@attr(operation='deleted object and bucket') -@attr(assertion='fails 404') def test_object_raw_get_bucket_gone(): bucket_name = _setup_bucket_object_acl('public-read', 'public-read') client = get_client() @@ -3798,13 +3134,9 @@ def test_object_raw_get_bucket_gone(): e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchBucket') + assert status == 404 + assert error_code == 'NoSuchBucket' -@attr(resource='object') -@attr(method='get') -@attr(operation='deleted object and bucket') -@attr(assertion='fails 404') def test_object_delete_key_bucket_gone(): bucket_name = _setup_bucket_object_acl('public-read', 'public-read') client = get_client() @@ -3816,13 +3148,9 @@ def test_object_delete_key_bucket_gone(): e = assert_raises(ClientError, unauthenticated_client.delete_object, Bucket=bucket_name, Key='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchBucket') + assert status == 404 + assert error_code == 'NoSuchBucket' -@attr(resource='object') -@attr(method='get') -@attr(operation='deleted object') -@attr(assertion='fails 404') def test_object_raw_get_object_gone(): bucket_name = _setup_bucket_object_acl('public-read', 'public-read') client = get_client() @@ -3833,24 +3161,16 @@ def test_object_raw_get_object_gone(): e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchKey') + assert status == 404 + assert error_code == 'NoSuchKey' -@attr(resource='bucket') -@attr(method='head') -@attr(operation='head bucket') -@attr(assertion='succeeds') def test_bucket_head(): bucket_name = get_new_bucket() client = get_client() response = 
client.head_bucket(Bucket=bucket_name) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='bucket') -@attr(method='head') -@attr(operation='non-existant bucket') -@attr(assertion='fails 404') def test_bucket_head_notexist(): bucket_name = get_new_bucket_name() client = get_client() @@ -3858,110 +3178,78 @@ def test_bucket_head_notexist(): e = assert_raises(ClientError, client.head_bucket, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) + assert status == 404 # n.b., RGW does not send a response document for this operation, # which seems consistent with # https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html - #eq(error_code, 'NoSuchKey') + #assert error_code == 'NoSuchKey' -@attr('fails_on_aws') -@attr(resource='bucket') -@attr(method='head') -@attr(operation='read bucket extended information') -@attr(assertion='extended information is getting updated') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_bucket_head_extended(): bucket_name = get_new_bucket() client = get_client() response = client.head_bucket(Bucket=bucket_name) - eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']), 0) - eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']), 0) + assert int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']) == 0 + assert int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']) == 0 _create_objects(bucket_name=bucket_name, keys=['foo','bar','baz']) response = client.head_bucket(Bucket=bucket_name) - eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']), 3) - eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']), 9) + assert int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']) == 3 + assert int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']) == 9 -@attr(resource='bucket.acl') -@attr(method='get') -@attr(operation='unauthenticated on private bucket') -@attr(assertion='succeeds') def test_object_raw_get_bucket_acl(): bucket_name = _setup_bucket_object_acl('private', 'public-read') unauthenticated_client = get_unauthenticated_client() response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='object.acl') -@attr(method='get') -@attr(operation='unauthenticated on private object') -@attr(assertion='fails 403') def test_object_raw_get_object_acl(): bucket_name = _setup_bucket_object_acl('public-read', 'private') unauthenticated_client = get_unauthenticated_client() e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@attr(resource='object') -@attr(method='ACLs') -@attr(operation='authenticated on public bucket/object') -@attr(assertion='succeeds') def test_object_raw_authenticated(): bucket_name = _setup_bucket_object_acl('public-read', 'public-read') client = get_client() response = client.get_object(Bucket=bucket_name, Key='foo') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='object') -@attr(method='get') 
-@attr(operation='authenticated on private bucket/private object with modified response headers') -@attr(assertion='succeeds') def test_object_raw_response_headers(): bucket_name = _setup_bucket_object_acl('private', 'private') client = get_client() response = client.get_object(Bucket=bucket_name, Key='foo', ResponseCacheControl='no-cache', ResponseContentDisposition='bla', ResponseContentEncoding='aaa', ResponseContentLanguage='esperanto', ResponseContentType='foo/bar', ResponseExpires='123') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) - eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], 'foo/bar') - eq(response['ResponseMetadata']['HTTPHeaders']['content-disposition'], 'bla') - eq(response['ResponseMetadata']['HTTPHeaders']['content-language'], 'esperanto') - eq(response['ResponseMetadata']['HTTPHeaders']['content-encoding'], 'aaa') - eq(response['ResponseMetadata']['HTTPHeaders']['cache-control'], 'no-cache') + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == 'foo/bar' + assert response['ResponseMetadata']['HTTPHeaders']['content-disposition'] == 'bla' + assert response['ResponseMetadata']['HTTPHeaders']['content-language'] == 'esperanto' + assert response['ResponseMetadata']['HTTPHeaders']['content-encoding'] == 'aaa' + assert response['ResponseMetadata']['HTTPHeaders']['cache-control'] == 'no-cache' -@attr(resource='object') -@attr(method='ACLs') -@attr(operation='authenticated on private bucket/public object') -@attr(assertion='succeeds') def test_object_raw_authenticated_bucket_acl(): bucket_name = _setup_bucket_object_acl('private', 'public-read') client = get_client() response = client.get_object(Bucket=bucket_name, Key='foo') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='object') -@attr(method='ACLs') -@attr(operation='authenticated on public bucket/private object') -@attr(assertion='succeeds') def test_object_raw_authenticated_object_acl(): bucket_name = _setup_bucket_object_acl('public-read', 'private') client = get_client() response = client.get_object(Bucket=bucket_name, Key='foo') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='object') -@attr(method='get') -@attr(operation='authenticated on deleted object and bucket') -@attr(assertion='fails 404') def test_object_raw_authenticated_bucket_gone(): bucket_name = _setup_bucket_object_acl('public-read', 'public-read') client = get_client() @@ -3971,13 +3259,9 @@ def test_object_raw_authenticated_bucket_gone(): e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchBucket') + assert status == 404 + assert error_code == 'NoSuchBucket' -@attr(resource='object') -@attr(method='get') -@attr(operation='authenticated on deleted object') -@attr(assertion='fails 404') def test_object_raw_authenticated_object_gone(): bucket_name = _setup_bucket_object_acl('public-read', 'public-read') client = get_client() @@ -3986,13 +3270,9 @@ def test_object_raw_authenticated_object_gone(): e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchKey') + assert status == 404 + assert error_code == 'NoSuchKey' 
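
The error-path conversions above lean on two helpers that none of these hunks define: ``assert_raises`` (the suite's replacement for the old ``nose.tools`` version) and ``_get_status_and_error_code``. A minimal sketch of what they plausibly look like, for orientation only; the real definitions live in the suite's shared modules, so treat both bodies as assumptions::

    def assert_raises(excClass, callableObj, *args, **kwargs):
        # Sketch: call the function and hand the exception back so the
        # caller can inspect it; fail the test if nothing was raised.
        try:
            callableObj(*args, **kwargs)
        except excClass as e:
            return e
        raise AssertionError('%s not raised' % getattr(excClass, '__name__', excClass))

    def _get_status_and_error_code(response):
        # Sketch: boto3's ClientError.response carries the parsed error
        # document alongside the HTTP response metadata.
        status = response['ResponseMetadata']['HTTPStatusCode']
        error_code = response['Error']['Code']
        return status, error_code
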
-@attr(resource='object') -@attr(method='get') -@attr(operation='x-amz-expires check not expired') -@attr(assertion='succeeds') def test_object_raw_get_x_amz_expires_not_expired(): bucket_name = _setup_bucket_object_acl('public-read', 'public-read') client = get_client() @@ -4001,12 +3281,8 @@ def test_object_raw_get_x_amz_expires_not_expired(): url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=100000, HttpMethod='GET') res = requests.get(url, verify=get_config_ssl_verify()).__dict__ - eq(res['status_code'], 200) + assert res['status_code'] == 200 -@attr(resource='object') -@attr(method='get') -@attr(operation='check x-amz-expires value out of range zero') -@attr(assertion='fails 403') def test_object_raw_get_x_amz_expires_out_range_zero(): bucket_name = _setup_bucket_object_acl('public-read', 'public-read') client = get_client() @@ -4015,12 +3291,8 @@ def test_object_raw_get_x_amz_expires_out_range_zero(): url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=0, HttpMethod='GET') res = requests.get(url, verify=get_config_ssl_verify()).__dict__ - eq(res['status_code'], 403) + assert res['status_code'] == 403 -@attr(resource='object') -@attr(method='get') -@attr(operation='check x-amz-expires value out of max range') -@attr(assertion='fails 403') def test_object_raw_get_x_amz_expires_out_max_range(): bucket_name = _setup_bucket_object_acl('public-read', 'public-read') client = get_client() @@ -4029,12 +3301,8 @@ def test_object_raw_get_x_amz_expires_out_max_range(): url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=609901, HttpMethod='GET') res = requests.get(url, verify=get_config_ssl_verify()).__dict__ - eq(res['status_code'], 403) + assert res['status_code'] == 403 -@attr(resource='object') -@attr(method='get') -@attr(operation='check x-amz-expires value out of positive range') -@attr(assertion='succeeds') def test_object_raw_get_x_amz_expires_out_positive_range(): bucket_name = _setup_bucket_object_acl('public-read', 'public-read') client = get_client() @@ -4043,13 +3311,9 @@ def test_object_raw_get_x_amz_expires_out_positive_range(): url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=-7, HttpMethod='GET') res = requests.get(url, verify=get_config_ssl_verify()).__dict__ - eq(res['status_code'], 403) + assert res['status_code'] == 403 -@attr(resource='object') -@attr(method='put') -@attr(operation='unauthenticated, no object acls') -@attr(assertion='fails 403') def test_object_anon_put(): bucket_name = get_new_bucket() client = get_client() @@ -4060,13 +3324,9 @@ def test_object_anon_put(): e = assert_raises(ClientError, unauthenticated_client.put_object, Bucket=bucket_name, Key='foo', Body='foo') status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' -@attr(resource='object') -@attr(method='put') -@attr(operation='unauthenticated, publically writable object') -@attr(assertion='succeeds') def test_object_anon_put_write_access(): bucket_name = _setup_bucket_acl('public-read-write') client = get_client() @@ -4075,23 +3335,15 @@ def test_object_anon_put_write_access(): unauthenticated_client = get_unauthenticated_client() response = unauthenticated_client.put_object(Bucket=bucket_name, Key='foo', Body='foo') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 
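
Several of the ACL tests above and below go through ``get_unauthenticated_client()``. In boto3 terms that is a client with request signing disabled, so the gateway evaluates each request as the anonymous user. A plausible sketch; the endpoint default is an assumption, since the suite actually reads its endpoint from the ``S3TEST_CONF`` file::

    import boto3
    from botocore import UNSIGNED
    from botocore.client import Config

    def get_unauthenticated_client(endpoint='http://localhost:8000'):
        # Sketch: signature_version=UNSIGNED suppresses the Authorization
        # header, so requests are checked against the anonymous-user ACLs.
        return boto3.client('s3', endpoint_url=endpoint,
                            config=Config(signature_version=UNSIGNED))
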
-@attr(resource='object') -@attr(method='put') -@attr(operation='authenticated, no object acls') -@attr(assertion='succeeds') def test_object_put_authenticated(): bucket_name = get_new_bucket() client = get_client() response = client.put_object(Bucket=bucket_name, Key='foo', Body='foo') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='object') -@attr(method='put') -@attr(operation='authenticated, no object acls') -@attr(assertion='succeeds') def test_object_raw_put_authenticated_expired(): bucket_name = get_new_bucket() client = get_client() @@ -4102,7 +3354,7 @@ def test_object_raw_put_authenticated_expired(): # params wouldn't take a 'Body' parameter so we're passing it in here res = requests.put(url, data="foo", verify=get_config_ssl_verify()).__dict__ - eq(res['status_code'], 403) + assert res['status_code'] == 403 def check_bad_bucket_name(bucket_name): """ @@ -4112,19 +3364,14 @@ def check_bad_bucket_name(bucket_name): client = get_client() e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidBucketName') + assert status == 400 + assert error_code == 'InvalidBucketName' # AWS does not enforce all documented bucket restrictions. # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html -@attr('fails_on_aws') +@pytest.mark.fails_on_aws # Breaks DNS with SubdomainCallingFormat -@attr('fails_with_subdomain') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='name begins with underscore') -@attr(assertion='fails with subdomain: 400') def test_bucket_create_naming_bad_starts_nonalpha(): bucket_name = get_new_bucket_name() check_bad_bucket_name('_' + bucket_name) @@ -4147,17 +3394,9 @@ def check_invalid_bucketname(invalid_name): status, error_code = _get_status_and_error_code(e.response) return (status, error_code) -@attr(resource='bucket') -@attr(method='put') -@attr(operation='short (one character) name') -@attr(assertion='fails 400') def test_bucket_create_naming_bad_short_one(): check_bad_bucket_name('a') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='short (two character) name') -@attr(assertion='fails 400') def test_bucket_create_naming_bad_short_two(): check_bad_bucket_name('aa') @@ -4181,7 +3420,7 @@ def check_good_bucket_name(name, _prefix=None): ) client = get_client() response = client.create_bucket(Bucket=bucket_name) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 def _test_bucket_create_naming_good_long(length): """ @@ -4205,59 +3444,34 @@ def _test_bucket_create_naming_good_long(length): ) client = get_client() response = client.create_bucket(Bucket=bucket_name) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 # Breaks DNS with SubdomainCallingFormat -@attr('fails_with_subdomain') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/60 byte name') -@attr(assertion='fails with subdomain') -@attr('fails_on_aws') # InvalidBucketNameThe specified bucket is not valid.... +@pytest.mark.fails_on_aws # InvalidBucketName: The specified bucket is not valid.
def test_bucket_create_naming_good_long_60(): _test_bucket_create_naming_good_long(60) # Breaks DNS with SubdomainCallingFormat -@attr('fails_with_subdomain') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/61 byte name') -@attr(assertion='fails with subdomain') -@attr('fails_on_aws') # InvalidBucketNameThe specified bucket is not valid.... +@pytest.mark.fails_on_aws # InvalidBucketName: The specified bucket is not valid. def test_bucket_create_naming_good_long_61(): _test_bucket_create_naming_good_long(61) # Breaks DNS with SubdomainCallingFormat -@attr('fails_with_subdomain') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/62 byte name') -@attr(assertion='fails with subdomain') -@attr('fails_on_aws') # InvalidBucketNameThe specified bucket is not valid.... +@pytest.mark.fails_on_aws # InvalidBucketName: The specified bucket is not valid. def test_bucket_create_naming_good_long_62(): _test_bucket_create_naming_good_long(62) # Breaks DNS with SubdomainCallingFormat -@attr('fails_with_subdomain') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/63 byte name') -@attr(assertion='fails with subdomain') def test_bucket_create_naming_good_long_63(): _test_bucket_create_naming_good_long(63) # Breaks DNS with SubdomainCallingFormat -@attr('fails_with_subdomain') -@attr(resource='bucket') -@attr(method='get') -@attr(operation='list w/61 byte name') -@attr(assertion='fails with subdomain') -@attr('fails_on_aws') # InvalidBucketNameThe specified bucket is not valid.... +@pytest.mark.fails_on_aws # InvalidBucketName: The specified bucket is not valid. def test_bucket_list_long_name(): prefix = get_new_bucket_name() @@ -4271,38 +3485,25 @@ def test_bucket_list_long_name(): ) bucket = get_new_bucket_resource(name=bucket_name) is_empty = _bucket_is_empty(bucket) - eq(is_empty, True) + assert is_empty == True # AWS does not enforce all documented bucket restrictions. # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html -@attr('fails_on_aws') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/ip address for name') -@attr(assertion='fails on aws') +@pytest.mark.fails_on_aws def test_bucket_create_naming_bad_ip(): check_bad_bucket_name('192.168.5.123') # test_bucket_create_naming_dns_* are valid but not recommended -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/underscore in name') -@attr(assertion='fails') -@attr('fails_on_aws') # InvalidBucketNameThe specified bucket is not valid.... +@pytest.mark.fails_on_aws # InvalidBucketName: The specified bucket is not valid. def test_bucket_create_naming_dns_underscore(): invalid_bucketname = 'foo_bar' status, error_code = check_invalid_bucketname(invalid_bucketname) - eq(status, 400) - eq(error_code, 'InvalidBucketName') + assert status == 400 + assert error_code == 'InvalidBucketName' # Breaks DNS with SubdomainCallingFormat -@attr('fails_with_subdomain') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/100 byte name') -@attr(assertion='fails with subdomain') -@attr('fails_on_aws') # InvalidBucketNameThe specified bucket is not valid....
+@pytest.mark.fails_on_aws def test_bucket_create_naming_dns_long(): prefix = get_prefix() assert len(prefix) < 50 @@ -4310,67 +3511,44 @@ def test_bucket_create_naming_dns_long(): check_good_bucket_name(num * 'a') # Breaks DNS with SubdomainCallingFormat -@attr('fails_with_subdomain') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/dash at end of name') -@attr(assertion='fails') -@attr('fails_on_aws') # InvalidBucketNameThe specified bucket is not valid.... +@pytest.mark.fails_on_aws # InvalidBucketName: The specified bucket is not valid. def test_bucket_create_naming_dns_dash_at_end(): invalid_bucketname = 'foo-' status, error_code = check_invalid_bucketname(invalid_bucketname) - eq(status, 400) - eq(error_code, 'InvalidBucketName') + assert status == 400 + assert error_code == 'InvalidBucketName' # Breaks DNS with SubdomainCallingFormat -@attr('fails_with_subdomain') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/.. in name') -@attr(assertion='fails') -@attr('fails_on_aws') # InvalidBucketNameThe specified bucket is not valid.... +@pytest.mark.fails_on_aws # InvalidBucketName: The specified bucket is not valid. def test_bucket_create_naming_dns_dot_dot(): invalid_bucketname = 'foo..bar' status, error_code = check_invalid_bucketname(invalid_bucketname) - eq(status, 400) - eq(error_code, 'InvalidBucketName') + assert status == 400 + assert error_code == 'InvalidBucketName' # Breaks DNS with SubdomainCallingFormat -@attr('fails_with_subdomain') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/.- in name') -@attr(assertion='fails') -@attr('fails_on_aws') # InvalidBucketNameThe specified bucket is not valid.... +@pytest.mark.fails_on_aws # InvalidBucketName: The specified bucket is not valid. def test_bucket_create_naming_dns_dot_dash(): invalid_bucketname = 'foo.-bar' status, error_code = check_invalid_bucketname(invalid_bucketname) - eq(status, 400) - eq(error_code, 'InvalidBucketName') + assert status == 400 + assert error_code == 'InvalidBucketName' # Breaks DNS with SubdomainCallingFormat -@attr('fails_with_subdomain') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='create w/-. in name') -@attr(assertion='fails') -@attr('fails_on_aws') # InvalidBucketNameThe specified bucket is not valid.... +@pytest.mark.fails_on_aws # InvalidBucketName: The specified bucket is not valid. def test_bucket_create_naming_dns_dash_dot(): invalid_bucketname = 'foo-.bar' status, error_code = check_invalid_bucketname(invalid_bucketname) - eq(status, 400) - eq(error_code, 'InvalidBucketName') + assert status == 400 + assert error_code == 'InvalidBucketName' -@attr(resource='bucket') -@attr(method='put') -@attr(operation='re-create') def test_bucket_create_exists(): # aws-s3 default region allows recreation of buckets # but all other regions fail with BucketAlreadyOwnedByYou.
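
The naming tests above all funnel through ``check_invalid_bucketname()``, whose interesting part these hunks omit: boto3 validates bucket names client-side, so the helper has to sneak an invalid name past that validation to get a real ``InvalidBucketName`` answer from the server. One way to do that is the same ``client.meta.events`` hook mechanism the If-Match tests register; the body below is an assumption, not the suite's verbatim helper::

    from botocore.exceptions import ClientError

    def check_invalid_bucketname(invalid_name):
        # Sketch: create the bucket under a valid name, then rewrite the
        # request URL after client-side validation has already run.
        client = get_client()
        valid_name = get_new_bucket_name()
        def swap_name(**kwargs):
            kwargs['params']['url'] = kwargs['params']['url'].replace(valid_name, invalid_name)
        client.meta.events.register('before-call.s3.CreateBucket', swap_name)
        e = assert_raises(ClientError, client.create_bucket, Bucket=valid_name)
        return _get_status_and_error_code(e.response)
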
@@ -4382,17 +3560,14 @@ response = client.create_bucket(Bucket=bucket_name) except ClientError as e: status, error_code = _get_status_and_error_code(e.response) - eq(e.status, 409) - eq(e.error_code, 'BucketAlreadyOwnedByYou') + assert status == 409 + assert error_code == 'BucketAlreadyOwnedByYou' -@attr(resource='bucket') -@attr(method='get') -@attr(operation='get location') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_bucket_get_location(): location_constraint = get_main_api_name() if not location_constraint: - raise SkipTest + pytest.skip('no api_name configured') bucket_name = get_new_bucket_name() client = get_client() @@ -4401,13 +3576,9 @@ def test_bucket_get_location(): response = client.get_bucket_location(Bucket=bucket_name) if location_constraint == "": location_constraint = None - eq(response['LocationConstraint'], location_constraint) + assert response['LocationConstraint'] == location_constraint -@attr(resource='bucket') -@attr(method='put') -@attr(operation='re-create by non-owner') -@attr(assertion='fails 409') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_bucket_create_exists_nonowner(): # Names are shared across a global namespace. As such, no two # users can create a bucket with that same name. @@ -4419,14 +3590,10 @@ def test_bucket_create_exists_nonowner(): client.create_bucket(Bucket=bucket_name) e = assert_raises(ClientError, alt_client.create_bucket, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 409) - eq(error_code, 'BucketAlreadyExists') + assert status == 409 + assert error_code == 'BucketAlreadyExists' -@attr(resource='bucket') -@attr(method='put') -@attr(operation='re-create with existing acl') -@attr(assertion='fails 409') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_bucket_recreate_overwrite_acl(): bucket_name = get_new_bucket_name() client = get_client() @@ -4434,14 +3601,10 @@ def test_bucket_recreate_overwrite_acl(): client.create_bucket(Bucket=bucket_name, ACL='public-read') e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 409) - eq(error_code, 'BucketAlreadyExists') + assert status == 409 + assert error_code == 'BucketAlreadyExists' -@attr(resource='bucket') -@attr(method='put') -@attr(operation='re-create with new acl') -@attr(assertion='fails 409') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_bucket_recreate_new_acl(): bucket_name = get_new_bucket_name() client = get_client() @@ -4449,13 +3612,13 @@ def test_bucket_recreate_new_acl(): client.create_bucket(Bucket=bucket_name) e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name, ACL='public-read') status, error_code = _get_status_and_error_code(e.response) - eq(status, 409) - eq(error_code, 'BucketAlreadyExists') + assert status == 409 + assert error_code == 'BucketAlreadyExists' def check_access_denied(fn, *args, **kwargs): e = assert_raises(ClientError, fn, *args, **kwargs) status = _get_status(e.response) - eq(status, 403) + assert status == 403 def check_grants(got, want): @@ -4463,7 +3626,7 @@ def check_grants(got, want): Check that grants list in got matches the dictionaries in want, in any order. """ - eq(len(got), len(want)) + assert len(got) == len(want) # There are instances when got does not match due the order of item.
if got[0]["Grantee"].get("DisplayName"): @@ -4473,19 +3636,15 @@ def check_grants(got, want): for g, w in zip(got, want): w = dict(w) g = dict(g) - eq(g.pop('Permission', None), w['Permission']) - eq(g['Grantee'].pop('DisplayName', None), w['DisplayName']) - eq(g['Grantee'].pop('ID', None), w['ID']) - eq(g['Grantee'].pop('Type', None), w['Type']) - eq(g['Grantee'].pop('URI', None), w['URI']) - eq(g['Grantee'].pop('EmailAddress', None), w['EmailAddress']) - eq(g, {'Grantee': {}}) + assert g.pop('Permission', None) == w['Permission'] + assert g['Grantee'].pop('DisplayName', None) == w['DisplayName'] + assert g['Grantee'].pop('ID', None) == w['ID'] + assert g['Grantee'].pop('Type', None) == w['Type'] + assert g['Grantee'].pop('URI', None) == w['URI'] + assert g['Grantee'].pop('EmailAddress', None) == w['EmailAddress'] + assert g == {'Grantee': {}} -@attr(resource='bucket') -@attr(method='get') -@attr(operation='default acl') -@attr(assertion='read back expected defaults') def test_bucket_acl_default(): bucket_name = get_new_bucket() client = get_client() @@ -4495,8 +3654,8 @@ def test_bucket_acl_default(): display_name = get_main_display_name() user_id = get_main_user_id() - eq(response['Owner']['DisplayName'], display_name) - eq(response['Owner']['ID'], user_id) + assert response['Owner']['DisplayName'] == display_name + assert response['Owner']['ID'] == user_id grants = response['Grants'] check_grants( @@ -4513,11 +3672,7 @@ def test_bucket_acl_default(): ], ) -@attr(resource='bucket') -@attr(method='get') -@attr(operation='public-read acl') -@attr(assertion='read back expected defaults') -@attr('fails_on_aws') # IllegalLocationConstraintExceptionThe unspecified location constraint is incompatible for the region specific endpoint this request was sent to. 
+@pytest.mark.fails_on_aws def test_bucket_acl_canned_during_create(): bucket_name = get_new_bucket_name() client = get_client() @@ -4550,10 +3705,6 @@ def test_bucket_acl_canned_during_create(): ], ) -@attr(resource='bucket') -@attr(method='put') -@attr(operation='acl: public-read,private') -@attr(assertion='read back expected values') def test_bucket_acl_canned(): bucket_name = get_new_bucket_name() client = get_client() @@ -4604,10 +3755,6 @@ def test_bucket_acl_canned(): ], ) -@attr(resource='bucket.acls') -@attr(method='put') -@attr(operation='acl: public-read-write') -@attr(assertion='read back expected values') def test_bucket_acl_canned_publicreadwrite(): bucket_name = get_new_bucket_name() client = get_client() @@ -4647,10 +3794,6 @@ def test_bucket_acl_canned_publicreadwrite(): ], ) -@attr(resource='bucket') -@attr(method='put') -@attr(operation='acl: authenticated-read') -@attr(assertion='read back expected values') def test_bucket_acl_canned_authenticatedread(): bucket_name = get_new_bucket_name() client = get_client() @@ -4683,10 +3826,6 @@ def test_bucket_acl_canned_authenticatedread(): ], ) -@attr(resource='object.acls') -@attr(method='get') -@attr(operation='default acl') -@attr(assertion='read back expected defaults') def test_object_acl_default(): bucket_name = get_new_bucket() client = get_client() @@ -4713,10 +3852,6 @@ def test_object_acl_default(): ], ) -@attr(resource='object.acls') -@attr(method='put') -@attr(operation='acl public-read') -@attr(assertion='read back expected values') def test_object_acl_canned_during_create(): bucket_name = get_new_bucket() client = get_client() @@ -4751,10 +3886,6 @@ def test_object_acl_canned_during_create(): ], ) -@attr(resource='object.acls') -@attr(method='put') -@attr(operation='acl public-read,private') -@attr(assertion='read back expected values') def test_object_acl_canned(): bucket_name = get_new_bucket() client = get_client() @@ -4808,10 +3939,6 @@ def test_object_acl_canned(): ], ) -@attr(resource='object') -@attr(method='put') -@attr(operation='acl public-read-write') -@attr(assertion='read back expected values') def test_object_acl_canned_publicreadwrite(): bucket_name = get_new_bucket() client = get_client() @@ -4853,10 +3980,6 @@ def test_object_acl_canned_publicreadwrite(): ], ) -@attr(resource='object.acls') -@attr(method='put') -@attr(operation='acl authenticated-read') -@attr(assertion='read back expected values') def test_object_acl_canned_authenticatedread(): bucket_name = get_new_bucket() client = get_client() @@ -4890,10 +4013,6 @@ def test_object_acl_canned_authenticatedread(): ], ) -@attr(resource='object.acls') -@attr(method='put') -@attr(operation='acl bucket-owner-read') -@attr(assertion='read back expected values') def test_object_acl_canned_bucketownerread(): bucket_name = get_new_bucket_name() main_client = get_client() @@ -4936,10 +4055,6 @@ def test_object_acl_canned_bucketownerread(): ], ) -@attr(resource='object.acls') -@attr(method='put') -@attr(operation='acl bucket-owner-read') -@attr(assertion='read back expected values') def test_object_acl_canned_bucketownerfullcontrol(): bucket_name = get_new_bucket_name() main_client = get_client() @@ -4982,11 +4097,7 @@ def test_object_acl_canned_bucketownerfullcontrol(): ], ) -@attr(resource='object.acls') -@attr(method='put') -@attr(operation='set write-acp') -@attr(assertion='does not modify owner') -@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${ALTUSER} +@pytest.mark.fails_on_aws def test_object_acl_full_control_verify_owner(): 
bucket_name = get_new_bucket_name() main_client = get_client() @@ -5011,7 +4122,7 @@ def test_object_acl_full_control_verify_owner(): alt_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant) response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo') - eq(response['Owner']['ID'], main_user_id) + assert response['Owner']['ID'] == main_user_id def add_obj_user_grant(bucket_name, key, grant): """ @@ -5035,10 +4146,6 @@ def add_obj_user_grant(bucket_name, key, grant): return grant -@attr(resource='object.acls') -@attr(method='put') -@attr(operation='set write-acp') -@attr(assertion='does not modify other attributes') def test_object_acl_full_control_verify_attributes(): bucket_name = get_new_bucket_name() main_client = get_client() @@ -5066,19 +4173,15 @@ def test_object_acl_full_control_verify_attributes(): main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grants) response = main_client.get_object(Bucket=bucket_name, Key='foo') - eq(content_type, response['ContentType']) - eq(etag, response['ETag']) + assert content_type == response['ContentType'] + assert etag == response['ETag'] -@attr(resource='bucket') -@attr(method='ACLs') -@attr(operation='set acl private') -@attr(assertion='a private object can be set to private') def test_bucket_acl_canned_private_to_private(): bucket_name = get_new_bucket() client = get_client() response = client.put_bucket_acl(Bucket=bucket_name, ACL='private') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 def add_bucket_user_grant(bucket_name, grant): """ @@ -5141,45 +4244,25 @@ def _check_object_acl(permission): ) -@attr(resource='object') -@attr(method='ACLs') -@attr(operation='set acl FULL_CONTRO') -@attr(assertion='reads back correctly') -@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${USER} +@pytest.mark.fails_on_aws def test_object_acl(): _check_object_acl('FULL_CONTROL') -@attr(resource='object') -@attr(method='ACLs') -@attr(operation='set acl WRITE') -@attr(assertion='reads back correctly') -@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${USER} +@pytest.mark.fails_on_aws def test_object_acl_write(): _check_object_acl('WRITE') -@attr(resource='object') -@attr(method='ACLs') -@attr(operation='set acl WRITE_ACP') -@attr(assertion='reads back correctly') -@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${USER} +@pytest.mark.fails_on_aws def test_object_acl_writeacp(): _check_object_acl('WRITE_ACP') -@attr(resource='object') -@attr(method='ACLs') -@attr(operation='set acl READ') -@attr(assertion='reads back correctly') -@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${USER} +@pytest.mark.fails_on_aws def test_object_acl_read(): _check_object_acl('READ') -@attr(resource='object') -@attr(method='ACLs') -@attr(operation='set acl READ_ACP') -@attr(assertion='reads back correctly') -@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${USER} +@pytest.mark.fails_on_aws def test_object_acl_readacp(): _check_object_acl('READ_ACP') @@ -5288,11 +4371,7 @@ def _check_bucket_acl_grant_cant_writeacp(bucket_name): alt_client = get_alt_client() check_access_denied(alt_client.put_bucket_acl,Bucket=bucket_name, ACL='public-read') -@attr(resource='bucket') -@attr(method='ACLs') -@attr(operation='set acl w/userid FULL_CONTROL') -@attr(assertion='can read/write data/acls') -@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${USER} 
@@ -5288,11 +4371,7 @@ def _check_bucket_acl_grant_cant_writeacp(bucket_name):
     alt_client = get_alt_client()
     check_access_denied(alt_client.put_bucket_acl,Bucket=bucket_name, ACL='public-read')

-@attr(resource='bucket')
-@attr(method='ACLs')
-@attr(operation='set acl w/userid FULL_CONTROL')
-@attr(assertion='can read/write data/acls')
-@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${USER}
+@pytest.mark.fails_on_aws
 def test_bucket_acl_grant_userid_fullcontrol():
     bucket_name = _bucket_acl_grant_userid('FULL_CONTROL')

@@ -5314,14 +4393,10 @@ def test_bucket_acl_grant_userid_fullcontrol():
     main_display_name = get_main_display_name()
     main_user_id = get_main_user_id()

-    eq(owner_id, main_user_id)
-    eq(owner_display_name, main_display_name)
+    assert owner_id == main_user_id
+    assert owner_display_name == main_display_name

-@attr(resource='bucket')
-@attr(method='ACLs')
-@attr(operation='set acl w/userid READ')
-@attr(assertion='can read data, no other r/w')
-@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${ALTUSER}
+@pytest.mark.fails_on_aws
 def test_bucket_acl_grant_userid_read():
     bucket_name = _bucket_acl_grant_userid('READ')

@@ -5334,11 +4409,7 @@ def test_bucket_acl_grant_userid_read():
     # can't write acl
     _check_bucket_acl_grant_cant_writeacp(bucket_name)

-@attr(resource='bucket')
-@attr(method='ACLs')
-@attr(operation='set acl w/userid READ_ACP')
-@attr(assertion='can read acl, no other r/w')
-@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${ALTUSER}
+@pytest.mark.fails_on_aws
 def test_bucket_acl_grant_userid_readacp():
     bucket_name = _bucket_acl_grant_userid('READ_ACP')

@@ -5352,11 +4423,7 @@ def test_bucket_acl_grant_userid_readacp():
     #_check_bucket_acl_grant_cant_writeacp_can_readacp(bucket)
     _check_bucket_acl_grant_cant_writeacp(bucket_name)

-@attr(resource='bucket')
-@attr(method='ACLs')
-@attr(operation='set acl w/userid WRITE')
-@attr(assertion='can write data, no other r/w')
-@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${ALTUSER}
+@pytest.mark.fails_on_aws
 def test_bucket_acl_grant_userid_write():
     bucket_name = _bucket_acl_grant_userid('WRITE')

@@ -5369,11 +4436,7 @@ def test_bucket_acl_grant_userid_write():
     # can't write acl
     _check_bucket_acl_grant_cant_writeacp(bucket_name)

-@attr(resource='bucket')
-@attr(method='ACLs')
-@attr(operation='set acl w/userid WRITE_ACP')
-@attr(assertion='can write acls, no other r/w')
-@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${ALTUSER}
+@pytest.mark.fails_on_aws
 def test_bucket_acl_grant_userid_writeacp():
     bucket_name = _bucket_acl_grant_userid('WRITE_ACP')

@@ -5386,10 +4449,6 @@ def test_bucket_acl_grant_userid_writeacp():
     # can write acl
     _check_bucket_acl_grant_can_writeacp(bucket_name)

-@attr(resource='bucket')
-@attr(method='ACLs')
-@attr(operation='set acl w/invalid userid')
-@attr(assertion='fails 400')
 def test_bucket_acl_grant_nonexist_user():
     bucket_name = get_new_bucket()
     client = get_client()

@@ -5403,13 +4462,9 @@ def test_bucket_acl_grant_nonexist_user():
     e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy=grant)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidArgument')
+    assert status == 400
+    assert error_code == 'InvalidArgument'

-@attr(resource='bucket')
-@attr(method='ACLs')
-@attr(operation='revoke all ACLs')
-@attr(assertion='can: read obj, get/set bucket acl, cannot write objs')
 def test_bucket_acl_no_grants():
     bucket_name = get_new_bucket()
     client = get_client()

@@ -5463,12 +4518,8 @@ def _get_acl_header(user_id=None, perms=None):

     return headers

-@attr(resource='object')
-@attr(method='PUT')
-@attr(operation='add all grants to user through headers')
-@attr(assertion='adds all grants individually to second user')
-@attr('fails_on_dho')
-@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${ALTUSER}
+@pytest.mark.fails_on_dho
+@pytest.mark.fails_on_aws
 def test_object_header_acl_grants():
     bucket_name = get_new_bucket()
     client = get_client()

@@ -5535,12 +4586,8 @@ def test_object_header_acl_grants():
         ],
     )

-@attr(resource='bucket')
-@attr(method='PUT')
-@attr(operation='add all grants to user through headers')
-@attr(assertion='adds all grants individually to second user')
-@attr('fails_on_dho')
-@attr('fails_on_aws') # InvalidArgumentInvalid idCanonicalUser/ID${ALTUSER}
+@pytest.mark.fails_on_dho
+@pytest.mark.fails_on_aws
 def test_bucket_header_acl_grants():
     headers = _get_acl_header()
     bucket_name = get_new_bucket_name()

@@ -5619,11 +4666,7 @@ def test_bucket_header_acl_grants():
 # This test will fail on DH Objects. DHO allows multiple users with one account, which
 # would violate the uniqueness requirement of a user's email. As such, DHO users are
 # created without an email.
-@attr(resource='bucket')
-@attr(method='ACLs')
-@attr(operation='add second FULL_CONTROL user')
-@attr(assertion='works for S3, fails for DHO')
-@attr('fails_on_aws') # AmbiguousGrantByEmailAddressThe e-mail address you provided is associated with more than one account. Please retry your request using a different identification method or after resolving the ambiguity.
+@pytest.mark.fails_on_aws
 def test_bucket_acl_grant_email():
     bucket_name = get_new_bucket()
     client = get_client()

@@ -5666,10 +4709,6 @@ def test_bucket_acl_grant_email():
         ]
     )

-@attr(resource='bucket')
-@attr(method='ACLs')
-@attr(operation='add acl for nonexistent user')
-@attr(assertion='fail 400')
 def test_bucket_acl_grant_email_not_exist():
     # behavior not documented by amazon
     bucket_name = get_new_bucket()

@@ -5686,13 +4725,9 @@ def test_bucket_acl_grant_email_not_exist():
     e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy = grant)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'UnresolvableGrantByEmailAddress')
+    assert status == 400
+    assert error_code == 'UnresolvableGrantByEmailAddress'

-@attr(resource='bucket')
-@attr(method='ACLs')
-@attr(operation='revoke all ACLs')
-@attr(assertion='acls read back as empty')
 def test_bucket_acl_revoke_all():
     # revoke all access, including the owner's access
     bucket_name = get_new_bucket()

@@ -5711,7 +4746,7 @@ def test_bucket_acl_revoke_all():

     response = client.get_bucket_acl(Bucket=bucket_name)

-    eq(len(response['Grants']), 0)
+    assert len(response['Grants']) == 0

     # set policy back to original so that bucket can be cleaned up
     policy['Grants'] = old_grants
@@ -5719,11 +4754,7 @@ def test_bucket_acl_revoke_all():

 # TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden
 # http://tracker.newdream.net/issues/984
-@attr(resource='bucket.log')
-@attr(method='put')
-@attr(operation='set/enable/disable logging target')
-@attr(assertion='operations succeed')
-@attr('fails_on_rgw')
+@pytest.mark.fails_on_rgw
 def test_logging_toggle():
     bucket_name = get_new_bucket()
     client = get_client()

@@ -5781,10 +4812,6 @@ def list_bucket_versions(client, bucket_name):

     return result

-@attr(resource='object')
-@attr(method='ACLs')
-@attr(operation='set bucket/object acls: private/private')
-@attr(assertion='public has no access to bucket or objects')
 def test_access_bucket_private_object_private():
     # all the test_access_* tests follow this template
     bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')

@@ -5812,11 +4839,7 @@ def test_access_bucket_private_object_private():
     alt_client3 = get_alt_client()
     check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')

-@attr(resource='object')
-@attr(method='ACLs')
-@attr(operation='set bucket/object acls: private/private with list-objects-v2')
-@attr(assertion='public has no access to bucket or objects')
-@attr('list-objects-v2')
+@pytest.mark.list_objects_v2
 def test_access_bucket_private_objectv2_private():
     # all the test_access_* tests follow this template
     bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')

@@ -5844,10 +4867,6 @@ def test_access_bucket_private_objectv2_private():
     alt_client3 = get_alt_client()
     check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')

-@attr(resource='object')
-@attr(method='ACLs')
-@attr(operation='set bucket/object acls: private/public-read')
-@attr(assertion='public can only read readable object')
 def test_access_bucket_private_object_publicread():
     bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')

@@ -5857,7 +4876,7 @@ def test_access_bucket_private_object_publicread():
     body = _get_body(response)

     # a should be public-read, b gets default (private)
-    eq(body, 'foocontent')
+    assert body == 'foocontent'

     check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
     alt_client2 = get_alt_client()
@@ -5868,11 +4887,7 @@ def test_access_bucket_private_object_publicread():
     check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
     check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')

-@attr(resource='object')
-@attr(method='ACLs')
-@attr(operation='set bucket/object acls: private/public-read with list-objects-v2')
-@attr(assertion='public can only read readable object')
-@attr('list-objects-v2')
+@pytest.mark.list_objects_v2
 def test_access_bucket_private_objectv2_publicread():
     bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')

@@ -5882,7 +4897,7 @@ def test_access_bucket_private_objectv2_publicread():
     body = _get_body(response)

     # a should be public-read, b gets default (private)
-    eq(body, 'foocontent')
+    assert body == 'foocontent'

     check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
     alt_client2 = get_alt_client()
@@ -5893,10 +4908,6 @@ def test_access_bucket_private_objectv2_publicread():
     check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
     check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
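These access-matrix tests lean heavily on the ``check_access_denied`` helper, which is defined earlier in this file and not shown in these hunks. A plausible shape, inferred from the call sites above — the real helper may differ in detail::

    from botocore.exceptions import ClientError

    def check_access_denied(fn, **kwargs):
        # the wrapped client call must fail with HTTP 403 (AccessDenied)
        try:
            fn(**kwargs)
        except ClientError as e:
            assert e.response['ResponseMetadata']['HTTPStatusCode'] == 403
        else:
            raise AssertionError('expected the call to be denied, but it succeeded')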
-@attr(resource='object')
-@attr(method='ACLs')
-@attr(operation='set bucket/object acls: private/public-read/write')
-@attr(assertion='public can only read the readable object')
 def test_access_bucket_private_object_publicreadwrite():
     bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
     alt_client = get_alt_client()
@@ -5906,7 +4917,7 @@ def test_access_bucket_private_object_publicreadwrite():
     body = _get_body(response)

     # a should be public-read-only ... because it is in a private bucket
     # b gets default (private)
-    eq(body, 'foocontent')
+    assert body == 'foocontent'

     check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
     alt_client2 = get_alt_client()
@@ -5917,11 +4928,7 @@ def test_access_bucket_private_object_publicreadwrite():
     check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
     check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')

-@attr(resource='object')
-@attr(method='ACLs')
-@attr(operation='set bucket/object acls: private/public-read/write with list-objects-v2')
-@attr(assertion='public can only read the readable object')
-@attr('list-objects-v2')
+@pytest.mark.list_objects_v2
 def test_access_bucket_private_objectv2_publicreadwrite():
     bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
     alt_client = get_alt_client()
@@ -5931,7 +4938,7 @@ def test_access_bucket_private_objectv2_publicreadwrite():
     body = _get_body(response)

     # a should be public-read-only ... because it is in a private bucket
     # b gets default (private)
-    eq(body, 'foocontent')
+    assert body == 'foocontent'

     check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
     alt_client2 = get_alt_client()
@@ -5942,10 +4949,6 @@ def test_access_bucket_private_objectv2_publicreadwrite():
     check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
     check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')

-@attr(resource='object')
-@attr(method='ACLs')
-@attr(operation='set bucket/object acls: public-read/private')
-@attr(assertion='public can only list the bucket')
 def test_access_bucket_publicread_object_private():
     bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='private')
     alt_client = get_alt_client()
@@ -5962,13 +4965,9 @@ def test_access_bucket_publicread_object_private():

     objs = get_objects_list(bucket=bucket_name, client=alt_client3)

-    eq(objs, ['bar', 'foo'])
+    assert objs == ['bar', 'foo']
     check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')

-@attr(resource='object')
-@attr(method='ACLs')
-@attr(operation='set bucket/object acls: public-read/public-read')
-@attr(assertion='public can read readable objects and list bucket')
 def test_access_bucket_publicread_object_publicread():
     bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read')
     alt_client = get_alt_client()
@@ -5977,7 +4976,7 @@ def test_access_bucket_publicread_object_publicread():

     # a should be public-read, b gets default (private)
     body = _get_body(response)
-    eq(body, 'foocontent')
+    assert body == 'foocontent'

     check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')

@@ -5989,14 +4988,10 @@ def test_access_bucket_publicread_object_publicread():

     objs = get_objects_list(bucket=bucket_name, client=alt_client3)

-    eq(objs, ['bar', 'foo'])
+    assert objs == ['bar', 'foo']
     check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')

-@attr(resource='object')
-@attr(method='ACLs')
-@attr(operation='set bucket/object acls: public-read/public-read-write')
-@attr(assertion='public can read readable objects and list bucket')
 def test_access_bucket_publicread_object_publicreadwrite():
     bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read-write')
     alt_client = get_alt_client()
@@ -6007,7 +5002,7 @@ def test_access_bucket_publicread_object_publicreadwrite():

     # a should be public-read-only ... because it is in a r/o bucket
     # b gets default (private)
-    eq(body, 'foocontent')
+    assert body == 'foocontent'

     check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')

@@ -6019,14 +5014,10 @@ def test_access_bucket_publicread_object_publicreadwrite():

     objs = get_objects_list(bucket=bucket_name, client=alt_client3)

-    eq(objs, ['bar', 'foo'])
+    assert objs == ['bar', 'foo']
     check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')

-@attr(resource='object')
-@attr(method='ACLs')
-@attr(operation='set bucket/object acls: public-read-write/private')
-@attr(assertion='private objects cannot be read, but can be overwritten')
 def test_access_bucket_publicreadwrite_object_private():
     bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='private')
     alt_client = get_alt_client()
@@ -6039,13 +5030,9 @@ def test_access_bucket_publicreadwrite_object_private():
     alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')

     objs = get_objects_list(bucket=bucket_name, client=alt_client)
-    eq(objs, ['bar', 'foo'])
+    assert objs == ['bar', 'foo']
     alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')

-@attr(resource='object')
-@attr(method='ACLs')
-@attr(operation='set bucket/object acls: public-read-write/public-read')
-@attr(assertion='private objects cannot be read, but can be overwritten')
 def test_access_bucket_publicreadwrite_object_publicread():
     bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read')
     alt_client = get_alt_client()
@@ -6054,20 +5041,16 @@ def test_access_bucket_publicreadwrite_object_publicread():

     response = alt_client.get_object(Bucket=bucket_name, Key=key1)
     body = _get_body(response)
-    eq(body, 'foocontent')
+    assert body == 'foocontent'
     alt_client.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')

     check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
     alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')

     objs = get_objects_list(bucket=bucket_name, client=alt_client)
-    eq(objs, ['bar', 'foo'])
+    assert objs == ['bar', 'foo']
     alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')

-@attr(resource='object')
-@attr(method='ACLs')
-@attr(operation='set bucket/object acls: public-read-write/public-read-write')
-@attr(assertion='private objects cannot be read, but can be overwritten')
 def test_access_bucket_publicreadwrite_object_publicreadwrite():
     bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read-write')
     alt_client = get_alt_client()
@@ -6075,18 +5058,14 @@ def test_access_bucket_publicreadwrite_object_publicreadwrite():
     body = _get_body(response)

     # a should be public-read-write, b gets default (private)
-    eq(body, 'foocontent')
+    assert body == 'foocontent'
     alt_client.put_object(Bucket=bucket_name, Key=key1, Body='foooverwrite')

     check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
     alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')

     objs = get_objects_list(bucket=bucket_name, client=alt_client)
-    eq(objs, ['bar', 'foo'])
+    assert objs == ['bar', 'foo']
     alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list all buckets')
-@attr(assertion='returns all expected buckets')
 def test_buckets_create_then_list():
     client = get_client()
     bucket_names = []
@@ -6107,10 +5086,6 @@ def test_buckets_create_then_list():
         if name not in buckets_list:
             raise RuntimeError("S3 implementation's GET on Service did not return bucket we created: %r", bucket.name)

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list all buckets')
-@attr(assertion='all buckets have a sane creation time')
 def test_buckets_list_ctime():
     # check that creation times are within a day
     before = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=1)
@@ -6124,11 +5099,7 @@ def test_buckets_list_ctime():
         ctime = bucket['CreationDate']
         assert before <= ctime, '%r > %r' % (before, ctime)

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list all buckets (anonymous)')
-@attr(assertion='succeeds')
-@attr('fails_on_aws')
+@pytest.mark.fails_on_aws
 def test_list_buckets_anonymous():
     # Get a connection with bad authorization, then change it to be our new Anonymous auth mechanism,
     # emulating standard HTTP access.
@@ -6137,93 +5108,65 @@ def test_list_buckets_anonymous():
     # allowing us to vary the calling format in testing.
     unauthenticated_client = get_unauthenticated_client()
     response = unauthenticated_client.list_buckets()
-    eq(len(response['Buckets']), 0)
+    assert len(response['Buckets']) == 0

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list all buckets (bad auth)')
-@attr(assertion='fails 403')
 def test_list_buckets_invalid_auth():
     bad_auth_client = get_bad_auth_client()
     e = assert_raises(ClientError, bad_auth_client.list_buckets)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
-    eq(error_code, 'InvalidAccessKeyId')
+    assert status == 403
+    assert error_code == 'InvalidAccessKeyId'

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list all buckets (bad auth)')
-@attr(assertion='fails 403')
 def test_list_buckets_bad_auth():
     main_access_key = get_main_aws_access_key()
     bad_auth_client = get_bad_auth_client(aws_access_key_id=main_access_key)
     e = assert_raises(ClientError, bad_auth_client.list_buckets)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
-    eq(error_code, 'SignatureDoesNotMatch')
+    assert status == 403
+    assert error_code == 'SignatureDoesNotMatch'
+
+@pytest.fixture
+def override_prefix_a():
+    nuke_prefixed_buckets(prefix='a'+get_prefix())
+    yield
+    nuke_prefixed_buckets(prefix='a'+get_prefix())

-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='create bucket')
-@attr(assertion='name starts with alphabetic works')
 # this test goes outside the user-configure prefix because it needs to
 # control the initial character of the bucket name
-@nose.with_setup(
-    setup=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
-    teardown=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
-    )
-def test_bucket_create_naming_good_starts_alpha():
+def test_bucket_create_naming_good_starts_alpha(override_prefix_a):
     check_good_bucket_name('foo', _prefix='a'+get_prefix())
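The ``@nose.with_setup`` pair above becomes a yield fixture: everything before ``yield`` runs as setup, everything after as teardown, and a test opts in by naming the fixture as a parameter. A generic sketch of the pattern (the names ``example_fixture``/``test_something`` are placeholders)::

    import pytest

    @pytest.fixture
    def example_fixture():
        state = {'ready': True}   # setup: runs before the test body
        yield state               # the test runs here and receives `state`
        state.clear()             # teardown: runs after the test, pass or fail

    def test_something(example_fixture):
        assert example_fixture['ready']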
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='create bucket')
-@attr(assertion='name starts with numeric works')
+@pytest.fixture
+def override_prefix_0():
+    nuke_prefixed_buckets(prefix='0'+get_prefix())
+    yield
+    nuke_prefixed_buckets(prefix='0'+get_prefix())
+
 # this test goes outside the user-configure prefix because it needs to
 # control the initial character of the bucket name
-@nose.with_setup(
-    setup=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
-    teardown=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
-    )
-def test_bucket_create_naming_good_starts_digit():
+def test_bucket_create_naming_good_starts_digit(override_prefix_0):
     check_good_bucket_name('foo', _prefix='0'+get_prefix())

-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='create bucket')
-@attr(assertion='name containing dot works')
 def test_bucket_create_naming_good_contains_period():
     check_good_bucket_name('aaa.111')

-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='create bucket')
-@attr(assertion='name containing hyphen works')
 def test_bucket_create_naming_good_contains_hyphen():
     check_good_bucket_name('aaa-111')

-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='create bucket with objects and recreate it')
-@attr(assertion='bucket recreation not overriding index')
 def test_bucket_recreate_not_overriding():
     key_names = ['mykey1', 'mykey2']
     bucket_name = _create_objects(keys=key_names)

     objs_list = get_objects_list(bucket_name)
-    eq(key_names, objs_list)
+    assert key_names == objs_list

     client = get_client()
     client.create_bucket(Bucket=bucket_name)

     objs_list = get_objects_list(bucket_name)
-    eq(key_names, objs_list)
+    assert key_names == objs_list

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='create and list objects with special names')
-@attr(assertion='special names work')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_bucket_create_special_key_names():
     key_names = [
         ' ',
@@ -6243,37 +5186,29 @@ def test_bucket_create_special_key_names():

     bucket_name = _create_objects(keys=key_names)

     objs_list = get_objects_list(bucket_name)
-    eq(key_names, objs_list)
+    assert key_names == objs_list

     client = get_client()

     for name in key_names:
-        eq((name in objs_list), True)
+        assert name in objs_list
         response = client.get_object(Bucket=bucket_name, Key=name)
         body = _get_body(response)
-        eq(name, body)
+        assert name == body
         client.put_object_acl(Bucket=bucket_name, Key=name, ACL='private')

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='create and list objects with underscore as prefix, list using prefix')
-@attr(assertion='listing works correctly')
 def test_bucket_list_special_prefix():
     key_names = ['_bla/1', '_bla/2', '_bla/3', '_bla/4', 'abcd']
     bucket_name = _create_objects(keys=key_names)

     objs_list = get_objects_list(bucket_name)

-    eq(len(objs_list), 5)
+    assert len(objs_list) == 5

     objs_list = get_objects_list(bucket_name, prefix='_bla/')
-    eq(len(objs_list), 4)
+    assert len(objs_list) == 4

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy zero sized object in same bucket')
-@attr(assertion='works')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_copy_zero_size():
     key = 'foo123bar'
     bucket_name = _create_objects(keys=[key])
@@ -6285,13 +5220,9 @@ def test_object_copy_zero_size():
     client.copy(copy_source, bucket_name, 'bar321foo')

     response = client.get_object(Bucket=bucket_name, Key='bar321foo')
-    eq(response['ContentLength'], 0)
+    assert response['ContentLength'] == 0

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy 16mb object in same bucket')
-@attr(assertion='works')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_copy_16m():
     bucket_name = get_new_bucket()
     key1 = 'obj1'
@@ -6302,13 +5233,9 @@ def test_object_copy_16m():
     key2 = 'obj2'
     client.copy_object(Bucket=bucket_name, Key=key2, CopySource=copy_source)

     response = client.get_object(Bucket=bucket_name, Key=key2)
-    eq(response['ContentLength'], 16*1024*1024)
+    assert response['ContentLength'] == 16*1024*1024

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy object in same bucket')
-@attr(assertion='works')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_copy_same_bucket():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -6320,13 +5247,9 @@ def test_object_copy_same_bucket():

     response = client.get_object(Bucket=bucket_name, Key='bar321foo')
     body = _get_body(response)
-    eq('foo', body)
+    assert 'foo' == body

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy object with content-type')
-@attr(assertion='works')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_copy_verify_contenttype():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -6340,14 +5263,10 @@ def test_object_copy_verify_contenttype():

     response = client.get_object(Bucket=bucket_name, Key='bar321foo')
     body = _get_body(response)
-    eq('foo', body)
+    assert 'foo' == body
     response_content_type = response['ContentType']
-    eq(response_content_type, content_type)
+    assert response_content_type == content_type

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy object to itself')
-@attr(assertion='fails')
 def test_object_copy_to_itself():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -6357,14 +5276,10 @@ def test_object_copy_to_itself():

     e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'foo123bar')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidRequest')
+    assert status == 400
+    assert error_code == 'InvalidRequest'

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='modify object metadata by copying')
-@attr(assertion='fails')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_copy_to_itself_with_metadata():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -6374,13 +5289,9 @@ def test_object_copy_to_itself_with_metadata():
     client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')

     response = client.get_object(Bucket=bucket_name, Key='foo123bar')
-    eq(response['Metadata'], metadata)
+    assert response['Metadata'] == metadata

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy object from different bucket')
-@attr(assertion='works')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_copy_diff_bucket():
     bucket_name1 = get_new_bucket()
     bucket_name2 = get_new_bucket()
@@ -6394,12 +5305,8 @@ def test_object_copy_diff_bucket():

     response = client.get_object(Bucket=bucket_name2, Key='bar321foo')
     body = _get_body(response)
-    eq('foo', body)
+    assert 'foo' == body

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy to an inaccessible bucket')
-@attr(assertion='fails w/AttributeError')
 def test_object_copy_not_owned_bucket():
     client = get_client()
     alt_client = get_alt_client()
@@ -6414,12 +5321,8 @@ def test_object_copy_not_owned_bucket():

     e = assert_raises(ClientError, alt_client.copy, copy_source, bucket_name2, 'bar321foo')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy a non-owned object in a non-owned bucket, but with perms')
-@attr(assertion='works')
 def test_object_copy_not_owned_object_bucket():
     client = get_client()
     alt_client = get_alt_client()
@@ -6441,11 +5344,7 @@ def test_object_copy_not_owned_object_bucket():

     copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
     alt_client.copy(copy_source, bucket_name, 'bar321foo')

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy object and change acl')
-@attr(assertion='works')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_copy_canned_acl():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -6465,10 +5364,7 @@ def test_object_copy_canned_acl():

     # check ACL is applied by doing GET from another user
     alt_client.get_object(Bucket=bucket_name, Key='foo123bar')

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy object and retain metadata')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_copy_retaining_metadata():
     for size in [3, 1024 * 1024]:
         bucket_name = get_new_bucket()
@@ -6482,15 +5378,12 @@ def test_object_copy_retaining_metadata():
         client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo')

         response = client.get_object(Bucket=bucket_name, Key='bar321foo')
-        eq(content_type, response['ContentType'])
-        eq(metadata, response['Metadata'])
+        assert content_type == response['ContentType']
+        assert metadata == response['Metadata']
         body = _get_body(response)
-        eq(size, response['ContentLength'])
+        assert size == response['ContentLength']

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy object and replace metadata')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_copy_replacing_metadata():
     for size in [3, 1024 * 1024]:
         bucket_name = get_new_bucket()
@@ -6507,13 +5400,10 @@ def test_object_copy_replacing_metadata():
         client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo', Metadata=metadata, MetadataDirective='REPLACE', ContentType=content_type)

         response = client.get_object(Bucket=bucket_name, Key='bar321foo')
-        eq(content_type, response['ContentType'])
-        eq(metadata, response['Metadata'])
-        eq(size, response['ContentLength'])
+        assert content_type == response['ContentType']
+        assert metadata == response['Metadata']
+        assert size == response['ContentLength']

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy from non-existent bucket')
 def test_object_copy_bucket_not_found():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -6521,11 +5411,8 @@ def test_object_copy_bucket_not_found():
     copy_source = {'Bucket': bucket_name + "-fake", 'Key': 'foo123bar'}
     e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'bar321foo')
     status = _get_status(e.response)
-    eq(status, 404)
+    assert status == 404

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy from non-existent object')
 def test_object_copy_key_not_found():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -6533,14 +5420,9 @@ def test_object_copy_key_not_found():
     copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
     e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'bar321foo')
     status = _get_status(e.response)
-    eq(status, 404)
+    assert status == 404
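The copy tests above use boto3's two copy entry points: ``client.copy`` (a managed-transfer helper) and ``client.copy_object`` (a single request), with ``MetadataDirective`` deciding whether source metadata is carried over or replaced. A hedged sketch with placeholder bucket and key names::

    import boto3

    client = boto3.client('s3')
    src = {'Bucket': 'src-bucket', 'Key': 'src-key'}

    # managed transfer helper, as used by client.copy(...) in these tests
    client.copy(src, 'dst-bucket', 'dst-key')

    # single-request copy; REPLACE discards the source object's metadata
    client.copy_object(Bucket='dst-bucket', Key='dst-key', CopySource=src,
                       Metadata={'type': 'new'}, MetadataDirective='REPLACE')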
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy object to/from versioned bucket')
-@attr(assertion='works')
-@attr('versioning')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_copy_versioned_bucket():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -6560,8 +5442,8 @@ def test_object_copy_versioned_bucket():
     client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
     response = client.get_object(Bucket=bucket_name, Key=key2)
     body = _get_body(response)
-    eq(data_str, body)
-    eq(size, response['ContentLength'])
+    assert data_str == body
+    assert size == response['ContentLength']

     # second copy
@@ -6571,8 +5453,8 @@ def test_object_copy_versioned_bucket():
     client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
     response = client.get_object(Bucket=bucket_name, Key=key3)
     body = _get_body(response)
-    eq(data_str, body)
-    eq(size, response['ContentLength'])
+    assert data_str == body
+    assert size == response['ContentLength']

     # copy to another versioned bucket
     bucket_name2 = get_new_bucket()
@@ -6582,8 +5464,8 @@ def test_object_copy_versioned_bucket():
     client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
     response = client.get_object(Bucket=bucket_name2, Key=key4)
     body = _get_body(response)
-    eq(data_str, body)
-    eq(size, response['ContentLength'])
+    assert data_str == body
+    assert size == response['ContentLength']

     # copy to another non versioned bucket
     bucket_name3 = get_new_bucket()
@@ -6592,8 +5474,8 @@ def test_object_copy_versioned_bucket():
     client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
     response = client.get_object(Bucket=bucket_name3, Key=key5)
     body = _get_body(response)
-    eq(data_str, body)
-    eq(size, response['ContentLength'])
+    assert data_str == body
+    assert size == response['ContentLength']

     # copy from a non versioned bucket
     copy_source = {'Bucket': bucket_name3, 'Key': key5}
@@ -6601,15 +5483,10 @@ def test_object_copy_versioned_bucket():
     client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key6)
     response = client.get_object(Bucket=bucket_name, Key=key6)
     body = _get_body(response)
-    eq(data_str, body)
-    eq(size, response['ContentLength'])
+    assert data_str == body
+    assert size == response['ContentLength']

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='copy object to/from versioned bucket with url-encoded name')
-@attr(assertion='works')
-@attr('versioning')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_copy_versioned_url_encoding():
     bucket = get_new_bucket_resource()
     check_configure_versioning_retry(bucket.name, "Enabled", "Enabled")
@@ -6672,12 +5549,7 @@ def _multipart_upload(bucket_name, key, size, part_size=5*1024*1024, client=None

     return (upload_id, s, parts)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='test copy object of a multipart upload')
-@attr(assertion='successful')
-@attr('versioning')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_copy_versioning_multipart_upload():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -6701,10 +5573,10 @@ def test_object_copy_versioning_multipart_upload():
     response = client.get_object(Bucket=bucket_name, Key=key2)
     version_id2 = response['VersionId']
     body = _get_body(response)
-    eq(data, body)
-    eq(key1_size, response['ContentLength'])
-    eq(key1_metadata, response['Metadata'])
-    eq(content_type, response['ContentType'])
+    assert data == body
+    assert key1_size == response['ContentLength']
+    assert key1_metadata == response['Metadata']
+    assert content_type == response['ContentType']

     # second copy
     copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
@@ -6712,10 +5584,10 @@ def test_object_copy_versioning_multipart_upload():
     client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
     response = client.get_object(Bucket=bucket_name, Key=key3)
     body = _get_body(response)
-    eq(data, body)
-    eq(key1_size, response['ContentLength'])
-    eq(key1_metadata, response['Metadata'])
-    eq(content_type, response['ContentType'])
+    assert data == body
+    assert key1_size == response['ContentLength']
+    assert key1_metadata == response['Metadata']
+    assert content_type == response['ContentType']

     # copy to another versioned bucket
     bucket_name2 = get_new_bucket()
@@ -6726,10 +5598,10 @@ def test_object_copy_versioning_multipart_upload():
     client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
     response = client.get_object(Bucket=bucket_name2, Key=key4)
     body = _get_body(response)
-    eq(data, body)
-    eq(key1_size, response['ContentLength'])
-    eq(key1_metadata, response['Metadata'])
-    eq(content_type, response['ContentType'])
+    assert data == body
+    assert key1_size == response['ContentLength']
+    assert key1_metadata == response['Metadata']
+    assert content_type == response['ContentType']

     # copy to another non versioned bucket
     bucket_name3 = get_new_bucket()
@@ -6738,10 +5610,10 @@ def test_object_copy_versioning_multipart_upload():
     client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
     response = client.get_object(Bucket=bucket_name3, Key=key5)
     body = _get_body(response)
-    eq(data, body)
-    eq(key1_size, response['ContentLength'])
-    eq(key1_metadata, response['Metadata'])
-    eq(content_type, response['ContentType'])
+    assert data == body
+    assert key1_size == response['ContentLength']
+    assert key1_metadata == response['Metadata']
+    assert content_type == response['ContentType']

     # copy from a non versioned bucket
     copy_source = {'Bucket': bucket_name3, 'Key': key5}
@@ -6749,14 +5621,11 @@ def test_object_copy_versioning_multipart_upload():
     client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key6)
     response = client.get_object(Bucket=bucket_name3, Key=key6)
     body = _get_body(response)
-    eq(data, body)
-    eq(key1_size, response['ContentLength'])
-    eq(key1_metadata, response['Metadata'])
-    eq(content_type, response['ContentType'])
+    assert data == body
+    assert key1_size == response['ContentLength']
+    assert key1_metadata == response['Metadata']
+    assert content_type == response['ContentType']

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='check multipart upload without parts')
 def test_multipart_upload_empty():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -6766,13 +5635,10 @@ def test_multipart_upload_empty():
     (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
     e = assert_raises(ClientError, client.complete_multipart_upload,Bucket=bucket_name, Key=key1, UploadId=upload_id)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'MalformedXML')
+    assert status == 400
+    assert error_code == 'MalformedXML'
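``_multipart_upload`` wraps the three-step multipart API and hands back ``(upload_id, data, parts)`` for ``complete_multipart_upload``. The plain boto3 equivalent of that round trip, with placeholder names, looks roughly like this::

    import boto3

    client = boto3.client('s3')
    resp = client.create_multipart_upload(Bucket='demo-bucket', Key='demo-key')
    upload_id = resp['UploadId']

    part = client.upload_part(Bucket='demo-bucket', Key='demo-key',
                              UploadId=upload_id, PartNumber=1,
                              Body=b'x' * (5 * 1024 * 1024))
    parts = [{'ETag': part['ETag'], 'PartNumber': 1}]

    client.complete_multipart_upload(Bucket='demo-bucket', Key='demo-key',
                                     UploadId=upload_id,
                                     MultipartUpload={'Parts': parts})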
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='check multipart uploads with single small part')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_multipart_upload_small():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -6782,7 +5648,7 @@ def test_multipart_upload_small():
     (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
     response = client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
     response = client.get_object(Bucket=bucket_name, Key=key1)
-    eq(response['ContentLength'], objlen)
+    assert response['ContentLength'] == objlen
     # check extra client.complete_multipart_upload
     response = client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})

@@ -6845,12 +5711,9 @@ def _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, ver
         else:
             response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r, VersionId=version_id)
         src_data = _get_body(response)
-        eq(src_data, dest_data)
+        assert src_data == dest_data

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='check multipart copies with single small part')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_multipart_copy_small():
     src_key = 'foo'
     src_bucket_name = _create_key_with_random_content(src_key)
@@ -6864,12 +5727,9 @@ def test_multipart_copy_small():
     client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})

     response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
-    eq(size, response['ContentLength'])
+    assert size == response['ContentLength']
     _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='check multipart copies with an invalid range')
 def test_multipart_copy_invalid_range():
     client = get_client()
     src_key = 'source'
@@ -6886,14 +5746,11 @@ def test_multipart_copy_invalid_range():
     valid_status = [400, 416]
     if not status in valid_status:
         raise AssertionError("Invalid response " + str(status))
-    eq(error_code, 'InvalidRange')
+    assert error_code == 'InvalidRange'

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='check multipart copy with an improperly formatted range')
 # TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40795 is resolved
-@attr('fails_on_rgw')
+@pytest.mark.fails_on_rgw
 def test_multipart_copy_improper_range():
     client = get_client()
     src_key = 'source'
@@ -6919,13 +5776,10 @@ def test_multipart_copy_improper_range():
                           CopySourceRange=test_range,
                           PartNumber=1)
         status, error_code = _get_status_and_error_code(e.response)
-        eq(status, 400)
-        eq(error_code, 'InvalidArgument')
+        assert status == 400
+        assert error_code == 'InvalidArgument'

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='check multipart copies without x-amz-copy-source-range')
 def test_multipart_copy_without_range():
     client = get_client()
     src_key = 'source'
@@ -6948,13 +5802,10 @@ def test_multipart_copy_without_range():
     client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})

     response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
-    eq(response['ContentLength'], 10)
+    assert response['ContentLength'] == 10
     _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='check multipart copies with single small part')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_multipart_copy_special_names():
     src_bucket_name = get_new_bucket()

@@ -6969,7 +5820,7 @@ def test_multipart_copy_special_names():
         (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
         response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
         response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
-        eq(size, response['ContentLength'])
+        assert size == response['ContentLength']
         _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
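The range tests above drive ``upload_part_copy``, whose ``CopySourceRange`` takes an inclusive ``bytes=first-last`` value; out-of-bounds ranges should come back as ``InvalidRange``, and malformed ones as 400 ``InvalidArgument``. A sketch with placeholder names, assuming the source object holds at least 10 bytes::

    import boto3

    client = boto3.client('s3')
    resp = client.create_multipart_upload(Bucket='dst-bucket', Key='dst-key')
    upload_id = resp['UploadId']

    part = client.upload_part_copy(
        Bucket='dst-bucket', Key='dst-key',
        UploadId=upload_id, PartNumber=1,
        CopySource={'Bucket': 'src-bucket', 'Key': 'src-key'},
        CopySourceRange='bytes=0-9')    # inclusive byte range
    parts = [{'ETag': part['CopyPartResult']['ETag'], 'PartNumber': 1}]
    client.complete_multipart_upload(Bucket='dst-bucket', Key='dst-key',
                                     UploadId=upload_id,
                                     MultipartUpload={'Parts': parts})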
 def _check_content_using_range(key, bucket_name, data, step):
@@ -6984,16 +5835,12 @@ def _check_content_using_range(key, bucket_name, data, step):
         end = ofs + toread - 1
         r = 'bytes={s}-{e}'.format(s=ofs, e=end)
         response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
-        eq(response['ContentLength'], toread)
+        assert response['ContentLength'] == toread
         body = _get_body(response)
-        eq(body, data[ofs:end+1])
+        assert body == data[ofs:end+1]

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='complete multi-part upload')
-@attr(assertion='successful')
-@attr('fails_on_aws')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
 def test_multipart_upload():
     bucket_name = get_new_bucket()
     key="mymultipart"
@@ -7009,17 +5856,17 @@ def test_multipart_upload():
     response = client.head_bucket(Bucket=bucket_name)
     rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
-    eq(rgw_bytes_used, objlen)
+    assert rgw_bytes_used == objlen

     rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
-    eq(rgw_object_count, 1)
+    assert rgw_object_count == 1

     response = client.get_object(Bucket=bucket_name, Key=key)
-    eq(response['ContentType'], content_type)
-    eq(response['Metadata'], metadata)
+    assert response['ContentType'] == content_type
+    assert response['Metadata'] == metadata
     body = _get_body(response)
-    eq(len(body), response['ContentLength'])
-    eq(body, data)
+    assert len(body) == response['ContentLength']
+    assert body == data

     _check_content_using_range(key, bucket_name, data, 1000000)
     _check_content_using_range(key, bucket_name, data, 10000000)

@@ -7029,9 +5876,9 @@ def check_versioning(bucket_name, status):

     try:
         response = client.get_bucket_versioning(Bucket=bucket_name)
-        eq(response['Status'], status)
+        assert response['Status'] == status
     except KeyError:
-        eq(status, None)
+        assert status is None

 # amazon is eventual consistent, retry a bit if failed
 def check_configure_versioning_retry(bucket_name, status, expected_string):
@@ -7052,13 +5899,9 @@ def check_configure_versioning_retry(bucket_name, status, expected_string):
         time.sleep(1)

-    eq(expected_string, read_status)
+    assert expected_string == read_status
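``check_configure_versioning_retry`` polls because a freshly written versioning configuration may not read back immediately. Condensed to its core, the idiom is roughly the following sketch — the tests call the helper above directly, and the function name here is a placeholder::

    import time

    def wait_for_versioning(client, bucket, want, attempts=5):
        client.put_bucket_versioning(Bucket=bucket,
                                     VersioningConfiguration={'Status': want})
        status = None
        for _ in range(attempts):
            status = client.get_bucket_versioning(Bucket=bucket).get('Status')
            if status == want:
                break
            time.sleep(1)   # eventual consistency: give the change time to land
        return status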
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='check multipart copies of versioned objects')
-@attr('versioning')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_multipart_copy_versioned():
     src_bucket_name = get_new_bucket()
     dest_bucket_name = get_new_bucket()
@@ -7084,7 +5927,7 @@ def test_multipart_copy_versioned():
         (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, version_id=vid)
         response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
         response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
-        eq(size, response['ContentLength'])
+        assert size == response['ContentLength']
         _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=vid)

 def _check_upload_multipart_resend(bucket_name, key, objlen, resend_parts):
@@ -7095,23 +5938,16 @@ def _check_upload_multipart_resend(bucket_name, key, objlen, resend_parts):
     client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})

     response = client.get_object(Bucket=bucket_name, Key=key)
-    eq(response['ContentType'], content_type)
-    eq(response['Metadata'], metadata)
+    assert response['ContentType'] == content_type
+    assert response['Metadata'] == metadata
     body = _get_body(response)
-    eq(len(body), response['ContentLength'])
-    eq(body, data)
+    assert len(body) == response['ContentLength']
+    assert body == data

     _check_content_using_range(key, bucket_name, data, 1000000)
     _check_content_using_range(key, bucket_name, data, 10000000)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='complete multiple multi-part upload with different sizes')
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='complete multi-part upload')
-@attr(assertion='successful')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_multipart_upload_resend_part():
     bucket_name = get_new_bucket()
     key="mymultipart"
@@ -7123,7 +5959,6 @@ def test_multipart_upload_resend_part():
     _check_upload_multipart_resend(bucket_name, key, objlen, [1,2])
     _check_upload_multipart_resend(bucket_name, key, objlen, [0,1,2,3,4,5])

-@attr(assertion='successful')
 def test_multipart_upload_multiple_sizes():
     bucket_name = get_new_bucket()
     key="mymultipart"
@@ -7153,8 +5988,7 @@
     (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
     client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})

-@attr(assertion='successful')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_multipart_copy_multiple_sizes():
     src_key = 'foo'
     src_bucket_name = _create_key_with_random_content(src_key, 12*1024*1024)
@@ -7193,10 +6027,6 @@
     client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
     _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='check failure on multiple multi-part upload with size too small')
-@attr(assertion='fails 400')
 def test_multipart_upload_size_too_small():
     bucket_name = get_new_bucket()
     key="mymultipart"
@@ -7206,8 +6036,8 @@ def test_multipart_upload_size_too_small():
     (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=size, part_size=10*1024)
     e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'EntityTooSmall')
+    assert status == 400
+    assert error_code == 'EntityTooSmall'

 def gen_rand_string(size, chars=string.ascii_uppercase + string.digits):
     return ''.join(random.choice(chars) for _ in range(size))

@@ -7242,19 +6072,11 @@ def _do_test_multipart_upload_contents(bucket_name, key, num_parts):

     return all_payload

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='check contents of multi-part upload')
-@attr(assertion='successful')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_multipart_upload_contents():
     bucket_name = get_new_bucket()
     _do_test_multipart_upload_contents(bucket_name, 'mymultipart', 3)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation=' multi-part upload overwrites existing key')
-@attr(assertion='successful')
 def test_multipart_upload_overwrite_existing_object():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -7280,10 +6102,6 @@ def test_multipart_upload_overwrite_existing_object():

     assert test_string == payload*num_parts

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='abort multi-part upload')
-@attr(assertion='successful')
 def test_abort_multipart_upload():
     bucket_name = get_new_bucket()
     key="mymultipart"
@@ -7295,15 +6113,11 @@ def test_abort_multipart_upload():

     response = client.head_bucket(Bucket=bucket_name)
     rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', 0))
-    eq(rgw_bytes_used, 0)
+    assert rgw_bytes_used == 0

     rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 0))
-    eq(rgw_object_count, 0)
+    assert rgw_object_count == 0

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='abort non-existent multi-part upload')
-@attr(assertion='fails 404')
 def test_abort_multipart_upload_not_found():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -7312,14 +6126,10 @@ def test_abort_multipart_upload_not_found():

     e = assert_raises(ClientError, client.abort_multipart_upload, Bucket=bucket_name, Key=key, UploadId='56788')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 404)
-    eq(error_code, 'NoSuchUpload')
+    assert status == 404
+    assert error_code == 'NoSuchUpload'

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='concurrent multi-part uploads')
-@attr(assertion='successful')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_list_multipart_upload():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -7344,17 +6154,13 @@ def test_list_multipart_upload():
         resp_uploadids.append(uploads[i]['UploadId'])

     for i in range(0, len(upload_ids)):
-        eq(True, (upload_ids[i] in resp_uploadids))
+        assert upload_ids[i] in resp_uploadids

     client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id1)
     client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id2)
     client.abort_multipart_upload(Bucket=bucket_name, Key=key2, UploadId=upload_id3)

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='list multipart uploads with different owners')
-@attr(assertion='successful')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_list_multipart_upload_owner():
     bucket_name = get_new_bucket()

@@ -7377,22 +6183,22 @@ def test_list_multipart_upload_owner():
     try:
         # match fields of an Upload from ListMultipartUploadsResult
         def match(upload, key, uploadid, userid, username):
-            eq(upload['Key'], key)
-            eq(upload['UploadId'], uploadid)
-            eq(upload['Initiator']['ID'], userid)
-            eq(upload['Initiator']['DisplayName'], username)
-            eq(upload['Owner']['ID'], userid)
-            eq(upload['Owner']['DisplayName'], username)
+            assert upload['Key'] == key
+            assert upload['UploadId'] == uploadid
+            assert upload['Initiator']['ID'] == userid
+            assert upload['Initiator']['DisplayName'] == username
+            assert upload['Owner']['ID'] == userid
+            assert upload['Owner']['DisplayName'] == username

         # list uploads with client1
         uploads1 = client1.list_multipart_uploads(Bucket=bucket_name)['Uploads']
-        eq(len(uploads1), 2)
+        assert len(uploads1) == 2
         match(uploads1[0], key1, upload1, user1, name1)
         match(uploads1[1], key2, upload2, user2, name2)

         # list uploads with client2
         uploads2 = client2.list_multipart_uploads(Bucket=bucket_name)['Uploads']
-        eq(len(uploads2), 2)
+        assert len(uploads2) == 2
         match(uploads2[0], key1, upload1, user1, name1)
         match(uploads2[1], key2, upload2, user2, name2)
     finally:
@@ -7400,9 +6206,6 @@ def test_list_multipart_upload_owner():
         finally:
             client1.abort_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload1)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='multi-part upload with missing part')
 def test_multipart_upload_missing_part():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -7419,12 +6222,9 @@ def test_multipart_upload_missing_part():
     e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidPart')
+    assert status == 400
+    assert error_code == 'InvalidPart'

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='multi-part upload with incorrect ETag')
 def test_multipart_upload_incorrect_etag():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -7441,8 +6241,8 @@ def test_multipart_upload_incorrect_etag():
     e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidPart')
+    assert status == 400
+    assert error_code == 'InvalidPart'

 def _simple_http_req_100_cont(host, port, is_secure, method, resource):
     """
@@ -7478,12 +6278,6 @@ def _simple_http_req_100_cont(host, port, is_secure, method, resource):

     return l[1]

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='w/expect continue')
-@attr(assertion='succeeds if object is public-read-write')
-@attr('100_continue')
-@attr('fails_on_mod_proxy_fcgi')
 def test_100_continue():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -7497,18 +6291,13 @@ def test_100_continue():

     #NOTES: this test needs to be tested when is_secure is True
     status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
-    eq(status, '403')
+    assert status == '403'

     client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')

     status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
-    eq(status, '100')
+    assert status == '100'

-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='set cors')
-@attr(assertion='succeeds')
-@attr('cors')
 def test_set_cors():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -7525,30 +6314,25 @@ def test_set_cors():

     e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
     status = _get_status(e.response)
-    eq(status, 404)
+    assert status == 404

     client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)

     response = client.get_bucket_cors(Bucket=bucket_name)
-    eq(response['CORSRules'][0]['AllowedMethods'], allowed_methods)
-    eq(response['CORSRules'][0]['AllowedOrigins'], allowed_origins)
+    assert response['CORSRules'][0]['AllowedMethods'] == allowed_methods
+    assert response['CORSRules'][0]['AllowedOrigins'] == allowed_origins

     client.delete_bucket_cors(Bucket=bucket_name)
     e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
     status = _get_status(e.response)
-    eq(status, 404)
+    assert status == 404

 def _cors_request_and_check(func, url, headers, expect_status, expect_allow_origin, expect_allow_methods):
     r = func(url, headers=headers, verify=get_config_ssl_verify())
-    eq(r.status_code, expect_status)
+    assert r.status_code == expect_status

     assert r.headers.get('access-control-allow-origin', None) == expect_allow_origin
     assert r.headers.get('access-control-allow-methods', None) == expect_allow_methods
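``_cors_request_and_check`` issues raw HTTP so it can mimic a browser preflight: an OPTIONS request carrying ``Origin`` and ``Access-Control-Request-Method``. With ``requests`` that amounts to the following — the URL is a placeholder for a bucket or object endpoint::

    import requests

    r = requests.options('https://endpoint/bucket/key',
                         headers={'Origin': 'example.origin',
                                  'Access-Control-Request-Method': 'GET'})
    print(r.status_code)                                  # 200 when a CORS rule matches
    print(r.headers.get('access-control-allow-origin'))   # echoed origin or '*'
    print(r.headers.get('access-control-allow-methods'))  # e.g. 'GET'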
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='check cors response when origin header set')
-@attr(assertion='returning cors header')
-@attr('cors')
 def test_cors_origin_response():
     bucket_name = _setup_bucket_acl(bucket_acl='public-read')
     client = get_client()
@@ -7572,7 +6356,7 @@ def test_cors_origin_response():

     e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
     status = _get_status(e.response)
-    eq(status, 404)
+    assert status == 404

     client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)

@@ -7624,11 +6408,6 @@ def test_cors_origin_response():
     _cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
     _cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'PUT'}, 200, 'foo.put', 'PUT')

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='check cors response when origin is set to wildcard')
-@attr(assertion='returning cors header')
-@attr('cors')
 def test_cors_origin_wildcard():
     bucket_name = _setup_bucket_acl(bucket_acl='public-read')
     client = get_client()
@@ -7643,7 +6422,7 @@ def test_cors_origin_wildcard():

     e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
     status = _get_status(e.response)
-    eq(status, 404)
+    assert status == 404

     client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)

@@ -7654,11 +6433,6 @@ def test_cors_origin_wildcard():
     _cors_request_and_check(requests.get, url, None, 200, None, None)
     _cors_request_and_check(requests.get, url, {'Origin': 'example.origin'}, 200, '*', 'GET')

-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='check cors response when Access-Control-Request-Headers is set in option request')
-@attr(assertion='returning cors header')
-@attr('cors')
 def test_cors_header_option():
     bucket_name = _setup_bucket_acl(bucket_acl='public-read')
     client = get_client()
@@ -7674,7 +6448,7 @@ def test_cors_header_option():

     e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
     status = _get_status(e.response)
-    eq(status, 404)
+    assert status == 404

     client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)

@@ -7685,11 +6459,7 @@ def test_cors_header_option():
     _cors_request_and_check(requests.options, obj_url, {'Origin': 'example.origin','Access-Control-Request-Headers':'x-amz-meta-header2','Access-Control-Request-Method':'GET'}, 403, None, None)

-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='put tags')
-@attr(assertion='succeeds')
-@attr('tagging')
+@pytest.mark.tagging
 def test_set_bucket_tagging():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -7705,23 +6475,23 @@ def test_set_bucket_tagging():

     e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 404)
-    eq(error_code, 'NoSuchTagSet')
+    assert status == 404
+    assert error_code == 'NoSuchTagSet'

     client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)

     response = client.get_bucket_tagging(Bucket=bucket_name)
-    eq(len(response['TagSet']), 1)
-    eq(response['TagSet'][0]['Key'], 'Hello')
-    eq(response['TagSet'][0]['Value'], 'World')
+    assert len(response['TagSet']) == 1
+    assert response['TagSet'][0]['Key'] == 'Hello'
+    assert response['TagSet'][0]['Value'] == 'World'

     response = client.delete_bucket_tagging(Bucket=bucket_name)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204

     e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 404)
-    eq(error_code, 'NoSuchTagSet')
+    assert status == 404
+    assert error_code == 'NoSuchTagSet'

 class FakeFile(object):
@@ -7775,7 +6545,7 @@ class FakeReadFile(FakeFile):
         self.expected_size = size

     def write(self, chars):
-        eq(chars, self.char*len(chars))
+        assert chars == self.char*len(chars)
         self.offset += len(chars)
         self.size += len(chars)

@@ -7786,7 +6556,7 @@
         self.interrupted = True

     def close(self):
-        eq(self.size, self.expected_size)
+        assert self.size == self.expected_size

 class FakeFileVerifier(object):
     """
@@ -7801,7 +6571,7 @@ class FakeFileVerifier(object):
         if self.char == None:
             self.char = data[0]
         self.size += size
-        eq(data.decode(), self.char*size)
+        assert data.decode() == self.char*size

 def _verify_atomic_key_data(bucket_name, key, size=-1, char=None):
     """
@@ -7811,7 +6581,7 @@ def _verify_atomic_key_data(bucket_name, key, size=-1, char=None):
     client = get_client()
     client.download_fileobj(bucket_name, key, fp_verify)
     if size >= 0:
-        eq(fp_verify.size, size)
+        assert fp_verify.size == size

 def _test_atomic_read(file_size):
     """
@@ -7838,24 +6608,12 @@ def _test_atomic_read(file_size):

     _verify_atomic_key_data(bucket_name, 'testobj', file_size, 'B')

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='read atomicity')
-@attr(assertion='1MB successful')
 def test_atomic_read_1mb():
     _test_atomic_read(1024*1024)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='read atomicity')
-@attr(assertion='4MB successful')
 def test_atomic_read_4mb():
     _test_atomic_read(1024*1024*4)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='read atomicity')
-@attr(assertion='8MB successful')
 def test_atomic_read_8mb():
     _test_atomic_read(1024*1024*8)

@@ -7891,24 +6649,12 @@ def _test_atomic_write(file_size):

     # verify B's
     _verify_atomic_key_data(bucket_name, objname, file_size, 'B')

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='write atomicity')
-@attr(assertion='1MB successful')
 def test_atomic_write_1mb():
     _test_atomic_write(1024*1024)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='write atomicity')
-@attr(assertion='4MB successful')
 def test_atomic_write_4mb():
     _test_atomic_write(1024*1024*4)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='write atomicity')
-@attr(assertion='8MB successful')
 def test_atomic_write_8mb():
     _test_atomic_write(1024*1024*8)

@@ -7936,24 +6682,12 @@ def _test_atomic_dual_write(file_size):

     # verify the file
     _verify_atomic_key_data(bucket_name, objname, file_size, 'B')

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='write one or the other')
-@attr(assertion='1MB successful')
 def test_atomic_dual_write_1mb():
     _test_atomic_dual_write(1024*1024)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='write one or the other')
-@attr(assertion='4MB successful')
 def test_atomic_dual_write_4mb():
     _test_atomic_dual_write(1024*1024*4)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='write one or the other')
-@attr(assertion='8MB successful')
 def test_atomic_dual_write_8mb():
     _test_atomic_dual_write(1024*1024*8)

@@ -7986,11 +6720,7 @@ def _test_atomic_conditional_write(file_size):

     # verify B's
     _verify_atomic_key_data(bucket_name, objname, file_size, 'B')

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='write atomicity')
-@attr(assertion='1MB successful')
-@attr('fails_on_aws')
+@pytest.mark.fails_on_aws
 def test_atomic_conditional_write_1mb():
     _test_atomic_conditional_write(1024*1024)

@@ -8021,29 +6751,21 @@ def _test_atomic_dual_conditional_write(file_size):

     e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_c)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 412)
-    eq(error_code, 'PreconditionFailed')
+    assert status == 412
+    assert error_code == 'PreconditionFailed'

     # verify the file
     _verify_atomic_key_data(bucket_name, objname, file_size, 'B')

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='write one or the other')
-@attr(assertion='1MB successful')
-@attr('fails_on_aws')
+@pytest.mark.fails_on_aws
 # TODO: test not passing with SSL, fix this
-@attr('fails_on_rgw')
+@pytest.mark.fails_on_rgw
 def test_atomic_dual_conditional_write_1mb():
     _test_atomic_dual_conditional_write(1024*1024)

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='write file in deleted bucket')
-@attr(assertion='fail 404')
-@attr('fails_on_aws')
+@pytest.mark.fails_on_aws
 # TODO: test not passing with SSL, fix this
-@attr('fails_on_rgw')
+@pytest.mark.fails_on_rgw
 def test_atomic_write_bucket_gone():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -8056,13 +6778,9 @@ def test_atomic_write_bucket_gone():

     e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_a)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 404)
-    eq(error_code, 'NoSuchBucket')
+    assert status == 404
+    assert error_code == 'NoSuchBucket'

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='begin to overwrite file with multipart upload then abort')
-@attr(assertion='read back original key contents')
 def test_atomic_multipart_upload_write():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -8073,13 +6791,13 @@ def test_atomic_multipart_upload_write():

     response = client.get_object(Bucket=bucket_name, Key='foo')
     body = _get_body(response)
-    eq(body, 'bar')
+    assert body == 'bar'

     client.abort_multipart_upload(Bucket=bucket_name, Key='foo', UploadId=upload_id)

     response = client.get_object(Bucket=bucket_name, Key='foo')
     body = _get_body(response)
-    eq(body, 'bar')
+    assert body == 'bar'

 class Counter:
     def __init__(self, default_val):
@@ -8101,10 +6819,6 @@ class ActionOnCount:
         if self.count == self.trigger_count:
             self.result = self.action()

-@attr(resource='object')
-@attr(method='put')
-@attr(operation='multipart check for two writes of the same part, first write finishes last')
-@attr(assertion='object contains correct content')
 def test_multipart_resend_first_finishes_last():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -8161,11 +6875,7 @@ def test_multipart_resend_first_finishes_last():

     _verify_atomic_key_data(bucket_name, key_name, file_size, 'A')

-@attr(resource='object')
-@attr(method='get')
-@attr(operation='range')
-@attr(assertion='returns correct data, 206')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_ranged_request_response_code():
     content = 'testcontent'

@@ -8176,18 +6886,14 @@ def test_ranged_request_response_code():

     response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-7')
     fetched_content = _get_body(response)
-    eq(fetched_content, content[4:8])
-    eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 4-7/11')
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
+    assert fetched_content == content[4:8]
+    assert response['ResponseMetadata']['HTTPHeaders']['content-range'] == 'bytes
4-7/11' + assert response['ResponseMetadata']['HTTPStatusCode'] == 206 def _generate_random_string(size): return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(size)) -@attr(resource='object') -@attr(method='get') -@attr(operation='range') -@attr(assertion='returns correct data, 206') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_ranged_big_request_response_code(): content = _generate_random_string(8*1024*1024) @@ -8198,15 +6904,11 @@ def test_ranged_big_request_response_code(): response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=3145728-5242880') fetched_content = _get_body(response) - eq(fetched_content, content[3145728:5242881]) - eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 3145728-5242880/8388608') - eq(response['ResponseMetadata']['HTTPStatusCode'], 206) + assert fetched_content == content[3145728:5242881] + assert response['ResponseMetadata']['HTTPHeaders']['content-range'] == 'bytes 3145728-5242880/8388608' + assert response['ResponseMetadata']['HTTPStatusCode'] == 206 -@attr(resource='object') -@attr(method='get') -@attr(operation='range') -@attr(assertion='returns correct data, 206') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_ranged_request_skip_leading_bytes_response_code(): content = 'testcontent' @@ -8217,15 +6919,11 @@ def test_ranged_request_skip_leading_bytes_response_code(): response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-') fetched_content = _get_body(response) - eq(fetched_content, content[4:]) - eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 4-10/11') - eq(response['ResponseMetadata']['HTTPStatusCode'], 206) + assert fetched_content == content[4:] + assert response['ResponseMetadata']['HTTPHeaders']['content-range'] == 'bytes 4-10/11' + assert response['ResponseMetadata']['HTTPStatusCode'] == 206 -@attr(resource='object') -@attr(method='get') -@attr(operation='range') -@attr(assertion='returns correct data, 206') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_ranged_request_return_trailing_bytes_response_code(): content = 'testcontent' @@ -8236,14 +6934,10 @@ def test_ranged_request_return_trailing_bytes_response_code(): response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=-7') fetched_content = _get_body(response) - eq(fetched_content, content[-7:]) - eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 4-10/11') - eq(response['ResponseMetadata']['HTTPStatusCode'], 206) + assert fetched_content == content[-7:] + assert response['ResponseMetadata']['HTTPHeaders']['content-range'] == 'bytes 4-10/11' + assert response['ResponseMetadata']['HTTPStatusCode'] == 206 -@attr(resource='object') -@attr(method='get') -@attr(operation='range') -@attr(assertion='returns invalid range, 416') def test_ranged_request_invalid_range(): content = 'testcontent' @@ -8255,13 +6949,9 @@ def test_ranged_request_invalid_range(): # test invalid range e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50') status, error_code = _get_status_and_error_code(e.response) - eq(status, 416) - eq(error_code, 'InvalidRange') + assert status == 416 + assert error_code == 'InvalidRange' -@attr(resource='object') -@attr(method='get') -@attr(operation='range') -@attr(assertion='returns invalid range, 416') def test_ranged_request_empty_object(): content = '' @@ -8273,13 +6963,9 @@ def test_ranged_request_empty_object(): # 
test invalid range e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50') status, error_code = _get_status_and_error_code(e.response) - eq(status, 416) - eq(error_code, 'InvalidRange') + assert status == 416 + assert error_code == 'InvalidRange' -@attr(resource='bucket') -@attr(method='create') -@attr(operation='create versioned bucket') -@attr(assertion='can create and suspend bucket versioning') def test_versioning_bucket_create_suspend(): bucket_name = get_new_bucket() check_versioning(bucket_name, None) @@ -8293,9 +6979,9 @@ def check_obj_content(client, bucket_name, key, version_id, content): response = client.get_object(Bucket=bucket_name, Key=key, VersionId=version_id) if content is not None: body = _get_body(response) - eq(body, content) + assert body == content else: - eq(response['DeleteMarker'], True) + assert response['DeleteMarker'] == True def check_obj_versions(client, bucket_name, key, version_ids, contents): # check to see if objects is pointing at correct version @@ -8308,8 +6994,8 @@ def check_obj_versions(client, bucket_name, key, version_ids, contents): i = 0 for version in versions: - eq(version['VersionId'], version_ids[i]) - eq(version['Key'], key) + assert version['VersionId'] == version_ids[i] + assert version['Key'] == key check_obj_content(client, bucket_name, key, version['VersionId'], contents[i]) i += 1 @@ -8331,7 +7017,7 @@ def create_multiple_versions(client, bucket_name, key, num_versions, version_ids return (version_ids, contents) def remove_obj_version(client, bucket_name, key, version_ids, contents, index): - eq(len(version_ids), len(contents)) + assert len(version_ids) == len(contents) index = index % len(version_ids) rm_version_id = version_ids.pop(index) rm_content = contents.pop(index) @@ -8363,11 +7049,6 @@ def _do_test_create_remove_versions(client, bucket_name, key, num_versions, remo print(response['Versions']) -@attr(resource='object') -@attr(method='create') -@attr(operation='create and remove versioned object') -@attr(assertion='can create access and remove appropriate versions') -@attr('versioning') def test_versioning_obj_create_read_remove(): bucket_name = get_new_bucket() client = get_client() @@ -8382,11 +7063,6 @@ def test_versioning_obj_create_read_remove(): _do_test_create_remove_versions(client, bucket_name, key, num_versions, 4, -1) _do_test_create_remove_versions(client, bucket_name, key, num_versions, 3, 3) -@attr(resource='object') -@attr(method='create') -@attr(operation='create and remove versioned object and head') -@attr(assertion='can create access and remove appropriate versions') -@attr('versioning') def test_versioning_obj_create_read_remove_head(): bucket_name = get_new_bucket() @@ -8405,27 +7081,22 @@ def test_versioning_obj_create_read_remove_head(): response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=removed_version_id) response = client.get_object(Bucket=bucket_name, Key=key) body = _get_body(response) - eq(body, contents[-1]) + assert body == contents[-1] # add a delete marker response = client.delete_object(Bucket=bucket_name, Key=key) - eq(response['DeleteMarker'], True) + assert response['DeleteMarker'] == True delete_marker_version_id = response['VersionId'] version_ids.append(delete_marker_version_id) response = client.list_object_versions(Bucket=bucket_name) - eq(len(response['Versions']), num_versions) - eq(len(response['DeleteMarkers']), 1) - eq(response['DeleteMarkers'][0]['VersionId'], delete_marker_version_id) + assert 
len(response['Versions']) == num_versions + assert len(response['DeleteMarkers']) == 1 + assert response['DeleteMarkers'][0]['VersionId'] == delete_marker_version_id clean_up_bucket(client, bucket_name, key, version_ids) -@attr(resource='object') -@attr(method='create') -@attr(operation='create object, then switch to versioning') -@attr(assertion='behaves correctly') -@attr('versioning') def test_versioning_obj_plain_null_version_removal(): bucket_name = get_new_bucket() check_versioning(bucket_name, None) @@ -8440,17 +7111,12 @@ def test_versioning_obj_plain_null_version_removal(): e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchKey') + assert status == 404 + assert error_code == 'NoSuchKey' response = client.list_object_versions(Bucket=bucket_name) - eq(('Versions' in response), False) + assert not 'Versions' in response -@attr(resource='object') -@attr(method='create') -@attr(operation='create object, then switch to versioning') -@attr(assertion='behaves correctly') -@attr('versioning') def test_versioning_obj_plain_null_version_overwrite(): bucket_name = get_new_bucket() check_versioning(bucket_name, None) @@ -8466,29 +7132,24 @@ def test_versioning_obj_plain_null_version_overwrite(): response = client.put_object(Bucket=bucket_name, Key=key, Body=content2) response = client.get_object(Bucket=bucket_name, Key=key) body = _get_body(response) - eq(body, content2) + assert body == content2 version_id = response['VersionId'] client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id) response = client.get_object(Bucket=bucket_name, Key=key) body = _get_body(response) - eq(body, content) + assert body == content client.delete_object(Bucket=bucket_name, Key=key, VersionId='null') e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchKey') + assert status == 404 + assert error_code == 'NoSuchKey' response = client.list_object_versions(Bucket=bucket_name) - eq(('Versions' in response), False) + assert not 'Versions' in response -@attr(resource='object') -@attr(method='create') -@attr(operation='create object, then switch to versioning') -@attr(assertion='behaves correctly') -@attr('versioning') def test_versioning_obj_plain_null_version_overwrite_suspended(): bucket_name = get_new_bucket() check_versioning(bucket_name, None) @@ -8505,27 +7166,27 @@ def test_versioning_obj_plain_null_version_overwrite_suspended(): response = client.put_object(Bucket=bucket_name, Key=key, Body=content2) response = client.get_object(Bucket=bucket_name, Key=key) body = _get_body(response) - eq(body, content2) + assert body == content2 response = client.list_object_versions(Bucket=bucket_name) # original object with 'null' version id still counts as a version - eq(len(response['Versions']), 1) + assert len(response['Versions']) == 1 client.delete_object(Bucket=bucket_name, Key=key, VersionId='null') e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchKey') + assert status == 404 + assert error_code == 'NoSuchKey' response = client.list_object_versions(Bucket=bucket_name) - eq(('Versions' in response), False) + assert not 'Versions' in response def delete_suspended_versioning_obj(client, bucket_name, key, version_ids, 
contents): client.delete_object(Bucket=bucket_name, Key=key) # clear out old null objects in lists since they will get overwritten - eq(len(version_ids), len(contents)) + assert len(version_ids) == len(contents) i = 0 for version_id in version_ids: if version_id == 'null': @@ -8539,7 +7200,7 @@ def overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, co client.put_object(Bucket=bucket_name, Key=key, Body=content) # clear out old null objects in lists since they will get overwritten - eq(len(version_ids), len(contents)) + assert len(version_ids) == len(contents) i = 0 for version_id in version_ids: if version_id == 'null': @@ -8554,11 +7215,6 @@ def overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, co return (version_ids, contents) -@attr(resource='object') -@attr(method='create') -@attr(operation='suspend versioned bucket') -@attr(assertion='suspended versioning behaves correctly') -@attr('versioning') def test_versioning_obj_suspend_versions(): bucket_name = get_new_bucket() client = get_client() @@ -8588,14 +7244,9 @@ def test_versioning_obj_suspend_versions(): for idx in range(num_versions): remove_obj_version(client, bucket_name, key, version_ids, contents, idx) - eq(len(version_ids), 0) - eq(len(version_ids), len(contents)) + assert len(version_ids) == 0 + assert len(version_ids) == len(contents) -@attr(resource='object') -@attr(method='remove') -@attr(operation='create and remove versions') -@attr(assertion='everything works') -@attr('versioning') def test_versioning_obj_create_versions_remove_all(): bucket_name = get_new_bucket() client = get_client() @@ -8609,14 +7260,9 @@ def test_versioning_obj_create_versions_remove_all(): for idx in range(num_versions): remove_obj_version(client, bucket_name, key, version_ids, contents, idx) - eq(len(version_ids), 0) - eq(len(version_ids), len(contents)) + assert len(version_ids) == 0 + assert len(version_ids) == len(contents) -@attr(resource='object') -@attr(method='remove') -@attr(operation='create and remove versions') -@attr(assertion='everything works') -@attr('versioning') def test_versioning_obj_create_versions_remove_special_names(): bucket_name = get_new_bucket() client = get_client() @@ -8631,15 +7277,10 @@ def test_versioning_obj_create_versions_remove_special_names(): for idx in range(num_versions): remove_obj_version(client, bucket_name, key, version_ids, contents, idx) - eq(len(version_ids), 0) - eq(len(version_ids), len(contents)) + assert len(version_ids) == 0 + assert len(version_ids) == len(contents) -@attr(resource='object') -@attr(method='multipart') -@attr(operation='create and test multipart object') -@attr(assertion='everything works') -@attr('versioning') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_versioning_obj_create_overwrite_multipart(): bucket_name = get_new_bucket() client = get_client() @@ -8665,14 +7306,9 @@ def test_versioning_obj_create_overwrite_multipart(): for idx in range(num_versions): remove_obj_version(client, bucket_name, key, version_ids, contents, idx) - eq(len(version_ids), 0) - eq(len(version_ids), len(contents)) + assert len(version_ids) == 0 + assert len(version_ids) == len(contents) -@attr(resource='object') -@attr(method='multipart') -@attr(operation='list versioned objects') -@attr(assertion='everything works') -@attr('versioning') def test_versioning_obj_list_marker(): bucket_name = get_new_bucket() client = get_client() @@ -8715,25 +7351,20 @@ def test_versioning_obj_list_marker(): # test the last 5 created objects first for 
i in range(5): version = versions[i] - eq(version['VersionId'], version_ids2[i]) - eq(version['Key'], key2) + assert version['VersionId'] == version_ids2[i] + assert version['Key'] == key2 check_obj_content(client, bucket_name, key2, version['VersionId'], contents2[i]) i += 1 # then the first 5 for j in range(5): version = versions[i] - eq(version['VersionId'], version_ids[j]) - eq(version['Key'], key) + assert version['VersionId'] == version_ids[j] + assert version['Key'] == key check_obj_content(client, bucket_name, key, version['VersionId'], contents[j]) i += 1 -@attr(resource='object') -@attr(method='multipart') -@attr(operation='create and test versioned object copying') -@attr(assertion='everything works') -@attr('versioning') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_versioning_copy_obj_version(): bucket_name = get_new_bucket() client = get_client() @@ -8751,7 +7382,7 @@ def test_versioning_copy_obj_version(): client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=new_key_name) response = client.get_object(Bucket=bucket_name, Key=new_key_name) body = _get_body(response) - eq(body, contents[i]) + assert body == contents[i] another_bucket_name = get_new_bucket() @@ -8761,7 +7392,7 @@ def test_versioning_copy_obj_version(): client.copy_object(Bucket=another_bucket_name, CopySource=copy_source, Key=new_key_name) response = client.get_object(Bucket=another_bucket_name, Key=new_key_name) body = _get_body(response) - eq(body, contents[i]) + assert body == contents[i] new_key_name = 'new_key' copy_source = {'Bucket': bucket_name, 'Key': key} @@ -8769,13 +7400,8 @@ def test_versioning_copy_obj_version(): response = client.get_object(Bucket=another_bucket_name, Key=new_key_name) body = _get_body(response) - eq(body, contents[-1]) + assert body == contents[-1] -@attr(resource='object') -@attr(method='delete') -@attr(operation='delete multiple versions') -@attr(assertion='deletes multiple versions of an object with a single call') -@attr('versioning') def test_versioning_multi_object_delete(): bucket_name = get_new_bucket() client = get_client() @@ -8795,20 +7421,15 @@ def test_versioning_multi_object_delete(): client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId']) response = client.list_object_versions(Bucket=bucket_name) - eq(('Versions' in response), False) + assert not 'Versions' in response # now remove again, should all succeed due to idempotency for version in versions: client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId']) response = client.list_object_versions(Bucket=bucket_name) - eq(('Versions' in response), False) + assert not 'Versions' in response -@attr(resource='object') -@attr(method='delete') -@attr(operation='delete multiple versions') -@attr(assertion='deletes multiple versions of an object and delete marker with a single call') -@attr('versioning') def test_versioning_multi_object_delete_with_marker(): bucket_name = get_new_bucket() client = get_client() @@ -8826,8 +7447,8 @@ def test_versioning_multi_object_delete_with_marker(): delete_markers = response['DeleteMarkers'] version_ids.append(delete_markers[0]['VersionId']) - eq(len(version_ids), 3) - eq(len(delete_markers), 1) + assert len(version_ids) == 3 + assert len(delete_markers) == 1 for version in versions: client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId']) @@ -8836,8 +7457,8 @@ def test_versioning_multi_object_delete_with_marker(): client.delete_object(Bucket=bucket_name, Key=key, 
VersionId=delete_marker['VersionId']) response = client.list_object_versions(Bucket=bucket_name) - eq(('Versions' in response), False) - eq(('DeleteMarkers' in response), False) + assert not 'Versions' in response + assert not 'DeleteMarkers' in response for version in versions: client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId']) @@ -8847,15 +7468,10 @@ def test_versioning_multi_object_delete_with_marker(): # now remove again, should all succeed due to idempotency response = client.list_object_versions(Bucket=bucket_name) - eq(('Versions' in response), False) - eq(('DeleteMarkers' in response), False) + assert not 'Versions' in response + assert not 'DeleteMarkers' in response -@attr(resource='object') -@attr(method='delete') -@attr(operation='multi delete create marker') -@attr(assertion='returns correct marker version id') -@attr('versioning') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_versioning_multi_object_delete_with_marker_create(): bucket_name = get_new_bucket() client = get_client() @@ -8870,15 +7486,10 @@ def test_versioning_multi_object_delete_with_marker_create(): response = client.list_object_versions(Bucket=bucket_name) delete_markers = response['DeleteMarkers'] - eq(len(delete_markers), 1) - eq(delete_marker_version_id, delete_markers[0]['VersionId']) - eq(key, delete_markers[0]['Key']) + assert len(delete_markers) == 1 + assert delete_marker_version_id == delete_markers[0]['VersionId'] + assert key == delete_markers[0]['Key'] -@attr(resource='object') -@attr(method='put') -@attr(operation='change acl on an object version changes specific version') -@attr(assertion='works') -@attr('versioning') def test_versioned_object_acl(): bucket_name = get_new_bucket() client = get_client() @@ -8897,8 +7508,8 @@ def test_versioned_object_acl(): display_name = get_main_display_name() user_id = get_main_user_id() - eq(response['Owner']['DisplayName'], display_name) - eq(response['Owner']['ID'], user_id) + assert response['Owner']['DisplayName'] == display_name + assert response['Owner']['ID'] == user_id grants = response['Grants'] default_policy = [ @@ -8946,12 +7557,7 @@ def test_versioned_object_acl(): grants = response['Grants'] check_grants(grants, default_policy) -@attr(resource='object') -@attr(method='put') -@attr(operation='change acl on an object with no version specified changes latest version') -@attr(assertion='works') -@attr('versioning') -@attr('fails_on_dbstore') +@pytest.mark.fails_on_dbstore def test_versioned_object_acl_no_version_specified(): bucket_name = get_new_bucket() client = get_client() @@ -8971,8 +7577,8 @@ def test_versioned_object_acl_no_version_specified(): display_name = get_main_display_name() user_id = get_main_user_id() - eq(response['Owner']['DisplayName'], display_name) - eq(response['Owner']['ID'], user_id) + assert response['Owner']['DisplayName'] == display_name + assert response['Owner']['ID'] == user_id grants = response['Grants'] default_policy = [ @@ -9038,13 +7644,8 @@ def _do_clear_versioned_bucket_concurrent(client, bucket_name): t.append(thr) return t -@attr(resource='object') -@attr(method='put') -@attr(operation='concurrent creation of objects, concurrent removal') -@attr(assertion='works') # TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/39142 is resolved -@attr('fails_on_rgw') -@attr('versioning') +@pytest.mark.fails_on_rgw def test_versioned_concurrent_object_create_concurrent_remove(): bucket_name = get_new_bucket() client = get_client() @@ -9061,19 +7662,14 @@ def 
test_versioned_concurrent_object_create_concurrent_remove(): response = client.list_object_versions(Bucket=bucket_name) versions = response['Versions'] - eq(len(versions), num_versions) + assert len(versions) == num_versions t = _do_clear_versioned_bucket_concurrent(client, bucket_name) _do_wait_completion(t) response = client.list_object_versions(Bucket=bucket_name) - eq(('Versions' in response), False) + assert not 'Versions' in response -@attr(resource='object') -@attr(method='put') -@attr(operation='concurrent creation and removal of objects') -@attr(assertion='works') -@attr('versioning') def test_versioned_concurrent_object_create_and_remove(): bucket_name = get_new_bucket() client = get_client() @@ -9100,12 +7696,9 @@ def test_versioned_concurrent_object_create_and_remove(): _do_wait_completion(t) response = client.list_object_versions(Bucket=bucket_name) - eq(('Versions' in response), False) + assert not 'Versions' in response -@attr(resource='bucket') -@attr(method='put') -@attr(operation='set lifecycle config') -@attr('lifecycle') +@pytest.mark.lifecycle def test_lifecycle_set(): bucket_name = get_new_bucket() client = get_client() @@ -9113,12 +7706,9 @@ def test_lifecycle_set(): {'ID': 'rule2', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Disabled'}] lifecycle = {'Rules': rules} response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='bucket') -@attr(method='get') -@attr(operation='get lifecycle config') -@attr('lifecycle') +@pytest.mark.lifecycle def test_lifecycle_get(): bucket_name = get_new_bucket() client = get_client() @@ -9127,12 +7717,9 @@ def test_lifecycle_get(): lifecycle = {'Rules': rules} client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle) response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name) - eq(response['Rules'], rules) + assert response['Rules'] == rules -@attr(resource='bucket') -@attr(method='get') -@attr(operation='get lifecycle config no id') -@attr('lifecycle') +@pytest.mark.lifecycle def test_lifecycle_get_no_id(): bucket_name = get_new_bucket() client = get_client() @@ -9150,12 +7737,12 @@ def test_lifecycle_get_no_id(): for lc_rule in current_lc: if lc_rule['Prefix'] == rules['rule1'].prefix: - eq(lc_rule['Expiration']['Days'], rules['rule1'].days) - eq(lc_rule['Status'], rules['rule1'].status) + assert lc_rule['Expiration']['Days'] == rules['rule1'].days + assert lc_rule['Status'] == rules['rule1'].status assert 'ID' in lc_rule elif lc_rule['Prefix'] == rules['rule2'].prefix: - eq(lc_rule['Expiration']['Days'], rules['rule2'].days) - eq(lc_rule['Status'], rules['rule2'].status) + assert lc_rule['Expiration']['Days'] == rules['rule2'].days + assert lc_rule['Status'] == rules['rule2'].status assert 'ID' in lc_rule else: # neither of the rules we supplied was returned, something wrong @@ -9163,13 +7750,10 @@ def test_lifecycle_get_no_id(): assert False # The test harness for lifecycle is configured to treat days as 10 second intervals. 
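
The comment above is the key to all of the lifecycle timing below: with ``rgw_lc_debug_interval`` set on the gateway, one lifecycle "day" elapses in that many seconds, and the suite reads the matching ``lc_debug_interval`` value from its own configuration through ``get_lc_debug_interval()``. A minimal sketch of the wait logic the tests build on, assuming both sides are set to 10 seconds (a deployment-specific assumption; adjust to your setup)::

    import time

    def wait_for_lc(days, lc_debug_interval=10, fudge_cycles=2):
        # One configured lifecycle "day" passes every lc_debug_interval
        # seconds; the extra cycles absorb the LC timer window, just as
        # the 4*lc_interval and 5*lc_interval sleeps in the tests do.
        time.sleep((days + fudge_cycles) * lc_debug_interval)
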
-@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle expiration') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_lifecycle_expiration(): bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo', 'keep2/bar', 'expire3/foo', 'expire3/bar']) @@ -9195,19 +7779,16 @@ def test_lifecycle_expiration(): response = client.list_objects(Bucket=bucket_name) expire3_objects = response['Contents'] - eq(len(init_objects), 6) - eq(len(expire1_objects), 4) - eq(len(keep2_objects), 4) - eq(len(expire3_objects), 2) + assert len(init_objects) == 6 + assert len(expire1_objects) == 4 + assert len(keep2_objects) == 4 + assert len(expire3_objects) == 2 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle expiration with list-objects-v2') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_aws') -@attr('list-objects-v2') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_aws +@pytest.mark.list_objects_v2 +@pytest.mark.fails_on_dbstore def test_lifecyclev2_expiration(): bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo', 'keep2/bar', 'expire3/foo', 'expire3/bar']) @@ -9233,17 +7814,14 @@ def test_lifecyclev2_expiration(): response = client.list_objects_v2(Bucket=bucket_name) expire3_objects = response['Contents'] - eq(len(init_objects), 6) - eq(len(expire1_objects), 4) - eq(len(keep2_objects), 4) - eq(len(expire3_objects), 2) + assert len(init_objects) == 6 + assert len(expire1_objects) == 4 + assert len(keep2_objects) == 4 + assert len(expire3_objects) == 2 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle expiration on versioning enabled bucket') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_aws') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_aws def test_lifecycle_expiration_versioning_enabled(): bucket_name = get_new_bucket() client = get_client() @@ -9262,15 +7840,12 @@ def test_lifecycle_expiration_versioning_enabled(): response = client.list_object_versions(Bucket=bucket_name) versions = response['Versions'] delete_markers = response['DeleteMarkers'] - eq(len(versions), 1) - eq(len(delete_markers), 1) + assert len(versions) == 1 + assert len(delete_markers) == 1 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle expiration with 1 tag') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_aws') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_aws def test_lifecycle_expiration_tags1(): bucket_name = get_new_bucket() client = get_client() @@ -9283,7 +7858,7 @@ def test_lifecycle_expiration_tags1(): response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key, Tagging=tom_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 lifecycle_config = { 'Rules': [ @@ -9306,7 +7881,7 @@ def test_lifecycle_expiration_tags1(): response = client.put_bucket_lifecycle_configuration( Bucket=bucket_name, LifecycleConfiguration=lifecycle_config) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 lc_interval = get_lc_debug_interval() @@ -9317,7 +7892,7 @@ def 
test_lifecycle_expiration_tags1(): except KeyError: expire_objects = [] - eq(len(expire_objects), 0) + assert len(expire_objects) == 0 # factor out common setup code def setup_lifecycle_tags2(client, bucket_name): @@ -9329,7 +7904,7 @@ def setup_lifecycle_tags2(client, bucket_name): response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key, Tagging=tom_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 huck_key = 'days1/huck' huck_tagset = { @@ -9341,7 +7916,7 @@ def setup_lifecycle_tags2(client, bucket_name): response = client.put_object_tagging(Bucket=bucket_name, Key=huck_key, Tagging=huck_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 lifecycle_config = { 'Rules': [ @@ -9373,16 +7948,13 @@ def setup_lifecycle_tags2(client, bucket_name): response = client.put_bucket_lifecycle_configuration( Bucket=bucket_name, LifecycleConfiguration=lifecycle_config) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 return response -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle expiration with 2 tags') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_lifecycle_expiration_tags2(): bucket_name = get_new_bucket() client = get_client() @@ -9395,15 +7967,12 @@ def test_lifecycle_expiration_tags2(): response = client.list_objects(Bucket=bucket_name) expire1_objects = response['Contents'] - eq(len(expire1_objects), 1) + assert len(expire1_objects) == 1 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle expiration with versioning and 2 tags') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_lifecycle_expiration_versioned_tags2(): bucket_name = get_new_bucket() client = get_client() @@ -9419,7 +7988,7 @@ def test_lifecycle_expiration_versioned_tags2(): response = client.list_objects(Bucket=bucket_name) expire1_objects = response['Contents'] - eq(len(expire1_objects), 1) + assert len(expire1_objects) == 1 # setup for scenario based on vidushi mishra's in rhbz#1877737 def setup_lifecycle_noncur_tags(client, bucket_name, days): @@ -9432,10 +8001,10 @@ def setup_lifecycle_noncur_tags(client, bucket_name, days): for ix in range(10): body = "%s v%d" % (key, ix) response = client.put_object(Bucket=bucket_name, Key=key, Body=body) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 lifecycle_config = { 'Rules': [ @@ -9458,7 +8027,7 @@ def setup_lifecycle_noncur_tags(client, bucket_name, days): response = client.put_bucket_lifecycle_configuration( Bucket=bucket_name, LifecycleConfiguration=lifecycle_config) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 return response def verify_lifecycle_expiration_noncur_tags(client, bucket_name, 
secs): @@ -9470,13 +8039,10 @@ def verify_lifecycle_expiration_noncur_tags(client, bucket_name, secs): objs_list = [] return len(objs_list) -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle noncurrent expiration with 1 tag filter') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_lifecycle_expiration_noncur_tags1(): bucket_name = get_new_bucket() client = get_client() @@ -9493,19 +8059,15 @@ def test_lifecycle_expiration_noncur_tags1(): client, bucket_name, 2*lc_interval) # at T+20, 10 objects should exist - eq(num_objs, 10) + assert num_objs == 10 num_objs = verify_lifecycle_expiration_noncur_tags( client, bucket_name, 5*lc_interval) # at T+60, only the current object version should exist - eq(num_objs, 1) + assert num_objs == 1 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='id too long in lifecycle rule') -@attr('lifecycle') -@attr(assertion='fails 400') +@pytest.mark.lifecycle def test_lifecycle_id_too_long(): bucket_name = get_new_bucket() client = get_client() @@ -9514,14 +8076,10 @@ def test_lifecycle_id_too_long(): e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidArgument') + assert status == 400 + assert error_code == 'InvalidArgument' -@attr(resource='bucket') -@attr(method='put') -@attr(operation='same id') -@attr('lifecycle') -@attr(assertion='fails 400') +@pytest.mark.lifecycle def test_lifecycle_same_id(): bucket_name = get_new_bucket() client = get_client() @@ -9531,14 +8089,10 @@ def test_lifecycle_same_id(): e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidArgument') + assert status == 400 + assert error_code == 'InvalidArgument' -@attr(resource='bucket') -@attr(method='put') -@attr(operation='invalid status in lifecycle rule') -@attr('lifecycle') -@attr(assertion='fails 400') +@pytest.mark.lifecycle def test_lifecycle_invalid_status(): bucket_name = get_new_bucket() client = get_client() @@ -9547,29 +8101,26 @@ def test_lifecycle_invalid_status(): e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'MalformedXML') + assert status == 400 + assert error_code == 'MalformedXML' rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'disabled'}] lifecycle = {'Rules': rules} e = assert_raises(ClientError, client.put_bucket_lifecycle, Bucket=bucket_name, LifecycleConfiguration=lifecycle) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'MalformedXML') + assert status == 400 + assert error_code == 'MalformedXML' rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'invalid'}] lifecycle = {'Rules': rules} e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'MalformedXML') + 
assert status == 400 + assert error_code == 'MalformedXML' -@attr(resource='bucket') -@attr(method='put') -@attr(operation='set lifecycle config with expiration date') -@attr('lifecycle') +@pytest.mark.lifecycle def test_lifecycle_set_date(): bucket_name = get_new_bucket() client = get_client() @@ -9577,13 +8128,9 @@ def test_lifecycle_set_date(): lifecycle = {'Rules': rules} response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='set lifecycle config with not iso8601 date') -@attr('lifecycle') -@attr(assertion='fails 400') +@pytest.mark.lifecycle def test_lifecycle_set_invalid_date(): bucket_name = get_new_bucket() client = get_client() @@ -9592,15 +8139,12 @@ def test_lifecycle_set_invalid_date(): e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle expiration with date') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_lifecycle_expiration_date(): bucket_name = _create_objects(keys=['past/foo', 'future/bar']) client = get_client() @@ -9618,14 +8162,11 @@ def test_lifecycle_expiration_date(): response = client.list_objects(Bucket=bucket_name) expire_objects = response['Contents'] - eq(len(init_objects), 2) - eq(len(expire_objects), 1) + assert len(init_objects) == 2 + assert len(expire_objects) == 1 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle expiration days 0') -@attr('lifecycle') -@attr('lifecycle_expiration') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration def test_lifecycle_expiration_days0(): bucket_name = _create_objects(keys=['days0/foo', 'days0/bar']) client = get_client() @@ -9641,7 +8182,7 @@ def test_lifecycle_expiration_days0(): except botocore.exceptions.ClientError as e: response_code = e.response['Error']['Code'] - eq(response_code, 'InvalidArgument') + assert response_code == 'InvalidArgument' def setup_lifecycle_expiration(client, bucket_name, rule_id, delta_days, @@ -9652,12 +8193,12 @@ def setup_lifecycle_expiration(client, bucket_name, rule_id, delta_days, lifecycle = {'Rules': rules} response = client.put_bucket_lifecycle_configuration( Bucket=bucket_name, LifecycleConfiguration=lifecycle) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 key = rule_prefix + 'foo' body = 'bar' response = client.put_object(Bucket=bucket_name, Key=key, Body=body) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 return response def check_lifecycle_expiration_header(response, start_time, rule_id, @@ -9675,11 +8216,8 @@ def check_lifecycle_expiration_header(response, start_time, rule_id, return days_to_expire and rule_eq_id -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle expiration header put') -@attr('lifecycle') -@attr('lifecycle_expiration') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration def 
test_lifecycle_expiration_header_put(): bucket_name = get_new_bucket() client = get_client() @@ -9687,14 +8225,11 @@ def test_lifecycle_expiration_header_put(): now = datetime.datetime.now(None) response = setup_lifecycle_expiration( client, bucket_name, 'rule1', 1, 'days1/') - eq(check_lifecycle_expiration_header(response, now, 'rule1', 1), True) + assert check_lifecycle_expiration_header(response, now, 'rule1', 1) -@attr(resource='bucket') -@attr(method='head') -@attr(operation='test lifecycle expiration header head') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_dbstore def test_lifecycle_expiration_header_head(): bucket_name = get_new_bucket() client = get_client() @@ -9707,15 +8242,12 @@ def test_lifecycle_expiration_header_head(): # stat the object, check header response = client.head_object(Bucket=bucket_name, Key=key) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) - eq(check_lifecycle_expiration_header(response, now, 'rule1', 1), True) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + assert check_lifecycle_expiration_header(response, now, 'rule1', 1) -@attr(resource='bucket') -@attr(method='head') -@attr(operation='test lifecycle expiration header head with tags') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_dbstore def test_lifecycle_expiration_header_tags_head(): bucket_name = get_new_bucket() client = get_client() @@ -9744,8 +8276,8 @@ def test_lifecycle_expiration_header_tags_head(): # stat the object, check header response = client.head_object(Bucket=bucket_name, Key=key1) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) - eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), True) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + assert check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1) # test that header is not returning when it should not lifecycle={ @@ -9766,14 +8298,11 @@ def test_lifecycle_expiration_header_tags_head(): Bucket=bucket_name, LifecycleConfiguration=lifecycle) # stat the object, check header response = client.head_object(Bucket=bucket_name, Key=key1) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) - eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), False) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + assert not check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1) -@attr(resource='bucket') -@attr(method='head') -@attr(operation='test lifecycle expiration header head with tags and And') -@attr('lifecycle') -@attr('lifecycle_expiration') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration def test_lifecycle_expiration_header_and_tags_head(): now = datetime.datetime.now(None) bucket_name = get_new_bucket() @@ -9814,13 +8343,10 @@ def test_lifecycle_expiration_header_and_tags_head(): # stat the object, check header response = client.head_object(Bucket=bucket_name, Key=key1) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) - eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), False) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + assert not check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1) -@attr(resource='bucket') 
-@attr(method='put') -@attr(operation='set lifecycle config with noncurrent version expiration') -@attr('lifecycle') +@pytest.mark.lifecycle def test_lifecycle_set_noncurrent(): bucket_name = _create_objects(keys=['past/foo', 'future/bar']) client = get_client() @@ -9828,15 +8354,12 @@ def test_lifecycle_set_noncurrent(): {'ID': 'rule2', 'NoncurrentVersionExpiration': {'NoncurrentDays': 3}, 'Prefix': 'future/', 'Status':'Enabled'}] lifecycle = {'Rules': rules} response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle non-current version expiration') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_lifecycle_noncur_expiration(): bucket_name = get_new_bucket() client = get_client() @@ -9859,52 +8382,40 @@ def test_lifecycle_noncur_expiration(): response = client.list_object_versions(Bucket=bucket_name) expire_versions = response['Versions'] - eq(len(init_versions), 6) - eq(len(expire_versions), 4) + assert len(init_versions) == 6 + assert len(expire_versions) == 4 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='set lifecycle config with delete marker expiration') -@attr('lifecycle') +@pytest.mark.lifecycle def test_lifecycle_set_deletemarker(): bucket_name = get_new_bucket() client = get_client() rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}] lifecycle = {'Rules': rules} response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='set lifecycle config with Filter') -@attr('lifecycle') +@pytest.mark.lifecycle def test_lifecycle_set_filter(): bucket_name = get_new_bucket() client = get_client() rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {'Prefix': 'foo'}, 'Status':'Enabled'}] lifecycle = {'Rules': rules} response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='set lifecycle config with empty Filter') -@attr('lifecycle') +@pytest.mark.lifecycle def test_lifecycle_set_empty_filter(): bucket_name = get_new_bucket() client = get_client() rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {}, 'Status':'Enabled'}] lifecycle = {'Rules': rules} response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle delete marker expiration') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_aws 
+@pytest.mark.fails_on_dbstore def test_lifecycle_deletemarker_expiration(): bucket_name = get_new_bucket() client = get_client() @@ -9933,13 +8444,10 @@ def test_lifecycle_deletemarker_expiration(): deleted_versions = response['DeleteMarkers'] total_expire_versions = init_versions + deleted_versions - eq(len(total_init_versions), 4) - eq(len(total_expire_versions), 2) + assert len(total_init_versions) == 4 + assert len(total_expire_versions) == 2 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='set lifecycle config with multipart expiration') -@attr('lifecycle') +@pytest.mark.lifecycle def test_lifecycle_set_multipart(): bucket_name = get_new_bucket() client = get_client() @@ -9951,15 +8459,12 @@ def test_lifecycle_set_multipart(): ] lifecycle = {'Rules': rules} response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle multipart expiration') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_lifecycle_multipart_expiration(): bucket_name = get_new_bucket() client = get_client() @@ -9988,14 +8493,10 @@ def test_lifecycle_multipart_expiration(): response = client.list_multipart_uploads(Bucket=bucket_name) expired_uploads = response['Uploads'] - eq(len(init_uploads), 2) - eq(len(expired_uploads), 1) + assert len(init_uploads) == 2 + assert len(expired_uploads) == 1 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='set lifecycle config transition with not iso8601 date') -@attr('lifecycle') -@attr(assertion='fails 400') +@pytest.mark.lifecycle def test_lifecycle_transition_set_invalid_date(): bucket_name = get_new_bucket() client = get_client() @@ -10003,7 +8504,7 @@ def test_lifecycle_transition_set_invalid_date(): lifecycle = {'Rules': rules} e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 def _test_encryption_sse_customer_write(file_size): """ @@ -10029,19 +8530,16 @@ def _test_encryption_sse_customer_write(file_size): client.meta.events.register('before-call.s3.GetObject', lf) response = client.get_object(Bucket=bucket_name, Key=key) body = _get_body(response) - eq(body, data) + assert body == data # The test harness for lifecycle is configured to treat days as 10 second intervals. 
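
For contrast with ``_test_encryption_sse_customer_write`` above, which deliberately injects the raw ``x-amz-server-side-encryption-customer-*`` headers through botocore's event system, the same round trip can be written with boto3's first-class SSE-C parameters. A sketch, with bucket and object names purely illustrative and credentials/endpoint assumed to come from the environment::

    import boto3

    client = boto3.client('s3')
    sse = dict(SSECustomerAlgorithm='AES256',
               SSECustomerKey='0123456789abcdef' * 2)   # 32-byte customer key

    client.put_object(Bucket='example-bucket', Key='obj', Body=b'data', **sse)
    # the GET must present the same key; botocore fills in the base64
    # encoding and the key-MD5 header automatically
    resp = client.get_object(Bucket='example-bucket', Key='obj', **sse)
    assert resp['Body'].read() == b'data'
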
-@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle transition') -@attr('lifecycle') -@attr('lifecycle_transition') -@attr('fails_on_aws') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_transition +@pytest.mark.fails_on_aws def test_lifecycle_transition(): sc = configured_storage_classes() if len(sc) < 3: - raise SkipTest + pytest.skip('requires 3 or more storage classes') bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo', 'keep2/bar', 'expire3/foo', 'expire3/bar']) @@ -10054,42 +8552,39 @@ def test_lifecycle_transition(): # Get list of all keys response = client.list_objects(Bucket=bucket_name) init_keys = _get_keys(response) - eq(len(init_keys), 6) + assert len(init_keys) == 6 lc_interval = get_lc_debug_interval() # Wait for first expiration (plus fudge to handle the timer window) time.sleep(4*lc_interval) expire1_keys = list_bucket_storage_class(client, bucket_name) - eq(len(expire1_keys['STANDARD']), 4) - eq(len(expire1_keys[sc[1]]), 2) - eq(len(expire1_keys[sc[2]]), 0) + assert len(expire1_keys['STANDARD']) == 4 + assert len(expire1_keys[sc[1]]) == 2 + assert len(expire1_keys[sc[2]]) == 0 # Wait for next expiration cycle time.sleep(lc_interval) keep2_keys = list_bucket_storage_class(client, bucket_name) - eq(len(keep2_keys['STANDARD']), 4) - eq(len(keep2_keys[sc[1]]), 2) - eq(len(keep2_keys[sc[2]]), 0) + assert len(keep2_keys['STANDARD']) == 4 + assert len(keep2_keys[sc[1]]) == 2 + assert len(keep2_keys[sc[2]]) == 0 # Wait for final expiration cycle time.sleep(5*lc_interval) expire3_keys = list_bucket_storage_class(client, bucket_name) - eq(len(expire3_keys['STANDARD']), 2) - eq(len(expire3_keys[sc[1]]), 2) - eq(len(expire3_keys[sc[2]]), 2) + assert len(expire3_keys['STANDARD']) == 2 + assert len(expire3_keys[sc[1]]) == 2 + assert len(expire3_keys[sc[2]]) == 2 # The test harness for lifecycle is configured to treat days as 10 second intervals. 
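
``test_lifecycle_transition_single_rule_multi_trans`` below exercises two transitions declared inside a single rule, rather than one rule per storage class. The shape of such a configuration, with placeholder class names standing in for whatever ``configured_storage_classes()`` reports on the deployment under test::

    import boto3

    client = boto3.client('s3')
    lifecycle = {'Rules': [
        {'ID': 'rule1',
         'Prefix': 'expire1/',
         'Status': 'Enabled',
         # both hops live in one rule; 'LUKEWARM'/'FROZEN' are illustrative
         'Transitions': [{'Days': 1, 'StorageClass': 'LUKEWARM'},
                         {'Days': 7, 'StorageClass': 'FROZEN'}]}]}
    client.put_bucket_lifecycle_configuration(
        Bucket='example-bucket', LifecycleConfiguration=lifecycle)
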
-@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle expiration') -@attr('lifecycle') -@attr('lifecycle_transition') -@attr('fails_on_aws') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_transition +@pytest.mark.fails_on_aws def test_lifecycle_transition_single_rule_multi_trans(): sc = configured_storage_classes() if len(sc) < 3: - raise SkipTest + pytest.skip('requires 3 or more storage classes') bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo', 'keep2/bar', 'expire3/foo', 'expire3/bar']) @@ -10101,40 +8596,37 @@ def test_lifecycle_transition_single_rule_multi_trans(): # Get list of all keys response = client.list_objects(Bucket=bucket_name) init_keys = _get_keys(response) - eq(len(init_keys), 6) + assert len(init_keys) == 6 lc_interval = get_lc_debug_interval() # Wait for first expiration (plus fudge to handle the timer window) time.sleep(5*lc_interval) expire1_keys = list_bucket_storage_class(client, bucket_name) - eq(len(expire1_keys['STANDARD']), 4) - eq(len(expire1_keys[sc[1]]), 2) - eq(len(expire1_keys[sc[2]]), 0) + assert len(expire1_keys['STANDARD']) == 4 + assert len(expire1_keys[sc[1]]) == 2 + assert len(expire1_keys[sc[2]]) == 0 # Wait for next expiration cycle time.sleep(lc_interval) keep2_keys = list_bucket_storage_class(client, bucket_name) - eq(len(keep2_keys['STANDARD']), 4) - eq(len(keep2_keys[sc[1]]), 2) - eq(len(keep2_keys[sc[2]]), 0) + assert len(keep2_keys['STANDARD']) == 4 + assert len(keep2_keys[sc[1]]) == 2 + assert len(keep2_keys[sc[2]]) == 0 # Wait for final expiration cycle time.sleep(6*lc_interval) expire3_keys = list_bucket_storage_class(client, bucket_name) - eq(len(expire3_keys['STANDARD']), 4) - eq(len(expire3_keys[sc[1]]), 0) - eq(len(expire3_keys[sc[2]]), 2) + assert len(expire3_keys['STANDARD']) == 4 + assert len(expire3_keys[sc[1]]) == 0 + assert len(expire3_keys[sc[2]]) == 2 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='set lifecycle config with noncurrent version expiration') -@attr('lifecycle') -@attr('lifecycle_transition') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_transition def test_lifecycle_set_noncurrent_transition(): sc = configured_storage_classes() if len(sc) < 3: - raise SkipTest + pytest.skip('requires 3 or more storage classes') bucket = get_new_bucket() client = get_client() @@ -10162,20 +8654,17 @@ def test_lifecycle_set_noncurrent_transition(): lifecycle = {'Rules': rules} response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle non-current version expiration') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('lifecycle_transition') -@attr('fails_on_aws') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.lifecycle_transition +@pytest.mark.fails_on_aws def test_lifecycle_noncur_transition(): sc = configured_storage_classes() if len(sc) < 3: - raise SkipTest + pytest.skip('requires 3 or more storage classes') bucket = get_new_bucket() client = get_client() @@ -10208,27 +8697,27 @@ def test_lifecycle_noncur_transition(): create_multiple_versions(client, bucket, "test1/b", 3) init_keys = list_bucket_storage_class(client, bucket) - eq(len(init_keys['STANDARD']), 6) + assert len(init_keys['STANDARD']) == 6 lc_interval = get_lc_debug_interval() time.sleep(4*lc_interval) expire1_keys = 
list_bucket_storage_class(client, bucket) - eq(len(expire1_keys['STANDARD']), 2) - eq(len(expire1_keys[sc[1]]), 4) - eq(len(expire1_keys[sc[2]]), 0) + assert len(expire1_keys['STANDARD']) == 2 + assert len(expire1_keys[sc[1]]) == 4 + assert len(expire1_keys[sc[2]]) == 0 time.sleep(4*lc_interval) expire1_keys = list_bucket_storage_class(client, bucket) - eq(len(expire1_keys['STANDARD']), 2) - eq(len(expire1_keys[sc[1]]), 0) - eq(len(expire1_keys[sc[2]]), 4) + assert len(expire1_keys['STANDARD']) == 2 + assert len(expire1_keys[sc[1]]) == 0 + assert len(expire1_keys[sc[2]]) == 4 time.sleep(6*lc_interval) expire1_keys = list_bucket_storage_class(client, bucket) - eq(len(expire1_keys['STANDARD']), 2) - eq(len(expire1_keys[sc[1]]), 0) - eq(len(expire1_keys[sc[2]]), 0) + assert len(expire1_keys['STANDARD']) == 2 + assert len(expire1_keys[sc[1]]) == 0 + assert len(expire1_keys[sc[2]]) == 0 def verify_object(client, bucket, key, content=None, sc=None): response = client.get_object(Bucket=bucket, Key=key) @@ -10237,27 +8726,24 @@ def verify_object(client, bucket, key, content=None, sc=None): sc = 'STANDARD' if ('StorageClass' in response): - eq(response['StorageClass'], sc) + assert response['StorageClass'] == sc else: #storage class should be STANDARD - eq('STANDARD', sc) + assert 'STANDARD' == sc if (content != None): body = _get_body(response) - eq(body, content) + assert body == content # The test harness for lifecycle is configured to treat days as 10 second intervals. -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle transition for cloud') -@attr('lifecycle') -@attr('lifecycle_transition') -@attr('cloud_transition') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_transition +@pytest.mark.cloud_transition +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_lifecycle_cloud_transition(): cloud_sc = get_cloud_storage_class() if cloud_sc == None: - raise SkipTest + pytest.skip('no cloud_storage_class configured') retain_head_object = get_cloud_retain_head_object() target_path = get_cloud_target_path() @@ -10273,19 +8759,19 @@ def test_lifecycle_cloud_transition(): # Get list of all keys response = client.list_objects(Bucket=bucket_name) init_keys = _get_keys(response) - eq(len(init_keys), 4) + assert len(init_keys) == 4 lc_interval = get_lc_debug_interval() # Wait for first expiration (plus fudge to handle the timer window) time.sleep(10*lc_interval) expire1_keys = list_bucket_storage_class(client, bucket_name) - eq(len(expire1_keys['STANDARD']), 2) + assert len(expire1_keys['STANDARD']) == 2 if (retain_head_object != None and retain_head_object == "true"): - eq(len(expire1_keys[cloud_sc]), 2) + assert len(expire1_keys[cloud_sc]) == 2 else: - eq(len(expire1_keys[cloud_sc]), 0) + assert len(expire1_keys[cloud_sc]) == 0 time.sleep(2*lc_interval) # Check if objects copied to target path @@ -10307,42 +8793,39 @@ def test_lifecycle_cloud_transition(): if (retain_head_object != None and retain_head_object == "true"): # verify HEAD response response = client.head_object(Bucket=bucket_name, Key=keys[0]) - eq(0, response['ContentLength']) - eq(cloud_sc, response['StorageClass']) + assert 0 == response['ContentLength'] + assert cloud_sc == response['StorageClass'] # GET should return InvalidObjectState error e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=src_key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'InvalidObjectState') + assert 
status == 403 + assert error_code == 'InvalidObjectState' # COPY of object should return InvalidObjectState error copy_source = {'Bucket': bucket_name, 'Key': src_key} e = assert_raises(ClientError, client.copy, CopySource=copy_source, Bucket=bucket_name, Key='copy_obj') status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'InvalidObjectState') + assert status == 403 + assert error_code == 'InvalidObjectState' # DELETE should succeed response = client.delete_object(Bucket=bucket_name, Key=src_key) e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=src_key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 404) - eq(error_code, 'NoSuchKey') + assert status == 404 + assert error_code == 'NoSuchKey' # Similar to 'test_lifecycle_transition' but for cloud transition -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle transition for cloud') -@attr('lifecycle') -@attr('lifecycle_transition') -@attr('cloud_transition') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_transition +@pytest.mark.cloud_transition +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_lifecycle_cloud_multiple_transition(): cloud_sc = get_cloud_storage_class() if cloud_sc == None: - raise SkipTest + pytest.skip('[s3 cloud] section missing cloud_storage_class') retain_head_object = get_cloud_retain_head_object() target_path = get_cloud_target_path() @@ -10351,7 +8834,7 @@ def test_lifecycle_cloud_multiple_transition(): sc1 = get_cloud_regular_storage_class() if (sc1 == None): - raise SkipTest + pytest.skip('[s3 cloud] section missing storage_class') sc = ['STANDARD', sc1, cloud_sc] @@ -10367,58 +8850,54 @@ def test_lifecycle_cloud_multiple_transition(): # Get list of all keys response = client.list_objects(Bucket=bucket_name) init_keys = _get_keys(response) - eq(len(init_keys), 4) + assert len(init_keys) == 4 lc_interval = get_lc_debug_interval() # Wait for first expiration (plus fudge to handle the timer window) time.sleep(4*lc_interval) expire1_keys = list_bucket_storage_class(client, bucket_name) - eq(len(expire1_keys['STANDARD']), 2) - eq(len(expire1_keys[sc[1]]), 2) - eq(len(expire1_keys[sc[2]]), 0) + assert len(expire1_keys['STANDARD']) == 2 + assert len(expire1_keys[sc[1]]) == 2 + assert len(expire1_keys[sc[2]]) == 0 # Wait for next expiration cycle time.sleep(7*lc_interval) expire1_keys = list_bucket_storage_class(client, bucket_name) - eq(len(expire1_keys['STANDARD']), 2) - eq(len(expire1_keys[sc[1]]), 0) + assert len(expire1_keys['STANDARD']) == 2 + assert len(expire1_keys[sc[1]]) == 0 if (retain_head_object != None and retain_head_object == "true"): - eq(len(expire1_keys[sc[2]]), 2) + assert len(expire1_keys[sc[2]]) == 2 else: - eq(len(expire1_keys[sc[2]]), 0) + assert len(expire1_keys[sc[2]]) == 0 # Wait for final expiration cycle time.sleep(12*lc_interval) expire3_keys = list_bucket_storage_class(client, bucket_name) - eq(len(expire3_keys['STANDARD']), 2) - eq(len(expire3_keys[sc[1]]), 0) - eq(len(expire3_keys[sc[2]]), 0) + assert len(expire3_keys['STANDARD']) == 2 + assert len(expire3_keys[sc[1]]) == 0 + assert len(expire3_keys[sc[2]]) == 0 # Noncurrent objects for cloud transition -@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle non-current version expiration on cloud transition') -@attr('lifecycle') -@attr('lifecycle_expiration') -@attr('lifecycle_transition') -@attr('cloud_transition') 
-@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_expiration +@pytest.mark.lifecycle_transition +@pytest.mark.cloud_transition +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_lifecycle_noncur_cloud_transition(): cloud_sc = get_cloud_storage_class() if cloud_sc == None: - raise SkipTest + pytest.skip('[s3 cloud] section missing cloud_storage_class') retain_head_object = get_cloud_retain_head_object() target_path = get_cloud_target_path() target_sc = get_cloud_target_storage_class() sc1 = get_cloud_regular_storage_class() if (sc1 == None): - raise SkipTest + pytest.skip('[s3 cloud] section missing storage_class') sc = ['STANDARD', sc1, cloud_sc] @@ -10452,7 +8931,7 @@ def test_lifecycle_noncur_cloud_transition(): create_multiple_versions(client, bucket, k, 3) init_keys = list_bucket_storage_class(client, bucket) - eq(len(init_keys['STANDARD']), 6) + assert len(init_keys['STANDARD']) == 6 response = client.list_object_versions(Bucket=bucket) @@ -10460,19 +8939,19 @@ time.sleep(4*lc_interval) expire1_keys = list_bucket_storage_class(client, bucket) - eq(len(expire1_keys['STANDARD']), 2) - eq(len(expire1_keys[sc[1]]), 4) - eq(len(expire1_keys[sc[2]]), 0) + assert len(expire1_keys['STANDARD']) == 2 + assert len(expire1_keys[sc[1]]) == 4 + assert len(expire1_keys[sc[2]]) == 0 time.sleep(10*lc_interval) expire1_keys = list_bucket_storage_class(client, bucket) - eq(len(expire1_keys['STANDARD']), 2) - eq(len(expire1_keys[sc[1]]), 0) + assert len(expire1_keys['STANDARD']) == 2 + assert len(expire1_keys[sc[1]]) == 0 if (retain_head_object == None or retain_head_object == "false"): - eq(len(expire1_keys[sc[2]]), 0) + assert len(expire1_keys[sc[2]]) == 0 else: - eq(len(expire1_keys[sc[2]]), 4) + assert len(expire1_keys[sc[2]]) == 4 #check if versioned object exists on cloud endpoint if target_path == None: @@ -10490,18 +8969,15 @@ def test_lifecycle_noncur_cloud_transition(): verify_object(cloud_client, target_path, expire1_key1_str, None, target_sc) # The test harness for lifecycle is configured to treat days as 10 second intervals.
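One consequence of moving from free-form ``@attr`` strings to real pytest markers: pytest emits ``PytestUnknownMarkWarning`` for marks it does not know, and rejects them outright under ``--strict-markers``, so each custom mark used above needs to be registered. A hypothetical ``conftest.py`` fragment covering the marks seen in these hunks; an equivalent ``markers`` list in ``pytest.ini`` works just as well, and this sketch is illustrative rather than the patch's actual registration::

    # conftest.py (sketch): register the custom marks used by this suite so
    # `pytest -m lifecycle` filtering runs without PytestUnknownMarkWarning.
    def pytest_configure(config):
        for mark in ('lifecycle', 'lifecycle_expiration', 'lifecycle_transition',
                     'cloud_transition', 'encryption', 'tagging',
                     'bucket_policy', 'list_objects_v2',
                     'fails_on_aws', 'fails_on_rgw', 'fails_on_dbstore'):
            config.addinivalue_line('markers', mark)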
-@attr(resource='bucket') -@attr(method='put') -@attr(operation='test lifecycle transition for cloud') -@attr('lifecycle') -@attr('lifecycle_transition') -@attr('cloud_transition') -@attr('fails_on_aws') -@attr('fails_on_dbstore') +@pytest.mark.lifecycle +@pytest.mark.lifecycle_transition +@pytest.mark.cloud_transition +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_lifecycle_cloud_transition_large_obj(): cloud_sc = get_cloud_storage_class() if cloud_sc == None: - raise SkipTest + pytest.skip('[s3 cloud] section missing cloud_storage_class') retain_head_object = get_cloud_retain_head_object() target_path = get_cloud_target_path() @@ -10527,13 +9003,13 @@ def test_lifecycle_cloud_transition_large_obj(): # Wait for first expiration (plus fudge to handle the timer window) time.sleep(8*lc_interval) expire1_keys = list_bucket_storage_class(client, bucket) - eq(len(expire1_keys['STANDARD']), 1) + assert len(expire1_keys['STANDARD']) == 1 if (retain_head_object != None and retain_head_object == "true"): - eq(len(expire1_keys[cloud_sc]), 1) + assert len(expire1_keys[cloud_sc]) == 1 else: - eq(len(expire1_keys[cloud_sc]), 0) + assert len(expire1_keys[cloud_sc]) == 0 # Check if objects copied to target path if target_path == None: @@ -10547,48 +9023,31 @@ def test_lifecycle_cloud_transition_large_obj(): expire1_key1_str = prefix + keys[1] verify_object(cloud_client, target_path, expire1_key1_str, data, target_sc) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test SSE-C encrypted transfer 1 byte') -@attr(assertion='success') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_encrypted_transfer_1b(): _test_encryption_sse_customer_write(1) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test SSE-C encrypted transfer 1KB') -@attr(assertion='success') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_encrypted_transfer_1kb(): _test_encryption_sse_customer_write(1024) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test SSE-C encrypted transfer 1MB') -@attr(assertion='success') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_encrypted_transfer_1MB(): _test_encryption_sse_customer_write(1024*1024) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test SSE-C encrypted transfer 13 bytes') -@attr(assertion='success') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_encrypted_transfer_13b(): _test_encryption_sse_customer_write(13) -@attr(assertion='success') -@attr('encryption') +@pytest.mark.encryption def test_encryption_sse_c_method_head(): bucket_name = get_new_bucket() client = get_client() @@ -10606,18 +9065,14 @@ def test_encryption_sse_c_method_head(): e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers)) client.meta.events.register('before-call.s3.HeadObject', lf) response = client.head_object(Bucket=bucket_name, Key=key) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='object') -@attr(method='put') -@attr(operation='write encrypted with SSE-C and read without SSE-C') 
-@attr(assertion='operation fails') -@attr('encryption') +@pytest.mark.encryption def test_encryption_sse_c_present(): bucket_name = get_new_bucket() client = get_client() @@ -10635,13 +9090,9 @@ def test_encryption_sse_c_present(): e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='object') -@attr(method='put') -@attr(operation='write encrypted with SSE-C but read with other key') -@attr(assertion='operation fails') -@attr('encryption') +@pytest.mark.encryption def test_encryption_sse_c_other_key(): bucket_name = get_new_bucket() client = get_client() @@ -10666,13 +9117,9 @@ def test_encryption_sse_c_other_key(): client.meta.events.register('before-call.s3.GetObject', lf) e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='object') -@attr(method='put') -@attr(operation='write encrypted with SSE-C, but md5 is bad') -@attr(assertion='operation fails') -@attr('encryption') +@pytest.mark.encryption def test_encryption_sse_c_invalid_md5(): bucket_name = get_new_bucket() client = get_client() @@ -10688,13 +9135,9 @@ def test_encryption_sse_c_invalid_md5(): client.meta.events.register('before-call.s3.PutObject', lf) e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='object') -@attr(method='put') -@attr(operation='write encrypted with SSE-C, but dont provide MD5') -@attr(assertion='operation fails') -@attr('encryption') +@pytest.mark.encryption def test_encryption_sse_c_no_md5(): bucket_name = get_new_bucket() client = get_client() @@ -10709,11 +9152,7 @@ def test_encryption_sse_c_no_md5(): client.meta.events.register('before-call.s3.PutObject', lf) e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data) -@attr(resource='object') -@attr(method='put') -@attr(operation='declare SSE-C but do not provide key') -@attr(assertion='operation fails') -@attr('encryption') +@pytest.mark.encryption def test_encryption_sse_c_no_key(): bucket_name = get_new_bucket() client = get_client() @@ -10727,11 +9166,7 @@ def test_encryption_sse_c_no_key(): client.meta.events.register('before-call.s3.PutObject', lf) e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data) -@attr(resource='object') -@attr(method='put') -@attr(operation='Do not declare SSE-C but provide key and MD5') -@attr(assertion='operation successfull, no encryption') -@attr('encryption') +@pytest.mark.encryption def test_encryption_key_no_sse_c(): bucket_name = get_new_bucket() client = get_client() @@ -10746,7 +9181,7 @@ def test_encryption_key_no_sse_c(): client.meta.events.register('before-call.s3.PutObject', lf) e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 def _multipart_upload_enc(client, bucket_name, key, size, part_size, init_headers, part_headers, metadata, resend_parts): """ @@ -10796,16 +9231,12 @@ def _check_content_using_range_enc(client, bucket_name, key, data, step, enc_hea response = client.get_object(Bucket=bucket_name, Key=key, Range=r) read_range = response['ContentLength'] body = 
_get_body(response) - eq(read_range, toread) - eq(body, data[ofs:end+1]) + assert read_range == toread + assert body == data[ofs:end+1] -@attr(resource='object') -@attr(method='put') -@attr(operation='complete multi-part upload') -@attr(assertion='successful') -@attr('encryption') -@attr('fails_on_aws') # allow-unordered is a non-standard extension -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_aws +@pytest.mark.fails_on_dbstore def test_encryption_sse_c_multipart_upload(): bucket_name = get_new_bucket() client = get_client() @@ -10830,32 +9261,28 @@ def test_encryption_sse_c_multipart_upload(): response = client.head_bucket(Bucket=bucket_name) rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1)) - eq(rgw_object_count, 1) + assert rgw_object_count == 1 rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen)) - eq(rgw_bytes_used, objlen) + assert rgw_bytes_used == objlen lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers)) client.meta.events.register('before-call.s3.GetObject', lf) response = client.get_object(Bucket=bucket_name, Key=key) - eq(response['Metadata'], metadata) - eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type) + assert response['Metadata'] == metadata + assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type body = _get_body(response) - eq(body, data) + assert body == data size = response['ContentLength'] - eq(len(body), size) + assert len(body) == size _check_content_using_range_enc(client, bucket_name, key, data, 1000000, enc_headers=enc_headers) _check_content_using_range_enc(client, bucket_name, key, data, 10000000, enc_headers=enc_headers) -@attr(resource='object') -@attr(method='put') -@attr(operation='multipart upload with bad key for uploading chunks') -@attr(assertion='successful') -@attr('encryption') +@pytest.mark.encryption # TODO: remove this fails_on_rgw when I fix it -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_encryption_sse_c_multipart_invalid_chunks_1(): bucket_name = get_new_bucket() client = get_client() @@ -10879,15 +9306,11 @@ def test_encryption_sse_c_multipart_invalid_chunks_1(): e = assert_raises(ClientError, _multipart_upload_enc, client=client, bucket_name=bucket_name, key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='object') -@attr(method='put') -@attr(operation='multipart upload with bad md5 for chunks') -@attr(assertion='successful') -@attr('encryption') +@pytest.mark.encryption # TODO: remove this fails_on_rgw when I fix it -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_encryption_sse_c_multipart_invalid_chunks_2(): bucket_name = get_new_bucket() client = get_client() @@ -10911,14 +9334,10 @@ def test_encryption_sse_c_multipart_invalid_chunks_2(): e = assert_raises(ClientError, _multipart_upload_enc, client=client, bucket_name=bucket_name, key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='object') -@attr(method='put') -@attr(operation='complete multi-part upload and download with bad key') 
-@attr(assertion='successful') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_encryption_sse_c_multipart_bad_download(): bucket_name = get_new_bucket() client = get_client() @@ -10948,30 +9367,26 @@ def test_encryption_sse_c_multipart_bad_download(): response = client.head_bucket(Bucket=bucket_name) rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1)) - eq(rgw_object_count, 1) + assert rgw_object_count == 1 rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen)) - eq(rgw_bytes_used, objlen) + assert rgw_bytes_used == objlen lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers)) client.meta.events.register('before-call.s3.GetObject', lf) response = client.get_object(Bucket=bucket_name, Key=key) - eq(response['Metadata'], metadata) - eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type) + assert response['Metadata'] == metadata + assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers)) client.meta.events.register('before-call.s3.GetObject', lf) e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr(assertion='succeeds and returns written data') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_encryption_sse_c_post_object_authenticated_request(): bucket_name = get_new_bucket() client = get_client() @@ -11011,7 +9426,7 @@ def test_encryption_sse_c_post_object_authenticated_request(): ('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 get_headers = { 'x-amz-server-side-encryption-customer-algorithm': 'AES256', @@ -11022,11 +9437,10 @@ def test_encryption_sse_c_post_object_authenticated_request(): client.meta.events.register('before-call.s3.GetObject', lf) response = client.get_object(Bucket=bucket_name, Key='foo.txt') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(assertion='success') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def _test_sse_kms_customer_write(file_size, key_id = 'testkey-1'): """ Tests Create a file of A's, use it to set_contents_from_file. 
@@ -11047,19 +9461,15 @@ def _test_sse_kms_customer_write(file_size, key_id = 'testkey-1'): response = client.get_object(Bucket=bucket_name, Key='testobj') body = _get_body(response) - eq(body, data) + assert body == data -@attr(resource='object') -@attr(method='head') -@attr(operation='Test SSE-KMS encrypted does perform head properly') -@attr(assertion='success') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_sse_kms_method_head(): kms_keyid = get_main_kms_keyid() bucket_name = get_new_bucket() @@ -11076,21 +9486,17 @@ def test_sse_kms_method_head(): client.put_object(Bucket=bucket_name, Key=key, Body=data) response = client.head_object(Bucket=bucket_name, Key=key) - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms') - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid) + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms' + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers)) client.meta.events.register('before-call.s3.HeadObject', lf) e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='object') -@attr(method='put') -@attr(operation='write encrypted with SSE-KMS and read without SSE-KMS') -@attr(assertion='operation success') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_sse_kms_present(): kms_keyid = get_main_kms_keyid() bucket_name = get_new_bucket() @@ -11108,13 +9514,9 @@ def test_sse_kms_present(): response = client.get_object(Bucket=bucket_name, Key=key) body = _get_body(response) - eq(body, data) + assert body == data -@attr(resource='object') -@attr(method='put') -@attr(operation='declare SSE-KMS but do not provide key_id') -@attr(assertion='operation fails') -@attr('encryption') +@pytest.mark.encryption def test_sse_kms_no_key(): bucket_name = get_new_bucket() client = get_client() @@ -11130,11 +9532,7 @@ def test_sse_kms_no_key(): e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data) -@attr(resource='object') -@attr(method='put') -@attr(operation='Do not declare SSE-KMS but provide key_id') -@attr(assertion='operation successfull, no encryption') -@attr('encryption') +@pytest.mark.encryption def test_sse_kms_not_declared(): bucket_name = get_new_bucket() client = get_client() @@ -11149,14 +9547,10 @@ def test_sse_kms_not_declared(): e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='object') -@attr(method='put') -@attr(operation='complete KMS multi-part upload') -@attr(assertion='successful') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_sse_kms_multipart_upload(): kms_keyid = get_main_kms_keyid() bucket_name = get_new_bucket() @@ -11181,33 +9575,29 @@ def test_sse_kms_multipart_upload(): response = client.head_bucket(Bucket=bucket_name) rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1)) - eq(rgw_object_count, 1) + assert 
rgw_object_count == 1 rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen)) - eq(rgw_bytes_used, objlen) + assert rgw_bytes_used == objlen lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers)) client.meta.events.register('before-call.s3.UploadPart', lf) response = client.get_object(Bucket=bucket_name, Key=key) - eq(response['Metadata'], metadata) - eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type) + assert response['Metadata'] == metadata + assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type body = _get_body(response) - eq(body, data) + assert body == data size = response['ContentLength'] - eq(len(body), size) + assert len(body) == size _check_content_using_range(key, bucket_name, data, 1000000) _check_content_using_range(key, bucket_name, data, 10000000) -@attr(resource='object') -@attr(method='put') -@attr(operation='multipart KMS upload with bad key_id for uploading chunks') -@attr(assertion='successful') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_sse_kms_multipart_invalid_chunks_1(): kms_keyid = get_main_kms_keyid() kms_keyid2 = get_secondary_kms_keyid() @@ -11233,12 +9623,8 @@ def test_sse_kms_multipart_invalid_chunks_1(): resend_parts=resend_parts) -@attr(resource='object') -@attr(method='put') -@attr(operation='multipart KMS upload with unexistent key_id for chunks') -@attr(assertion='successful') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_sse_kms_multipart_invalid_chunks_2(): kms_keyid = get_main_kms_keyid() bucket_name = get_new_bucket() @@ -11263,12 +9649,8 @@ def test_sse_kms_multipart_invalid_chunks_2(): resend_parts=resend_parts) -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated KMS browser based upload via POST request') -@attr(assertion='succeeds and returns written data') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_sse_kms_post_object_authenticated_request(): kms_keyid = get_main_kms_keyid() bucket_name = get_new_bucket() @@ -11307,69 +9689,49 @@ def test_sse_kms_post_object_authenticated_request(): ('file', ('bar'))]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 response = client.get_object(Bucket=bucket_name, Key='foo.txt') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='put') -@attr(operation='Test SSE-KMS encrypted transfer 1 byte') -@attr(assertion='success') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_sse_kms_transfer_1b(): kms_keyid = get_main_kms_keyid() if kms_keyid is None: - raise SkipTest + pytest.skip('[s3 main] section missing kms_keyid') _test_sse_kms_customer_write(1, key_id = kms_keyid) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test SSE-KMS encrypted transfer 1KB') -@attr(assertion='success') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_sse_kms_transfer_1kb(): kms_keyid = get_main_kms_keyid() if kms_keyid is None: - raise SkipTest + pytest.skip('[s3 main] section missing kms_keyid') _test_sse_kms_customer_write(1024, key_id = kms_keyid) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 
SSE-KMS encrypted transfer 1MB') -@attr(assertion='success') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_sse_kms_transfer_1MB(): kms_keyid = get_main_kms_keyid() if kms_keyid is None: - raise SkipTest + pytest.skip('[s3 main] section missing kms_keyid') _test_sse_kms_customer_write(1024*1024, key_id = kms_keyid) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test SSE-KMS encrypted transfer 13 bytes') -@attr(assertion='success') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.fails_on_dbstore def test_sse_kms_transfer_13b(): kms_keyid = get_main_kms_keyid() if kms_keyid is None: - raise SkipTest + pytest.skip('[s3 main] section missing kms_keyid') _test_sse_kms_customer_write(13, key_id = kms_keyid) -@attr(resource='object') -@attr(method='get') -@attr(operation='write encrypted with SSE-KMS and read with SSE-KMS') -@attr(assertion='operation fails') -@attr('encryption') +@pytest.mark.encryption def test_sse_kms_read_declare(): bucket_name = get_new_bucket() client = get_client() @@ -11386,13 +9748,9 @@ def test_sse_kms_read_declare(): e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 -@attr(resource='bucket') -@attr(method='get') -@attr(operation='Test Bucket Policy') -@attr(assertion='succeeds') -@attr('bucket-policy') +@pytest.mark.bucket_policy def test_bucket_policy(): bucket_name = get_new_bucket() client = get_client() @@ -11419,10 +9777,10 @@ def test_bucket_policy(): alt_client = get_alt_client() response = alt_client.list_objects(Bucket=bucket_name) - eq(len(response['Contents']), 1) + assert len(response['Contents']) == 1 -@attr('bucket-policy') -@attr('list-objects-v2') +@pytest.mark.bucket_policy +@pytest.mark.list_objects_v2 def test_bucketv2_policy(): bucket_name = get_new_bucket() client = get_client() @@ -11449,13 +9807,9 @@ def test_bucketv2_policy(): alt_client = get_alt_client() response = alt_client.list_objects_v2(Bucket=bucket_name) - eq(len(response['Contents']), 1) + assert len(response['Contents']) == 1 -@attr(resource='bucket') -@attr(method='get') -@attr(operation='Test Bucket Policy and ACL') -@attr(assertion='fails') -@attr('bucket-policy') +@pytest.mark.bucket_policy def test_bucket_policy_acl(): bucket_name = get_new_bucket() client = get_client() @@ -11484,18 +9838,14 @@ def test_bucket_policy_acl(): alt_client = get_alt_client() e = assert_raises(ClientError, alt_client.list_objects, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + assert error_code == 'AccessDenied' client.delete_bucket_policy(Bucket=bucket_name) client.put_bucket_acl(Bucket=bucket_name, ACL='public-read') -@attr(resource='bucket') -@attr(method='get') -@attr(operation='Test Bucket Policy and ACL with list-objects-v2') -@attr(assertion='fails') -@attr('bucket-policy') -@attr('list-objects-v2') +@pytest.mark.bucket_policy +@pytest.mark.list_objects_v2 def test_bucketv2_policy_acl(): bucket_name = get_new_bucket() client = get_client() @@ -11524,19 +9874,15 @@ def test_bucketv2_policy_acl(): alt_client = get_alt_client() e = assert_raises(ClientError, alt_client.list_objects_v2, Bucket=bucket_name) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) - eq(error_code, 'AccessDenied') + assert status == 403 + 
assert error_code == 'AccessDenied' client.delete_bucket_policy(Bucket=bucket_name) client.put_bucket_acl(Bucket=bucket_name, ACL='public-read') -@attr(resource='bucket') -@attr(method='get') -@attr(operation='Test Bucket Policy for a user belonging to a different tenant') -@attr(assertion='succeeds') -@attr('bucket-policy') +@pytest.mark.bucket_policy # TODO: remove this fails_on_rgw when I fix it -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_bucket_policy_different_tenant(): bucket_name = get_new_bucket() client = get_client() @@ -11576,16 +9922,12 @@ def test_bucket_policy_different_tenant(): #alt_client = get_alt_client() #response = alt_client.list_objects(Bucket=bucket_name) - eq(len(response['Contents']), 1) + assert len(response['Contents']) == 1 -@attr(resource='bucket') -@attr(method='get') -@attr(operation='Test Bucket Policy for a user belonging to a different tenant') -@attr(assertion='succeeds') -@attr('bucket-policy') +@pytest.mark.bucket_policy # TODO: remove this fails_on_rgw when I fix it -@attr('fails_on_rgw') -@attr('list-objects-v2') +@pytest.mark.fails_on_rgw +@pytest.mark.list_objects_v2 def test_bucketv2_policy_different_tenant(): bucket_name = get_new_bucket() client = get_client() @@ -11625,13 +9967,9 @@ def test_bucketv2_policy_different_tenant(): #alt_client = get_alt_client() #response = alt_client.list_objects_v2(Bucket=bucket_name) - eq(len(response['Contents']), 1) + assert len(response['Contents']) == 1 -@attr(resource='bucket') -@attr(method='get') -@attr(operation='Test Bucket Policy on another bucket') -@attr(assertion='succeeds') -@attr('bucket-policy') +@pytest.mark.bucket_policy def test_bucket_policy_another_bucket(): bucket_name = get_new_bucket() bucket_name2 = get_new_bucket() @@ -11662,18 +10000,14 @@ def test_bucket_policy_another_bucket(): alt_client = get_alt_client() response = alt_client.list_objects(Bucket=bucket_name) - eq(len(response['Contents']), 1) + assert len(response['Contents']) == 1 alt_client = get_alt_client() response = alt_client.list_objects(Bucket=bucket_name2) - eq(len(response['Contents']), 1) + assert len(response['Contents']) == 1 -@attr(resource='bucket') -@attr(method='get') -@attr(operation='Test Bucket Policy on another bucket with list-objects-v2') -@attr(assertion='succeeds') -@attr('bucket-policy') -@attr('list-objects-v2') +@pytest.mark.bucket_policy +@pytest.mark.list_objects_v2 def test_bucketv2_policy_another_bucket(): bucket_name = get_new_bucket() bucket_name2 = get_new_bucket() @@ -11704,18 +10038,15 @@ def test_bucketv2_policy_another_bucket(): alt_client = get_alt_client() response = alt_client.list_objects_v2(Bucket=bucket_name) - eq(len(response['Contents']), 1) + assert len(response['Contents']) == 1 alt_client = get_alt_client() response = alt_client.list_objects_v2(Bucket=bucket_name2) - eq(len(response['Contents']), 1) + assert len(response['Contents']) == 1 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='Test put condition operator end with ifExists') -@attr('bucket-policy') +@pytest.mark.bucket_policy # TODO: remove this fails_on_rgw when I fix it -@attr('fails_on_rgw') +@pytest.mark.fails_on_rgw def test_bucket_policy_set_condition_operator_end_with_IfExists(): bucket_name = get_new_bucket() client = get_client() @@ -11746,7 +10077,7 @@ def test_bucket_policy_set_condition_operator_end_with_IfExists(): client.meta.events.register('before-call.s3.GetObject', lf) response = client.get_object(Bucket=bucket_name, Key=key) - 
eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 request_headers={'referer': 'http://www.example.com/index.html'} @@ -11754,11 +10085,11 @@ def test_bucket_policy_set_condition_operator_end_with_IfExists(): client.meta.events.register('before-call.s3.GetObject', lf) response = client.get_object(Bucket=bucket_name, Key=key) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 # the 'referer' headers need to be removed for this one #response = client.get_object(Bucket=bucket_name, Key=key) - #eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + #assert response['ResponseMetadata']['HTTPStatusCode'] == 200 request_headers={'referer': 'http://example.com'} @@ -11768,7 +10099,7 @@ def test_bucket_policy_set_condition_operator_end_with_IfExists(): # TODO: Compare Requests sent in Boto3, Wireshark, RGW Log for both boto and boto3 e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) + assert status == 403 response = client.get_bucket_policy(Bucket=bucket_name) print(response) @@ -11784,12 +10115,8 @@ def _make_random_string(size): return ''.join(random.choice(string.ascii_letters) for _ in range(size)) -@attr(resource='object') -@attr(method='get') -@attr(operation='Test Get/PutObjTagging output') -@attr(assertion='success') -@attr('tagging') -@attr('fails_on_dbstore') +@pytest.mark.tagging +@pytest.mark.fails_on_dbstore def test_get_obj_tagging(): key = 'testputtags' bucket_name = _create_key_with_random_content(key) @@ -11797,17 +10124,13 @@ def test_get_obj_tagging(): input_tagset = _create_simple_tagset(2) response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.get_object_tagging(Bucket=bucket_name, Key=key) - eq(response['TagSet'], input_tagset['TagSet']) + assert response['TagSet'] == input_tagset['TagSet'] -@attr(resource='object') -@attr(method='get') -@attr(operation='Test HEAD obj tagging output') -@attr(assertion='success') -@attr('tagging') +@pytest.mark.tagging def test_get_obj_head_tagging(): key = 'testputtags' bucket_name = _create_key_with_random_content(key) @@ -11816,18 +10139,14 @@ def test_get_obj_head_tagging(): input_tagset = _create_simple_tagset(count) response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.head_object(Bucket=bucket_name, Key=key) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-tagging-count'], str(count)) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-tagging-count'] == str(count) -@attr(resource='object') -@attr(method='get') -@attr(operation='Test Put max allowed tags') -@attr(assertion='success') -@attr('tagging') -@attr('fails_on_dbstore') +@pytest.mark.tagging +@pytest.mark.fails_on_dbstore def test_put_max_tags(): key = 'testputmaxtags' bucket_name = _create_key_with_random_content(key) @@ -11835,16 +10154,12 @@ def test_put_max_tags(): input_tagset = _create_simple_tagset(10) response = client.put_object_tagging(Bucket=bucket_name, 
Key=key, Tagging=input_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.get_object_tagging(Bucket=bucket_name, Key=key) - eq(response['TagSet'], input_tagset['TagSet']) + assert response['TagSet'] == input_tagset['TagSet'] -@attr(resource='object') -@attr(method='get') -@attr(operation='Test Put max allowed tags') -@attr(assertion='fails') -@attr('tagging') +@pytest.mark.tagging def test_put_excess_tags(): key = 'testputmaxtags' bucket_name = _create_key_with_random_content(key) @@ -11853,17 +10168,13 @@ def test_put_excess_tags(): input_tagset = _create_simple_tagset(11) e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidTag') + assert status == 400 + assert error_code == 'InvalidTag' response = client.get_object_tagging(Bucket=bucket_name, Key=key) - eq(len(response['TagSet']), 0) + assert len(response['TagSet']) == 0 -@attr(resource='object') -@attr(method='get') -@attr(operation='Test Put max allowed k-v size') -@attr(assertion='success') -@attr('tagging') +@pytest.mark.tagging def test_put_max_kvsize_tags(): key = 'testputmaxkeysize' bucket_name = _create_key_with_random_content(key) @@ -11878,17 +10189,13 @@ def test_put_max_kvsize_tags(): input_tagset = {'TagSet': tagset} response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.get_object_tagging(Bucket=bucket_name, Key=key) for kv_pair in response['TagSet']: - eq((kv_pair in input_tagset['TagSet']), True) + assert kv_pair in input_tagset['TagSet'] -@attr(resource='object') -@attr(method='get') -@attr(operation='Test exceed key size') -@attr(assertion='success') -@attr('tagging') +@pytest.mark.tagging def test_put_excess_key_tags(): key = 'testputexcesskeytags' bucket_name = _create_key_with_random_content(key) @@ -11904,17 +10211,13 @@ def test_put_excess_key_tags(): e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidTag') + assert status == 400 + assert error_code == 'InvalidTag' response = client.get_object_tagging(Bucket=bucket_name, Key=key) - eq(len(response['TagSet']), 0) + assert len(response['TagSet']) == 0 -@attr(resource='object') -@attr(method='get') -@attr(operation='Test exceed val size') -@attr(assertion='success') -@attr('tagging') +@pytest.mark.tagging def test_put_excess_val_tags(): key = 'testputexcesskeytags' bucket_name = _create_key_with_random_content(key) @@ -11930,18 +10233,14 @@ def test_put_excess_val_tags(): e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) - eq(error_code, 'InvalidTag') + assert status == 400 + assert error_code == 'InvalidTag' response = client.get_object_tagging(Bucket=bucket_name, Key=key) - eq(len(response['TagSet']), 0) + assert len(response['TagSet']) == 0 -@attr(resource='object') -@attr(method='get') -@attr(operation='Test PUT modifies existing tags') -@attr(assertion='success') -@attr('tagging') -@attr('fails_on_dbstore') +@pytest.mark.tagging 
+@pytest.mark.fails_on_dbstore def test_put_modify_tags(): key = 'testputmodifytags' bucket_name = _create_key_with_random_content(key) @@ -11954,10 +10253,10 @@ def test_put_modify_tags(): input_tagset = {'TagSet': tagset} response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.get_object_tagging(Bucket=bucket_name, Key=key) - eq(response['TagSet'], input_tagset['TagSet']) + assert response['TagSet'] == input_tagset['TagSet'] tagset2 = [] tagset2.append({'Key': 'key3', 'Value': 'val3'}) @@ -11965,17 +10264,13 @@ def test_put_modify_tags(): input_tagset2 = {'TagSet': tagset2} response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset2) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.get_object_tagging(Bucket=bucket_name, Key=key) - eq(response['TagSet'], input_tagset2['TagSet']) + assert response['TagSet'] == input_tagset2['TagSet'] -@attr(resource='object') -@attr(method='get') -@attr(operation='Test Delete tags') -@attr(assertion='success') -@attr('tagging') -@attr('fails_on_dbstore') +@pytest.mark.tagging +@pytest.mark.fails_on_dbstore def test_put_delete_tags(): key = 'testputmodifytags' bucket_name = _create_key_with_random_content(key) @@ -11983,23 +10278,19 @@ def test_put_delete_tags(): input_tagset = _create_simple_tagset(2) response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.get_object_tagging(Bucket=bucket_name, Key=key) - eq(response['TagSet'], input_tagset['TagSet']) + assert response['TagSet'] == input_tagset['TagSet'] response = client.delete_object_tagging(Bucket=bucket_name, Key=key) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response = client.get_object_tagging(Bucket=bucket_name, Key=key) - eq(len(response['TagSet']), 0) + assert len(response['TagSet']) == 0 -@attr(resource='object') -@attr(method='post') -@attr(operation='anonymous browser based upload via POST request') -@attr('tagging') -@attr(assertion='succeeds and returns written data') -@attr('fails_on_dbstore') +@pytest.mark.tagging +@pytest.mark.fails_on_dbstore def test_post_object_tags_anonymous_request(): bucket_name = get_new_bucket_name() client = get_client() @@ -12022,19 +10313,15 @@ def test_post_object_tags_anonymous_request(): ]) r = requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 response = client.get_object(Bucket=bucket_name, Key=key_name) body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' response = client.get_object_tagging(Bucket=bucket_name, Key=key_name) - eq(response['TagSet'], input_tagset['TagSet']) + assert response['TagSet'] == input_tagset['TagSet'] -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated browser based upload via POST request') -@attr('tagging') -@attr(assertion='succeeds and returns written data') +@pytest.mark.tagging def test_post_object_tags_authenticated_request(): bucket_name = get_new_bucket() client = get_client() @@ -12074,18 +10361,14 @@ def test_post_object_tags_authenticated_request(): ('file', ('bar'))]) r = 
requests.post(url, files=payload, verify=get_config_ssl_verify()) - eq(r.status_code, 204) + assert r.status_code == 204 response = client.get_object(Bucket=bucket_name, Key='foo.txt') body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='put') -@attr(operation='Test PutObj with tagging headers') -@attr(assertion='success') -@attr('tagging') -@attr('fails_on_dbstore') +@pytest.mark.tagging +@pytest.mark.fails_on_dbstore def test_put_obj_with_tags(): bucket_name = get_new_bucket() client = get_client() @@ -12106,23 +10389,19 @@ def test_put_obj_with_tags(): client.put_object(Bucket=bucket_name, Key=key, Body=data) response = client.get_object(Bucket=bucket_name, Key=key) body = _get_body(response) - eq(body, data) + assert body == data response = client.get_object_tagging(Bucket=bucket_name, Key=key) response_tagset = response['TagSet'] tagset = tagset - eq(response_tagset, tagset) + assert response_tagset == tagset def _make_arn_resource(path="*"): return "arn:aws:s3:::{}".format(path) -@attr(resource='object') -@attr(method='get') -@attr(operation='Test GetObjTagging public read') -@attr(assertion='success') -@attr('tagging') -@attr('bucket-policy') -@attr('fails_on_dbstore') +@pytest.mark.tagging +@pytest.mark.bucket_policy +@pytest.mark.fails_on_dbstore def test_get_tags_acl_public(): key = 'testputtagsacl' bucket_name = _create_key_with_random_content(key) @@ -12136,20 +10415,16 @@ def test_get_tags_acl_public(): input_tagset = _create_simple_tagset(10) response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 alt_client = get_alt_client() response = alt_client.get_object_tagging(Bucket=bucket_name, Key=key) - eq(response['TagSet'], input_tagset['TagSet']) + assert response['TagSet'] == input_tagset['TagSet'] -@attr(resource='object') -@attr(method='get') -@attr(operation='Test PutObjTagging public wrote') -@attr(assertion='success') -@attr('tagging') -@attr('bucket-policy') -@attr('fails_on_dbstore') +@pytest.mark.tagging +@pytest.mark.bucket_policy +@pytest.mark.fails_on_dbstore def test_put_tags_acl_public(): key = 'testputtagsacl' bucket_name = _create_key_with_random_content(key) @@ -12164,17 +10439,13 @@ def test_put_tags_acl_public(): input_tagset = _create_simple_tagset(10) alt_client = get_alt_client() response = alt_client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 response = client.get_object_tagging(Bucket=bucket_name, Key=key) - eq(response['TagSet'], input_tagset['TagSet']) + assert response['TagSet'] == input_tagset['TagSet'] -@attr(resource='object') -@attr(method='get') -@attr(operation='test deleteobjtagging public') -@attr(assertion='success') -@attr('tagging') -@attr('bucket-policy') +@pytest.mark.tagging +@pytest.mark.bucket_policy def test_delete_tags_obj_public(): key = 'testputtagsacl' bucket_name = _create_key_with_random_content(key) @@ -12188,21 +10459,16 @@ def test_delete_tags_obj_public(): input_tagset = _create_simple_tagset(10) response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 alt_client = get_alt_client() response = 
alt_client.delete_object_tagging(Bucket=bucket_name, Key=key) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response = client.get_object_tagging(Bucket=bucket_name, Key=key) - eq(len(response['TagSet']), 0) + assert len(response['TagSet']) == 0 -@attr(resource='object') -@attr(method='put') -@attr(operation='test whether a correct version-id returned') -@attr(assertion='version-id is same as bucket list') -@attr('versioning') def test_versioning_bucket_atomic_upload_return_version_id(): bucket_name = get_new_bucket() client = get_client() @@ -12216,27 +10482,22 @@ def test_versioning_bucket_atomic_upload_return_version_id(): response = client.list_object_versions(Bucket=bucket_name) versions = response['Versions'] for version in versions: - eq(version['VersionId'], version_id) + assert version['VersionId'] == version_id # for versioning-default-bucket, no version-id should return. bucket_name = get_new_bucket() key = 'baz' response = client.put_object(Bucket=bucket_name, Key=key) - eq(('VersionId' in response), False) + assert 'VersionId' not in response # for versioning-suspended-bucket, no version-id should return. bucket_name = get_new_bucket() key = 'baz' check_configure_versioning_retry(bucket_name, "Suspended", "Suspended") response = client.put_object(Bucket=bucket_name, Key=key) - eq(('VersionId' in response), False) + assert 'VersionId' not in response -@attr(resource='object') -@attr(method='put') -@attr(operation='test whether a correct version-id returned') -@attr(assertion='version-id is same as bucket list') -@attr('versioning') def test_versioning_bucket_multipart_upload_return_version_id(): content_type='text/bla' objlen = 30 * 1024 * 1024 @@ -12257,7 +10518,7 @@ def test_versioning_bucket_multipart_upload_return_version_id(): response = client.list_object_versions(Bucket=bucket_name) versions = response['Versions'] for version in versions: - eq(version['VersionId'], version_id) + assert version['VersionId'] == version_id # for versioning-default-bucket, no version-id should return.
bucket_name = get_new_bucket() @@ -12266,7 +10527,7 @@ def test_versioning_bucket_multipart_upload_return_version_id(): (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata) response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts}) - eq(('VersionId' in response), False) + assert 'VersionId' not in response # for versioning-suspended-bucket, no version-id should return bucket_name = get_new_bucket() @@ -12276,15 +10537,11 @@ def test_versioning_bucket_multipart_upload_return_version_id(): (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata) response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts}) - eq(('VersionId' in response), False) + assert 'VersionId' not in response -@attr(resource='object') -@attr(method='get') -@attr(operation='Test ExistingObjectTag conditional on get object') -@attr(assertion='success') -@attr('tagging') -@attr('bucket-policy') -@attr('fails_on_dbstore') +@pytest.mark.tagging +@pytest.mark.bucket_policy +@pytest.mark.fails_on_dbstore def test_bucket_policy_get_obj_existing_tag(): bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag']) client = get_client() @@ -12307,7 +10564,7 @@ def test_bucket_policy_get_obj_existing_tag(): input_tagset = {'TagSet': tagset} response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 tagset2 = [] tagset2.append({'Key': 'security', 'Value': 'private'}) @@ -12315,7 +10572,7 @@ def test_bucket_policy_get_obj_existing_tag(): input_tagset = {'TagSet': tagset2} response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 tagset3 = [] tagset3.append({'Key': 'security1', 'Value': 'public'}) @@ -12323,27 +10580,23 @@ def test_bucket_policy_get_obj_existing_tag(): input_tagset = {'TagSet': tagset3} response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 alt_client = get_alt_client() response = alt_client.get_object(Bucket=bucket_name, Key='publictag') - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='privatetag') status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) + assert status == 403 e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='invalidtag') status, error_code = _get_status_and_error_code(e.response) - eq(status, 403) + assert status == 403 -@attr(resource='object') -@attr(method='get') -@attr(operation='Test ExistingObjectTag conditional on get object tagging') -@attr(assertion='success') -@attr('tagging') -@attr('bucket-policy') -@attr('fails_on_dbstore') +@pytest.mark.tagging +@pytest.mark.bucket_policy +@pytest.mark.fails_on_dbstore def test_bucket_policy_get_obj_tagging_existing_tag(): bucket_name =
-@attr(resource='object')
-@attr(method='get')
-@attr(operation='Test ExistingObjectTag conditional on get object tagging')
-@attr(assertion='success')
-@attr('tagging')
-@attr('bucket-policy')
-@attr('fails_on_dbstore')
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
 def test_bucket_policy_get_obj_tagging_existing_tag():
     bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
     client = get_client()
@@ -12366,7 +10619,7 @@ def test_bucket_policy_get_obj_tagging_existing_tag():
     input_tagset = {'TagSet': tagset}
 
     response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     tagset2 = []
     tagset2.append({'Key': 'security', 'Value': 'private'})
@@ -12374,7 +10627,7 @@
     input_tagset = {'TagSet': tagset2}
 
     response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     tagset3 = []
     tagset3.append({'Key': 'security1', 'Value': 'public'})
@@ -12382,34 +10635,30 @@
     input_tagset = {'TagSet': tagset3}
 
     response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     alt_client = get_alt_client()
     response = alt_client.get_object_tagging(Bucket=bucket_name, Key='publictag')
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     # A get object itself should fail since we allowed only GetObjectTagging
     e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
     e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
     e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
-@attr(resource='object')
-@attr(method='get')
-@attr(operation='Test ExistingObjectTag conditional on put object tagging')
-@attr(assertion='success')
-@attr('tagging')
-@attr('bucket-policy')
-@attr('fails_on_dbstore')
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
 def test_bucket_policy_put_obj_tagging_existing_tag():
     bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
     client = get_client()
@@ -12432,7 +10681,7 @@ def test_bucket_policy_put_obj_tagging_existing_tag():
     input_tagset = {'TagSet': tagset}
 
     response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     tagset2 = []
     tagset2.append({'Key': 'security', 'Value': 'private'})
@@ -12440,7 +10689,7 @@
     input_tagset = {'TagSet': tagset2}
 
     response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     alt_client = get_alt_client()
     # PUT requests with object tagging are a bit wierd, if you forget to put
@@ -12454,11 +10703,11 @@
     input_tagset = {'TagSet': testtagset1}
 
     response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     e = assert_raises(ClientError, alt_client.put_object_tagging, Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
     testtagset2 = []
     testtagset2.append({'Key': 'security', 'Value': 'private'})
@@ -12466,23 +10715,19 @@
     input_tagset = {'TagSet': testtagset2}
 
     response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     # Now try putting the original tags again, this should fail
     input_tagset = {'TagSet': testtagset1}
 
     e = assert_raises(ClientError, alt_client.put_object_tagging, Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Test copy-source conditional on put obj')
-@attr(assertion='success')
-@attr('tagging')
-@attr('bucket-policy')
-@attr('fails_on_dbstore')
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
 def test_bucket_policy_put_obj_copy_source():
     bucket_name = _create_objects(keys=['public/foo', 'public/bar', 'private/foo'])
     client = get_client()
@@ -12515,25 +10760,21 @@
     # policy on how to do this right
     response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo')
     body = _get_body(response)
-    eq(body, 'public/foo')
+    assert body == 'public/foo'
 
     copy_source = {'Bucket': bucket_name, 'Key': 'public/bar'}
     alt_client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key='new_foo2')
 
     response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo2')
     body = _get_body(response)
-    eq(body, 'public/bar')
+    assert body == 'public/bar'
 
     copy_source = {'Bucket': bucket_name, 'Key': 'private/foo'}
     check_access_denied(alt_client.copy_object, Bucket=bucket_name2, CopySource=copy_source, Key='new_foo2')
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Test copy-source conditional on put obj')
-@attr(assertion='success')
-@attr('tagging')
-@attr('bucket-policy')
-@attr('fails_on_dbstore')
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
 def test_bucket_policy_put_obj_copy_source_meta():
     src_bucket_name = _create_objects(keys=['public/foo', 'public/bar'])
     client = get_client()
@@ -12569,7 +10810,7 @@
     # policy on how to do this right
     response = alt_client.get_object(Bucket=bucket_name, Key='new_foo')
     body = _get_body(response)
-    eq(body, 'public/foo')
+    assert body == 'public/foo'
 
     # remove the x-amz-metadata-directive header
     def remove_header(**kwargs):
         if ("x-amz-metadata-directive" in kwargs['params']['headers']):
@@ -12582,12 +10823,8 @@
     check_access_denied(alt_client.copy_object, Bucket=bucket_name, CopySource=copy_source, Key='new_foo2', Metadata={"foo": "bar"})
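The copy-source tests rely on the ``s3:x-amz-copy-source`` condition key to restrict which source prefixes may be copied from; a hedged sketch of such a statement (bucket names are illustrative)::

    import json

    policy_document = json.dumps({
        'Version': '2012-10-17',
        'Statement': [{
            'Effect': 'Allow',
            'Principal': '*',
            'Action': 's3:PutObject',
            'Resource': 'arn:aws:s3:::demo-dst/*',
            # Allow server-side copies only when the source is under public/.
            'Condition': {'StringLike': {'s3:x-amz-copy-source': 'demo-src/public/*'}}
        }]
    })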
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Test put obj with canned-acl not to be public')
-@attr(assertion='success')
-@attr('tagging')
-@attr('bucket-policy')
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
 def test_bucket_policy_put_obj_acl():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -12615,7 +10852,7 @@ def test_bucket_policy_put_obj_acl():
     # as an ERROR anyway
     response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
     #response = alt_client.put_object_acl(Bucket=bucket_name, Key=key1, ACL='private')
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     key2 = 'public-key'
 
@@ -12624,14 +10861,10 @@
     e = assert_raises(ClientError, alt_client.put_object, Bucket=bucket_name, Key=key2, Body=key2)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Test put obj with amz-grant back to bucket-owner')
-@attr(assertion='success')
-@attr('bucket-policy')
+@pytest.mark.bucket_policy
 def test_bucket_policy_put_obj_grant():
 
     bucket_name = get_new_bucket()
@@ -12669,7 +10902,7 @@
     alt_client.meta.events.register('before-call.s3.PutObject', lf)
     response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     def remove_header(**kwargs):
         if ("x-amz-grant-full-control" in kwargs['params']['headers']):
@@ -12679,7 +10912,7 @@
     key2 = 'key2'
     response = alt_client.put_object(Bucket=bucket_name2, Key=key2, Body=key2)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     acl1_response = client.get_object_acl(Bucket=bucket_name, Key=key1)
 
@@ -12689,15 +10922,11 @@
     acl2_response = alt_client.get_object_acl(Bucket=bucket_name2, Key=key2)
 
-    eq(acl1_response['Grants'][0]['Grantee']['ID'], main_user_id)
-    eq(acl2_response['Grants'][0]['Grantee']['ID'], alt_user_id)
+    assert acl1_response['Grants'][0]['Grantee']['ID'] == main_user_id
+    assert acl2_response['Grants'][0]['Grantee']['ID'] == alt_user_id
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Deny put obj specifying both sse-c and sse-s3')
-@attr(assertion='success')
-@attr('encryption')
+@pytest.mark.encryption
 def test_put_obj_enc_conflict_c_s3():
     bucket_name = get_new_bucket()
     client = get_v2_client()
@@ -12717,14 +10946,10 @@
     client.meta.events.register('before-call.s3.PutObject', lf)
     e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidArgument')
+    assert status == 400
+    assert error_code == 'InvalidArgument'
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Deny put obj specifying both sse-c and sse-kms')
-@attr(assertion='success')
-@attr('encryption')
+@pytest.mark.encryption
 def test_put_obj_enc_conflict_c_kms():
     kms_keyid = get_main_kms_keyid()
     if kms_keyid is None:
@@ -12748,14 +10973,10 @@
     client.meta.events.register('before-call.s3.PutObject', lf)
     e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidArgument')
+    assert status == 400
+    assert error_code == 'InvalidArgument'
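The ``lf`` handlers registered via ``client.meta.events.register`` in these conflict tests inject raw headers that boto3 exposes no parameter for. A minimal sketch of the mechanism, with illustrative header values::

    import boto3

    def add_conflicting_sse_headers(**kwargs):
        # Mutate the outgoing request just before it is signed and sent.
        headers = kwargs['params']['headers']
        headers['x-amz-server-side-encryption'] = 'AES256'
        headers['x-amz-server-side-encryption-customer-algorithm'] = 'AES256'

    s3 = boto3.client('s3')  # placeholder client
    s3.meta.events.register('before-call.s3.PutObject', add_conflicting_sse_headers)
    # The next put_object call carries both SSE-S3 and SSE-C headers, which
    # the server is expected to reject with 400 InvalidArgument.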
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Deny put obj specifying sse-s3 with kms key id')
-@attr(assertion='success')
-@attr('encryption')
+@pytest.mark.encryption
 def test_put_obj_enc_conflict_s3_kms():
     kms_keyid = get_main_kms_keyid()
     if kms_keyid is None:
@@ -12776,14 +10997,10 @@
     client.meta.events.register('before-call.s3.PutObject', lf)
     e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidArgument')
+    assert status == 400
+    assert error_code == 'InvalidArgument'
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Deny put obj specifying invalid algorithm' )
-@attr(assertion='success')
-@attr('encryption')
+@pytest.mark.encryption
 def test_put_obj_enc_conflict_bad_enc_kms():
     kms_keyid = get_main_kms_keyid()
     if kms_keyid is None:
@@ -12803,17 +11020,13 @@
     client.meta.events.register('before-call.s3.PutObject', lf)
     e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidArgument')
+    assert status == 400
+    assert error_code == 'InvalidArgument'
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Deny put obj requests if not sse-s3: without encryption')
-@attr(assertion='success')
-@attr('encryption')
-@attr('bucket-policy')
-@attr('sse-s3')
-@attr('fails_on_dbstore')
+@pytest.mark.encryption
+@pytest.mark.bucket_policy
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
 def test_bucket_policy_put_obj_s3_noenc():
     bucket_name = get_new_bucket()
     client = get_v2_client()
@@ -12857,20 +11070,16 @@
     # first validate that writing a sse-s3 object works
     response = client.put_object(Bucket=bucket_name, Key=key1_str, ServerSideEncryption='AES256')
     response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption']
-    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256')
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
 
     # then validate that a non-encrypted object fails.
     # (this also breaks the connection--non-sse bug, probably because the server
     # errors out before it consumes the data...)
     check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Deny put obj requests if not sse-s3: kms')
-@attr(assertion='success')
-@attr('encryption')
-@attr('bucket-policy')
-@attr('sse-s3')
+@pytest.mark.encryption
+@pytest.mark.bucket_policy
+@pytest.mark.sse_s3
 def test_bucket_policy_put_obj_s3_kms():
     kms_keyid = get_main_kms_keyid()
     if kms_keyid is None:
@@ -12914,17 +11123,13 @@
     client.meta.events.register('before-call.s3.PutObject', lf)
     check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Deny put obj requests if not sse-kms: without encryption')
-@attr(assertion='success')
-@attr('encryption')
-@attr('fails_on_dbstore')
-@attr('bucket-policy')
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+@pytest.mark.bucket_policy
 def test_bucket_policy_put_obj_kms_noenc():
     kms_keyid = get_main_kms_keyid()
     if kms_keyid is None:
-        raise SkipTest
+        pytest.skip('[s3 main] section missing kms_keyid')
     bucket_name = get_new_bucket()
     client = get_v2_client()
@@ -12960,17 +11165,13 @@
     # breaks next call...
     response = client.put_object(Bucket=bucket_name, Key=key1_str, ServerSideEncryption='aws:kms', SSEKMSKeyId=kms_keyid)
-    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms')
-    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid)
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms'
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid
 
     check_access_denied(client.put_object, Bucket=bucket_name, Key=key2_str, Body=key2_str)
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Deny put obj requests if not sse-kms: s3')
-@attr(assertion='success')
-@attr('encryption')
-@attr('bucket-policy')
+@pytest.mark.encryption
+@pytest.mark.bucket_policy
 def test_bucket_policy_put_obj_kms_s3():
     bucket_name = get_new_bucket()
     client = get_v2_client()
@@ -13010,14 +11211,10 @@
     client.meta.events.register('before-call.s3.PutObject', lf)
     check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='put obj with RequestObjectTag')
-@attr(assertion='success')
-@attr('tagging')
-@attr('bucket-policy')
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
 # TODO: remove this fails_on_rgw when I fix it
-@attr('fails_on_rgw')
+@pytest.mark.fails_on_rgw
 def test_bucket_policy_put_obj_request_obj_tag():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -13044,13 +11241,9 @@
     #TODO: why is this a 400 and not passing
     alt_client.put_object(Bucket=bucket_name, Key=key1_str, Body=key1_str)
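The ``*_noenc`` tests above install the stock deny-unencrypted-uploads pattern: a Deny statement keyed on ``s3:x-amz-server-side-encryption``. A sketch (bucket name is illustrative)::

    import json

    deny_unencrypted = json.dumps({
        'Version': '2012-10-17',
        'Statement': [{
            'Effect': 'Deny',
            'Principal': '*',
            'Action': 's3:PutObject',
            'Resource': 'arn:aws:s3:::demo-bucket/*',
            # Reject any PutObject that does not declare SSE-S3 (AES256).
            'Condition': {'StringNotEquals': {'s3:x-amz-server-side-encryption': 'AES256'}}
        }]
    })
    # client.put_bucket_policy(Bucket='demo-bucket', Policy=deny_unencrypted)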
-@attr(resource='object')
-@attr(method='get')
-@attr(operation='Test ExistingObjectTag conditional on get object acl')
-@attr(assertion='success')
-@attr('tagging')
-@attr('bucket-policy')
-@attr('fails_on_dbstore')
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
 def test_bucket_policy_get_obj_acl_existing_tag():
     bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
     client = get_client()
@@ -13073,7 +11266,7 @@ def test_bucket_policy_get_obj_acl_existing_tag():
     input_tagset = {'TagSet': tagset}
 
     response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     tagset2 = []
     tagset2.append({'Key': 'security', 'Value': 'private'})
@@ -13081,7 +11274,7 @@
     input_tagset = {'TagSet': tagset2}
 
     response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     tagset3 = []
     tagset3.append({'Key': 'security1', 'Value': 'public'})
@@ -13089,32 +11282,27 @@
     input_tagset = {'TagSet': tagset3}
 
     response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     alt_client = get_alt_client()
     response = alt_client.get_object_acl(Bucket=bucket_name, Key='publictag')
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     # A get object itself should fail since we allowed only GetObjectTagging
     e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
     e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
     e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object lock with defalut retention')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_lock():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13129,7 +11317,7 @@
     response = client.put_object_lock_configuration(
         Bucket=bucket_name,
         ObjectLockConfiguration=conf)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     conf = {'ObjectLockEnabled':'Enabled',
             'Rule': {
@@ -13141,17 +11329,12 @@
     response = client.put_object_lock_configuration(
         Bucket=bucket_name,
         ObjectLockConfiguration=conf)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
     response = client.get_bucket_versioning(Bucket=bucket_name)
-    eq(response['Status'], 'Enabled')
+    assert response['Status'] == 'Enabled'
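For reference alongside the object-lock hunks that follow, a standalone sketch of creating a bucket with a default retention rule (names and period are illustrative)::

    import boto3

    s3 = boto3.client('s3')  # placeholder client
    # Object lock can only be enabled at bucket creation time.
    s3.create_bucket(Bucket='demo-locked', ObjectLockEnabledForBucket=True)
    s3.put_object_lock_configuration(
        Bucket='demo-locked',
        ObjectLockConfiguration={
            'ObjectLockEnabled': 'Enabled',
            'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE', 'Days': 1}}})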
 def test_object_lock_put_obj_lock_invalid_bucket():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13165,16 +11348,11 @@ def test_object_lock_put_obj_lock_invalid_bucket():
         }}
     e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 409)
-    eq(error_code, 'InvalidBucketState')
+    assert status == 409
+    assert error_code == 'InvalidBucketState'
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object lock with days and years')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_lock_with_days_and_years():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13189,16 +11367,11 @@
         }}
     e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'MalformedXML')
+    assert status == 400
+    assert error_code == 'MalformedXML'
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object lock with invalid days')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_lock_invalid_days():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13212,16 +11385,11 @@
         }}
     e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidRetentionPeriod')
+    assert status == 400
+    assert error_code == 'InvalidRetentionPeriod'
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object lock with invalid years')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_lock_invalid_years():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13235,16 +11403,11 @@
         }}
     e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidRetentionPeriod')
+    assert status == 400
+    assert error_code == 'InvalidRetentionPeriod'
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object lock with invalid mode')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_lock_invalid_mode():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13258,8 +11421,8 @@
         }}
     e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'MalformedXML')
+    assert status == 400
+    assert error_code == 'MalformedXML'
 
     conf = {'ObjectLockEnabled':'Enabled',
             'Rule': {
@@ -13270,16 +11433,11 @@
         }}
     e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'MalformedXML')
+    assert status == 400
+    assert error_code == 'MalformedXML'
 
-attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object lock with invalid status')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_lock_invalid_status():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13293,32 +11451,22 @@
         }}
     e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'MalformedXML')
+    assert status == 400
+    assert error_code == 'MalformedXML'
 
-attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test suspend versioning when object lock enabled')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_suspend_versioning():
     bucket_name = get_new_bucket_name()
     client = get_client()
     client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
     e = assert_raises(ClientError, client.put_bucket_versioning, Bucket=bucket_name, VersioningConfiguration={'Status': 'Suspended'})
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 409)
-    eq(error_code, 'InvalidBucketState')
+    assert status == 409
+    assert error_code == 'InvalidBucketState'
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='Test get object lock')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_get_obj_lock():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13334,30 +11482,20 @@
         Bucket=bucket_name,
         ObjectLockConfiguration=conf)
 
     response = client.get_object_lock_configuration(Bucket=bucket_name)
-    eq(response['ObjectLockConfiguration'], conf)
+    assert response['ObjectLockConfiguration'] == conf
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='Test get object lock with bucket object lock not enabled')
-@attr(assertion='fails')
-@attr('object-lock')
 def test_object_lock_get_obj_lock_invalid_bucket():
     bucket_name = get_new_bucket_name()
     client = get_client()
     client.create_bucket(Bucket=bucket_name)
     e = assert_raises(ClientError, client.get_object_lock_configuration, Bucket=bucket_name)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 404)
-    eq(error_code, 'ObjectLockConfigurationNotFoundError')
+    assert status == 404
+    assert error_code == 'ObjectLockConfigurationNotFoundError'
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='Test put object retention')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_retention():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13367,16 +11505,11 @@
     version_id = response['VersionId']
     retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
     response = client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
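A matching sketch for the per-object retention calls exercised below, reusing the placeholder ``s3`` client and bucket from the sketch above (the retain-until date is illustrative)::

    import datetime
    import pytz

    retention = {'Mode': 'GOVERNANCE',
                 'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    resp = s3.put_object(Bucket='demo-locked', Key='file1', Body=b'abc')
    s3.put_object_retention(Bucket='demo-locked', Key='file1', Retention=retention)
    # Deleting this version now requires BypassGovernanceRetention=True.
    s3.delete_object(Bucket='demo-locked', Key='file1',
                     VersionId=resp['VersionId'], BypassGovernanceRetention=True)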
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object retention with bucket object lock not enabled')
-@attr(assertion='fails')
-@attr('object-lock')
 def test_object_lock_put_obj_retention_invalid_bucket():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13386,16 +11519,11 @@ def test_object_lock_put_obj_retention_invalid_bucket():
     retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
     e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidRequest')
+    assert status == 400
+    assert error_code == 'InvalidRequest'
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object retention with invalid mode')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_retention_invalid_mode():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13405,22 +11533,17 @@
     retention = {'Mode':'governance', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
     e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'MalformedXML')
+    assert status == 400
+    assert error_code == 'MalformedXML'
 
     retention = {'Mode':'abc', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
     e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'MalformedXML')
+    assert status == 400
+    assert error_code == 'MalformedXML'
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='Test get object retention')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_get_obj_retention():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13431,16 +11554,11 @@
     retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
     client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
     response = client.get_object_retention(Bucket=bucket_name, Key=key)
-    eq(response['Retention'], retention)
+    assert response['Retention'] == retention
     client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='Test object retention date formatting')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_get_obj_retention_iso8601():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13458,11 +11576,6 @@
     client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='Test get object retention with invalid bucket')
-@attr(assertion='fails')
-@attr('object-lock')
 def test_object_lock_get_obj_retention_invalid_bucket():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13471,16 +11584,11 @@
     client.put_object(Bucket=bucket_name, Body='abc', Key=key)
     e = assert_raises(ClientError, client.get_object_retention, Bucket=bucket_name, Key=key)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidRequest')
+    assert status == 400
+    assert error_code == 'InvalidRequest'
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object retention with version id')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_retention_versionid():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13492,16 +11600,11 @@
     retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
     client.put_object_retention(Bucket=bucket_name, Key=key, VersionId=version_id, Retention=retention)
     response = client.get_object_retention(Bucket=bucket_name, Key=key, VersionId=version_id)
-    eq(response['Retention'], retention)
+    assert response['Retention'] == retention
     client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object retention to override default retention')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_retention_override_default_retention():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13522,16 +11625,11 @@
     retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
     client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
     response = client.get_object_retention(Bucket=bucket_name, Key=key)
-    eq(response['Retention'], retention)
+    assert response['Retention'] == retention
     client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object retention to increase retention period')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_retention_increase_period():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13544,16 +11642,11 @@
     retention2 = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
     client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention2)
     response = client.get_object_retention(Bucket=bucket_name, Key=key)
-    eq(response['Retention'], retention2)
+    assert response['Retention'] == retention2
     client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object retention to shorten period')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_retention_shorten_period():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13566,17 +11659,12 @@
     retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
     e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
-    eq(error_code, 'AccessDenied')
+    assert status == 403
+    assert error_code == 'AccessDenied'
     client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put object retention to shorten period with bypass header')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_obj_retention_shorten_period_bypass():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13589,16 +11677,11 @@
     retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
     client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention, BypassGovernanceRetention=True)
     response = client.get_object_retention(Bucket=bucket_name, Key=key)
-    eq(response['Retention'], retention)
+    assert response['Retention'] == retention
     client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
 
-@attr(resource='bucket')
-@attr(method='delete')
-@attr(operation='Test delete object with retention')
-@attr(assertion='retention period make effects')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_delete_object_with_retention():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13610,18 +11693,13 @@
     client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
     e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
-    eq(error_code, 'AccessDenied')
+    assert status == 403
+    assert error_code == 'AccessDenied'
 
     response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
 
-@attr(resource='bucket')
-@attr(method='delete')
-@attr(operation='Test delete object with retention and delete marker')
-@attr(assertion='retention period make effects')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_delete_object_with_retention_and_marker():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13634,24 +11712,19 @@
     del_response = client.delete_object(Bucket=bucket_name, Key=key)
     e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
-    eq(error_code, 'AccessDenied')
+    assert status == 403
+    assert error_code == 'AccessDenied'
 
     client.delete_object(Bucket=bucket_name, Key=key, VersionId=del_response['VersionId'])
     e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
-    eq(error_code, 'AccessDenied')
+    assert status == 403
+    assert error_code == 'AccessDenied'
 
     response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
 
-@attr(resource='object')
-@attr(method='delete')
-@attr(operation='Test multi-delete object with retention')
-@attr(assertion='retention period make effects')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_multi_delete_object_with_retention():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13685,17 +11758,17 @@
             }
     )
 
-    eq(len(delete_response['Deleted']), 1)
-    eq(len(delete_response['Errors']), 1)
+    assert len(delete_response['Deleted']) == 1
+    assert len(delete_response['Errors']) == 1
 
     failed_object = delete_response['Errors'][0]
-    eq(failed_object['Code'], 'AccessDenied')
-    eq(failed_object['Key'], key1)
-    eq(failed_object['VersionId'], versionId1)
+    assert failed_object['Code'] == 'AccessDenied'
+    assert failed_object['Key'] == key1
+    assert failed_object['VersionId'] == versionId1
 
     deleted_object = delete_response['Deleted'][0]
-    eq(deleted_object['Key'], key2)
-    eq(deleted_object['VersionId'], versionId2)
+    assert deleted_object['Key'] == key2
+    assert deleted_object['VersionId'] == versionId2
 
     delete_response = client.delete_objects(
         Bucket=bucket_name,
@@ -13711,19 +11784,14 @@
     )
 
     assert( ('Errors' not in delete_response) or (len(delete_response['Errors']) == 0) )
-    eq(len(delete_response['Deleted']), 1)
+    assert len(delete_response['Deleted']) == 1
     deleted_object = delete_response['Deleted'][0]
-    eq(deleted_object['Key'], key1)
-    eq(deleted_object['VersionId'], versionId1)
+    assert deleted_object['Key'] == key1
+    assert deleted_object['VersionId'] == versionId1
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put legal hold')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_legal_hold():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13732,16 +11800,11 @@
     client.put_object(Bucket=bucket_name, Body='abc', Key=key)
     legal_hold = {'Status': 'ON'}
     response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put legal hold with invalid bucket')
-@attr(assertion='fails')
-@attr('object-lock')
 def test_object_lock_put_legal_hold_invalid_bucket():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13751,16 +11814,11 @@
     legal_hold = {'Status': 'ON'}
     e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidRequest')
+    assert status == 400
+    assert error_code == 'InvalidRequest'
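Legal holds, unlike retention, carry no expiry; they are toggled ON and OFF explicitly, which is what the tests below verify. A sketch with the same placeholder client::

    s3.put_object(Bucket='demo-locked', Key='held', Body=b'abc')
    s3.put_object_legal_hold(Bucket='demo-locked', Key='held',
                             LegalHold={'Status': 'ON'})
    # Deletes fail with AccessDenied while the hold is ON.
    s3.put_object_legal_hold(Bucket='demo-locked', Key='held',
                             LegalHold={'Status': 'OFF'})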
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put legal hold with invalid status')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_put_legal_hold_invalid_status():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13770,16 +11828,11 @@
     legal_hold = {'Status': 'abc'}
     e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'MalformedXML')
+    assert status == 400
+    assert error_code == 'MalformedXML'
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='Test get legal hold')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_get_legal_hold():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13789,18 +11842,13 @@
     legal_hold = {'Status': 'ON'}
     client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
     response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
-    eq(response['LegalHold'], legal_hold)
+    assert response['LegalHold'] == legal_hold
     legal_hold_off = {'Status': 'OFF'}
     client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold_off)
     response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
-    eq(response['LegalHold'], legal_hold_off)
+    assert response['LegalHold'] == legal_hold_off
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='Test get legal hold with invalid bucket')
-@attr(assertion='fails')
-@attr('object-lock')
 def test_object_lock_get_legal_hold_invalid_bucket():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13809,16 +11857,11 @@
     client.put_object(Bucket=bucket_name, Body='abc', Key=key)
     e = assert_raises(ClientError, client.get_object_legal_hold, Bucket=bucket_name, Key=key)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(error_code, 'InvalidRequest')
+    assert status == 400
+    assert error_code == 'InvalidRequest'
 
-@attr(resource='bucket')
-@attr(method='delete')
-@attr(operation='Test delete object with legal hold on')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_delete_object_with_legal_hold_on():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13828,17 +11871,12 @@
     client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'ON'})
     e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
-    eq(error_code, 'AccessDenied')
+    assert status == 403
+    assert error_code == 'AccessDenied'
     client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
 
-@attr(resource='bucket')
-@attr(method='delete')
-@attr(operation='Test delete object with legal hold off')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_delete_object_with_legal_hold_off():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13847,15 +11885,10 @@
     response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
     client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'OFF'})
     response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
-    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='Test get object metadata')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_get_obj_metadata():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13867,20 +11900,15 @@
     retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
     client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
     response = client.head_object(Bucket=bucket_name, Key=key)
-    eq(response['ObjectLockMode'], retention['Mode'])
-    eq(response['ObjectLockRetainUntilDate'], retention['RetainUntilDate'])
-    eq(response['ObjectLockLegalHoldStatus'], legal_hold['Status'])
+    assert response['ObjectLockMode'] == retention['Mode']
+    assert response['ObjectLockRetainUntilDate'] == retention['RetainUntilDate']
+    assert response['ObjectLockLegalHoldStatus'] == legal_hold['Status']
 
     client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
     client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='Test put legal hold and retention when uploading object')
-@attr(assertion='success')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_uploading_obj():
     bucket_name = get_new_bucket_name()
     client = get_client()
@@ -13890,18 +11918,13 @@
         ObjectLockRetainUntilDate=datetime.datetime(2030,1,1,tzinfo=pytz.UTC), ObjectLockLegalHoldStatus='ON')
 
     response = client.head_object(Bucket=bucket_name, Key=key)
-    eq(response['ObjectLockMode'], 'GOVERNANCE')
-    eq(response['ObjectLockRetainUntilDate'], datetime.datetime(2030,1,1,tzinfo=pytz.UTC))
-    eq(response['ObjectLockLegalHoldStatus'], 'ON')
+    assert response['ObjectLockMode'] == 'GOVERNANCE'
+    assert response['ObjectLockRetainUntilDate'] == datetime.datetime(2030,1,1,tzinfo=pytz.UTC)
+    assert response['ObjectLockLegalHoldStatus'] == 'ON'
 
     client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
     client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Test changing object retention mode from GOVERNANCE to COMPLIANCE with bypass')
-@attr(assertion='succeeds')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_changing_mode_from_governance_with_bypass():
     bucket_name = get_new_bucket_name()
     key = 'file1'
@@ -13915,12 +11938,7 @@
     retention = {'Mode':'COMPLIANCE', 'RetainUntilDate':retain_until}
     client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention, BypassGovernanceRetention=True)
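Retention and a legal hold can also be set in the upload itself, which is what ``test_object_lock_uploading_obj`` above checks; a sketch with the placeholder client::

    import datetime
    import pytz

    s3.put_object(Bucket='demo-locked', Key='file1', Body=b'abc',
                  ObjectLockMode='GOVERNANCE',
                  ObjectLockRetainUntilDate=datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC),
                  ObjectLockLegalHoldStatus='ON')
    # head_object then reports ObjectLockMode, ObjectLockRetainUntilDate and
    # ObjectLockLegalHoldStatus alongside the usual metadata.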
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Test changing object retention mode from GOVERNANCE to COMPLIANCE without bypass')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_changing_mode_from_governance_without_bypass():
     bucket_name = get_new_bucket_name()
     key = 'file1'
@@ -13934,15 +11952,10 @@
     retention = {'Mode':'COMPLIANCE', 'RetainUntilDate':retain_until}
     e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
-    eq(error_code, 'AccessDenied')
+    assert status == 403
+    assert error_code == 'AccessDenied'
 
-@attr(resource='object')
-@attr(method='put')
-@attr(operation='Test changing object retention mode from COMPLIANCE to GOVERNANCE')
-@attr(assertion='fails')
-@attr('object-lock')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_object_lock_changing_mode_from_compliance():
     bucket_name = get_new_bucket_name()
     key = 'file1'
@@ -13956,14 +11969,10 @@
     retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':retain_until}
     e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
-    eq(error_code, 'AccessDenied')
+    assert status == 403
+    assert error_code == 'AccessDenied'
 
-@attr(resource='object')
-@attr(method='copy')
-@attr(operation='copy w/ x-amz-copy-source-if-match: the latest ETag')
-@attr(assertion='succeeds')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_copy_object_ifmatch_good():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -13972,14 +11981,10 @@
     client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch=resp['ETag'], Key='bar')
     response = client.get_object(Bucket=bucket_name, Key='bar')
     body = _get_body(response)
-    eq(body, 'bar')
+    assert body == 'bar'
 
-@attr(resource='object')
-@attr(method='copy')
-@attr(operation='copy w/ x-amz-copy-source-if-match: bogus ETag')
-@attr(assertion='fails 412')
 # TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
-@attr('fails_on_rgw')
+@pytest.mark.fails_on_rgw
 def test_copy_object_ifmatch_failed():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -13987,15 +11992,11 @@
     e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch='ABCORZ', Key='bar')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 412)
-    eq(error_code, 'PreconditionFailed')
+    assert status == 412
+    assert error_code == 'PreconditionFailed'
 
-@attr(resource='object')
-@attr(method='copy')
-@attr(operation='copy w/ x-amz-copy-source-if-none-match: the latest ETag')
-@attr(assertion='fails 412')
 # TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
-@attr('fails_on_rgw')
+@pytest.mark.fails_on_rgw
 def test_copy_object_ifnonematch_good():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -14003,14 +12004,10 @@
     e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch=resp['ETag'], Key='bar')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 412)
-    eq(error_code, 'PreconditionFailed')
+    assert status == 412
+    assert error_code == 'PreconditionFailed'
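The conditional-copy tests map ``CopySourceIfMatch``/``CopySourceIfNoneMatch`` onto the ``x-amz-copy-source-if-*`` headers; a sketch of the success path with the placeholder client::

    resp = s3.put_object(Bucket='demo-bucket', Key='foo', Body=b'bar')
    # Succeeds only while the source object's ETag is unchanged, i.e. the
    # source has not been overwritten since we read it.
    s3.copy_object(Bucket='demo-bucket', Key='bar',
                   CopySource='demo-bucket/foo',
                   CopySourceIfMatch=resp['ETag'])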
-@attr(resource='object')
-@attr(method='copy')
-@attr(operation='copy w/ x-amz-copy-source-if-none-match: bogus ETag')
-@attr(assertion='succeeds')
-@attr('fails_on_dbstore')
+@pytest.mark.fails_on_dbstore
 def test_copy_object_ifnonematch_failed():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -14019,27 +12016,18 @@
     client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch='ABCORZ', Key='bar')
     response = client.get_object(Bucket=bucket_name, Key='bar')
     body = _get_body(response)
-    eq(body, 'bar')
+    assert body == 'bar'
 
-@attr(resource='object')
-@attr(method='get')
-@attr(operation='read to invalid key')
-@attr(assertion='fails 400')
 # TODO: results in a 404 instead of 400 on the RGW
-@attr('fails_on_rgw')
+@pytest.mark.fails_on_rgw
 def test_object_read_unreadable():
     bucket_name = get_new_bucket()
     client = get_client()
     e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='\xae\x8a-')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 400)
-    eq(e.response['Error']['Message'], 'Couldn\'t parse the specified URI.')
+    assert status == 400
+    assert e.response['Error']['Message'] == 'Couldn\'t parse the specified URI.'
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='Test User Policy')
-@attr(assertion='succeeds')
-@attr('user-policy')
 def test_user_policy():
     client = get_tenant_iam_client()
 
@@ -14057,56 +12045,33 @@
         )
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='get bucket policy status on a new bucket')
-@attr(assertion='succeeds')
-@attr('policy_status')
 def test_get_bucket_policy_status():
     bucket_name = get_new_bucket()
     client = get_client()
     resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    eq(resp['PolicyStatus']['IsPublic'],False)
+    assert resp['PolicyStatus']['IsPublic'] == False
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='get bucket policy status on a public acl bucket')
-@attr(assertion='succeeds')
-@attr('policy_status')
 def test_get_public_acl_bucket_policy_status():
     bucket_name = get_new_bucket()
     client = get_client()
-    client = get_client()
     client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
     resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    eq(resp['PolicyStatus']['IsPublic'],True)
+    assert resp['PolicyStatus']['IsPublic'] == True
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='get bucket policy status on a authenticated acl bucket')
-@attr(assertion='succeeds')
-@attr('policy_status')
 def test_get_authpublic_acl_bucket_policy_status():
     bucket_name = get_new_bucket()
     client = get_client()
-    client = get_client()
     client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
     resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    eq(resp['PolicyStatus']['IsPublic'],True)
+    assert resp['PolicyStatus']['IsPublic'] == True
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='get bucket policy status on a public policy bucket')
-@attr(assertion='succeeds')
-@attr('policy_status')
 def test_get_publicpolicy_acl_bucket_policy_status():
     bucket_name = get_new_bucket()
     client = get_client()
-    client = get_client()
     resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    eq(resp['PolicyStatus']['IsPublic'],False)
+    assert resp['PolicyStatus']['IsPublic'] == False
 
     resource1 = "arn:aws:s3:::" + bucket_name
     resource2 = "arn:aws:s3:::" + bucket_name + "/*"
@@ -14126,21 +12091,15 @@
     client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
     resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    eq(resp['PolicyStatus']['IsPublic'],True)
+    assert resp['PolicyStatus']['IsPublic'] == True
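``GetBucketPolicyStatus`` reports whether a bucket's ACLs or policy make it public, which is the round trip the policy-status tests here perform; a sketch with the placeholder client::

    s3.create_bucket(Bucket='demo-public')  # placeholder bucket
    status = s3.get_bucket_policy_status(Bucket='demo-public')
    assert status['PolicyStatus']['IsPublic'] is False

    s3.put_bucket_acl(Bucket='demo-public', ACL='public-read')
    status = s3.get_bucket_policy_status(Bucket='demo-public')
    assert status['PolicyStatus']['IsPublic'] is True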
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='get bucket policy status on a public policy bucket')
-@attr(assertion='succeeds')
-@attr('policy_status')
 def test_get_nonpublicpolicy_acl_bucket_policy_status():
     bucket_name = get_new_bucket()
     client = get_client()
-    client = get_client()
     resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    eq(resp['PolicyStatus']['IsPublic'],False)
+    assert resp['PolicyStatus']['IsPublic'] == False
 
     resource1 = "arn:aws:s3:::" + bucket_name
     resource2 = "arn:aws:s3:::" + bucket_name + "/*"
@@ -14164,20 +12123,15 @@
     client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
     resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    eq(resp['PolicyStatus']['IsPublic'],False)
+    assert resp['PolicyStatus']['IsPublic'] == False
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='get bucket policy status on a public policy bucket')
-@attr(assertion='succeeds')
-@attr('policy_status')
 def test_get_nonpublicpolicy_deny_bucket_policy_status():
     bucket_name = get_new_bucket()
     client = get_client()
 
     resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    eq(resp['PolicyStatus']['IsPublic'],False)
+    assert resp['PolicyStatus']['IsPublic'] == False
 
     resource1 = "arn:aws:s3:::" + bucket_name
     resource2 = "arn:aws:s3:::" + bucket_name + "/*"
@@ -14197,29 +12151,19 @@
     client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
     resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    eq(resp['PolicyStatus']['IsPublic'],True)
+    assert resp['PolicyStatus']['IsPublic'] == True
 
-@attr(resource='bucket')
-@attr(method='get')
-@attr(operation='get public access block on a bucket')
-@attr(assertion='succeeds')
-@attr('policy_status')
 def test_get_default_public_block():
     #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
     bucket_name = get_new_bucket()
     client = get_client()
 
     resp = client.get_public_access_block(Bucket=bucket_name)
-    eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], False)
-    eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], False)
-    eq(resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'], False)
-    eq(resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'], False)
+    assert resp['PublicAccessBlockConfiguration']['BlockPublicAcls'] == False
+    assert resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'] == False
+    assert resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'] == False
+    assert resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'] == False
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='get public access block on a bucket')
-@attr(assertion='succeeds')
-@attr('policy_status')
 def test_put_public_block():
     #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
     bucket_name = get_new_bucket()
@@ -14233,17 +12177,12 @@
     client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
 
     resp = client.get_public_access_block(Bucket=bucket_name)
-    eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
-    eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
-    eq(resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'], access_conf['IgnorePublicAcls'])
-    eq(resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'], access_conf['RestrictPublicBuckets'])
+    assert resp['PublicAccessBlockConfiguration']['BlockPublicAcls'] == access_conf['BlockPublicAcls']
+    assert resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'] == access_conf['BlockPublicPolicy']
+    assert resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'] == access_conf['IgnorePublicAcls']
+    assert resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'] == access_conf['RestrictPublicBuckets']
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='get public access block on a bucket')
-@attr(assertion='succeeds')
-@attr('policy_status')
 def test_block_public_put_bucket_acls():
     #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
     bucket_name = get_new_bucket()
@@ -14257,27 +12196,22 @@
     client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
 
     resp = client.get_public_access_block(Bucket=bucket_name)
-    eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
-    eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
+    assert resp['PublicAccessBlockConfiguration']['BlockPublicAcls'] == access_conf['BlockPublicAcls']
+    assert resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'] == access_conf['BlockPublicPolicy']
 
     e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
     e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read-write')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
     e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='authenticated-read')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
-@attr(resource='bucket')
-@attr(method='put')
-@attr(operation='block public acls on canned acls')
-@attr(assertion='succeeds')
-@attr('policy_status')
 def test_block_public_object_canned_acls():
     bucket_name = get_new_bucket()
     client = get_client()
@@ -14290,28 +12224,23 @@
     client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
 
     # resp = client.get_public_access_block(Bucket=bucket_name)
-    # eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
-    # eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
+    # assert resp['PublicAccessBlockConfiguration']['BlockPublicAcls'] == access_conf['BlockPublicAcls']
+    # assert resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'] == access_conf['BlockPublicPolicy']
 
     #FIXME: use empty body until #42208
     e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo1', Body='', ACL='public-read')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
     e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo2', Body='', ACL='public-read')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
+    assert status == 403
 
     e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo3', Body='', ACL='authenticated-read')
     status, error_code = _get_status_and_error_code(e.response)
-    eq(status, 403)
-@attr('policy_status') def test_block_public_policy(): bucket_name = get_new_bucket() client = get_client() @@ -14329,11 +12258,6 @@ def test_block_public_policy(): check_access_denied(client.put_bucket_policy, Bucket=bucket_name, Policy=policy_document) -@attr(resource='bucket') -@attr(method='put') -@attr(operation='ignore public acls on canned acls') -@attr(assertion='succeeds') -@attr('policy_status') def test_ignore_public_acls(): bucket_name = get_new_bucket() client = get_client() @@ -14345,7 +12269,7 @@ def test_ignore_public_acls(): client.put_object(Bucket=bucket_name,Key='key1',Body='abcde',ACL='public-read') resp=alt_client.get_object(Bucket=bucket_name, Key='key1') - eq(_get_body(resp), 'abcde') + assert _get_body(resp) == 'abcde' access_conf = {'BlockPublicAcls': False, 'IgnorePublicAcls': True, @@ -14361,11 +12285,6 @@ def test_ignore_public_acls(): check_access_denied(alt_client.get_object, Bucket=bucket_name, Key='key1') -@attr(resource='bucket') -@attr(method='put') -@attr(operation='multipart upload on a bucket with a policy') -@attr(assertion='succeeds') -@attr('policy_status') def test_multipart_upload_on_a_bucket_with_policy(): bucket_name = get_new_bucket() client = get_client() @@ -14389,7 +12308,7 @@ def test_multipart_upload_on_a_bucket_with_policy(): client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document) (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client) response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts}) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 def _put_bucket_encryption_s3(client, bucket_name): """ @@ -14405,7 +12324,7 @@ def _put_bucket_encryption_s3(client, bucket_name): ] } response = client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_conf) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 def _put_bucket_encryption_kms(client, bucket_name): """ @@ -14425,35 +12344,23 @@ def _put_bucket_encryption_kms(client, bucket_name): ] } response = client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_conf) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 -@attr(resource='bucket') -@attr(method='put') -@attr(operation='put bucket encryption on bucket - s3') -@attr(assertion='succeeds') -@attr('sse-s3') +@pytest.mark.sse_s3 def test_put_bucket_encryption_s3(): bucket_name = get_new_bucket() client = get_client() _put_bucket_encryption_s3(client, bucket_name) -@attr(resource='bucket') -@attr(method='put') -@attr(operation='put bucket encryption on bucket - kms') -@attr(assertion='succeeds') -@attr('encryption') +@pytest.mark.encryption def test_put_bucket_encryption_kms(): bucket_name = get_new_bucket() client = get_client() _put_bucket_encryption_kms(client, bucket_name) -@attr(resource='bucket') -@attr(method='get') -@attr(operation='get bucket encryption on bucket - s3') -@attr(assertion='succeeds') -@attr('sse-s3') +@pytest.mark.sse_s3 def test_get_bucket_encryption_s3(): bucket_name = get_new_bucket() client = get_client() @@ -14464,20 +12371,16 @@ def test_get_bucket_encryption_s3(): except ClientError as e: response_code = e.response['Error']['Code'] - eq(response_code, 
'ServerSideEncryptionConfigurationNotFoundError') + assert response_code == 'ServerSideEncryptionConfigurationNotFoundError' _put_bucket_encryption_s3(client, bucket_name) response = client.get_bucket_encryption(Bucket=bucket_name) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) - eq(response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'], 'AES256') + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + assert response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'] == 'AES256' -@attr(resource='bucket') -@attr(method='get') -@attr(operation='get bucket encryption on bucket - kms') -@attr(assertion='succeeds') -@attr('encryption') +@pytest.mark.encryption def test_get_bucket_encryption_kms(): kms_keyid = get_main_kms_keyid() if kms_keyid is None: @@ -14491,32 +12394,28 @@ def test_get_bucket_encryption_kms(): except ClientError as e: response_code = e.response['Error']['Code'] - eq(response_code, 'ServerSideEncryptionConfigurationNotFoundError') + assert response_code == 'ServerSideEncryptionConfigurationNotFoundError' _put_bucket_encryption_kms(client, bucket_name) response = client.get_bucket_encryption(Bucket=bucket_name) - eq(response['ResponseMetadata']['HTTPStatusCode'], 200) - eq(response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'], 'aws:kms') - eq(response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['KMSMasterKeyID'], kms_keyid) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + assert response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'] == 'aws:kms' + assert response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['KMSMasterKeyID'] == kms_keyid -@attr(resource='bucket') -@attr(method='delete') -@attr(operation='delete bucket encryption on bucket - s3') -@attr(assertion='succeeds') -@attr('sse-s3') +@pytest.mark.sse_s3 def test_delete_bucket_encryption_s3(): bucket_name = get_new_bucket() client = get_client() response = client.delete_bucket_encryption(Bucket=bucket_name) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 _put_bucket_encryption_s3(client, bucket_name) response = client.delete_bucket_encryption(Bucket=bucket_name) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 response_code = "" try: @@ -14524,25 +12423,21 @@ def test_delete_bucket_encryption_s3(): except ClientError as e: response_code = e.response['Error']['Code'] - eq(response_code, 'ServerSideEncryptionConfigurationNotFoundError') + assert response_code == 'ServerSideEncryptionConfigurationNotFoundError' -@attr(resource='bucket') -@attr(method='delete') -@attr(operation='delete bucket encryption on bucket - kms') -@attr(assertion='succeeds') -@attr('encryption') +@pytest.mark.encryption def test_delete_bucket_encryption_kms(): bucket_name = get_new_bucket() client = get_client() response = client.delete_bucket_encryption(Bucket=bucket_name) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert response['ResponseMetadata']['HTTPStatusCode'] == 204 _put_bucket_encryption_kms(client, bucket_name) response = client.delete_bucket_encryption(Bucket=bucket_name) - eq(response['ResponseMetadata']['HTTPStatusCode'], 204) + assert 
response['ResponseMetadata']['HTTPStatusCode'] == 204 response_code = "" try: @@ -14550,7 +12445,7 @@ def test_delete_bucket_encryption_kms(): except ClientError as e: response_code = e.response['Error']['Code'] - eq(response_code, 'ServerSideEncryptionConfigurationNotFoundError') + assert response_code == 'ServerSideEncryptionConfigurationNotFoundError' def _test_sse_s3_default_upload(file_size): """ @@ -14564,54 +12459,38 @@ def _test_sse_s3_default_upload(file_size): data = 'A'*file_size response = client.put_object(Bucket=bucket_name, Key='testobj', Body=data) - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256') + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256' response = client.get_object(Bucket=bucket_name, Key='testobj') - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256') + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256' body = _get_body(response) - eq(body, data) + assert body == data -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 1 byte upload to SSE-S3 default-encrypted bucket') -@attr(assertion='success') -@attr('encryption') -@attr('bucket-encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.bucket_encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_s3_default_upload_1b(): _test_sse_s3_default_upload(1) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 1KB upload to SSE-S3 default-encrypted bucket') -@attr(assertion='success') -@attr('encryption') -@attr('bucket-encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.bucket_encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_s3_default_upload_1kb(): _test_sse_s3_default_upload(1024) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 1MB upload to SSE-S3 default-encrypted bucket') -@attr(assertion='success') -@attr('encryption') -@attr('bucket-encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.bucket_encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_s3_default_upload_1mb(): _test_sse_s3_default_upload(1024*1024) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 8MB upload to SSE-S3 default-encrypted bucket') -@attr(assertion='success') -@attr('encryption') -@attr('bucket-encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.bucket_encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_s3_default_upload_8mb(): _test_sse_s3_default_upload(8*1024*1024) @@ -14623,76 +12502,56 @@ def _test_sse_kms_default_upload(file_size): """ kms_keyid = get_main_kms_keyid() if kms_keyid is None: - raise SkipTest + pytest.skip('[s3 main] section missing kms_keyid') bucket_name = get_new_bucket() client = get_client() _put_bucket_encryption_kms(client, bucket_name) data = 'A'*file_size response = client.put_object(Bucket=bucket_name, Key='testobj', Body=data) - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms') - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid) + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms' + assert 
response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid response = client.get_object(Bucket=bucket_name, Key='testobj') - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms') - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid) + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms' + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid body = _get_body(response) - eq(body, data) + assert body == data -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 1 byte upload to SSE-KMS default-encrypted bucket') -@attr(assertion='success') -@attr('encryption') -@attr('bucket-encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.bucket_encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_kms_default_upload_1b(): _test_sse_kms_default_upload(1) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 1KB upload to SSE-KMS default-encrypted bucket') -@attr(assertion='success') -@attr('encryption') -@attr('bucket-encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.bucket_encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_kms_default_upload_1kb(): _test_sse_kms_default_upload(1024) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 1MB upload to SSE-KMS default-encrypted bucket') -@attr(assertion='success') -@attr('encryption') -@attr('bucket-encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.bucket_encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_kms_default_upload_1mb(): _test_sse_kms_default_upload(1024*1024) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 8MB upload to SSE-KMS default-encrypted bucket') -@attr(assertion='success') -@attr('encryption') -@attr('bucket-encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.bucket_encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_kms_default_upload_8mb(): _test_sse_kms_default_upload(8*1024*1024) -@attr(resource='object') -@attr(method='head') -@attr(operation='Test head operation on SSE-S3 default-encrypted object') -@attr(assertion='success') -@attr('encryption') -@attr('bucket-encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.bucket_encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_s3_default_method_head(): bucket_name = get_new_bucket() client = get_client() @@ -14703,7 +12562,7 @@ def test_sse_s3_default_method_head(): client.put_object(Bucket=bucket_name, Key=key, Body=data) response = client.head_object(Bucket=bucket_name, Key=key) - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256') + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256' sse_s3_headers = { 'x-amz-server-side-encryption': 'AES256', @@ -14712,16 +12571,12 @@ def test_sse_s3_default_method_head(): client.meta.events.register('before-call.s3.HeadObject', lf) e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key) status, error_code = _get_status_and_error_code(e.response) - eq(status, 400) + assert status == 400 
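The hunks above all apply the same mechanical conversion: each stack of nose ``@attr(...)`` decorators collapses into pytest markers (attribute names containing hyphens, such as ``sse-s3``, become ``sse_s3``, since a marker is accessed as a Python attribute of ``pytest.mark``), and ``raise SkipTest`` becomes ``pytest.skip()`` with an explicit reason that shows up in the run summary. A minimal sketch of the converted shape, using a hypothetical test name and a stand-in for the suite's ``get_main_kms_keyid()`` helper::

    import pytest

    @pytest.mark.encryption
    @pytest.mark.sse_s3            # was @attr('sse-s3'); '-' cannot appear in an attribute name
    @pytest.mark.fails_on_dbstore
    def test_sse_default_upload_sketch():
        kms_keyid = None           # stand-in for the suite's get_main_kms_keyid()
        if kms_keyid is None:
            # replaces nose's `raise SkipTest`; the reason string is reported
            pytest.skip('[s3 main] section missing kms_keyid')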
-@attr(resource='object') -@attr(method='put') -@attr(operation='complete SSE-S3 multi-part upload') -@attr(assertion='successful') -@attr('encryption') -@attr('bucket-encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.bucket_encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_s3_default_multipart_upload(): bucket_name = get_new_bucket() client = get_client() @@ -14745,34 +12600,30 @@ def test_sse_s3_default_multipart_upload(): response = client.head_bucket(Bucket=bucket_name) rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1)) - eq(rgw_object_count, 1) + assert rgw_object_count == 1 rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen)) - eq(rgw_bytes_used, objlen) + assert rgw_bytes_used == objlen lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers)) client.meta.events.register('before-call.s3.UploadPart', lf) response = client.get_object(Bucket=bucket_name, Key=key) - eq(response['Metadata'], metadata) - eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type) + assert response['Metadata'] == metadata + assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type body = _get_body(response) - eq(body, data) + assert body == data size = response['ContentLength'] - eq(len(body), size) + assert len(body) == size _check_content_using_range(key, bucket_name, data, 1000000) _check_content_using_range(key, bucket_name, data, 10000000) -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated SSE-S3 browser based upload via POST request') -@attr(assertion='succeeds and returns written data') -@attr('encryption') -@attr('bucket-encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.bucket_encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_s3_default_post_object_authenticated_request(): bucket_name = get_new_bucket() client = get_client() @@ -14808,25 +12659,20 @@ def test_sse_s3_default_post_object_authenticated_request(): ('file', ('bar'))]) r = requests.post(url, files = payload) - eq(r.status_code, 204) + assert r.status_code == 204 response = client.get_object(Bucket=bucket_name, Key='foo.txt') - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256') + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256' body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' -@attr(resource='object') -@attr(method='post') -@attr(operation='authenticated SSE-kMS browser based upload via POST request') -@attr(assertion='succeeds and returns written data') -@attr('encryption') -@attr('bucket-encryption') -@attr('encryption') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.bucket_encryption +@pytest.mark.fails_on_dbstore def test_sse_kms_default_post_object_authenticated_request(): kms_keyid = get_main_kms_keyid() if kms_keyid is None: - raise SkipTest + pytest.skip('[s3 main] section missing kms_keyid') bucket_name = get_new_bucket() client = get_client() _put_bucket_encryption_kms(client, bucket_name) @@ -14861,13 +12707,13 @@ def test_sse_kms_default_post_object_authenticated_request(): ('file', ('bar'))]) r = requests.post(url, files = payload) - eq(r.status_code, 204) + assert r.status_code == 204 response = client.get_object(Bucket=bucket_name, Key='foo.txt') - 
eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms') - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid) + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms' + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid body = _get_body(response) - eq(body, 'bar') + assert body == 'bar' def _test_sse_s3_encrypted_upload(file_size): @@ -14879,49 +12725,33 @@ def _test_sse_s3_encrypted_upload(file_size): data = 'A'*file_size response = client.put_object(Bucket=bucket_name, Key='testobj', Body=data, ServerSideEncryption='AES256') - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256') + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256' response = client.get_object(Bucket=bucket_name, Key='testobj') - eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256') + assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256' body = _get_body(response) - eq(body, data) + assert body == data -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 1 byte upload with SSE-S3 encryption') -@attr(assertion='success') -@attr('encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_s3_encrypted_upload_1b(): _test_sse_s3_encrypted_upload(1) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 1Kb upload with SSE-S3 encryption') -@attr(assertion='success') -@attr('encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_s3_encrypted_upload_1kb(): _test_sse_s3_encrypted_upload(1024) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 1MB upload with SSE-S3 encryption') -@attr(assertion='success') -@attr('encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_s3_encrypted_upload_1mb(): _test_sse_s3_encrypted_upload(1024*1024) -@attr(resource='object') -@attr(method='put') -@attr(operation='Test 8MB upload with SSE-S3 encryption') -@attr(assertion='success') -@attr('encryption') -@attr('sse-s3') -@attr('fails_on_dbstore') +@pytest.mark.encryption +@pytest.mark.sse_s3 +@pytest.mark.fails_on_dbstore def test_sse_s3_encrypted_upload_8mb(): _test_sse_s3_encrypted_upload(8*1024*1024) diff --git a/s3tests_boto3/functional/test_s3select.py b/s3tests_boto3/functional/test_s3select.py index 85d7c39..feb6720 100644 --- a/s3tests_boto3/functional/test_s3select.py +++ b/s3tests_boto3/functional/test_s3select.py @@ -1,14 +1,14 @@ -import nose +import pytest import random import string import re -from nose.plugins.attrib import attr from botocore.exceptions import ClientError import uuid -from nose.tools import eq_ as eq from . 
import ( + configfile, + setup_teardown, get_client ) @@ -76,12 +76,12 @@ def generate_s3select_expression_projection(bucket_name,obj_name): # both results should be close (epsilon) assert( abs(float(res.split("\n")[1]) - eval(e)) < epsilon ) -@attr('s3select') +@pytest.mark.s3select def get_random_string(): return uuid.uuid4().hex[:6].upper() -@attr('s3select') +@pytest.mark.s3select def test_generate_where_clause(): # create small csv file for testing the random expressions @@ -93,7 +93,7 @@ def test_generate_where_clause(): for _ in range(100): generate_s3select_where_clause(bucket_name,obj_name) -@attr('s3select') +@pytest.mark.s3select def test_generate_projection(): # create small csv file for testing the random expressions @@ -114,8 +114,7 @@ def s3select_assert_result(a,b): else: assert a != "" assert b != "" - - nose.tools.assert_equal(a,b) + assert a == b def create_csv_object_for_datetime(rows,columns): result = "" @@ -219,7 +218,7 @@ def upload_csv_object(bucket_name,new_key,obj): # validate uploaded object c2 = get_client() response = c2.get_object(Bucket=bucket_name, Key=new_key) - eq(response['Body'].read().decode('utf-8'), obj, 's3select error[ downloaded object not equal to uploaded objecy') + assert response['Body'].read().decode('utf-8') == obj, 's3select error: downloaded object not equal to uploaded object' def run_s3select(bucket,key,query,column_delim=",",row_delim="\n",quot_char='"',esc_char='\\',csv_header_info="NONE", progress = False): @@ -291,7 +290,7 @@ def remove_xml_tags_from_result(obj): x = bool(re.search("^failure.*$", result_strip)) if x: logging.info(result) - nose.tools.assert_equal(x, False) + assert x == False return result @@ -309,7 +308,7 @@ def create_list_of_int(column_pos,obj,field_split=",",row_split="\n"): return list_of_int -@attr('s3select') +@pytest.mark.s3select def test_count_operation(): csv_obj_name = get_random_string() bucket_name = "test" @@ -320,7 +319,7 @@ def test_count_operation(): s3select_assert_result( num_of_rows, int( res )) -@attr('s3select') +@pytest.mark.s3select def test_column_sum_min_max(): csv_obj = create_random_csv_object(10000,10) @@ -385,7 +384,7 @@ def test_column_sum_min_max(): s3select_assert_result( int(count)*4 , int(sum1)-int(sum2) ) -@attr('s3select') +@pytest.mark.s3select def test_nullif_expressions(): csv_obj = create_random_csv_object(10000,10) @@ -440,7 +439,7 @@ def test_nullif_expressions(): s3select_assert_result( res_s3select_nullif, res_s3select) -@attr('s3select') +@pytest.mark.s3select def test_nulliftrue_expressions(): csv_obj = create_random_csv_object(10000,10) @@ -467,7 +466,7 @@ def test_nulliftrue_expressions(): s3select_assert_result( res_s3select_nullif, res_s3select) -@attr('s3select') +@pytest.mark.s3select def test_is_not_null_expressions(): csv_obj = create_random_csv_object(10000,10) @@ -488,7 +487,7 @@ def test_is_not_null_expressions(): s3select_assert_result( res_s3select_null, res_s3select) -@attr('s3select') +@pytest.mark.s3select def test_lowerupper_expressions(): csv_obj = create_random_csv_object(1,10) @@ -505,7 +504,7 @@ def test_lowerupper_expressions(): s3select_assert_result( res_s3select, "AB12CD$$") -@attr('s3select') +@pytest.mark.s3select def test_in_expressions(): # purpose of test: engine is process correctly several projections containing aggregation-functions @@ -575,7 +574,7 @@ def test_in_expressions(): s3select_assert_result( res_s3select_in, res_s3select ) -@attr('s3select') +@pytest.mark.s3select def test_true_false_in_expressions(): csv_obj = 
create_random_csv_object(10000,10) @@ -620,7 +619,7 @@ def test_true_false_in_expressions(): s3select_assert_result( res_s3select_in, res_s3select ) -@attr('s3select') +@pytest.mark.s3select def test_like_expressions(): csv_obj = create_random_csv_object_string(1000,10) @@ -707,7 +706,7 @@ def test_like_expressions(): s3select_assert_result( res_s3select_like, res_s3select ) -@attr('s3select') +@pytest.mark.s3select def test_truefalselike_expressions(): csv_obj = create_random_csv_object_string(1000,10) @@ -752,7 +751,7 @@ def test_truefalselike_expressions(): s3select_assert_result( res_s3select_like, res_s3select ) -@attr('s3select') +@pytest.mark.s3select def test_nullif_expressions(): csv_obj = create_random_csv_object(10000,10) @@ -765,21 +764,21 @@ def test_nullif_expressions(): res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 = _2 ;") ).replace("\n","") - nose.tools.assert_equal( res_s3select_nullif, res_s3select) + assert res_s3select_nullif == res_s3select res_s3select_nullif = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where not nullif(_1,_2) is null ;") ).replace("\n","") res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 != _2 ;") ).replace("\n","") - nose.tools.assert_equal( res_s3select_nullif, res_s3select) + assert res_s3select_nullif == res_s3select res_s3select_nullif = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where nullif(_1,_2) = _1 ;") ).replace("\n","") res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 != _2 ;") ).replace("\n","") - nose.tools.assert_equal( res_s3select_nullif, res_s3select) + assert res_s3select_nullif == res_s3select -@attr('s3select') +@pytest.mark.s3select def test_lowerupper_expressions(): csv_obj = create_random_csv_object(1,10) @@ -790,13 +789,13 @@ def test_lowerupper_expressions(): res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select lower("AB12cd$$") from stdin ;') ).replace("\n","") - nose.tools.assert_equal( res_s3select, "ab12cd$$") + assert res_s3select == "ab12cd$$" res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select upper("ab12CD$$") from stdin ;') ).replace("\n","") - nose.tools.assert_equal( res_s3select, "AB12CD$$") + assert res_s3select == "AB12CD$$" -@attr('s3select') +@pytest.mark.s3select def test_in_expressions(): # purpose of test: engine is process correctly several projections containing aggregation-functions @@ -810,33 +809,33 @@ def test_in_expressions(): res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) = 1;')).replace("\n","") - nose.tools.assert_equal( res_s3select_in, res_s3select ) + assert res_s3select_in == res_s3select res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) in(1,0);')).replace("\n","") res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) = 1 or int(_1) = 0;')).replace("\n","") - nose.tools.assert_equal( res_s3select_in, res_s3select ) + assert res_s3select_in == res_s3select res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2) 
in(1,0,2);')).replace("\n","") res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2) = 1 or int(_2) = 0 or int(_2) = 2;')).replace("\n","") - nose.tools.assert_equal( res_s3select_in, res_s3select ) + assert res_s3select_in == res_s3select res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5);')).replace("\n","") res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2)*2 = int(_3)*2 or int(_2)*2 = int(_4)*3 or int(_2)*2 = int(_5)*5;')).replace("\n","") - nose.tools.assert_equal( res_s3select_in, res_s3select ) + assert res_s3select_in == res_s3select res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where character_length(_1) = 2 and substring(_1,2,1) in ("3");')).replace("\n","") res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where _1 like "_3";')).replace("\n","") - nose.tools.assert_equal( res_s3select_in, res_s3select ) + assert res_s3select_in == res_s3select -@attr('s3select') +@pytest.mark.s3select def test_like_expressions(): csv_obj = create_random_csv_object_string(10000,10) @@ -849,40 +848,40 @@ def test_like_expressions(): res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_1,11,4) = "aeio" ;')).replace("\n","") - nose.tools.assert_equal( res_s3select_in, res_s3select ) + assert res_s3select_in == res_s3select res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _1 like "cbcd%";')).replace("\n","") res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_1,1,4) = "cbcd";')).replace("\n","") - nose.tools.assert_equal( res_s3select_in, res_s3select ) + assert res_s3select_in == res_s3select res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _3 like "%y[y-z]";')).replace("\n","") res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_3,character_length(_3),1) between "y" and "z" and substring(_3,character_length(_3)-1,1) = "y";')).replace("\n","") - nose.tools.assert_equal( res_s3select_in, res_s3select ) + assert res_s3select_in == res_s3select res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _2 like "%yz";')).replace("\n","") res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_2,character_length(_2),1) = "z" and substring(_2,character_length(_2)-1,1) = "y";')).replace("\n","") - nose.tools.assert_equal( res_s3select_in, res_s3select ) + assert res_s3select_in == res_s3select res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _3 like "c%z";')).replace("\n","") res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_3,character_length(_3),1) = "z" and substring(_3,1,1) = "c";')).replace("\n","") - nose.tools.assert_equal( res_s3select_in, res_s3select ) + assert res_s3select_in == res_s3select 
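Replacing ``nose.tools.assert_equal(a, b)`` with a bare ``assert a == b``, as these hunks do, is not purely cosmetic: pytest rewrites assert statements in test modules at collection time, so a failing comparison reports both operands, with a string diff, without any helper. A small illustration with hypothetical values::

    def test_assert_rewriting_sketch():
        res_s3select_in = '1,2,'   # hypothetical query output
        res_s3select = '1,2,'
        # on failure pytest prints both strings and highlights the difference,
        # matching what nose.tools.assert_equal used to report
        assert res_s3select_in == res_s3select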
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _2 like "%xy_";')).replace("\n","") res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_2,character_length(_2)-1,1) = "y" and substring(_2,character_length(_2)-2,1) = "x";')).replace("\n","") - nose.tools.assert_equal( res_s3select_in, res_s3select ) + assert res_s3select_in == res_s3select -@attr('s3select') +@pytest.mark.s3select def test_complex_expressions(): # purpose of test: engine is process correctly several projections containing aggregation-functions @@ -914,7 +913,7 @@ def test_complex_expressions(): s3select_assert_result( res_s3select_between_numbers, res_s3select_eq_modolu) -@attr('s3select') +@pytest.mark.s3select def test_alias(): # purpose: test is comparing result of exactly the same queries , one with alias the other without. @@ -935,7 +934,7 @@ def test_alias(): s3select_assert_result( res_s3select_alias, res_s3select_no_alias) -@attr('s3select') +@pytest.mark.s3select def test_alias_cyclic_refernce(): number_of_rows = 10000 @@ -953,7 +952,7 @@ def test_alias_cyclic_refernce(): assert int(find_res) >= 0 -@attr('s3select') +@pytest.mark.s3select def test_datetime(): # purpose of test is to validate date-time functionality is correct, @@ -984,7 +983,7 @@ def test_datetime(): s3select_assert_result( res_s3select_date_time_to_timestamp, res_s3select_substring) -@attr('s3select') +@pytest.mark.s3select def test_true_false_datetime(): # purpose of test is to validate date-time functionality is correct, @@ -1018,7 +1017,7 @@ def test_true_false_datetime(): s3select_assert_result( res_s3select_date_time_utcnow, res_s3select_count) -@attr('s3select') +@pytest.mark.s3select def test_csv_parser(): # purpuse: test default csv values(, \n " \ ), return value may contain meta-char @@ -1058,7 +1057,7 @@ def test_csv_parser(): res_s3select_alias = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select _9 from s3object;") ).replace("\n","") s3select_assert_result( res_s3select_alias, 'null') -@attr('s3select') +@pytest.mark.s3select def test_csv_definition(): number_of_rows = 10000 @@ -1088,7 +1087,7 @@ def test_csv_definition(): s3select_assert_result( res_s3select, __res ) -@attr('s3select') +@pytest.mark.s3select def test_schema_definition(): number_of_rows = 10000 @@ -1123,7 +1122,7 @@ def test_schema_definition(): assert ((res_multiple_defintion.find("multiple definition of column {c4} as schema-column and alias")) >= 0) -@attr('s3select') +@pytest.mark.s3select def test_when_then_else_expressions(): csv_obj = create_random_csv_object(10000,10) @@ -1152,7 +1151,7 @@ def test_when_then_else_expressions(): s3select_assert_result( str(count3) , res2) -@attr('s3select') +@pytest.mark.s3select def test_coalesce_expressions(): csv_obj = create_random_csv_object(10000,10) @@ -1174,7 +1173,7 @@ def test_coalesce_expressions(): s3select_assert_result( res_s3select, res_coalesce) -@attr('s3select') +@pytest.mark.s3select def test_cast_expressions(): csv_obj = create_random_csv_object(10000,10) @@ -1195,7 +1194,7 @@ def test_cast_expressions(): s3select_assert_result( res_s3select, res) -@attr('s3select') +@pytest.mark.s3select def test_version(): return @@ -1213,7 +1212,7 @@ def test_version(): s3select_assert_result( res_version, "41.a," ) -@attr('s3select') +@pytest.mark.s3select def test_trim_expressions(): csv_obj = create_random_csv_object_trim(10000,10) @@ -1252,7 +1251,7 
@@ def test_trim_expressions(): s3select_assert_result( res_s3select_trim, res_s3select ) -@attr('s3select') +@pytest.mark.s3select def test_truefalse_trim_expressions(): csv_obj = create_random_csv_object_trim(10000,10) @@ -1291,7 +1290,7 @@ def test_truefalse_trim_expressions(): s3select_assert_result( res_s3select_trim, res_s3select ) -@attr('s3select') +@pytest.mark.s3select def test_escape_expressions(): csv_obj = create_random_csv_object_escape(10000,10) @@ -1312,7 +1311,7 @@ def test_escape_expressions(): s3select_assert_result( res_s3select_escape, res_s3select ) -@attr('s3select') +@pytest.mark.s3select def test_case_value_expressions(): csv_obj = create_random_csv_object(10000,10) @@ -1327,7 +1326,7 @@ def test_case_value_expressions(): s3select_assert_result( res_s3select_case, res_s3select ) -@attr('s3select') +@pytest.mark.s3select def test_bool_cast_expressions(): csv_obj = create_random_csv_object(10000,10) @@ -1342,7 +1341,7 @@ def test_bool_cast_expressions(): s3select_assert_result( res_s3select_cast, res_s3select ) -@attr('s3select') +@pytest.mark.s3select def test_progress_expressions(): csv_obj = create_random_csv_object(1000000,10) @@ -1369,7 +1368,7 @@ def test_progress_expressions(): # end response s3select_assert_result({}, res_s3select_response[total_response-1]) -@attr('s3select') +@pytest.mark.s3select def test_output_serial_expressions(): return # TODO fix test diff --git a/s3tests_boto3/functional/test_sts.py b/s3tests_boto3/functional/test_sts.py index dce1712..0229dbd 100644 --- a/s3tests_boto3/functional/test_sts.py +++ b/s3tests_boto3/functional/test_sts.py @@ -2,9 +2,7 @@ import boto3 import botocore.session from botocore.exceptions import ClientError from botocore.exceptions import ParamValidationError -from nose.tools import eq_ as eq -from nose.plugins.attrib import attr -from nose.plugins.skip import SkipTest +import pytest import isodate import email.utils import datetime @@ -20,7 +18,6 @@ import hashlib import xml.etree.ElementTree as ET import time import operator -import nose import os import string import random @@ -32,6 +29,8 @@ from collections import namedtuple from email.header import decode_header from . 
import( + configfile, + setup_teardown, get_iam_client, get_sts_client, get_client, @@ -149,12 +148,8 @@ def get_s3_resource_using_iam_creds(): return s3_res_iam_creds -@attr(resource='get session token') -@attr(method='get') -@attr(operation='check') -@attr(assertion='s3 ops only accessible by temporary credentials') -@attr('test_of_sts') -@attr('fails_on_dbstore') +@pytest.mark.test_of_sts +@pytest.mark.fails_on_dbstore def test_get_session_token(): iam_client=get_iam_client() sts_client=get_sts_client() @@ -163,10 +158,10 @@ def test_get_session_token(): user_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"],\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}},{\"Effect\":\"Allow\",\"Action\":\"sts:GetSessionToken\",\"Resource\":\"*\",\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}}]}" (resp_err,resp,policy_name)=put_user_policy(iam_client,sts_user_id,None,user_policy) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 response=sts_client.get_session_token() - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client=boto3.client('s3', aws_access_key_id = response['Credentials']['AccessKeyId'], @@ -178,17 +173,13 @@ def test_get_session_token(): bucket_name = get_new_bucket_name() try: s3bucket = s3_client.create_bucket(Bucket=bucket_name) - eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200) + assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200 finish=s3_client.delete_bucket(Bucket=bucket_name) finally: # clean up user policy even if create_bucket/delete_bucket fails iam_client.delete_user_policy(UserName=sts_user_id,PolicyName=policy_name) -@attr(resource='get session token') -@attr(method='get') -@attr(operation='check') -@attr(assertion='s3 ops denied by permanent credentials') -@attr('test_of_sts') -@attr('fails_on_dbstore') +@pytest.mark.test_of_sts +@pytest.mark.fails_on_dbstore def test_get_session_token_permanent_creds_denied(): s3bucket_error=None iam_client=get_iam_client() @@ -200,10 +191,10 @@ def test_get_session_token_permanent_creds_denied(): user_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"],\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}},{\"Effect\":\"Allow\",\"Action\":\"sts:GetSessionToken\",\"Resource\":\"*\",\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}}]}" (resp_err,resp,policy_name)=put_user_policy(iam_client,sts_user_id,None,user_policy) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 response=sts_client.get_session_token() - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client=boto3.client('s3', aws_access_key_id = s3_main_access_key, @@ -217,15 +208,11 @@ def test_get_session_token_permanent_creds_denied(): s3bucket = s3_client.create_bucket(Bucket=bucket_name) except ClientError as e: s3bucket_error = e.response.get("Error", {}).get("Code") - eq(s3bucket_error,'AccessDenied') + assert s3bucket_error == 'AccessDenied' iam_client.delete_user_policy(UserName=sts_user_id,PolicyName=policy_name) -@attr(resource='assume role') -@attr(method='get') -@attr(operation='check') -@attr(assertion='role policy allows all s3 ops') -@attr('test_of_sts') -@attr('fails_on_dbstore') 
+@pytest.mark.test_of_sts +@pytest.mark.fails_on_dbstore def test_assume_role_allow(): iam_client=get_iam_client() sts_client=get_sts_client() @@ -235,14 +222,14 @@ def test_assume_role_allow(): policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}" (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -253,16 +240,12 @@ def test_assume_role_allow(): ) bucket_name = get_new_bucket_name() s3bucket = s3_client.create_bucket(Bucket=bucket_name) - eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200) + assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200 bkt = s3_client.delete_bucket(Bucket=bucket_name) - eq(bkt['ResponseMetadata']['HTTPStatusCode'],204) + assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204 -@attr(resource='assume role') -@attr(method='get') -@attr(operation='check') -@attr(assertion='role policy denies all s3 ops') -@attr('test_of_sts') -@attr('fails_on_dbstore') +@pytest.mark.test_of_sts +@pytest.mark.fails_on_dbstore def test_assume_role_deny(): s3bucket_error=None iam_client=get_iam_client() @@ -273,14 +256,14 @@ def test_assume_role_deny(): policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}" (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -294,14 +277,10 @@ def test_assume_role_deny(): s3bucket = s3_client.create_bucket(Bucket=bucket_name) except ClientError as e: s3bucket_error = e.response.get("Error", {}).get("Code") - eq(s3bucket_error,'AccessDenied') + assert s3bucket_error == 'AccessDenied' -@attr(resource='assume role') -@attr(method='get') -@attr(operation='check') -@attr(assertion='creds expire so all s3 ops 
fails') -@attr('test_of_sts') -@attr('fails_on_dbstore') +@pytest.mark.test_of_sts +@pytest.mark.fails_on_dbstore def test_assume_role_creds_expiry(): iam_client=get_iam_client() sts_client=get_sts_client() @@ -311,14 +290,14 @@ def test_assume_role_creds_expiry(): policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}" (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,DurationSeconds=900) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 time.sleep(900) s3_client = boto3.client('s3', @@ -333,14 +312,10 @@ def test_assume_role_creds_expiry(): s3bucket = s3_client.create_bucket(Bucket=bucket_name) except ClientError as e: s3bucket_error = e.response.get("Error", {}).get("Code") - eq(s3bucket_error,'AccessDenied') + assert s3bucket_error == 'AccessDenied' -@attr(resource='assume role') -@attr(method='head') -@attr(operation='check') -@attr(assertion='HEAD fails with 403 when role policy denies s3:ListBucket') -@attr('test_of_sts') -@attr('fails_on_dbstore') +@pytest.mark.test_of_sts +@pytest.mark.fails_on_dbstore def test_assume_role_deny_head_nonexistent(): # create a bucket with the normal s3 client bucket_name = get_new_bucket_name() @@ -354,15 +329,15 @@ def test_assume_role_deny_head_nonexistent(): policy_document = '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/'+sts_user_id+'"]},"Action":["sts:AssumeRole"]}]}' (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name) + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name # allow GetObject but deny ListBucket role_policy = '{"Version":"2012-10-17","Statement":{"Effect":"Allow","Action":"s3:GetObject","Principal":"*","Resource":"arn:aws:s3:::*"}}' (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -375,14 +350,10 @@ def test_assume_role_deny_head_nonexistent(): s3_client.head_object(Bucket=bucket_name, Key='nonexistent') except ClientError as e: status = e.response['ResponseMetadata']['HTTPStatusCode'] - eq(status,403) + assert status == 403 -@attr(resource='assume role') -@attr(method='head') -@attr(operation='check') -@attr(assertion='HEAD fails with 404 when 
role policy allows s3:ListBucket') -@attr('test_of_sts') -@attr('fails_on_dbstore') +@pytest.mark.test_of_sts +@pytest.mark.fails_on_dbstore def test_assume_role_allow_head_nonexistent(): # create a bucket with the normal s3 client bucket_name = get_new_bucket_name() @@ -396,15 +367,15 @@ def test_assume_role_allow_head_nonexistent(): policy_document = '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/'+sts_user_id+'"]},"Action":["sts:AssumeRole"]}]}' (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name) + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name # allow GetObject and ListBucket role_policy = '{"Version":"2012-10-17","Statement":{"Effect":"Allow","Action":["s3:GetObject","s3:ListBucket"],"Principal":"*","Resource":"arn:aws:s3:::*"}}' (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -417,16 +388,12 @@ def test_assume_role_allow_head_nonexistent(): s3_client.head_object(Bucket=bucket_name, Key='nonexistent') except ClientError as e: status = e.response['ResponseMetadata']['HTTPStatusCode'] - eq(status,404) + assert status == 404 -@attr(resource='assume role with web identity') -@attr(method='get') -@attr(operation='check') -@attr(assertion='assuming role through web token') -@attr('webidentity_test') -@attr('token_claims_trust_policy_test') -@attr('fails_on_dbstore') +@pytest.mark.webidentity_test +@pytest.mark.token_claims_trust_policy_test +@pytest.mark.fails_on_dbstore def test_assume_role_with_web_identity(): check_webidentity() iam_client=get_iam_client() @@ -447,14 +414,14 @@ def test_assume_role_with_web_identity(): policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}" (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -465,20 +432,16 @@ def test_assume_role_with_web_identity(): ) bucket_name 
= get_new_bucket_name() s3bucket = s3_client.create_bucket(Bucket=bucket_name) - eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200) + assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200 bkt = s3_client.delete_bucket(Bucket=bucket_name) - eq(bkt['ResponseMetadata']['HTTPStatusCode'],204) + assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204 oidc_remove=iam_client.delete_open_id_connect_provider( OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"] ) ''' -@attr(resource='assume role with web identity') -@attr(method='get') -@attr(operation='check') -@attr(assertion='assume_role_with_web_token creds expire') -@attr('webidentity_test') +@pytest.mark.webidentity_test def test_assume_role_with_web_identity_invalid_webtoken(): resp_error=None iam_client=get_iam_client() @@ -499,11 +462,11 @@ def test_assume_role_with_web_identity_invalid_webtoken(): policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}" (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 resp="" try: @@ -513,7 +476,7 @@ def test_assume_role_with_web_identity_invalid_webtoken(): log.debug('{}'.format(e.response.get("Error", {}).get("Code"))) log.debug('{}'.format(e)) resp_error = e.response.get("Error", {}).get("Code") - eq(resp_error,'AccessDenied') + assert resp_error == 'AccessDenied' oidc_remove=iam_client.delete_open_id_connect_provider( OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"] @@ -524,13 +487,9 @@ def test_assume_role_with_web_identity_invalid_webtoken(): # Session Policy Tests ####################### -@attr(resource='assume role with web identity') -@attr(method='get') -@attr(operation='check') -@attr(assertion='checking session policy working for two different buckets') -@attr('webidentity_test') -@attr('session_policy') -@attr('fails_on_dbstore') +@pytest.mark.webidentity_test +@pytest.mark.session_policy +@pytest.mark.fails_on_dbstore def test_session_policy_check_on_different_buckets(): check_webidentity() iam_client=get_iam_client() @@ -550,17 +509,17 @@ def test_session_policy_check_on_different_buckets(): policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}" (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy_new = 
"{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::test2\",\"arn:aws:s3:::test2/*\"]}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}" resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -575,14 +534,14 @@ def test_session_policy_check_on_different_buckets(): s3bucket = s3_client.create_bucket(Bucket=bucket_name_1) except ClientError as e: s3bucket_error = e.response.get("Error", {}).get("Code") - eq(s3bucket_error, 'AccessDenied') + assert s3bucket_error == 'AccessDenied' bucket_name_2 = 'test2' try: s3bucket = s3_client.create_bucket(Bucket=bucket_name_2) except ClientError as e: s3bucket_error = e.response.get("Error", {}).get("Code") - eq(s3bucket_error, 'AccessDenied') + assert s3bucket_error == 'AccessDenied' bucket_body = 'please-write-something' #body.encode(encoding='utf_8') @@ -590,20 +549,16 @@ def test_session_policy_check_on_different_buckets(): s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt") except ClientError as e: s3_put_obj_error = e.response.get("Error", {}).get("Code") - eq(s3_put_obj_error,'NoSuchBucket') + assert s3_put_obj_error == 'NoSuchBucket' oidc_remove=iam_client.delete_open_id_connect_provider( OpenIDConnectProviderArn=oidc_arn ) -@attr(resource='assume role with web identity') -@attr(method='put') -@attr(operation='check') -@attr(assertion='checking session policy working for same bucket') -@attr('webidentity_test') -@attr('session_policy') -@attr('fails_on_dbstore') +@pytest.mark.webidentity_test +@pytest.mark.session_policy +@pytest.mark.fails_on_dbstore def test_session_policy_check_on_same_bucket(): check_webidentity() iam_client=get_iam_client() @@ -623,23 +578,23 @@ def test_session_policy_check_on_same_bucket(): policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}" (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client_iam_creds = get_s3_client_using_iam_creds() bucket_name_1 = 'test1' s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1) - eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200) + assert 
-@attr(resource='assume role with web identity')
-@attr(method='put')
-@attr(operation='check')
-@attr(assertion='checking session policy working for same bucket')
-@attr('webidentity_test')
-@attr('session_policy')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
 def test_session_policy_check_on_same_bucket():
     check_webidentity()
     iam_client=get_iam_client()
@@ -623,23 +578,23 @@ def test_session_policy_check_on_same_bucket():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client_iam_creds = get_s3_client_using_iam_creds()
     bucket_name_1 = 'test1'
     s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -651,20 +606,16 @@ def test_session_policy_check_on_same_bucket():
     bucket_body = 'this is a test file'
     s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_arn
     )
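Every test in this file rebuilds its S3 client from the temporary credentials STS returns, always with the same key/secret/token triple. The repeated step, pulled out as a sketch (``endpoint`` stands in for the endpoint and TLS settings the suite actually takes from its configuration)::

    import boto3

    def s3_client_from_sts(creds, endpoint):
        # creds is resp['Credentials'] from assume_role or
        # assume_role_with_web_identity; all three fields are required,
        # and the trio expires together.
        return boto3.client('s3',
                            aws_access_key_id=creds['AccessKeyId'],
                            aws_secret_access_key=creds['SecretAccessKey'],
                            aws_session_token=creds['SessionToken'],
                            endpoint_url=endpoint,
                            region_name='')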
-@attr(resource='assume role with web identity')
-@attr(method='get')
-@attr(operation='check')
-@attr(assertion='checking put_obj op denial')
-@attr('webidentity_test')
-@attr('session_policy')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
 def test_session_policy_check_put_obj_denial():
     check_webidentity()
     iam_client=get_iam_client()
@@ -686,23 +637,23 @@ def test_session_policy_check_put_obj_denial():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client_iam_creds = get_s3_client_using_iam_creds()
     bucket_name_1 = 'test1'
     s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -717,20 +668,16 @@ def test_session_policy_check_put_obj_denial():
         s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
     except ClientError as e:
         s3_put_obj_error = e.response.get("Error", {}).get("Code")
-    eq(s3_put_obj_error, 'AccessDenied')
+    assert s3_put_obj_error == 'AccessDenied'
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_arn
     )
"{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}" (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client_iam_creds = get_s3_client_using_iam_creds() bucket_name_1 = 'test1' s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1) - eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200) + assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200 session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}" resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -844,20 +787,16 @@ def test_session_policy_check_different_op_permissions(): s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt") except ClientError as e: s3_put_obj_error = e.response.get("Error", {}).get("Code") - eq(s3_put_obj_error, 'AccessDenied') + assert s3_put_obj_error == 'AccessDenied' oidc_remove=iam_client.delete_open_id_connect_provider( OpenIDConnectProviderArn=oidc_arn ) -@attr(resource='assume role with web identity') -@attr(method='put') -@attr(operation='check') -@attr(assertion='checking op behaviour with deny effect') -@attr('webidentity_test') -@attr('session_policy') -@attr('fails_on_dbstore') +@pytest.mark.webidentity_test +@pytest.mark.session_policy +@pytest.mark.fails_on_dbstore def test_session_policy_check_with_deny_effect(): check_webidentity() iam_client=get_iam_client() @@ -879,23 +818,23 @@ def test_session_policy_check_with_deny_effect(): policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}" (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client_iam_creds = 
-@attr(resource='assume role with web identity')
-@attr(method='put')
-@attr(operation='check')
-@attr(assertion='checking op behaviour with deny effect')
-@attr('webidentity_test')
-@attr('session_policy')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
 def test_session_policy_check_with_deny_effect():
     check_webidentity()
     iam_client=get_iam_client()
@@ -879,23 +818,23 @@ def test_session_policy_check_with_deny_effect():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client_iam_creds = get_s3_client_using_iam_creds()
     bucket_name_1 = 'test1'
     s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -909,20 +848,16 @@ def test_session_policy_check_with_deny_effect():
         s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
     except ClientError as e:
         s3_put_obj_error = e.response.get("Error", {}).get("Code")
-    eq(s3_put_obj_error, 'AccessDenied')
+    assert s3_put_obj_error == 'AccessDenied'
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_arn
     )
-@attr(resource='assume role with web identity')
-@attr(method='put')
-@attr(operation='check')
-@attr(assertion='checking put_obj working with deny and allow on same op')
-@attr('webidentity_test')
-@attr('session_policy')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
 def test_session_policy_check_with_deny_on_same_op():
     check_webidentity()
     iam_client=get_iam_client()
@@ -944,23 +879,23 @@ def test_session_policy_check_with_deny_on_same_op():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client_iam_creds = get_s3_client_using_iam_creds()
     bucket_name_1 = 'test1'
     s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -975,19 +910,15 @@ def test_session_policy_check_with_deny_on_same_op():
         s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
     except ClientError as e:
         s3_put_obj_error = e.response.get("Error", {}).get("Code")
-    eq(s3_put_obj_error, 'AccessDenied')
+    assert s3_put_obj_error == 'AccessDenied'
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_arn
     )
-@attr(resource='assume role with web identity')
-@attr(method='put')
-@attr(operation='check')
-@attr(assertion='checking op when bucket policy has role arn')
-@attr('webidentity_test')
-@attr('session_policy')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
 def test_session_policy_bucket_policy_role_arn():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1007,16 +938,16 @@ def test_session_policy_bucket_policy_role_arn():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     s3client_iamcreds = get_s3_client_using_iam_creds()
     bucket_name_1 = 'test1'
     s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     resource1 = "arn:aws:s3:::" + bucket_name_1
     resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
@@ -1038,7 +969,7 @@ def test_session_policy_bucket_policy_role_arn():
     session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1049,25 +980,21 @@ def test_session_policy_bucket_policy_role_arn():
     )
     bucket_body = 'this is a test file'
     s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
     try:
         obj = s3_client.get_object(Bucket=bucket_name_1, Key="test-1.txt")
     except ClientError as e:
         s3object_error = e.response.get("Error", {}).get("Code")
-    eq(s3object_error, 'AccessDenied')
+    assert s3object_error == 'AccessDenied'
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_arn
     )
-@attr(resource='assume role with web identity')
-@attr(method='get')
-@attr(operation='check')
-@attr(assertion='checking op when bucket policy has session arn')
-@attr('webidentity_test')
-@attr('session_policy')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
 def test_session_policy_bucket_policy_session_arn():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1087,16 +1014,16 @@ def test_session_policy_bucket_policy_session_arn():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     s3client_iamcreds = get_s3_client_using_iam_creds()
     bucket_name_1 = 'test1'
     s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     resource1 = "arn:aws:s3:::" + bucket_name_1
     resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
@@ -1118,7 +1045,7 @@ def test_session_policy_bucket_policy_session_arn():
     session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1129,23 +1056,19 @@ def test_session_policy_bucket_policy_session_arn():
     )
     bucket_body = 'this is a test file'
     s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_get_obj = s3_client.get_object(Bucket=bucket_name_1, Key="test-1.txt")
-    eq(s3_get_obj['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_arn
     )
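The two bucket-policy tests above differ only in which principal the bucket policy names: the role ARN or the assumed-role session ARN. Under the AWS model a resource-policy grant to the role ARN is still filtered through the session policy (hence the denied read: the session policy only allows ``s3:PutObject``), while a grant to the specific session ARN applies independently of the session policy (hence the successful read). A sketch of the two ARN shapes involved — names hypothetical, empty account field as in this suite's ARNs::

    import json

    def bucket_policy(principal_arn, bucket, actions):
        return json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {"AWS": [principal_arn]},
                "Action": actions,
                "Resource": ["arn:aws:s3:::%s" % bucket,
                             "arn:aws:s3:::%s/*" % bucket],
            }]
        })

    role_arn    = "arn:aws:iam:::role/test-role"
    session_arn = "arn:aws:sts:::assumed-role/test-role/test-session"
    # ..._role_arn builds its policy from role_arn, ..._session_arn from
    # session_arn; only the latter bypasses the session policy's limits.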
-@attr(resource='assume role with web identity')
-@attr(method='put')
-@attr(operation='check')
-@attr(assertion='checking copy object op with role, session and bucket policy')
-@attr('webidentity_test')
-@attr('session_policy')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
 def test_session_policy_copy_object():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1165,16 +1088,16 @@ def test_session_policy_copy_object():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     s3client_iamcreds = get_s3_client_using_iam_creds()
     bucket_name_1 = 'test1'
     s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     resource1 = "arn:aws:s3:::" + bucket_name_1
     resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
@@ -1197,7 +1120,7 @@ def test_session_policy_copy_object():
     session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1208,7 +1131,7 @@ def test_session_policy_copy_object():
     )
     bucket_body = 'this is a test file'
     s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
     copy_source = {
     'Bucket': bucket_name_1,
@@ -1218,19 +1141,15 @@ def test_session_policy_copy_object():
     s3_client.copy(copy_source, bucket_name_1, "test-2.txt")
     s3_get_obj = s3_client.get_object(Bucket=bucket_name_1, Key="test-2.txt")
-    eq(s3_get_obj['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_arn
     )
-@attr(resource='assume role with web identity')
-@attr(method='put')
-@attr(operation='check')
-@attr(assertion='checking op is denied when no role policy')
-@attr('webidentity_test')
-@attr('session_policy')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
 def test_session_policy_no_bucket_role_policy():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1250,17 +1169,17 @@ def test_session_policy_no_bucket_role_policy():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     s3client_iamcreds = get_s3_client_using_iam_creds()
     bucket_name_1 = 'test1'
     s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\",\"s3:GetObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1274,19 +1193,15 @@ def test_session_policy_no_bucket_role_policy():
         s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
     except ClientError as e:
         s3putobj_error = e.response.get("Error", {}).get("Code")
-    eq(s3putobj_error, 'AccessDenied')
+    assert s3putobj_error == 'AccessDenied'
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_arn
     )
-@attr(resource='assume role with web identity')
-@attr(method='put')
-@attr(operation='check')
-@attr(assertion='checking op is denied when resource policy denies')
-@attr('webidentity_test')
-@attr('session_policy')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
 def test_session_policy_bucket_policy_deny():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1306,16 +1221,16 @@ def test_session_policy_bucket_policy_deny():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     s3client_iamcreds = get_s3_client_using_iam_creds()
     bucket_name_1 = 'test1'
     s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     resource1 = "arn:aws:s3:::" + bucket_name_1
     resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
@@ -1337,7 +1252,7 @@ def test_session_policy_bucket_policy_deny():
     session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1352,19 +1267,15 @@ def test_session_policy_bucket_policy_deny():
         s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
     except ClientError as e:
         s3putobj_error = e.response.get("Error", {}).get("Code")
-    eq(s3putobj_error, 'AccessDenied')
+    assert s3putobj_error == 'AccessDenied'
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_arn
     )
-@attr(resource='assume role with web identity')
-@attr(method='get')
-@attr(operation='check')
-@attr(assertion='assuming role using web token using sub in trust policy')
-@attr('webidentity_test')
-@attr('token_claims_trust_policy_test')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.token_claims_trust_policy_test
+@pytest.mark.fails_on_dbstore
 def test_assume_role_with_web_identity_with_sub():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1385,14 +1296,14 @@ def test_assume_role_with_web_identity_with_sub():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":sub\":\""+sub+"\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1403,21 +1314,17 @@ def test_assume_role_with_web_identity_with_sub():
     )
     bucket_name = get_new_bucket_name()
     s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
     )
-@attr(resource='assume role with web identity')
-@attr(method='get')
-@attr(operation='check')
-@attr(assertion='assuming role using web token using azp in trust policy')
-@attr('webidentity_test')
-@attr('token_claims_trust_policy_test')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.token_claims_trust_policy_test
+@pytest.mark.fails_on_dbstore
 def test_assume_role_with_web_identity_with_azp():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1438,14 +1345,14 @@ def test_assume_role_with_web_identity_with_azp():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":azp\":\""+azp+"\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1456,22 +1363,18 @@ def test_assume_role_with_web_identity_with_azp():
     )
     bucket_name = get_new_bucket_name()
     s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
     )
-@attr(resource='assume role with web identity')
-@attr(method='get')
-@attr(operation='check')
-@attr(assertion='assuming role using web token using aws:RequestTag in trust policy')
-@attr('webidentity_test')
-@attr('abac_test')
-@attr('token_request_tag_trust_policy_test')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_request_tag_trust_policy_test
+@pytest.mark.fails_on_dbstore
 def test_assume_role_with_web_identity_with_request_tag():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1491,14 +1394,14 @@ def test_assume_role_with_web_identity_with_request_tag():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1509,22 +1412,18 @@ def test_assume_role_with_web_identity_with_request_tag():
     )
     bucket_name = get_new_bucket_name()
     s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
     )
-@attr(resource='assume role with web identity')
-@attr(method='get')
-@attr(operation='check')
-@attr(assertion='assuming role using web token with aws:PrincipalTag in role policy')
-@attr('webidentity_test')
-@attr('abac_test')
-@attr('token_principal_tag_role_policy_test')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_principal_tag_role_policy_test
+@pytest.mark.fails_on_dbstore
 def test_assume_role_with_web_identity_with_principal_tag():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1544,14 +1443,14 @@ def test_assume_role_with_web_identity_with_principal_tag():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"aws:PrincipalTag/Department\":\"Engineering\"}}}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1562,22 +1461,18 @@ def test_assume_role_with_web_identity_with_principal_tag():
     )
     bucket_name = get_new_bucket_name()
     s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
    )
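These trust documents are all built by hand-escaped string concatenation; the same JSON can be produced more readably with ``json.dumps``, which also handles the quoting. An equivalent construction for the ABAC trust policy used by the surrounding tests — helper name illustrative, document content identical to the escaped-string version::

    import json

    def abac_trust_policy(oidc_arn):
        # Allow the OIDC provider to assume the role and tag the session,
        # provided the incoming session tag Department equals Engineering.
        return json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {"Federated": [oidc_arn]},
                "Action": ["sts:AssumeRoleWithWebIdentity", "sts:TagSession"],
                "Condition": {"StringEquals":
                              {"aws:RequestTag/Department": "Engineering"}},
            }]
        })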
-@attr(resource='assume role with web identity')
-@attr(method='get')
-@attr(operation='check')
-@attr(assertion='assuming role using web token with aws:PrincipalTag in role policy')
-@attr('webidentity_test')
-@attr('abac_test')
-@attr('token_principal_tag_role_policy_test')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_principal_tag_role_policy_test
+@pytest.mark.fails_on_dbstore
 def test_assume_role_with_web_identity_for_all_values():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1597,14 +1492,14 @@ def test_assume_role_with_web_identity_for_all_values():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAllValues:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\",\"Marketing\"]}}}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1615,22 +1510,18 @@ def test_assume_role_with_web_identity_for_all_values():
     )
     bucket_name = get_new_bucket_name()
     s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
     )
"{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAllValues:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\"]}}}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -1672,20 +1563,16 @@ def test_assume_role_with_web_identity_for_all_values_deny(): s3bucket = s3_client.create_bucket(Bucket=bucket_name) except ClientError as e: s3bucket_error = e.response.get("Error", {}).get("Code") - eq(s3bucket_error,'AccessDenied') + assert s3bucket_error == 'AccessDenied' oidc_remove=iam_client.delete_open_id_connect_provider( OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"] ) -@attr(resource='assume role with web identity') -@attr(method='get') -@attr(operation='check') -@attr(assertion='assuming role using web token with aws:TagKeys in trust policy') -@attr('webidentity_test') -@attr('abac_test') -@attr('token_tag_keys_test') -@attr('fails_on_dbstore') +@pytest.mark.webidentity_test +@pytest.mark.abac_test +@pytest.mark.token_tag_keys_test +@pytest.mark.fails_on_dbstore def test_assume_role_with_web_identity_tag_keys_trust_policy(): check_webidentity() iam_client=get_iam_client() @@ -1705,14 +1592,14 @@ def test_assume_role_with_web_identity_tag_keys_trust_policy(): policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:TagKeys\":\"Department\"}}}]}" (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAnyValue:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\"]}}}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -1723,22 +1610,18 @@ def test_assume_role_with_web_identity_tag_keys_trust_policy(): ) bucket_name = get_new_bucket_name() s3bucket = s3_client.create_bucket(Bucket=bucket_name) - eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200) + assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200 bkt = s3_client.delete_bucket(Bucket=bucket_name) - eq(bkt['ResponseMetadata']['HTTPStatusCode'],204) + 
-@attr(resource='assume role with web identity')
-@attr(method='get')
-@attr(operation='check')
-@attr(assertion='assuming role using web token with aws:TagKeys in trust policy')
-@attr('webidentity_test')
-@attr('abac_test')
-@attr('token_tag_keys_test')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_tag_keys_test
+@pytest.mark.fails_on_dbstore
 def test_assume_role_with_web_identity_tag_keys_trust_policy():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1705,14 +1592,14 @@ def test_assume_role_with_web_identity_tag_keys_trust_policy():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:TagKeys\":\"Department\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAnyValue:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\"]}}}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1723,22 +1610,18 @@ def test_assume_role_with_web_identity_tag_keys_trust_policy():
     )
     bucket_name = get_new_bucket_name()
     s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
     )
-@attr(resource='assume role with web identity')
-@attr(method='get')
-@attr(operation='check')
-@attr(assertion='assuming role using web token with aws:TagKeys in role permission policy')
-@attr('webidentity_test')
-@attr('abac_test')
-@attr('token_tag_keys_test')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_tag_keys_test
+@pytest.mark.fails_on_dbstore
 def test_assume_role_with_web_identity_tag_keys_role_policy():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1758,14 +1641,14 @@ def test_assume_role_with_web_identity_tag_keys_role_policy():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"aws:TagKeys\":[\"Department\"]}}}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1776,22 +1659,18 @@ def test_assume_role_with_web_identity_tag_keys_role_policy():
     )
     bucket_name = get_new_bucket_name()
     s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
     )
-@attr(resource='assume role with web identity')
-@attr(method='put')
-@attr(operation='check')
-@attr(assertion='assuming role using web token with s3:ResourceTag in role permission policy')
-@attr('webidentity_test')
-@attr('abac_test')
-@attr('token_resource_tags_test')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_resource_tags_test
+@pytest.mark.fails_on_dbstore
 def test_assume_role_with_web_identity_resource_tag():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1808,7 +1687,7 @@ def test_assume_role_with_web_identity_resource_tag():
     bucket_name = get_new_bucket_name()
     s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
     Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'},{'Key':'Department', 'Value': 'Marketing'}]})
@@ -1822,14 +1701,14 @@ def test_assume_role_with_web_identity_resource_tag():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
     resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
     s3_client = boto3.client('s3',
         aws_access_key_id = resp['Credentials']['AccessKeyId'],
@@ -1841,20 +1720,16 @@ def test_assume_role_with_web_identity_resource_tag():
     bucket_body = 'this is a test file'
     s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
-    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
     oidc_remove=iam_client.delete_open_id_connect_provider(
     OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
     )
-@attr(resource='assume role with web identity')
-@attr(method='put')
-@attr(operation='check')
-@attr(assertion='assuming role using web token with s3:ResourceTag with missing tags on bucket')
-@attr('webidentity_test')
-@attr('abac_test')
-@attr('token_resource_tags_test')
-@attr('fails_on_dbstore')
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_resource_tags_test
+@pytest.mark.fails_on_dbstore
 def test_assume_role_with_web_identity_resource_tag_deny():
     check_webidentity()
     iam_client=get_iam_client()
@@ -1871,7 +1746,7 @@ def test_assume_role_with_web_identity_resource_tag_deny():
     bucket_name = get_new_bucket_name()
     s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
-    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
     oidc_response = iam_client.create_open_id_connect_provider(
     Url='http://localhost:8080/auth/realms/{}'.format(realm),
@@ -1882,14 +1757,14 @@ def test_assume_role_with_web_identity_resource_tag_deny():
     policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
     (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
     role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
     (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -1970,20 +1841,16 @@ def test_assume_role_with_web_identity_wrong_resource_tag_deny(): s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt") except ClientError as e: s3_put_obj_error = e.response.get("Error", {}).get("Code") - eq(s3_put_obj_error,'AccessDenied') + assert s3_put_obj_error == 'AccessDenied' oidc_remove=iam_client.delete_open_id_connect_provider( OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"] ) -@attr(resource='assume role with web identity') -@attr(method='put') -@attr(operation='check') -@attr(assertion='assuming role using web token with s3:ResourceTag matching aws:PrincipalTag in role permission policy') -@attr('webidentity_test') -@attr('abac_test') -@attr('token_resource_tags_test') -@attr('fails_on_dbstore') +@pytest.mark.webidentity_test +@pytest.mark.abac_test +@pytest.mark.token_resource_tags_test +@pytest.mark.fails_on_dbstore def test_assume_role_with_web_identity_resource_tag_princ_tag(): check_webidentity() iam_client=get_iam_client() @@ -2000,7 +1867,7 @@ def test_assume_role_with_web_identity_resource_tag_princ_tag(): bucket_name = get_new_bucket_name() s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name) - eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200) + assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200 bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name) Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'}]}) @@ -2014,14 +1881,14 @@ def test_assume_role_with_web_identity_resource_tag_princ_tag(): policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}" (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"${aws:PrincipalTag/Department}\"]}}}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -2035,23 +1902,19 @@ def test_assume_role_with_web_identity_resource_tag_princ_tag(): tags = 'Department=Engineering&Department=Marketing' key = "test-1.txt" s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key=key, 
Tagging=tags) - eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200) + assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200 s3_get_obj = s3_client.get_object(Bucket=bucket_name, Key=key) - eq(s3_get_obj['ResponseMetadata']['HTTPStatusCode'],200) + assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200 oidc_remove=iam_client.delete_open_id_connect_provider( OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"] ) -@attr(resource='assume role with web identity') -@attr(method='put') -@attr(operation='check') -@attr(assertion='assuming role using web token with s3:ResourceTag used to test copy object') -@attr('webidentity_test') -@attr('abac_test') -@attr('token_resource_tags_test') -@attr('fails_on_dbstore') +@pytest.mark.webidentity_test +@pytest.mark.abac_test +@pytest.mark.token_resource_tags_test +@pytest.mark.fails_on_dbstore def test_assume_role_with_web_identity_resource_tag_copy_obj(): check_webidentity() iam_client=get_iam_client() @@ -2069,14 +1932,14 @@ def test_assume_role_with_web_identity_resource_tag_copy_obj(): #create two buckets and add same tags to both bucket_name = get_new_bucket_name() s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name) - eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200) + assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200 bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name) Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'}]}) copy_bucket_name = get_new_bucket_name() s3bucket = s3_client_iam_creds.create_bucket(Bucket=copy_bucket_name) - eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200) + assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200 bucket_tagging = s3_res_iam_creds.BucketTagging(copy_bucket_name) Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'}]}) @@ -2090,14 +1953,14 @@ def test_assume_role_with_web_identity_resource_tag_copy_obj(): policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}" (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"${aws:PrincipalTag/Department}\"]}}}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -2111,7 +1974,7 @@ def test_assume_role_with_web_identity_resource_tag_copy_obj(): tags = 'Department=Engineering' key = "test-1.txt" s3_put_obj = s3_client.put_object(Body=bucket_body, 
Bucket=bucket_name, Key=key, Tagging=tags) - eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200) + assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200 #copy to same bucket copy_source = { @@ -2122,7 +1985,7 @@ def test_assume_role_with_web_identity_resource_tag_copy_obj(): s3_client.copy(copy_source, bucket_name, "test-2.txt") s3_get_obj = s3_client.get_object(Bucket=bucket_name, Key="test-2.txt") - eq(s3_get_obj['ResponseMetadata']['HTTPStatusCode'],200) + assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200 #copy to another bucket copy_source = { @@ -2133,20 +1996,16 @@ def test_assume_role_with_web_identity_resource_tag_copy_obj(): s3_client.copy(copy_source, copy_bucket_name, "test-1.txt") s3_get_obj = s3_client.get_object(Bucket=copy_bucket_name, Key="test-1.txt") - eq(s3_get_obj['ResponseMetadata']['HTTPStatusCode'],200) + assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200 oidc_remove=iam_client.delete_open_id_connect_provider( OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"] ) -@attr(resource='assume role with web identity') -@attr(method='put') -@attr(operation='check') -@attr(assertion='assuming role using web token with iam:ResourceTag in role trust policy') -@attr('webidentity_test') -@attr('abac_test') -@attr('token_role_tags_test') -@attr('fails_on_dbstore') +@pytest.mark.webidentity_test +@pytest.mark.abac_test +@pytest.mark.token_role_tags_test +@pytest.mark.fails_on_dbstore def test_assume_role_with_web_identity_role_resource_tag(): check_webidentity() iam_client=get_iam_client() @@ -2163,7 +2022,7 @@ def test_assume_role_with_web_identity_role_resource_tag(): bucket_name = get_new_bucket_name() s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name) - eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200) + assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200 bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name) Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'},{'Key':'Department', 'Value': 'Marketing'}]}) @@ -2183,14 +2042,14 @@ def test_assume_role_with_web_identity_role_resource_tag(): ] (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None,tags_list) - eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'') + assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+'' role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}" (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy) - eq(response['ResponseMetadata']['HTTPStatusCode'],200) + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token) - eq(resp['ResponseMetadata']['HTTPStatusCode'],200) + assert resp['ResponseMetadata']['HTTPStatusCode'] == 200 s3_client = boto3.client('s3', aws_access_key_id = resp['Credentials']['AccessKeyId'], @@ -2202,7 +2061,7 @@ def test_assume_role_with_web_identity_role_resource_tag(): bucket_body = 'this is a test file' s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt") - eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200) + assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200 
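# The hunk above now passes a tags_list into create_role(); a hedged sketch of
# what that helper presumably forwards to. boto3's IAM create_role accepts a
# Tags parameter, and the trust policy's iam:ResourceTag condition is then
# evaluated against those role tags. The role name, the single Department tag,
# and building the policy with json.dumps (rather than the escaped string
# literals used in the tests) are illustrative assumptions.
import json

def create_tagged_role(iam_client, provider_arn):
    # Trust policy allowing federated web-identity callers only when the
    # role itself carries Department=Engineering.
    trust_policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"Federated": [provider_arn]},
            "Action": ["sts:AssumeRoleWithWebIdentity", "sts:TagSession"],
            "Condition": {"StringEquals": {"iam:ResourceTag/Department": "Engineering"}},
        }],
    })
    return iam_client.create_role(
        Path='/',
        RoleName='abac-demo-role',               # hypothetical name
        AssumeRolePolicyDocument=trust_policy,
        Tags=[{'Key': 'Department', 'Value': 'Engineering'}],
    )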
oidc_remove=iam_client.delete_open_id_connect_provider( OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"] diff --git a/s3tests_boto3/functional/test_utils.py b/s3tests_boto3/functional/test_utils.py index 59c3c74..c0dd398 100644 --- a/s3tests_boto3/functional/test_utils.py +++ b/s3tests_boto3/functional/test_utils.py @@ -1,11 +1,9 @@ -from nose.tools import eq_ as eq - from . import utils def test_generate(): FIVE_MB = 5 * 1024 * 1024 - eq(len(''.join(utils.generate_random(0))), 0) - eq(len(''.join(utils.generate_random(1))), 1) - eq(len(''.join(utils.generate_random(FIVE_MB - 1))), FIVE_MB - 1) - eq(len(''.join(utils.generate_random(FIVE_MB))), FIVE_MB) - eq(len(''.join(utils.generate_random(FIVE_MB + 1))), FIVE_MB + 1) + assert len(''.join(utils.generate_random(0))) == 0 + assert len(''.join(utils.generate_random(1))) == 1 + assert len(''.join(utils.generate_random(FIVE_MB - 1))) == FIVE_MB - 1 + assert len(''.join(utils.generate_random(FIVE_MB))) == FIVE_MB + assert len(''.join(utils.generate_random(FIVE_MB + 1))) == FIVE_MB + 1 diff --git a/s3tests_boto3/functional/utils.py b/s3tests_boto3/functional/utils.py index 4d9dc49..ab84c16 100644 --- a/s3tests_boto3/functional/utils.py +++ b/s3tests_boto3/functional/utils.py @@ -3,8 +3,6 @@ import requests import string import time -from nose.tools import eq_ as eq - def assert_raises(excClass, callableObj, *args, **kwargs): """ Like unittest.TestCase.assertRaises, but returns the exception. diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..93fde38 --- /dev/null +++ b/tox.ini @@ -0,0 +1,7 @@ +[tox] +envlist = py + +[testenv] +deps = -rrequirements.txt +passenv = S3TEST_CONF S3_USE_SIGV4 +commands = pytest {posargs}
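With nose's ``eq_`` gone, two follow-ups are worth noting. pytest only rewrites
``assert`` statements in modules it collects, so the plain asserts kept in
helpers such as ``s3tests_boto3/functional/utils.py`` lose detailed failure
output unless the module is registered for rewriting; and the custom marks used
above (``webidentity_test``, ``fails_on_dbstore``, and so on) should be declared
so that ``-m`` filters do not warn about unknown marks. If the patch does not
already cover this in ``pytest.ini``, an equivalent ``conftest.py`` sketch (mark
list abbreviated)::

    import pytest

    # Rewrite asserts in shared helpers so failures show both operands,
    # matching what nose's eq_ used to print.
    pytest.register_assert_rewrite("s3tests_boto3.functional.utils")

    def pytest_configure(config):
        # Declare the marks that replaced nose's @attr() attributes so that
        # `-m 'not fails_on_dbstore'`-style filtering is warning-free.
        for mark in ("webidentity_test", "abac_test", "token_resource_tags_test",
                     "token_role_tags_test", "fails_on_dbstore", "fails_on_aws",
                     "fails_on_rgw", "test_of_sts"):
            config.addinivalue_line("markers", mark)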