from cStringIO import StringIO
import boto.exception
import boto.s3.connection
import boto.s3.acl
import boto.s3.lifecycle
import bunch
import datetime
import time
import email.utils
import isodate
import nose
import operator
import socket
import ssl
import os
import requests
import base64
import hmac
import sha
import pytz
import json
import httplib2
import threading
import itertools
import string
import random
import re

from collections import defaultdict
from urlparse import urlparse

from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest

import utils
from .utils import assert_raises

from .policy import Policy, Statement, make_json_policy

from . import (
    nuke_prefixed_buckets,
    get_new_bucket,
    get_new_bucket_name,
    s3,
    targets,
    config,
    get_prefix,
    is_slow_backend,
    _make_request,
    _make_bucket_request,
    _make_raw_request,
    )

def check_access_denied(fn, *args, **kwargs):
    e = assert_raises(boto.exception.S3ResponseError, fn, *args, **kwargs)
    eq(e.status, 403)
    eq(e.reason, 'Forbidden')
    eq(e.error_code, 'AccessDenied')

def check_bad_bucket_name(name):
    """
    Attempt to create a bucket with a specified name, and confirm
    that the request fails because of an invalid bucket name.
    """
    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, name)
    eq(e.status, 400)
    eq(e.reason.lower(), 'bad request') # some proxies vary the case
    eq(e.error_code, 'InvalidBucketName')

def _create_keys(bucket=None, keys=[]):
    """
    Populate a (specified or new) bucket with objects with
    specified names (and contents identical to their names).
    """
    if bucket is None:
        bucket = get_new_bucket()

    for s in keys:
        key = bucket.new_key(s)
        key.set_contents_from_string(s)

    return bucket

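# A minimal usage sketch for _create_keys() (editorial example, not part of
# the upstream suite); the key names below are hypothetical. It is written as
# a helper function so nothing runs at import time.
def _example_create_keys_usage():
    bucket = _create_keys(keys=['foo', 'bar/baz'])
    # each object's contents are identical to its name
    eq(bucket.get_key('foo').get_contents_as_string(), 'foo')
    eq(len(bucket.get_all_keys()), 2)
    return bucket
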
def _get_alt_connection():
    return boto.s3.connection.S3Connection(
        aws_access_key_id=s3['alt'].aws_access_key_id,
        aws_secret_access_key=s3['alt'].aws_secret_access_key,
        is_secure=s3['alt'].is_secure,
        port=s3['alt'].port,
        host=s3['alt'].host,
        calling_format=s3['alt'].calling_format,
        )

# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/! in name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_bad_punctuation():
    # characters other than [a-zA-Z0-9._-]
    check_bad_bucket_name('alpha!soup')

def check_versioning(bucket, status):
    try:
        eq(bucket.get_versioning_status()['Versioning'], status)
    except KeyError:
        eq(status, None)

# Amazon is eventually consistent; retry a bit if the first read disagrees.
def check_configure_versioning_retry(bucket, status, expected_string):
    bucket.configure_versioning(status)

    read_status = None

    for i in xrange(5):
        try:
            read_status = bucket.get_versioning_status()['Versioning']
        except KeyError:
            read_status = None

        if (expected_string == read_status):
            break

        time.sleep(1)

    eq(expected_string, read_status)

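# A short usage sketch (editorial example, not part of the upstream suite)
# showing how the two versioning helpers above are typically combined: a new
# bucket reports no versioning status, and enabling/suspending is then
# verified through the retry helper.
def _example_versioning_helpers_usage():
    bucket = get_new_bucket()
    check_versioning(bucket, None)
    check_configure_versioning_retry(bucket, True, "Enabled")
    check_configure_versioning_retry(bucket, False, "Suspended")
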
@attr(resource='object')
@attr(method='create')
@attr(operation='create versioned object, read not exist null version')
@attr(assertion='read null version behaves correctly')
@attr('versioning')
def test_versioning_obj_read_not_exist_null():
    bucket = get_new_bucket()
    check_versioning(bucket, None)

    check_configure_versioning_retry(bucket, True, "Enabled")

    content = 'fooz'
    objname = 'testobj'

    key = bucket.new_key(objname)
    key.set_contents_from_string(content)

    key = bucket.get_key(objname, version_id='null')
    eq(key, None)

@attr(resource='object')
@attr(method='put')
@attr(operation='append object')
@attr(assertion='success')
@attr('fails_on_aws')
@attr('appendobject')
def test_append_object():
    bucket = get_new_bucket()
    key = bucket.new_key('foo')
    expires_in = 100000
    url = key.generate_url(expires_in, method='PUT')
    o = urlparse(url)
    path = o.path + '?' + o.query
    path1 = path + '&append&position=0'
    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path1, body='abc', secure=s3.main.is_secure)
    path2 = path + '&append&position=3'
    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path2, body='abc', secure=s3.main.is_secure)
    eq(res.status, 200)
    eq(res.reason, 'OK')

    key = bucket.get_key('foo')
    eq(key.size, 6)

@attr(resource='object')
@attr(method='put')
@attr(operation='append to normal object')
@attr(assertion='fails 409')
@attr('fails_on_aws')
@attr('appendobject')
def test_append_normal_object():
    bucket = get_new_bucket()
    key = bucket.new_key('foo')
    key.set_contents_from_string('abc')
    expires_in = 100000
    url = key.generate_url(expires_in, method='PUT')
    o = urlparse(url)
    path = o.path + '?' + o.query
    path = path + '&append&position=3'
    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path, body='abc', secure=s3.main.is_secure)
    eq(res.status, 409)

@attr(resource='object')
@attr(method='put')
@attr(operation='append position not right')
@attr(assertion='fails 409')
@attr('fails_on_aws')
@attr('appendobject')
def test_append_object_position_wrong():
    bucket = get_new_bucket()
    key = bucket.new_key('foo')
    expires_in = 100000
    url = key.generate_url(expires_in, method='PUT')
    o = urlparse(url)
    path = o.path + '?' + o.query
    path1 = path + '&append&position=0'
    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path1, body='abc', secure=s3.main.is_secure)
    path2 = path + '&append&position=9'
    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path2, body='abc', secure=s3.main.is_secure)
    eq(res.status, 409)
    eq(int(res.getheader('x-rgw-next-append-position')), 3)

# TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden
# http://tracker.newdream.net/issues/984
@attr(resource='bucket.log')
@attr(method='put')
@attr(operation='set/enable/disable logging target')
@attr(assertion='operations succeed')
@attr('fails_on_rgw')
def test_logging_toggle():
    bucket = get_new_bucket()
    log_bucket = get_new_bucket(targets.main.default, bucket.name + '-log')
    log_bucket.set_as_logging_target()
    bucket.enable_logging(target_bucket=log_bucket, target_prefix=bucket.name)
    bucket.disable_logging()
    # NOTE: this does not actually test whether or not logging works

def list_bucket_storage_class(bucket):
    result = defaultdict(list)
    for k in bucket.get_all_versions():
        result[k.storage_class].append(k)

    return result

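# Editorial note (assumption, not from the upstream file): the "days as
# 10 second intervals" behavior referenced by the lifecycle tests below is
# normally obtained by running radosgw with a shortened lifecycle debug
# interval, e.g. something like the following in ceph.conf:
#
#   [client.rgw]
#   rgw lc debug interval = 10
#
# Without such a setting the transitions would take real days and the
# sleep()-based assertions below would not be meaningful.
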
# The test harness for lifecycle is configured to treat days as 10 second intervals.
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration')
@attr('lifecycle')
@attr('lifecycle_transition')
@attr('fails_on_aws')
def test_lifecycle_transition():
    sc = configured_storage_classes()
    if len(sc) < 3:
        raise SkipTest

    bucket = set_lifecycle(rules=[{'id': 'rule1', 'transition': lc_transition(days=1, storage_class=sc[1]), 'prefix': 'expire1/', 'status': 'Enabled'},
                                  {'id': 'rule2', 'transition': lc_transition(days=4, storage_class=sc[2]), 'prefix': 'expire3/', 'status': 'Enabled'}])
    _create_keys(bucket=bucket, keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
                                      'keep2/bar', 'expire3/foo', 'expire3/bar'])
    # Get list of all keys
    init_keys = bucket.get_all_keys()
    eq(len(init_keys), 6)

    # Wait for first expiration (plus fudge to handle the timer window)
    time.sleep(25)
    expire1_keys = list_bucket_storage_class(bucket)
    eq(len(expire1_keys['STANDARD']), 4)
    eq(len(expire1_keys[sc[1]]), 2)
    eq(len(expire1_keys[sc[2]]), 0)

    # Wait for next expiration cycle
    time.sleep(10)
    keep2_keys = list_bucket_storage_class(bucket)
    eq(len(keep2_keys['STANDARD']), 4)
    eq(len(keep2_keys[sc[1]]), 2)
    eq(len(keep2_keys[sc[2]]), 0)

    # Wait for final expiration cycle
    time.sleep(20)
    expire3_keys = list_bucket_storage_class(bucket)
    eq(len(expire3_keys['STANDARD']), 2)
    eq(len(expire3_keys[sc[1]]), 2)
    eq(len(expire3_keys[sc[2]]), 2)

# The test harness for lifecycle is configured to treat days as 10 second intervals.
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration')
@attr('lifecycle')
@attr('lifecycle_transition')
@attr('fails_on_aws')
def test_lifecycle_transition_single_rule_multi_trans():
    sc = configured_storage_classes()
    if len(sc) < 3:
        raise SkipTest

    bucket = set_lifecycle(rules=[
        {'id': 'rule1',
         'transition': lc_transitions([
             lc_transition(days=1, storage_class=sc[1]),
             lc_transition(days=4, storage_class=sc[2])]),
         'prefix': 'expire1/',
         'status': 'Enabled'}])

    _create_keys(bucket=bucket, keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
                                      'keep2/bar', 'expire3/foo', 'expire3/bar'])
    # Get list of all keys
    init_keys = bucket.get_all_keys()
    eq(len(init_keys), 6)

    # Wait for first expiration (plus fudge to handle the timer window)
    time.sleep(25)
    expire1_keys = list_bucket_storage_class(bucket)
    eq(len(expire1_keys['STANDARD']), 4)
    eq(len(expire1_keys[sc[1]]), 2)
    eq(len(expire1_keys[sc[2]]), 0)

    # Wait for next expiration cycle
    time.sleep(10)
    keep2_keys = list_bucket_storage_class(bucket)
    eq(len(keep2_keys['STANDARD']), 4)
    eq(len(keep2_keys[sc[1]]), 2)
    eq(len(keep2_keys[sc[2]]), 0)

    # Wait for final expiration cycle
    time.sleep(20)
    expire3_keys = list_bucket_storage_class(bucket)
    eq(len(expire3_keys['STANDARD']), 4)
    eq(len(expire3_keys[sc[1]]), 0)
    eq(len(expire3_keys[sc[2]]), 2)

def generate_lifecycle_body(rules):
    body = '<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration>'
    for rule in rules:
        body += '<Rule><ID>%s</ID><Status>%s</Status>' % (rule['ID'], rule['Status'])
        if 'Prefix' in rule.keys():
            body += '<Prefix>%s</Prefix>' % rule['Prefix']
        if 'Filter' in rule.keys():
            prefix_str = '' # AWS supports empty filters
            if 'Prefix' in rule['Filter'].keys():
                prefix_str = '<Prefix>%s</Prefix>' % rule['Filter']['Prefix']
            body += '<Filter>%s</Filter>' % prefix_str

        if 'Expiration' in rule.keys():
            if 'ExpiredObjectDeleteMarker' in rule['Expiration'].keys():
                body += '<Expiration><ExpiredObjectDeleteMarker>%s</ExpiredObjectDeleteMarker></Expiration>' \
                        % rule['Expiration']['ExpiredObjectDeleteMarker']
            elif 'Date' in rule['Expiration'].keys():
                body += '<Expiration><Date>%s</Date></Expiration>' % rule['Expiration']['Date']
            else:
                body += '<Expiration><Days>%d</Days></Expiration>' % rule['Expiration']['Days']
        if 'NoncurrentVersionExpiration' in rule.keys():
            body += '<NoncurrentVersionExpiration><NoncurrentDays>%d</NoncurrentDays></NoncurrentVersionExpiration>' % \
                    rule['NoncurrentVersionExpiration']['NoncurrentDays']
        if 'NoncurrentVersionTransition' in rule.keys():
            for t in rule['NoncurrentVersionTransition']:
                body += '<NoncurrentVersionTransition>'
                body += '<NoncurrentDays>%d</NoncurrentDays>' % \
                    t['NoncurrentDays']
                body += '<StorageClass>%s</StorageClass>' % \
                    t['StorageClass']
                body += '</NoncurrentVersionTransition>'
        if 'AbortIncompleteMultipartUpload' in rule.keys():
            body += '<AbortIncompleteMultipartUpload><DaysAfterInitiation>%d</DaysAfterInitiation>' \
                    '</AbortIncompleteMultipartUpload>' % rule['AbortIncompleteMultipartUpload']['DaysAfterInitiation']
        body += '</Rule>'
    body += '</LifecycleConfiguration>'
    return body

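# A minimal sketch of how generate_lifecycle_body() is driven (editorial
# example, not part of the upstream suite); the rule values are hypothetical.
# A rules list with 'ID', 'Status', 'Prefix' and an 'Expiration' dict renders
# to a single <Rule> element inside <LifecycleConfiguration>, which the tests
# below then PUT on the bucket with the ?lifecycle query string.
def _example_generate_lifecycle_body_usage():
    rules = [{'ID': 'rule1', 'Status': 'Enabled', 'Prefix': 'test1/',
              'Expiration': {'Days': 2}}]
    body = generate_lifecycle_body(rules)
    eq('<Expiration><Days>2</Days></Expiration>' in body, True)
    return body
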
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with noncurrent version expiration')
@attr('lifecycle')
@attr('lifecycle_transition')
def test_lifecycle_set_noncurrent_transition():
    sc = configured_storage_classes()
    if len(sc) < 3:
        raise SkipTest

    bucket = get_new_bucket()
    rules = [
        {
            'ID': 'rule1',
            'Prefix': 'test1/',
            'Status': 'Enabled',
            'NoncurrentVersionTransition': [
                {
                    'NoncurrentDays': 2,
                    'StorageClass': sc[1]
                },
                {
                    'NoncurrentDays': 4,
                    'StorageClass': sc[2]
                }
            ],
            'NoncurrentVersionExpiration': {
                'NoncurrentDays': 6
            }
        },
        {'ID': 'rule2', 'Prefix': 'test2/', 'Status': 'Disabled', 'NoncurrentVersionExpiration': {'NoncurrentDays': 3}}
    ]
    body = generate_lifecycle_body(rules)
    fp = StringIO(body)
    md5 = boto.utils.compute_md5(fp)
    headers = {'Content-MD5': md5[1], 'Content-Type': 'text/xml'}
    res = bucket.connection.make_request('PUT', bucket.name, data=fp.getvalue(), query_args='lifecycle',
                                         headers=headers)
    eq(res.status, 200)
    eq(res.reason, 'OK')

@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle non-current version expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('lifecycle_transition')
@attr('fails_on_aws')
def test_lifecycle_noncur_transition():
    sc = configured_storage_classes()
    if len(sc) < 3:
        raise SkipTest

    bucket = get_new_bucket()
    check_configure_versioning_retry(bucket, True, "Enabled")

    rules = [
        {
            'ID': 'rule1',
            'Prefix': 'test1/',
            'Status': 'Enabled',
            'NoncurrentVersionTransition': [
                {
                    'NoncurrentDays': 1,
                    'StorageClass': sc[1]
                },
                {
                    'NoncurrentDays': 3,
                    'StorageClass': sc[2]
                }
            ],
            'NoncurrentVersionExpiration': {
                'NoncurrentDays': 5
            }
        }
    ]
    body = generate_lifecycle_body(rules)
    fp = StringIO(body)
    md5 = boto.utils.compute_md5(fp)
    headers = {'Content-MD5': md5[1], 'Content-Type': 'text/xml'}
    bucket.connection.make_request('PUT', bucket.name, data=fp.getvalue(), query_args='lifecycle',
                                   headers=headers)

    create_multiple_versions(bucket, "test1/a", 3)
    create_multiple_versions(bucket, "test1/b", 3)
    init_keys = bucket.get_all_versions()
    eq(len(init_keys), 6)

    time.sleep(25)
    expire1_keys = list_bucket_storage_class(bucket)
    eq(len(expire1_keys['STANDARD']), 2)
    eq(len(expire1_keys[sc[1]]), 4)
    eq(len(expire1_keys[sc[2]]), 0)

    time.sleep(20)
    expire1_keys = list_bucket_storage_class(bucket)
    eq(len(expire1_keys['STANDARD']), 2)
    eq(len(expire1_keys[sc[1]]), 0)
    eq(len(expire1_keys[sc[2]]), 4)

    time.sleep(20)
    expire_keys = bucket.get_all_versions()
    expire1_keys = list_bucket_storage_class(bucket)
    eq(len(expire1_keys['STANDARD']), 2)
    eq(len(expire1_keys[sc[1]]), 0)
    eq(len(expire1_keys[sc[2]]), 0)

def transfer_part(bucket, mp_id, mp_keyname, i, part, headers=None):
    """Transfer a part of a multipart upload. Designed to be run in parallel.
    """
    mp = boto.s3.multipart.MultiPartUpload(bucket)
    mp.key_name = mp_keyname
    mp.id = mp_id
    part_out = StringIO(part)
    mp.upload_part_from_file(part_out, i+1, headers=headers)

def generate_random(size, part_size=5*1024*1024):
    """
    Generate the specified amount of random data, yielded in chunks of
    at most part_size bytes.
    (actually each MB is a repetition of the first KB)
    """
    chunk = 1024
    allowed = string.ascii_letters
    for x in range(0, size, part_size):
        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in xrange(chunk)])
        s = ''
        left = size - x
        this_part_size = min(left, part_size)
        for y in range(this_part_size / chunk):
            s = s + strpart
        if this_part_size > len(s):
            s = s + strpart[0:this_part_size - len(s)]
        yield s
        if (x == size):
            return

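# A minimal sketch of how generate_random() output is reassembled into the
# full payload (editorial example, not part of the upstream suite); the 1 MiB
# size and 256 KiB part size are arbitrary.
def _example_generate_random_usage():
    size = 1024*1024
    data = ''.join(generate_random(size, part_size=256*1024))
    # the concatenated parts always add up to exactly the requested size
    eq(len(data), size)
    return data
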
def _multipart_upload(bucket, s3_key_name, size, part_size=5*1024*1024, do_list=None, headers=None, metadata=None, storage_class=None, resend_parts=[]):
    """
    generate a multi-part upload for a random file of specified size,
    if requested, generate a list of the parts
    return the upload descriptor
    """

    if storage_class is not None:
        if not headers:
            headers = {}
        headers['X-Amz-Storage-Class'] = storage_class

    upload = bucket.initiate_multipart_upload(s3_key_name, headers=headers, metadata=metadata)
    s = ''
    for i, part in enumerate(generate_random(size, part_size)):
        s += part
        transfer_part(bucket, upload.id, upload.key_name, i, part, headers)
        if i in resend_parts:
            transfer_part(bucket, upload.id, upload.key_name, i, part, headers)

    if do_list is not None:
        l = bucket.list_multipart_uploads()
        l = list(l)

    return (upload, s)

def _populate_key(bucket, keyname, size=7*1024*1024, storage_class=None):
    if bucket is None:
        bucket = get_new_bucket()
    key = bucket.new_key(keyname)
    if storage_class:
        key.storage_class = storage_class
    data_str = str(generate_random(size, size).next())
    data = StringIO(data_str)
    key.set_contents_from_file(fp=data)
    return (key, data_str)

def gen_rand_string(size, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))

def verify_object(bucket, k, data=None, storage_class=None):
    if storage_class:
        eq(k.storage_class, storage_class)

    if data:
        read_data = k.get_contents_as_string()

        equal = data == read_data # avoid spamming log if data not equal
        eq(equal, True)

def copy_object_storage_class(src_bucket, src_key, dest_bucket, dest_key, storage_class):
    query_args = None

    if dest_key.version_id:
        query_args = 'versionId={v}'.format(v=dest_key.version_id)

    headers = {}
    headers['X-Amz-Copy-Source'] = '/{bucket}/{object}'.format(bucket=src_bucket.name, object=src_key.name)
    if src_key.version_id:
        headers['X-Amz-Copy-Source-Version-Id'] = src_key.version_id
    headers['X-Amz-Storage-Class'] = storage_class

    res = dest_bucket.connection.make_request('PUT', dest_bucket.name, dest_key.name,
                                              query_args=query_args, headers=headers)
    eq(res.status, 200)

def _populate_multipart_key(bucket, kname, size, storage_class=None):
    (upload, data) = _multipart_upload(bucket, kname, size, storage_class=storage_class)
    upload.complete_upload()

    k = bucket.get_key(kname)

    return (k, data)

# Create a lifecycle config. Either days (int) and prefix (string) are given, or rules.
# Rules is an array of dicts; each dict has 'prefix' and 'status' keys, plus
# optional 'id', 'days' (expiration), and 'transition' entries.
def create_lifecycle(days=None, prefix='test/', rules=None):
    lifecycle = boto.s3.lifecycle.Lifecycle()
    if rules is None:
        expiration = boto.s3.lifecycle.Expiration(days=days)
        rule = boto.s3.lifecycle.Rule(id=prefix, prefix=prefix, status='Enabled',
                                      expiration=expiration)
        lifecycle.append(rule)
    else:
        for rule in rules:
            expiration = None
            transition = None
            try:
                expiration = boto.s3.lifecycle.Expiration(days=rule['days'])
            except:
                pass

            try:
                transition = rule['transition']
            except:
                pass

            _id = rule.get('id', None)
            rule = boto.s3.lifecycle.Rule(id=_id, prefix=rule['prefix'],
                                          status=rule['status'], expiration=expiration, transition=transition)
            lifecycle.append(rule)
    return lifecycle

def set_lifecycle(rules=None):
    bucket = get_new_bucket()
    lifecycle = create_lifecycle(rules=rules)
    bucket.configure_lifecycle(lifecycle)
    return bucket

def configured_storage_classes():
    sc = ['STANDARD']

    if 'storage_classes' in config['main']:
        extra_sc = re.split(r'\W+', config['main']['storage_classes'])

        for item in extra_sc:
            if item != 'STANDARD':
                sc.append(item)

    return sc

def lc_transition(days=None, date=None, storage_class=None):
    return boto.s3.lifecycle.Transition(days=days, date=date, storage_class=storage_class)

def lc_transitions(transitions=None):
    result = boto.s3.lifecycle.Transitions()
    for t in transitions:
        result.add_transition(days=t.days, date=t.date, storage_class=t.storage_class)

    return result

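# A minimal sketch tying the lifecycle helpers above together (editorial
# example, not part of the upstream suite): build a one-rule lifecycle that
# transitions 'docs/' objects after one (accelerated) day, assuming at least
# one extra storage class is configured. The rule id and prefix are
# hypothetical.
def _example_set_lifecycle_transition_usage():
    sc = configured_storage_classes()
    if len(sc) < 2:
        raise SkipTest
    bucket = set_lifecycle(rules=[{'id': 'docs-rule',
                                   'prefix': 'docs/',
                                   'status': 'Enabled',
                                   'transition': lc_transition(days=1, storage_class=sc[1])}])
    return bucket
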
@attr(resource='object')
@attr(method='put')
@attr(operation='test create object with storage class')
@attr('storage_class')
@attr('fails_on_aws')
def test_object_storage_class():
    sc = configured_storage_classes()
    if len(sc) < 2:
        raise SkipTest

    bucket = get_new_bucket()

    for storage_class in sc:
        kname = 'foo-' + storage_class
        k, data = _populate_key(bucket, kname, size=9*1024*1024, storage_class=storage_class)

        verify_object(bucket, k, data, storage_class)

@attr(resource='object')
@attr(method='put')
@attr(operation='test create multipart object with storage class')
@attr('storage_class')
@attr('fails_on_aws')
def test_object_storage_class_multipart():
    sc = configured_storage_classes()
    if len(sc) < 2:
        raise SkipTest

    bucket = get_new_bucket()
    size = 11 * 1024 * 1024

    for storage_class in sc:
        key = "mymultipart-" + storage_class
        (upload, data) = _multipart_upload(bucket, key, size, storage_class=storage_class)
        upload.complete_upload()
        key2 = bucket.get_key(key)
        eq(key2.size, size)
        eq(key2.storage_class, storage_class)

def _do_test_object_modify_storage_class(obj_write_func, size):
    sc = configured_storage_classes()
    if len(sc) < 2:
        raise SkipTest

    bucket = get_new_bucket()

    for storage_class in sc:
        kname = 'foo-' + storage_class
        k, data = obj_write_func(bucket, kname, size, storage_class=storage_class)

        verify_object(bucket, k, data, storage_class)

        for new_storage_class in sc:
            if new_storage_class == storage_class:
                continue

            copy_object_storage_class(bucket, k, bucket, k, new_storage_class)
            verify_object(bucket, k, data, storage_class)

@attr(resource='object')
@attr(method='put')
@attr(operation='test changing objects storage class')
@attr('storage_class')
@attr('fails_on_aws')
def test_object_modify_storage_class():
    _do_test_object_modify_storage_class(_populate_key, size=9*1024*1024)

@attr(resource='object')
@attr(method='put')
@attr(operation='test changing objects storage class')
@attr('storage_class')
@attr('fails_on_aws')
def test_object_modify_storage_class_multipart():
    _do_test_object_modify_storage_class(_populate_multipart_key, size=11*1024*1024)

def _do_test_object_storage_class_copy(obj_write_func, size):
    sc = configured_storage_classes()
    if len(sc) < 2:
        raise SkipTest

    src_bucket = get_new_bucket()
    dest_bucket = get_new_bucket()
    kname = 'foo'

    src_key, data = obj_write_func(src_bucket, kname, size)
    verify_object(src_bucket, src_key, data)

    for new_storage_class in sc:
        if new_storage_class == src_key.storage_class:
            continue

        dest_key = dest_bucket.get_key('foo-' + new_storage_class, validate=False)

        copy_object_storage_class(src_bucket, src_key, dest_bucket, dest_key, new_storage_class)
        verify_object(dest_bucket, dest_key, data, new_storage_class)

@attr(resource='object')
@attr(method='copy')
@attr(operation='test copy object to object with different storage class')
@attr('storage_class')
@attr('fails_on_aws')
def test_object_storage_class_copy():
    _do_test_object_storage_class_copy(_populate_key, size=9*1024*1024)

@attr(resource='object')
@attr(method='copy')
@attr(operation='test changing objects storage class')
@attr('storage_class')
@attr('fails_on_aws')
def test_object_storage_class_copy_multipart():
    _do_test_object_storage_class_copy(_populate_multipart_key, size=9*1024*1024)

class FakeFile(object):
    """
    file that simulates seek, tell, and current character
    """
    def __init__(self, char='A', interrupt=None):
        self.offset = 0
        self.char = char
        self.interrupt = interrupt

    def seek(self, offset, whence=os.SEEK_SET):
        if whence == os.SEEK_SET:
            self.offset = offset
        elif whence == os.SEEK_END:
            self.offset = self.size + offset
        elif whence == os.SEEK_CUR:
            self.offset += offset

    def tell(self):
        return self.offset

class FakeWriteFile(FakeFile):
    """
    file that simulates interruptible reads of constant data
    """
    def __init__(self, size, char='A', interrupt=None):
        FakeFile.__init__(self, char, interrupt)
        self.size = size

    def read(self, size=-1):
        if size < 0:
            size = self.size - self.offset
        count = min(size, self.size - self.offset)
        self.offset += count

        # Sneaky! do stuff before we return (the last time)
        if self.interrupt is not None and self.offset == self.size and count > 0:
            self.interrupt()

        return self.char*count

class FakeFileVerifier(object):
    """
    file that verifies expected data has been written
    """
    def __init__(self, char=None):
        self.char = char
        self.size = 0

    def write(self, data):
        size = len(data)
        if self.char is None:
            self.char = data[0]
        self.size += size
        eq(data, self.char*size)

def _verify_atomic_key_data(key, size=-1, char=None):
    """
    Make sure file is of the expected size and (simulated) content
    """
    fp_verify = FakeFileVerifier(char)
    key.get_contents_to_file(fp_verify)
    if size >= 0:
        eq(fp_verify.size, size)

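# A minimal local sketch (editorial example, not part of the upstream suite)
# of how the fake file objects above fit together, with no S3 involved:
# FakeWriteFile produces a constant-character stream and FakeFileVerifier
# checks that exactly that stream was written back.
def _example_fake_file_roundtrip():
    src = FakeWriteFile(4096, 'A')
    sink = FakeFileVerifier()
    sink.write(src.read())
    eq(sink.size, 4096)
    eq(sink.char, 'A')
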
def _test_atomic_dual_conditional_write(file_size):
    """
    create an object, two sessions writing different contents
    confirm that it is all one or the other
    """
    bucket = get_new_bucket()
    objname = 'testobj'
    key = bucket.new_key(objname)

    fp_a = FakeWriteFile(file_size, 'A')
    key.set_contents_from_file(fp_a)
    _verify_atomic_key_data(key, file_size, 'A')
    etag_fp_a = key.etag.replace('"', '').strip()

    # get a second key object (for the same key)
    # so both can be writing without interfering
    key2 = bucket.new_key(objname)

    # write <file_size> file of C's
    # but before we're done, try to write all B's
    fp_b = FakeWriteFile(file_size, 'B')
    fp_c = FakeWriteFile(file_size, 'C',
        lambda: key2.set_contents_from_file(fp_b, rewind=True, headers={'If-Match': etag_fp_a})
        )
    # key.set_contents_from_file(fp_c, headers={'If-Match': etag_fp_a})
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_file, fp_c,
                      headers={'If-Match': etag_fp_a})
    eq(e.status, 412)
    eq(e.reason, 'Precondition Failed')
    eq(e.error_code, 'PreconditionFailed')

    # verify the file
    _verify_atomic_key_data(key, file_size, 'B')

@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='1MB successful')
@attr('fails_on_aws')
def test_atomic_dual_conditional_write_1mb():
    _test_atomic_dual_conditional_write(1024*1024)

@attr(resource='object')
@attr(method='put')
@attr(operation='write file in deleted bucket')
@attr(assertion='fail 404')
@attr('fails_on_aws')
def test_atomic_write_bucket_gone():
    bucket = get_new_bucket()

    def remove_bucket():
        bucket.delete()

    # create file of A's but delete the bucket it's in before we finish writing
    # all of them
    key = bucket.new_key('foo')
    fp_a = FakeWriteFile(1024*1024, 'A', remove_bucket)
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_file, fp_a)
    eq(e.status, 404)
    eq(e.reason, 'Not Found')
    eq(e.error_code, 'NoSuchBucket')

def _multipart_upload_enc(bucket, s3_key_name, size, part_size=5*1024*1024,
                          do_list=None, init_headers=None, part_headers=None,
                          metadata=None, resend_parts=[]):
    """
    generate a multi-part upload for a random file of specified size,
    if requested, generate a list of the parts
    return the upload descriptor
    """
    upload = bucket.initiate_multipart_upload(s3_key_name, headers=init_headers, metadata=metadata)
    s = ''
    for i, part in enumerate(generate_random(size, part_size)):
        s += part
        transfer_part(bucket, upload.id, upload.key_name, i, part, part_headers)
        if i in resend_parts:
            transfer_part(bucket, upload.id, upload.key_name, i, part, part_headers)

    if do_list is not None:
        l = bucket.list_multipart_uploads()
        l = list(l)

    return (upload, s)

@attr(resource='object')
@attr(method='put')
@attr(operation='multipart upload with bad key for uploading chunks')
@attr(assertion='successful')
@attr('encryption')
def test_encryption_sse_c_multipart_invalid_chunks_1():
    bucket = get_new_bucket()
    key = "multipart_enc"
    content_type = 'text/bla'
    objlen = 30 * 1024 * 1024
    init_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
        'Content-Type': content_type
    }
    part_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
    }
    e = assert_raises(boto.exception.S3ResponseError,
                      _multipart_upload_enc, bucket, key, objlen,
                      init_headers=init_headers, part_headers=part_headers,
                      metadata={'foo': 'bar'})
    eq(e.status, 400)

@attr(resource='object')
@attr(method='put')
@attr(operation='multipart upload with bad md5 for chunks')
@attr(assertion='successful')
@attr('encryption')
def test_encryption_sse_c_multipart_invalid_chunks_2():
    bucket = get_new_bucket()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    init_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
        'Content-Type': content_type
    }
    part_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
    }
    e = assert_raises(boto.exception.S3ResponseError,
                      _multipart_upload_enc, bucket, key, objlen,
                      init_headers=init_headers, part_headers=part_headers,
                      metadata={'foo': 'bar'})
    eq(e.status, 400)

@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy for a user belonging to a different tenant')
@attr(assertion='succeeds')
@attr('bucket-policy')
def test_bucket_policy_different_tenant():
    bucket = get_new_bucket()
    key = bucket.new_key('asdf')
    key.set_contents_from_string('asdf')
    l = bucket.list()
    resource1 = "arn:aws:s3::*:" + bucket.name
    resource2 = "arn:aws:s3::*:" + bucket.name + "/*"
    policy_document = json.dumps(
    {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [
                "{}".format(resource1),
                "{}".format(resource2)
            ]
        }]
    })

    bucket.set_policy(policy_document)

    new_conn = boto.s3.connection.S3Connection(
        aws_access_key_id=s3['tenant'].aws_access_key_id,
        aws_secret_access_key=s3['tenant'].aws_secret_access_key,
        is_secure=s3['tenant'].is_secure,
        port=s3['tenant'].port,
        host=s3['tenant'].host,
        calling_format=s3['tenant'].calling_format,
        )
    bucket_name = ":" + bucket.name
    b = new_conn.get_bucket(bucket_name)
    b.get_all_keys()

@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put condition operator end with ifExists')
@attr('bucket-policy')
def test_bucket_policy_set_condition_operator_end_with_IfExists():
    bucket = _create_keys(keys=['foo'])
    policy = '''{
      "Version":"2012-10-17",
      "Statement": [{
        "Sid": "Allow Public Access to All Objects",
        "Effect": "Allow",
        "Principal": "*",
        "Action": "s3:GetObject",
        "Condition": {
          "StringLikeIfExists": {
            "aws:Referer": "http://www.example.com/*"
          }
        },
        "Resource": "arn:aws:s3:::%s/*"
      }]
    }''' % bucket.name
    eq(bucket.set_policy(policy), True)
    res = _make_request('GET', bucket.name, bucket.get_key("foo"),
                        request_headers={'referer': 'http://www.example.com/'})
    eq(res.status, 200)
    res = _make_request('GET', bucket.name, bucket.get_key("foo"),
                        request_headers={'referer': 'http://www.example.com/index.html'})
    eq(res.status, 200)
    res = _make_request('GET', bucket.name, bucket.get_key("foo"))
    eq(res.status, 200)
    res = _make_request('GET', bucket.name, bucket.get_key("foo"),
                        request_headers={'referer': 'http://example.com'})
    eq(res.status, 403)

def _make_arn_resource(path="*"):
|
|
|
|
return "arn:aws:s3:::{}".format(path)
|
2017-10-20 13:41:32 +00:00
|
|
|
|
|
|
|
@attr(resource='object')
@attr(method='put')
@attr(operation='Deny put obj requests without encryption')
@attr(assertion='success')
@attr('encryption')
@attr('bucket-policy')
def test_bucket_policy_put_obj_enc():
    bucket = get_new_bucket()

    deny_incorrect_algo = {
        "StringNotEquals": {
            "s3:x-amz-server-side-encryption": "AES256"
        }
    }

    deny_unencrypted_obj = {
        "Null": {
            "s3:x-amz-server-side-encryption": "true"
        }
    }

    p = Policy()
    resource = _make_arn_resource("{}/{}".format(bucket.name, "*"))

    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
    policy_document = p.add_statement(s1).add_statement(s2).to_json()

    bucket.set_policy(policy_document)

    key1_str = 'testobj'
    key1 = bucket.new_key(key1_str)
    check_access_denied(key1.set_contents_from_string, key1_str)

    sse_client_headers = {
        'x-amz-server-side-encryption': 'AES256',
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }

    key1.set_contents_from_string(key1_str, headers=sse_client_headers)

@attr(resource='object')
@attr(method='put')
@attr(operation='put obj with RequestObjectTag')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_request_obj_tag():
    bucket = get_new_bucket()

    tag_conditional = {"StringEquals": {
        "s3:RequestObjectTag/security": "public"
    }}

    p = Policy()
    resource = _make_arn_resource("{}/{}".format(bucket.name, "*"))

    s1 = Statement("s3:PutObject", resource, effect="Allow", condition=tag_conditional)
    policy_document = p.add_statement(s1).to_json()

    bucket.set_policy(policy_document)

    new_conn = _get_alt_connection()
    bucket1 = new_conn.get_bucket(bucket.name, validate=False)
    key1_str = 'testobj'
    key1 = bucket1.new_key(key1_str)
    check_access_denied(key1.set_contents_from_string, key1_str)

    headers = {"x-amz-tagging": "security=public"}
    key1.set_contents_from_string(key1_str, headers=headers)