[#312] add new test for s3

Signed-off-by: Yulia Kovshova <y.kovshova@yadro.com>
Юлия Ковшова 2022-09-19 17:22:10 +03:00 committed by Julia Kovshova
parent b385c2466c
commit ee2ed667c6
12 changed files with 2754 additions and 784 deletions

@@ -0,0 +1,45 @@
from typing import Optional
import allure
import pytest
from steps import s3_gate_bucket, s3_gate_object
@allure.step("Expected all objects are presented in the bucket")
def check_objects_in_bucket(
s3_client, bucket, expected_objects: list, unexpected_objects: Optional[list] = None
) -> None:
unexpected_objects = unexpected_objects or []
bucket_objects = s3_gate_object.list_objects_s3(s3_client, bucket)
assert len(bucket_objects) == len(
expected_objects
), f"Expected {len(expected_objects)} objects in the bucket"
for bucket_object in expected_objects:
assert (
bucket_object in bucket_objects
), f"Expected object {bucket_object} in objects list {bucket_objects}"
for bucket_object in unexpected_objects:
assert (
bucket_object not in bucket_objects
), f"Expected object {bucket_object} not in objects list {bucket_objects}"
@allure.step("Try to get object and got error")
def try_to_get_objects_and_expect_error(s3_client, bucket: str, object_keys: list) -> None:
for obj in object_keys:
try:
s3_gate_object.get_object_s3(s3_client, bucket, obj)
raise AssertionError(f"Object {obj} found in bucket {bucket}")
except Exception as err:
assert "The specified key does not exist" in str(
err
), f"Expected error in exception {err}"
@allure.step("Set versioning enable for bucket")
def set_bucket_versioning(s3_client, bucket: str, status: s3_gate_bucket.VersioningStatus):
s3_gate_bucket.get_bucket_versioning_status(s3_client, bucket)
s3_gate_bucket.set_bucket_versioning(s3_client, bucket, status=status)
bucket_status = s3_gate_bucket.get_bucket_versioning_status(s3_client, bucket)
assert bucket_status == status.value, f"Expected {status.value} status. Got {bucket_status}"

@@ -0,0 +1,406 @@
import json
import logging
import os
from datetime import datetime
from typing import Optional
import allure
from cli_helpers import _cmd_run, _configure_aws_cli
from common import ASSETS_DIR, S3_GATE
logger = logging.getLogger("NeoLogger")
REGULAR_TIMEOUT = 90
LONG_TIMEOUT = 240
class AwsCliClient:
# Flags that we use for all S3 commands: disable SSL verification (as we use self-signed
# certificate in devenv) and disable automatic pagination in CLI output
common_flags = "--no-verify-ssl --no-paginate"
def __init__(self, access_key_id: str, secret_access_key: str):
self.access_key_id = access_key_id
self.secret_access_key = secret_access_key
self.config_aws_client()
def config_aws_client(self):
cmd = "aws configure"
logger.info(f"Executing command: {cmd}")
_configure_aws_cli(cmd, self.access_key_id, self.secret_access_key)
def create_bucket(self, Bucket: str, ObjectLockEnabledForBucket: bool = None):
if ObjectLockEnabledForBucket is None:
object_lock = ""
elif ObjectLockEnabledForBucket:
object_lock = " --object-lock-enabled-for-bucket"
else:
object_lock = " --no-object-lock-enabled-for-bucket"
cmd = (
f"aws {self.common_flags} s3api create-bucket --bucket {Bucket} "
f"{object_lock} --endpoint {S3_GATE}"
)
_cmd_run(cmd, REGULAR_TIMEOUT)
def list_buckets(self) -> dict:
cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {S3_GATE}"
output = _cmd_run(cmd)
return self._to_json(output)
def get_bucket_versioning(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-versioning --bucket {Bucket} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def put_bucket_versioning(self, Bucket: str, VersioningConfiguration: dict) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-bucket-versioning --bucket {Bucket} "
f'--versioning-configuration Status={VersioningConfiguration.get("Status")} '
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_objects(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-objects --bucket {Bucket} " f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_objects_v2(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-objects-v2 --bucket {Bucket} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_object_versions(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-object-versions --bucket {Bucket} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def copy_object(
self,
Bucket: str,
CopySource: str,
Key: str,
ACL: Optional[str] = None,
MetadataDirective: Optional[str] = None,
Metadata: Optional[dict] = None,
TaggingDirective: Optional[str] = None,
Tagging: Optional[str] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3api copy-object --copy-source {CopySource} "
f"--bucket {Bucket} --key {Key} --endpoint {S3_GATE}"
)
if ACL:
cmd += f" --acl {ACL}"
if MetadataDirective:
cmd += f" --metadata-directive {MetadataDirective}"
if Metadata:
cmd += " --metadata "
for key, value in Metadata.items():
cmd += f" {key}={value}"
if TaggingDirective:
cmd += f" --tagging-directive {TaggingDirective}"
if Tagging:
cmd += f" --tagging {Tagging}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def head_bucket(self, Bucket: str) -> dict:
cmd = f"aws {self.common_flags} s3api head-bucket --bucket {Bucket} --endpoint {S3_GATE}"
output = _cmd_run(cmd)
return self._to_json(output)
def put_object(
self,
Body: str,
Bucket: str,
Key: str,
Metadata: Optional[dict] = None,
Tagging: Optional[str] = None,
ACL: Optional[str] = None,
ObjectLockMode: Optional[str] = None,
ObjectLockRetainUntilDate: Optional[datetime] = None,
ObjectLockLegalHoldStatus: Optional[str] = None,
GrantFullControl: Optional[str] = None,
GrantRead: Optional[str] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-object --bucket {Bucket} --key {Key} "
f"--body {Body} --endpoint {S3_GATE}"
)
if Metadata:
cmd += f" --metadata"
for key, value in Metadata.items():
cmd += f" {key}={value}"
if Tagging:
cmd += f" --tagging {Tagging}"
if ACL:
cmd += f" --acl {ACL}"
if ObjectLockMode:
cmd += f" --object-lock-mode {ObjectLockMode}"
if ObjectLockRetainUntilDate:
cmd += f' --object-lock-retain-until-date "{ObjectLockRetainUntilDate}"'
if ObjectLockLegalHoldStatus:
cmd += f" --object-lock-legal-hold-status {ObjectLockLegalHoldStatus}"
if GrantFullControl:
cmd += f" --grant-full-control {GrantFullControl}"
if GrantRead:
cmd += f" --grant-read {GrantRead}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def head_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api head-object --bucket {Bucket} --key {Key} "
f"{version} --endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def get_object(
self,
Bucket: str,
Key: str,
file_path: str,
VersionId: Optional[str] = None,
Range: Optional[str] = None,
) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api get-object --bucket {Bucket} --key {Key} "
f"{version} {file_path} --endpoint {S3_GATE}"
)
if Range:
cmd += f" --range {Range}"
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def get_object_acl(self, Bucket: str, Key: str, VersionId: Optional[str] = None) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api get-object-acl --bucket {Bucket} --key {Key} "
f"{version} --endpoint {S3_GATE}"
)
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def delete_objects(self, Bucket: str, Delete: dict) -> dict:
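# The aws CLI takes the delete specification as a JSON document referenced via file://,
# so the Delete dict is dumped to a temporary file under ASSETS_DIR first.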
file_path = f"{os.getcwd()}/{ASSETS_DIR}/delete.json"
with open(file_path, "w") as out_file:
out_file.write(json.dumps(Delete))
logger.info(f"Input file for delete-objects: {json.dumps(Delete)}")
cmd = (
f"aws {self.common_flags} s3api delete-objects --bucket {Bucket} "
f"--delete file://{file_path} --endpoint {S3_GATE}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def delete_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api delete-object --bucket {Bucket} "
f"--key {Key} {version} --endpoint {S3_GATE}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def get_object_attributes(
self,
bucket: str,
key: str,
*attributes: str,
version_id: str = None,
max_parts: int = None,
part_number: int = None,
) -> dict:
attrs = ",".join(attributes)
version = f" --version-id {version_id}" if version_id else ""
parts = f"--max-parts {max_parts}" if max_parts else ""
part_number = f"--part-number-marker {part_number}" if part_number else ""
cmd = (
f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} "
f"--key {key} {version} {parts} {part_number} --object-attributes {attrs} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def delete_bucket(self, Bucket: str) -> dict:
cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {Bucket} --endpoint {S3_GATE}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def get_bucket_tagging(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-tagging --bucket {Bucket} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def put_bucket_tagging(self, Bucket: str, Tagging: dict) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-bucket-tagging --bucket {Bucket} "
f"--tagging '{json.dumps(Tagging)}' --endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def delete_bucket_tagging(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {Bucket} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def put_object_tagging(self, Bucket: str, Key: str, Tagging: dict) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-object-tagging --bucket {Bucket} --key {Key} "
f"--tagging '{json.dumps(Tagging)}' --endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def get_object_tagging(self, Bucket: str, Key: str, VersionId: Optional[str] = None) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api get-object-tagging --bucket {Bucket} --key {Key} "
f"{version} --endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def delete_object_tagging(self, Bucket: str, Key: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api delete-object-tagging --bucket {Bucket} "
f"--key {Key} --endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
@allure.step("Sync directory S3")
def sync(
self,
bucket_name: str,
dir_path: str,
ACL: Optional[str] = None,
Metadata: Optional[dict] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket_name} "
f"--endpoint-url {S3_GATE}"
)
if Metadata:
cmd += f" --metadata"
for key, value in Metadata.items():
cmd += f" {key}={value}"
if ACL:
cmd += f" --acl {ACL}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
@allure.step("CP directory S3")
def cp(
self,
bucket_name: str,
dir_path: str,
ACL: Optional[str] = None,
Metadata: Optional[dict] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket_name} "
f"--endpoint-url {S3_GATE} --recursive"
)
if Metadata:
cmd += f" --metadata"
for key, value in Metadata.items():
cmd += f" {key}={value}"
if ACL:
cmd += f" --acl {ACL}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def create_multipart_upload(self, Bucket: str, Key: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api create-multipart-upload --bucket {Bucket} "
f"--key {Key} --endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_multipart_uploads(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-multipart-uploads --bucket {Bucket} "
f"--endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def abort_multipart_upload(self, Bucket: str, Key: str, UploadId: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api abort-multipart-upload --bucket {Bucket} "
f"--key {Key} --upload-id {UploadId} --endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def upload_part(self, UploadId: str, Bucket: str, Key: str, PartNumber: int, Body: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api upload-part --bucket {Bucket} --key {Key} "
f"--upload-id {UploadId} --part-number {PartNumber} --body {Body} "
f"--endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_parts(self, UploadId: str, Bucket: str, Key: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-parts --bucket {Bucket} --key {Key} "
f"--upload-id {UploadId} --endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def complete_multipart_upload(
self, Bucket: str, Key: str, UploadId: str, MultipartUpload: dict
) -> dict:
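# Same approach as delete_objects: the parts listing is written to a JSON file and
# passed to the CLI via file://.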
file_path = f"{os.getcwd()}/{ASSETS_DIR}/parts.json"
with open(file_path, "w") as out_file:
out_file.write(json.dumps(MultipartUpload))
logger.info(f"Input file for complete-multipart-upload: {json.dumps(MultipartUpload)}")
cmd = (
f"aws {self.common_flags} s3api complete-multipart-upload --bucket {Bucket} "
f"--key {Key} --upload-id {UploadId} --multipart-upload file://{file_path} "
f"--endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
@staticmethod
def _to_json(output: str) -> dict:
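# CLI output may contain warnings (e.g. about the self-signed certificate) before the
# JSON body, so fall back to parsing from the first '{' when a plain json.loads fails.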
json_output = {}
try:
json_output = json.loads(output)
except Exception:
if "{" not in output and "}" not in output:
logger.warning(f"Could not parse json from output {output}")
return json_output
json_output = json.loads(output[output.index("{") :])
return json_output

@@ -0,0 +1,38 @@
import os
import allure
import pytest
from python_keywords.container import list_containers
from steps import s3_gate_bucket
from steps.aws_cli_client import AwsCliClient
class TestS3GateBase:
s3_client = None
@pytest.fixture(scope='class', autouse=True)
@allure.title('[Class/Autouse]: Create S3 client')
def s3_client(self, prepare_wallet_and_deposit, request):
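# request.param is supplied by pytest_generate_tests in the test modules and selects the
# client implementation: 'aws cli' or 'boto3'.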
wallet = prepare_wallet_and_deposit
s3_bearer_rules_file = f"{os.getcwd()}/robot/resources/files/s3_bearer_rules.json"
cid, bucket, access_key_id, secret_access_key, owner_private_key = \
s3_gate_bucket.init_s3_credentials(
wallet, s3_bearer_rules_file=s3_bearer_rules_file)
containers_list = list_containers(wallet)
assert cid in containers_list, f'Expected cid {cid} in {containers_list}'
if request.param == 'aws cli':
try:
client = AwsCliClient(access_key_id, secret_access_key)
except Exception as err:
if 'command was not found or was not executable' in str(err):
pytest.skip('AWS CLI was not found')
else:
raise RuntimeError(
'Error on creating instance for AwsCliClient') from err
else:
client = s3_gate_bucket.config_s3_client(
access_key_id, secret_access_key)
TestS3GateBase.s3_client = client
TestS3GateBase.wallet = wallet

@@ -0,0 +1,227 @@
#!/usr/bin/python3
import json
import logging
import os
import re
import uuid
from enum import Enum
from time import sleep
from typing import Optional
import allure
import boto3
import urllib3
from botocore.exceptions import ClientError
from cli_helpers import _run_with_passwd, log_command_execution
from common import NEOFS_ENDPOINT, S3_GATE, S3_GATE_WALLET_PASS, S3_GATE_WALLET_PATH
from data_formatters import get_wallet_public_key
##########################################################
# Disabling warnings on self-signed certificate which the
# boto library produces on requests to S3-gate in dev-env.
urllib3.disable_warnings()
##########################################################
logger = logging.getLogger("NeoLogger")
CREDENTIALS_CREATE_TIMEOUT = "30s"
NEOFS_EXEC = os.getenv("NEOFS_EXEC", "neofs-authmate")
# Artificial delay that we add after object deletion and container creation
# Delay is added because sometimes immediately after deletion object still appears
# to be existing (probably because tombstone object takes some time to replicate)
# TODO: remove after https://github.com/nspcc-dev/neofs-s3-gw/issues/610 is fixed
S3_SYNC_WAIT_TIME = 5
class VersioningStatus(Enum):
ENABLED = "Enabled"
SUSPENDED = "Suspended"
@allure.step("Init S3 Credentials")
def init_s3_credentials(wallet_path, s3_bearer_rules_file: Optional[str] = None):
bucket = str(uuid.uuid4())
s3_bearer_rules = s3_bearer_rules_file or "robot/resources/files/s3_bearer_rules.json"
gate_public_key = get_wallet_public_key(S3_GATE_WALLET_PATH, S3_GATE_WALLET_PASS)
cmd = (
f"{NEOFS_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} "
f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} "
f"--peer {NEOFS_ENDPOINT} --container-friendly-name {bucket} "
f"--bearer-rules {s3_bearer_rules}"
)
logger.info(f"Executing command: {cmd}")
try:
output = _run_with_passwd(cmd)
logger.info(f"Command completed with output: {output}")
# output contains some debug info and then several JSON structures, so we find each
# JSON structure by curly brackets (naive approach, but works while JSON is not nested)
# and then we take JSON containing secret_access_key
json_blocks = re.findall(r"\{.*?\}", output, re.DOTALL)
for json_block in json_blocks:
try:
parsed_json_block = json.loads(json_block)
if "secret_access_key" in parsed_json_block:
return (
parsed_json_block["container_id"],
bucket,
parsed_json_block["access_key_id"],
parsed_json_block["secret_access_key"],
parsed_json_block["owner_private_key"],
)
except json.JSONDecodeError:
raise AssertionError(f"Could not parse info from output\n{output}")
raise AssertionError(f"Could not find AWS credentials in output:\n{output}")
except Exception as exc:
raise RuntimeError(f"Failed to init s3 credentials because of error\n{exc}") from exc
@allure.step("Config S3 client")
def config_s3_client(access_key_id: str, secret_access_key: str):
try:
session = boto3.session.Session()
s3_client = session.client(
service_name="s3",
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
endpoint_url=S3_GATE,
verify=False,
)
return s3_client
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Create bucket S3")
def create_bucket_s3(s3_client, object_lock_enabled_for_bucket: Optional[bool] = None):
bucket_name = str(uuid.uuid4())
try:
params = {"Bucket": bucket_name}
if object_lock_enabled_for_bucket is not None:
params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket})
s3_bucket = s3_client.create_bucket(**params)
log_command_execution(f"Created S3 bucket {bucket_name}", s3_bucket)
sleep(S3_SYNC_WAIT_TIME)
return bucket_name
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List buckets S3")
def list_buckets_s3(s3_client):
found_buckets = []
try:
response = s3_client.list_buckets()
log_command_execution("S3 List buckets result", response)
for bucket in response["Buckets"]:
found_buckets.append(bucket["Name"])
return found_buckets
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete bucket S3")
def delete_bucket_s3(s3_client, bucket: str):
try:
response = s3_client.delete_bucket(Bucket=bucket)
log_command_execution("S3 Delete bucket result", response)
sleep(S3_SYNC_WAIT_TIME)
return response
except ClientError as err:
log_command_execution("S3 Delete bucket error", str(err))
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Head bucket S3")
def head_bucket(s3_client, bucket: str):
try:
response = s3_client.head_bucket(Bucket=bucket)
log_command_execution("S3 Head bucket result", response)
return response
except ClientError as err:
log_command_execution("S3 Head bucket error", str(err))
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Set bucket versioning status")
def set_bucket_versioning(s3_client, bucket_name: str, status: VersioningStatus) -> None:
try:
response = s3_client.put_bucket_versioning(
Bucket=bucket_name, VersioningConfiguration={"Status": status.value}
)
log_command_execution("S3 Set bucket versioning to", response)
except ClientError as err:
raise Exception(f"Got error during set bucket versioning: {err}") from err
@allure.step("Get bucket versioning status")
def get_bucket_versioning_status(s3_client, bucket_name: str) -> str:
try:
response = s3_client.get_bucket_versioning(Bucket=bucket_name)
status = response.get("Status")
log_command_execution("S3 Got bucket versioning status", response)
return status
except ClientError as err:
raise Exception(f"Got error during get bucket versioning status: {err}") from err
@allure.step("Put bucket tagging")
def put_bucket_tagging(s3_client, bucket_name: str, tags: list):
try:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
response = s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging=tagging)
log_command_execution("S3 Put bucket tagging", response)
except ClientError as err:
raise Exception(f"Got error during put bucket tagging: {err}") from err
@allure.step("Get bucket tagging")
def get_bucket_tagging(s3_client, bucket_name: str) -> list:
try:
response = s3_client.get_bucket_tagging(Bucket=bucket_name)
log_command_execution("S3 Get bucket tagging", response)
return response.get("TagSet")
except ClientError as err:
raise Exception(f"Got error during get bucket tagging: {err}") from err
@allure.step("Delete bucket tagging")
def delete_bucket_tagging(s3_client, bucket_name: str) -> None:
try:
response = s3_client.delete_bucket_tagging(Bucket=bucket_name)
log_command_execution("S3 Delete bucket tagging", response)
except ClientError as err:
raise Exception(f"Got error during delete bucket tagging: {err}") from err

@@ -0,0 +1,467 @@
#!/usr/bin/python3.9
import logging
import os
import uuid
from enum import Enum
from time import sleep
from typing import Optional
import allure
import urllib3
from botocore.exceptions import ClientError
from cli_helpers import log_command_execution
from steps.aws_cli_client import AwsCliClient
from steps.s3_gate_bucket import S3_SYNC_WAIT_TIME
##########################################################
# Disabling warnings on self-signed certificate which the
# boto library produces on requests to S3-gate in dev-env.
urllib3.disable_warnings()
##########################################################
logger = logging.getLogger("NeoLogger")
CREDENTIALS_CREATE_TIMEOUT = "30s"
ACL_COPY = [
"private",
"public-read",
"public-read-write",
"authenticated-read",
"aws-exec-read",
"bucket-owner-read",
"bucket-owner-full-control",
]
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
@allure.step("List objects S3 v2")
def list_objects_s3_v2(s3_client, bucket: str, full_output: bool = False) -> list:
try:
response = s3_client.list_objects_v2(Bucket=bucket)
content = response.get("Contents", [])
log_command_execution("S3 v2 List objects result", response)
obj_list = []
for obj in content:
obj_list.append(obj["Key"])
logger.info(f"Found s3 objects: {obj_list}")
return response if full_output else obj_list
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List objects S3")
def list_objects_s3(s3_client, bucket: str, full_output: bool = False) -> list:
try:
response = s3_client.list_objects(Bucket=bucket)
content = response.get("Contents", [])
log_command_execution("S3 List objects result", response)
obj_list = []
for obj in content:
obj_list.append(obj["Key"])
logger.info(f"Found s3 objects: {obj_list}")
return response if full_output else obj_list
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List objects versions S3")
def list_objects_versions_s3(s3_client, bucket: str, full_output: bool = False) -> list:
try:
response = s3_client.list_object_versions(Bucket=bucket)
versions = response.get("Versions", [])
log_command_execution("S3 List objects versions result", response)
return response if full_output else versions
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Put object S3")
def put_object_s3(s3_client, bucket: str, filepath: str, **kwargs):
filename = os.path.basename(filepath)
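# AwsCliClient passes the file path straight to --body, while boto3 needs the raw bytes.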
if isinstance(s3_client, AwsCliClient):
file_content = filepath
else:
with open(filepath, "rb") as put_file:
file_content = put_file.read()
try:
params = {"Body": file_content, "Bucket": bucket, "Key": filename}
if kwargs:
params = {**params, **kwargs}
response = s3_client.put_object(**params)
log_command_execution("S3 Put object result", response)
return response.get("VersionId")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Head object S3")
def head_object_s3(s3_client, bucket: str, object_key: str, version_id: Optional[str] = None):
try:
params = {"Bucket": bucket, "Key": object_key}
if version_id:
params["VersionId"] = version_id
response = s3_client.head_object(**params)
log_command_execution("S3 Head object result", response)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete object S3")
def delete_object_s3(
s3_client, bucket: str, object_key: str, version_id: Optional[str] = None
) -> dict:
try:
params = {"Bucket": bucket, "Key": object_key}
if version_id:
params["VersionId"] = version_id
response = s3_client.delete_object(**params)
log_command_execution("S3 Delete object result", response)
sleep(S3_SYNC_WAIT_TIME)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete objects S3")
def delete_objects_s3(s3_client, bucket: str, object_keys: list):
try:
response = s3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(object_keys))
log_command_execution("S3 Delete objects result", response)
sleep(S3_SYNC_WAIT_TIME)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete object versions S3")
def delete_object_versions_s3(s3_client, bucket: str, object_versions: list):
try:
# Build deletion list in S3 format
delete_list = {
"Objects": [
{
"Key": object_version["Key"],
"VersionId": object_version["VersionId"],
}
for object_version in object_versions
]
}
response = s3_client.delete_objects(Bucket=bucket, Delete=delete_list)
log_command_execution("S3 Delete objects result", response)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Get object ACL")
def get_object_acl_s3(s3_client, bucket: str, object_key: str, version_id: Optional[str] = None):
params = {"Bucket": bucket, "Key": object_key}
try:
if version_id:
params.update({"VersionId": version_id})
response = s3_client.get_object_acl(**params)
log_command_execution("S3 ACL objects result", response)
return response.get("Grants")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Copy object S3")
def copy_object_s3(
s3_client, bucket: str, object_key: str, bucket_dst: Optional[str] = None, **kwargs
):
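# Use a fresh random name as the destination key so the copy never collides with the source object.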
filename = f"{os.getcwd()}/{uuid.uuid4()}"
try:
params = {
"Bucket": bucket_dst or bucket,
"CopySource": f"{bucket}/{object_key}",
"Key": filename,
}
if "ACL" in kwargs and kwargs["ACL"] in ACL_COPY:
params.update({"ACL": kwargs["ACL"]})
if "metadata_directive" in kwargs.keys():
params.update({"MetadataDirective": kwargs["metadata_directive"]})
if "metadata_directive" in kwargs.keys() and "metadata" in kwargs.keys():
params.update({"Metadata": kwargs["metadata"]})
if "tagging_directive" in kwargs.keys():
params.update({"TaggingDirective": kwargs["tagging_directive"]})
if "tagging_directive" in kwargs.keys() and "tagging" in kwargs.keys():
params.update({"Tagging": kwargs["tagging"]})
response = s3_client.copy_object(**params)
log_command_execution("S3 Copy objects result", response)
return filename
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Get object S3")
def get_object_s3(
s3_client,
bucket: str,
object_key: str,
version_id: Optional[str] = None,
range: Optional[list] = None,
full_output: bool = False,
):
filename = f"{ASSETS_DIR}/{uuid.uuid4()}"
try:
params = {"Bucket": bucket, "Key": object_key}
if version_id:
params["VersionId"] = version_id
if isinstance(s3_client, AwsCliClient):
params["file_path"] = filename
if range:
params["Range"] = f"bytes={range[0]}-{range[1]}"
response = s3_client.get_object(**params)
log_command_execution("S3 Get objects result", response)
if not isinstance(s3_client, AwsCliClient):
with open(f"{filename}", "wb") as get_file:
chunk = response["Body"].read(1024)
while chunk:
get_file.write(chunk)
chunk = response["Body"].read(1024)
return response if full_output else filename
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Create multipart upload S3")
def create_multipart_upload_s3(s3_client, bucket_name: str, object_key: str) -> str:
try:
response = s3_client.create_multipart_upload(Bucket=bucket_name, Key=object_key)
log_command_execution("S3 Created multipart upload", response)
assert response.get("UploadId"), f"Expected UploadId in response:\n{response}"
return response.get("UploadId")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List multipart uploads S3")
def list_multipart_uploads_s3(s3_client, bucket_name: str) -> Optional[list[dict]]:
try:
response = s3_client.list_multipart_uploads(Bucket=bucket_name)
log_command_execution("S3 List multipart upload", response)
return response.get("Uploads")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Abort multipart upload S3")
def abort_multipart_uploads_s3(s3_client, bucket_name: str, object_key: str, upload_id: str):
try:
response = s3_client.abort_multipart_upload(
Bucket=bucket_name, Key=object_key, UploadId=upload_id
)
log_command_execution("S3 Abort multipart upload", response)
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Upload part S3")
def upload_part_s3(
s3_client, bucket_name: str, object_key: str, upload_id: str, part_num: int, filepath: str
) -> str:
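# Same convention as put_object_s3: the CLI client takes a file path, boto3 takes raw bytes.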
if isinstance(s3_client, AwsCliClient):
file_content = filepath
else:
with open(filepath, "rb") as put_file:
file_content = put_file.read()
try:
response = s3_client.upload_part(
UploadId=upload_id,
Bucket=bucket_name,
Key=object_key,
PartNumber=part_num,
Body=file_content,
)
log_command_execution("S3 Upload part", response)
assert response.get("ETag"), f"Expected ETag in response:\n{response}"
return response.get("ETag")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List parts S3")
def list_parts_s3(s3_client, bucket_name: str, object_key: str, upload_id: str) -> list[dict]:
try:
response = s3_client.list_parts(UploadId=upload_id, Bucket=bucket_name, Key=object_key)
log_command_execution("S3 List part", response)
assert response.get("Parts"), f"Expected Parts in response:\n{response}"
return response.get("Parts")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Complete multipart upload S3")
def complete_multipart_upload_s3(
s3_client, bucket_name: str, object_key: str, upload_id: str, parts: list
):
try:
parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
response = s3_client.complete_multipart_upload(
Bucket=bucket_name, Key=object_key, UploadId=upload_id, MultipartUpload={"Parts": parts}
)
log_command_execution("S3 Complete multipart upload", response)
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Put object tagging")
def put_object_tagging(s3_client, bucket_name: str, object_key: str, tags: list):
try:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
s3_client.put_object_tagging(Bucket=bucket_name, Key=object_key, Tagging=tagging)
log_command_execution("S3 Put object tagging", str(tags))
except ClientError as err:
raise Exception(f"Got error during put object tagging: {err}") from err
@allure.step("Get object tagging")
def get_object_tagging(
s3_client, bucket_name: str, object_key: str, version_id: Optional[str] = None
) -> list:
try:
params = {"Bucket": bucket_name, "Key": object_key}
if version_id:
params.update({"VersionId": version_id})
response = s3_client.get_object_tagging(**params)
log_command_execution("S3 Get object tagging", response)
return response.get("TagSet")
except ClientError as err:
raise Exception(f"Got error during get object tagging: {err}") from err
@allure.step("Delete object tagging")
def delete_object_tagging(s3_client, bucket_name: str, object_key: str):
try:
response = s3_client.delete_object_tagging(Bucket=bucket_name, Key=object_key)
log_command_execution("S3 Delete object tagging", response)
except ClientError as err:
raise Exception(f"Got error during delete object tagging: {err}") from err
@allure.step("Get object attributes")
def get_object_attributes(
s3_client,
bucket_name: str,
object_key: str,
*attributes: str,
version_id: Optional[str] = None,
max_parts: Optional[int] = None,
part_number: Optional[int] = None,
get_full_resp: bool = True,
) -> dict:
try:
if not isinstance(s3_client, AwsCliClient):
logger.warning("Method get_object_attributes is not supported by boto3 client")
return {}
response = s3_client.get_object_attributes(
bucket_name,
object_key,
*attributes,
version_id=version_id,
max_parts=max_parts,
part_number=part_number,
)
log_command_execution("S3 Get object attributes", response)
for attr in attributes:
assert attr in response, f"Expected attribute {attr} in {response}"
if get_full_resp:
return response
else:
return response.get(attributes[0])
except ClientError as err:
raise Exception(f"Got error during get object attributes: {err}") from err
def _make_objs_dict(key_names):
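# Wrap plain key names into the structure delete_objects expects: {"Objects": [{"Key": <key>}, ...]}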
objs_list = []
for key in key_names:
obj_dict = {"Key": key}
objs_list.append(obj_dict)
objs_dict = {"Objects": objs_list}
return objs_dict

@@ -0,0 +1,586 @@
import logging
import os
from random import choice, choices, randrange
from time import sleep
import allure
import pytest
from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE
from epoch import tick_epoch
from python_keywords.container import list_containers
from python_keywords.utility_keywords import (
generate_file,
generate_file_and_file_hash,
get_file_hash,
)
from s3_helper import (
check_objects_in_bucket,
set_bucket_versioning,
try_to_get_objects_and_expect_error,
)
from steps import s3_gate_bucket, s3_gate_object
from steps.aws_cli_client import AwsCliClient
from steps.s3_gate_base import TestS3GateBase
from utility import create_file_with_content, get_file_content, split_file
logger = logging.getLogger("NeoLogger")
def pytest_generate_tests(metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True)
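# Indirect parametrization of the s3_client fixture: every test in this module runs twice,
# once against the AWS CLI client and once against boto3.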
@allure.link("https://github.com/nspcc-dev/neofs-s3-gw#neofs-s3-gateway", name="neofs-s3-gateway")
@pytest.mark.s3_gate
class TestS3Gate(TestS3GateBase):
@pytest.fixture
@allure.title("Create two buckets")
def create_buckets(self):
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
return bucket_1, bucket_2
@pytest.fixture
@allure.title("Create/delete bucket")
def bucket(self):
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
yield bucket
# Delete all objects from bucket
versioning_status = s3_gate_bucket.get_bucket_versioning_status(self.s3_client, bucket)
if versioning_status == s3_gate_bucket.VersioningStatus.ENABLED.value:
# From versioned bucket we should delete all versions of all objects
objects_versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
if objects_versions:
s3_gate_object.delete_object_versions_s3(self.s3_client, bucket, objects_versions)
else:
# From non-versioned bucket it's sufficient to delete objects by key
objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
if objects:
s3_gate_object.delete_objects_s3(self.s3_client, bucket, objects)
# Delete the bucket itself
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket)
@allure.title("Test S3 Bucket API")
def test_s3_buckets(self):
"""
Test base S3 Bucket API (Create/List/Head/Delete).
"""
file_path = generate_file()
file_name = self.object_key_from_file_path(file_path)
with allure.step("Create buckets"):
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
with allure.step("Check buckets are presented in the system"):
# We have an issue that sometimes bucket is not available in the list
# immediately after creation, so we take several attempts with sleep
# TODO: remove after https://github.com/nspcc-dev/neofs-s3-gw/issues/628 is fixed
buckets = []
for attempt in range(8):
with allure.step(f"Loading buckets list (attempt #{attempt})"):
buckets = s3_gate_bucket.list_buckets_s3(self.s3_client)
if bucket_1 in buckets and bucket_2 in buckets:
break # If both buckets are in the list, stop attempts
with allure.step(f"Buckets were not in the list, waiting before retry"):
sleep(s3_gate_bucket.S3_SYNC_WAIT_TIME)
assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
assert bucket_2 in buckets, f"Expected bucket {bucket_2} is in the list"
with allure.step("Bucket must be empty"):
for bucket in (bucket_1, bucket_2):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Check buckets are visible with S3 head command"):
s3_gate_bucket.head_bucket(self.s3_client, bucket_1)
s3_gate_bucket.head_bucket(self.s3_client, bucket_2)
with allure.step("Check we can put/list object with S3 commands"):
s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_path)
s3_gate_object.head_object_s3(self.s3_client, bucket_1, file_name)
bucket_objects = s3_gate_object.list_objects_s3(self.s3_client, bucket_1)
assert (
file_name in bucket_objects
), f"Expected file {file_name} in objects list {bucket_objects}"
with allure.step("Try to delete not empty bucket and get error"):
with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_1)
s3_gate_bucket.head_bucket(self.s3_client, bucket_1)
with allure.step(f"Delete empty bucket {bucket_2}"):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_2)
tick_epoch()
with allure.step(f"Check bucket {bucket_2} deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_gate_bucket.head_bucket(self.s3_client, bucket_2)
buckets = s3_gate_bucket.list_buckets_s3(self.s3_client)
assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
assert bucket_2 not in buckets, f"Expected bucket {bucket_2} is not in the list"
@allure.title("Test S3 Object API")
@pytest.mark.sanity
@pytest.mark.parametrize(
"file_type", ["simple", "large"], ids=["Simple object", "Large object"]
)
def test_s3_api_object(self, file_type):
"""
Test base S3 Object API (Put/Head/List) for simple and large objects.
"""
file_path = generate_file(SIMPLE_OBJ_SIZE if file_type == "simple" else COMPLEX_OBJ_SIZE)
file_name = self.object_key_from_file_path(file_path)
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
for bucket in (bucket_1, bucket_2):
with allure.step("Bucket must be empty"):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
s3_gate_object.head_object_s3(self.s3_client, bucket, file_name)
bucket_objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert (
file_name in bucket_objects
), f"Expected file {file_name} in objects list {bucket_objects}"
with allure.step("Check object's attributes"):
for attrs in (["ETag"], ["ObjectSize", "StorageClass"]):
s3_gate_object.get_object_attributes(self.s3_client, bucket, file_name, *attrs)
@allure.title("Test S3 Sync directory")
def test_s3_sync_dir(self, bucket):
"""
Test checks sync directory with AWS CLI utility.
"""
file_path_1 = f"{os.getcwd()}/{ASSETS_DIR}/test_sync/test_file_1"
file_path_2 = f"{os.getcwd()}/{ASSETS_DIR}/test_sync/test_file_2"
key_to_path = {"test_file_1": file_path_1, "test_file_2": file_path_2}
if not isinstance(self.s3_client, AwsCliClient):
pytest.skip("This test is not supported with boto3 client")
create_file_with_content(file_path=file_path_1)
create_file_with_content(file_path=file_path_2)
self.s3_client.sync(bucket_name=bucket, dir_path=os.path.dirname(file_path_1))
with allure.step("Check objects are synced"):
objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
with allure.step("Check these are the same objects"):
assert set(key_to_path.keys()) == set(
objects
), f"Expected all abjects saved. Got {objects}"
for obj_key in objects:
got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, obj_key)
assert get_file_hash(got_object) == get_file_hash(
key_to_path.get(obj_key)
), "Expected hashes are the same"
@allure.title("Test S3 Object versioning")
def test_s3_api_versioning(self, bucket):
"""
Test checks basic versioning functionality for S3 bucket.
"""
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = create_file_with_content(content=version_1_content)
obj_key = os.path.basename(file_name_simple)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
create_file_with_content(file_path=file_name_simple, content=version_2_content)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
with allure.step("Check bucket shows all versions"):
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
obj_versions = {
version.get("VersionId") for version in versions if version.get("Key") == obj_key
}
assert obj_versions == {
version_id_1,
version_id_2,
}, f"Expected object has versions: {version_id_1, version_id_2}"
with allure.step("Show information about particular version"):
for version_id in (version_id_1, version_id_2):
response = s3_gate_object.head_object_s3(
self.s3_client, bucket, obj_key, version_id=version_id
)
assert "LastModified" in response, "Expected LastModified field"
assert "ETag" in response, "Expected ETag field"
assert (
response.get("VersionId") == version_id
), f"Expected VersionId is {version_id}"
assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"
with allure.step("Check object's attributes"):
for version_id in (version_id_1, version_id_2):
got_attrs = s3_gate_object.get_object_attributes(
self.s3_client, bucket, obj_key, "ETag", version_id=version_id
)
if got_attrs:
assert (
got_attrs.get("VersionId") == version_id
), f"Expected VersionId is {version_id}"
with allure.step("Delete object and check it was deleted"):
response = s3_gate_object.delete_object_s3(self.s3_client, bucket, obj_key)
version_id_delete = response.get("VersionId")
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_gate_object.head_object_s3(self.s3_client, bucket, obj_key)
with allure.step("Get content for all versions and check it is correct"):
for version, content in (
(version_id_2, version_2_content),
(version_id_1, version_1_content),
):
file_name = s3_gate_object.get_object_s3(
self.s3_client, bucket, obj_key, version_id=version
)
got_content = get_file_content(file_name)
assert (
got_content == content
), f"Expected object content is\n{content}\nGot\n{got_content}"
with allure.step("Restore previous object version"):
s3_gate_object.delete_object_s3(
self.s3_client, bucket, obj_key, version_id=version_id_delete
)
file_name = s3_gate_object.get_object_s3(self.s3_client, bucket, obj_key)
got_content = get_file_content(file_name)
assert (
got_content == version_2_content
), f"Expected object content is\n{version_2_content}\nGot\n{got_content}"
@allure.title("Test S3 Object Multipart API")
def test_s3_api_multipart(self, bucket):
"""
Test checks S3 Multipart API (Create multipart upload/Abort multipart upload/List multipart upload/
Upload part/List parts/Complete multipart upload).
"""
parts_count = 3
file_name_large, _ = generate_file_and_file_hash(
SIMPLE_OBJ_SIZE * 1024 * 6 * parts_count
) # 5Mb - min part
# file_name_large, _ = generate_file_and_file_hash(SIMPLE_OBJ_SIZE * 1024 * 30 * parts_count) # 5Mb - min part
object_key = self.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
parts = []
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
assert not uploads, f"Expected no uploads in bucket {bucket}"
with allure.step("Create and abort multipart upload"):
upload_id = s3_gate_object.create_multipart_upload_s3(
self.s3_client, bucket, object_key
)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
assert uploads, f"Expected one upload in bucket {bucket}"
assert (
uploads[0].get("Key") == object_key
), f"Expected correct key {object_key} in upload {uploads}"
assert (
uploads[0].get("UploadId") == upload_id
), f"Expected correct UploadId {upload_id} in upload {uploads}"
s3_gate_object.abort_multipart_uploads_s3(self.s3_client, bucket, object_key, upload_id)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
assert not uploads, f"Expected no uploads in bucket {bucket}"
with allure.step("Create new multipart upload and upload several parts"):
upload_id = s3_gate_object.create_multipart_upload_s3(
self.s3_client, bucket, object_key
)
for part_id, file_path in enumerate(part_files, start=1):
etag = s3_gate_object.upload_part_s3(
self.s3_client, bucket, object_key, upload_id, part_id, file_path
)
parts.append((part_id, etag))
with allure.step("Check all parts are visible in bucket"):
got_parts = s3_gate_object.list_parts_s3(self.s3_client, bucket, object_key, upload_id)
assert len(got_parts) == len(
part_files
), f"Expected {parts_count} parts, got\n{got_parts}"
s3_gate_object.complete_multipart_upload_s3(
self.s3_client, bucket, object_key, upload_id, parts
)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
assert not uploads, f"Expected no uploads in bucket {bucket}"
with allure.step("Check we can get whole object from bucket"):
got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large)
self.check_object_attributes(bucket, object_key, parts_count)
@allure.title("Test S3 Bucket tagging API")
def test_s3_api_bucket_tagging(self, bucket):
"""
Test checks S3 Bucket tagging API (Put tag/Get tag).
"""
key_value_pair = [("some-key", "some-value"), ("some-key-2", "some-value-2")]
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, key_value_pair)
got_tags = s3_gate_bucket.get_bucket_tagging(self.s3_client, bucket)
with allure.step("Check all tags are presented"):
assert got_tags, f"Expected tags, got {got_tags}"
expected_tags = [{"Key": key, "Value": value} for key, value in key_value_pair]
for tag in expected_tags:
assert tag in got_tags
s3_gate_bucket.delete_bucket_tagging(self.s3_client, bucket)
tags = s3_gate_bucket.get_bucket_tagging(self.s3_client, bucket)
assert not tags, f"Expected no tags for bucket {bucket}, got {tags}"
@allure.title("Test S3 Object tagging API")
def test_s3_api_object_tagging(self, bucket):
"""
Test checks S3 Object tagging API (Put tag/Get tag/Update tag).
"""
key_value_pair_bucket = [("some-key", "some-value"), ("some-key-2", "some-value-2")]
key_value_pair_obj = [
("some-key-obj", "some-value-obj"),
("some-key--obj2", "some-value--obj2"),
]
key_value_pair_obj_new = [("some-key-obj-new", "some-value-obj-new")]
file_name_simple, _ = generate_file_and_file_hash(SIMPLE_OBJ_SIZE)
obj_key = self.object_key_from_file_path(file_name_simple)
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, key_value_pair_bucket)
s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
for tags in (key_value_pair_obj, key_value_pair_obj_new):
s3_gate_object.put_object_tagging(self.s3_client, bucket, obj_key, tags)
got_tags = s3_gate_object.get_object_tagging(self.s3_client, bucket, obj_key)
assert got_tags, f"Expected tags, got {got_tags}"
expected_tags = [{"Key": key, "Value": value} for key, value in tags]
for tag in expected_tags:
assert tag in got_tags
s3_gate_object.delete_object_tagging(self.s3_client, bucket, obj_key)
got_tags = s3_gate_object.get_object_tagging(self.s3_client, bucket, obj_key)
assert not got_tags, f"Expected no tags for object {obj_key}, got {got_tags}"
@allure.title("Test S3: Delete object & delete objects S3 API")
def test_s3_api_delete(self, create_buckets):
"""
Check delete_object and delete_objects S3 API operation. From first bucket some objects deleted one by one.
From second bucket some objects deleted all at once.
"""
max_obj_count = 20
max_delete_objects = 17
put_objects = []
file_paths = []
obj_sizes = [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE]
bucket_1, bucket_2 = create_buckets
with allure.step(f"Generate {max_obj_count} files"):
for _ in range(max_obj_count):
file_paths.append(generate_file_and_file_hash(choice(obj_sizes))[0])
for bucket in (bucket_1, bucket_2):
with allure.step(f"Bucket {bucket} must be empty as it just created"):
objects_list = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
for file_path in file_paths:
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
put_objects.append(self.object_key_from_file_path(file_path))
with allure.step(f"Check all objects put in bucket {bucket} successfully"):
bucket_objects = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket)
assert set(put_objects) == set(
bucket_objects
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
with allure.step("Delete some objects from bucket_1 one by one"):
objects_to_delete_b1 = choices(put_objects, k=max_delete_objects)
for obj in objects_to_delete_b1:
s3_gate_object.delete_object_s3(self.s3_client, bucket_1, obj)
with allure.step("Check deleted objects are not visible in bucket bucket_1"):
bucket_objects = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket_1)
assert set(put_objects).difference(set(objects_to_delete_b1)) == set(
bucket_objects
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
try_to_get_objects_and_expect_error(self.s3_client, bucket_1, objects_to_delete_b1)
with allure.step("Delete some objects from bucket_2 at once"):
objects_to_delete_b2 = choices(put_objects, k=max_delete_objects)
s3_gate_object.delete_objects_s3(self.s3_client, bucket_2, objects_to_delete_b2)
with allure.step("Check deleted objects are not visible in bucket bucket_2"):
objects_list = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket_2)
assert set(put_objects).difference(set(objects_to_delete_b2)) == set(
objects_list
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
try_to_get_objects_and_expect_error(self.s3_client, bucket_2, objects_to_delete_b2)
@allure.title("Test S3: Copy object to the same bucket")
def test_s3_copy_same_bucket(self):
"""
Test object can be copied to the same bucket.
#TODO: delete after test_s3_copy_object will be merge
"""
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_name_simple = self.object_key_from_file_path(file_path_simple)
file_name_large = self.object_key_from_file_path(file_path_large)
bucket_objects = [file_name_simple, file_name_large]
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
with allure.step("Bucket must be empty"):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put objects into bucket"):
for file_path in (file_path_simple, file_path_large):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
with allure.step("Copy one object into the same bucket"):
copy_obj_path = s3_gate_object.copy_object_s3(self.s3_client, bucket, file_name_simple)
bucket_objects.append(copy_obj_path)
check_objects_in_bucket(self.s3_client, bucket, bucket_objects)
with allure.step("Check copied object has the same content"):
got_copied_file = s3_gate_object.get_object_s3(self.s3_client, bucket, copy_obj_path)
assert get_file_hash(file_path_simple) == get_file_hash(
got_copied_file
), "Hashes must be the same"
with allure.step("Delete one object from bucket"):
s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name_simple)
bucket_objects.remove(file_name_simple)
check_objects_in_bucket(
self.s3_client, bucket, expected_objects=bucket_objects, unexpected_objects=[file_name_simple]
)
@allure.title("Test S3: Copy object to another bucket")
def test_s3_copy_to_another_bucket(self):
"""
Test object can be copied to another bucket.
#TODO: delete after test_s3_copy_object will be merge
"""
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_name_simple = self.object_key_from_file_path(file_path_simple)
file_name_large = self.object_key_from_file_path(file_path_large)
bucket_1_objects = [file_name_simple, file_name_large]
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
with allure.step("Buckets must be empty"):
for bucket in (bucket_1, bucket_2):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put objects into one bucket"):
for file_path in (file_path_simple, file_path_large):
s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_path)
with allure.step("Copy object from first bucket into second"):
copy_obj_path_b2 = s3_gate_object.copy_object_s3(
self.s3_client, bucket_1, file_name_large, bucket_dst=bucket_2
)
check_objects_in_bucket(self.s3_client, bucket_1, expected_objects=bucket_1_objects)
check_objects_in_bucket(self.s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step("Check copied object has the same content"):
got_copied_file_b2 = s3_gate_object.get_object_s3(
self.s3_client, bucket_2, copy_obj_path_b2
)
assert get_file_hash(file_path_large) == get_file_hash(
got_copied_file_b2
), "Hashes must be the same"
with allure.step("Delete one object from first bucket"):
s3_gate_object.delete_object_s3(self.s3_client, bucket_1, file_name_simple)
bucket_1_objects.remove(file_name_simple)
            check_objects_in_bucket(self.s3_client, bucket_1, expected_objects=bucket_1_objects)
            check_objects_in_bucket(self.s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step("Delete one object from second bucket and check it is empty"):
s3_gate_object.delete_object_s3(self.s3_client, bucket_2, copy_obj_path_b2)
            check_objects_in_bucket(self.s3_client, bucket_2, expected_objects=[])
def check_object_attributes(self, bucket: str, object_key: str, parts_count: int):
if not isinstance(self.s3_client, AwsCliClient):
logger.warning("Attributes check is not supported for boto3 implementation")
return
with allure.step("Check object's attributes"):
obj_parts = s3_gate_object.get_object_attributes(
self.s3_client, bucket, object_key, "ObjectParts", get_full_resp=False
)
assert (
obj_parts.get("TotalPartsCount") == parts_count
), f"Expected TotalPartsCount is {parts_count}"
assert (
len(obj_parts.get("Parts")) == parts_count
), f"Expected Parts cunt is {parts_count}"
with allure.step("Check object's attribute max-parts"):
max_parts = 2
obj_parts = s3_gate_object.get_object_attributes(
self.s3_client,
bucket,
object_key,
"ObjectParts",
max_parts=max_parts,
get_full_resp=False,
)
assert (
obj_parts.get("TotalPartsCount") == parts_count
), f"Expected TotalPartsCount is {parts_count}"
assert obj_parts.get("MaxParts") == max_parts, f"Expected MaxParts is {parts_count}"
assert (
len(obj_parts.get("Parts")) == max_parts
), f"Expected Parts count is {parts_count}"
with allure.step("Check object's attribute part-number-marker"):
part_number_marker = 3
obj_parts = s3_gate_object.get_object_attributes(
self.s3_client,
bucket,
object_key,
"ObjectParts",
part_number=part_number_marker,
get_full_resp=False,
)
assert (
obj_parts.get("TotalPartsCount") == parts_count
), f"Expected TotalPartsCount is {parts_count}"
assert (
obj_parts.get("PartNumberMarker") == part_number_marker
), f"Expected PartNumberMarker is {part_number_marker}"
assert len(obj_parts.get("Parts")) == 1, f"Expected Parts count is {parts_count}"
@staticmethod
def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path)

View file

@ -0,0 +1,914 @@
import os
import uuid
from datetime import datetime, timedelta
from random import sample
import allure
import pytest
from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, FREE_STORAGE, SIMPLE_OBJ_SIZE
from data_formatters import get_wallet_public_key
from python_keywords.container import list_containers
from python_keywords.payment_neogo import neofs_deposit, transfer_mainnet_gas
from python_keywords.utility_keywords import concat_files, generate_file, get_file_hash
from s3_helper import check_objects_in_bucket, set_bucket_versioning
from steps import s3_gate_bucket, s3_gate_object
from steps.aws_cli_client import AwsCliClient
from steps.s3_gate_base import TestS3GateBase
from utility import create_file_with_content
from wallet import init_wallet
def pytest_generate_tests(metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True)
@pytest.mark.s3_gate_object
class TestS3GateObject(TestS3GateBase):
@staticmethod
def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path)
@allure.title("Test S3: Copy object")
def test_s3_copy_object(self):
file_path = generate_file()
file_name = self.object_key_from_file_path(file_path)
bucket_1_objects = [file_name]
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket_1)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put object into one bucket"):
s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_path)
with allure.step("Copy one object into the same bucket"):
copy_obj_path = s3_gate_object.copy_object_s3(self.s3_client, bucket_1, file_name)
bucket_1_objects.append(copy_obj_path)
check_objects_in_bucket(self.s3_client, bucket_1, bucket_1_objects)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket_2)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Copy object from first bucket into second"):
copy_obj_path_b2 = s3_gate_object.copy_object_s3(
self.s3_client, bucket_1, file_name, bucket_dst=bucket_2
)
check_objects_in_bucket(self.s3_client, bucket_1, expected_objects=bucket_1_objects)
check_objects_in_bucket(self.s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step("Check copied object has the same content"):
got_copied_file_b2 = s3_gate_object.get_object_s3(
self.s3_client, bucket_2, copy_obj_path_b2
)
assert get_file_hash(file_path) == get_file_hash(
got_copied_file_b2
), "Hashes must be the same"
with allure.step("Delete one object from first bucket"):
s3_gate_object.delete_object_s3(self.s3_client, bucket_1, file_name)
bucket_1_objects.remove(file_name)
check_objects_in_bucket(self.s3_client, bucket_1, expected_objects=bucket_1_objects)
check_objects_in_bucket(self.s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step("Copy one object into the same bucket"):
with pytest.raises(Exception):
s3_gate_object.copy_object_s3(self.s3_client, bucket_1, file_name)
@allure.title("Test S3: Copy version of object")
def test_s3_copy_version_object(self):
version_1_content = "Version 1"
file_name_simple = create_file_with_content(content=version_1_content)
obj_key = os.path.basename(file_name_simple)
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket_1, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put object into bucket"):
s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_name_simple)
bucket_1_objects = [obj_key]
check_objects_in_bucket(self.s3_client, bucket_1, [obj_key])
with allure.step("Copy one object into the same bucket"):
copy_obj_path = s3_gate_object.copy_object_s3(self.s3_client, bucket_1, obj_key)
bucket_1_objects.append(copy_obj_path)
check_objects_in_bucket(self.s3_client, bucket_1, bucket_1_objects)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket_2, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Copy object from first bucket into second"):
copy_obj_path_b2 = s3_gate_object.copy_object_s3(
self.s3_client, bucket_1, obj_key, bucket_dst=bucket_2
)
check_objects_in_bucket(self.s3_client, bucket_1, expected_objects=bucket_1_objects)
check_objects_in_bucket(self.s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step("Delete one object from first bucket and check object in bucket"):
s3_gate_object.delete_object_s3(self.s3_client, bucket_1, obj_key)
bucket_1_objects.remove(obj_key)
check_objects_in_bucket(self.s3_client, bucket_1, expected_objects=bucket_1_objects)
with allure.step("Copy one object into the same bucket"):
with pytest.raises(Exception):
s3_gate_object.copy_object_s3(self.s3_client, bucket_1, obj_key)
@allure.title("Test S3: Checking copy with acl")
def test_s3_copy_acl(self):
version_1_content = "Version 1"
file_name_simple = create_file_with_content(content=version_1_content)
obj_key = os.path.basename(file_name_simple)
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket_1, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_name_simple)
check_objects_in_bucket(self.s3_client, bucket_1, [obj_key])
with allure.step("Copy object and check acl attribute"):
copy_obj_path = s3_gate_object.copy_object_s3(
self.s3_client, bucket_1, obj_key, ACL="public-read-write"
)
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket_1, copy_obj_path)
for control in obj_acl:
assert (
control.get("Permission") == "FULL_CONTROL"
), "Permission for all groups is FULL_CONTROL"
@allure.title("Test S3: Copy object with metadata")
    def test_s3_copy_metadata(self):
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
file_path = generate_file()
file_name = self.object_key_from_file_path(file_path)
bucket_1_objects = [file_name]
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket_1, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put object into bucket"):
s3_gate_object.put_object_s3(
self.s3_client, bucket_1, file_path, Metadata=object_metadata
)
bucket_1_objects = [file_name]
check_objects_in_bucket(self.s3_client, bucket_1, bucket_1_objects)
with allure.step("Copy one object"):
copy_obj_path = s3_gate_object.copy_object_s3(self.s3_client, bucket_1, file_name)
bucket_1_objects.append(copy_obj_path)
check_objects_in_bucket(self.s3_client, bucket_1, bucket_1_objects)
obj_head = s3_gate_object.head_object_s3(self.s3_client, bucket_1, copy_obj_path)
assert (
obj_head.get("Metadata") == object_metadata
), f"Metadata must be {object_metadata}"
with allure.step("Copy one object with metadata"):
copy_obj_path = s3_gate_object.copy_object_s3(
self.s3_client, bucket_1, file_name, metadata_directive="COPY"
)
bucket_1_objects.append(copy_obj_path)
obj_head = s3_gate_object.head_object_s3(self.s3_client, bucket_1, copy_obj_path)
assert (
obj_head.get("Metadata") == object_metadata
), f"Metadata must be {object_metadata}"
with allure.step("Copy one object with new metadata"):
object_metadata_1 = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
copy_obj_path = s3_gate_object.copy_object_s3(
self.s3_client,
bucket_1,
file_name,
metadata_directive="REPLACE",
metadata=object_metadata_1,
)
bucket_1_objects.append(copy_obj_path)
obj_head = s3_gate_object.head_object_s3(self.s3_client, bucket_1, copy_obj_path)
assert (
obj_head.get("Metadata") == object_metadata_1
), f"Metadata must be {object_metadata_1}"
@allure.title("Test S3: Copy object with tagging")
def test_s3_copy_tagging(self):
object_tagging = [(f"{uuid.uuid4()}", f"{uuid.uuid4()}")]
file_path = generate_file()
file_name_simple = self.object_key_from_file_path(file_path)
bucket_1_objects = [file_name_simple]
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket_1, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_path)
version_id_1 = s3_gate_object.put_object_tagging(
self.s3_client, bucket_1, file_name_simple, tags=object_tagging
)
bucket_1_objects = [file_name_simple]
check_objects_in_bucket(self.s3_client, bucket_1, bucket_1_objects)
with allure.step("Copy one object without tag"):
copy_obj_path = s3_gate_object.copy_object_s3(
self.s3_client, bucket_1, file_name_simple
)
got_tags = s3_gate_object.get_object_tagging(self.s3_client, bucket_1, copy_obj_path)
assert got_tags, f"Expected tags, got {got_tags}"
expected_tags = [{"Key": key, "Value": value} for key, value in object_tagging]
for tag in expected_tags:
assert tag in got_tags, f"Expected tag {tag} in {got_tags}"
with allure.step("Copy one object with tag"):
copy_obj_path_1 = s3_gate_object.copy_object_s3(
self.s3_client, bucket_1, file_name_simple, tagging_directive="COPY"
)
got_tags = s3_gate_object.get_object_tagging(self.s3_client, bucket_1, copy_obj_path_1)
assert got_tags, f"Expected tags, got {got_tags}"
expected_tags = [{"Key": key, "Value": value} for key, value in object_tagging]
for tag in expected_tags:
assert tag in got_tags, f"Expected tag {tag} in {got_tags}"
with allure.step("Copy one object with new tag"):
tag_key = "tag1"
tag_value = uuid.uuid4()
new_tag = f"{tag_key}={tag_value}"
copy_obj_path = s3_gate_object.copy_object_s3(
self.s3_client,
bucket_1,
file_name_simple,
tagging_directive="REPLACE",
tagging=new_tag,
)
got_tags = s3_gate_object.get_object_tagging(self.s3_client, bucket_1, copy_obj_path)
assert got_tags, f"Expected tags, got {got_tags}"
expected_tags = [{"Key": tag_key, "Value": str(tag_value)}]
for tag in expected_tags:
assert tag in got_tags, f"Expected tag {tag} in {got_tags}"
@allure.title("Test S3: Delete version of object")
def test_s3_delete_versioning(self):
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = create_file_with_content(content=version_1_content)
obj_key = os.path.basename(file_name_simple)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
file_name_1 = create_file_with_content(
file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
with allure.step("Check bucket shows all versions"):
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
obj_versions = {
version.get("VersionId") for version in versions if version.get("Key") == obj_key
}
assert obj_versions == {
version_id_1,
version_id_2,
}, f"Expected object has versions: {version_id_1, version_id_2}"
with allure.step("Delete 1 version of object"):
delete_obj = s3_gate_object.delete_object_s3(
self.s3_client, bucket, obj_key, version_id=version_id_1
)
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
obj_versions = {
version.get("VersionId") for version in versions if version.get("Key") == obj_key
}
assert obj_versions == {version_id_2}, f"Expected object has versions: {version_id_2}"
assert not "DeleteMarkers" in delete_obj.keys(), "Delete markes not found"
with allure.step("Delete second version of object"):
delete_obj = s3_gate_object.delete_object_s3(
self.s3_client, bucket, obj_key, version_id=version_id_2
)
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
obj_versions = {
version.get("VersionId") for version in versions if version.get("Key") == obj_key
}
assert not obj_versions, "Expected object not found"
assert not "DeleteMarkers" in delete_obj.keys(), "Delete markes not found"
with allure.step("Put new object into bucket"):
file_name_simple = generate_file(COMPLEX_OBJ_SIZE)
obj_key = os.path.basename(file_name_simple)
version_id = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
with allure.step("Delete last object"):
delete_obj = s3_gate_object.delete_object_s3(self.s3_client, bucket, obj_key)
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket, True)
assert versions.get("DeleteMarkers", None), f"Expected delete Marker"
assert "DeleteMarker" in delete_obj.keys(), f"Expected delete Marker"
@allure.title("Test S3: bulk delete version of object")
def test_s3_bulk_delete_versioning(self):
version_1_content = "Version 1"
version_2_content = "Version 2"
version_3_content = "Version 3"
version_4_content = "Version 4"
file_name_1 = create_file_with_content(content=version_1_content)
obj_key = os.path.basename(file_name_1)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
file_name_2 = create_file_with_content(file_path=file_name_1, content=version_2_content)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_2)
file_name_3 = create_file_with_content(file_path=file_name_1, content=version_3_content)
version_id_3 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_3)
file_name_4 = create_file_with_content(file_path=file_name_1, content=version_4_content)
version_id_4 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_4)
version_ids = {version_id_1, version_id_2, version_id_3, version_id_4}
with allure.step("Check bucket shows all versions"):
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
obj_versions = {
version.get("VersionId") for version in versions if version.get("Key") == obj_key
}
assert obj_versions == version_ids, f"Expected object has versions: {version_ids}"
with allure.step("Delete two objects from bucket one by one"):
version_to_delete_b1 = sample(
[version_id_1, version_id_2, version_id_3, version_id_4], k=2
)
version_to_save = list(set(version_ids) - set(version_to_delete_b1))
for ver in version_to_delete_b1:
s3_gate_object.delete_object_s3(self.s3_client, bucket, obj_key, ver)
with allure.step("Check bucket shows all versions"):
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
obj_versions = [
version.get("VersionId") for version in versions if version.get("Key") == obj_key
]
            assert sorted(obj_versions) == sorted(
                version_to_save
            ), f"Expected object has versions: {version_to_save}"
@allure.title("Test S3: Get versions of object")
def test_s3_get_versioning(self):
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = create_file_with_content(content=version_1_content)
obj_key = os.path.basename(file_name_simple)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
file_name_1 = create_file_with_content(
file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
with allure.step("Get first version of object"):
object_1 = s3_gate_object.get_object_s3(
self.s3_client, bucket, obj_key, version_id_1, full_output=True
)
assert (
object_1.get("VersionId") == version_id_1
), f"Get object with version {version_id_1}"
with allure.step("Get second version of object"):
object_2 = s3_gate_object.get_object_s3(
self.s3_client, bucket, obj_key, version_id_2, full_output=True
)
assert (
object_2.get("VersionId") == version_id_2
), f"Get object with version {version_id_2}"
with allure.step("Get object"):
object_3 = s3_gate_object.get_object_s3(
self.s3_client, bucket, obj_key, full_output=True
)
assert (
object_3.get("VersionId") == version_id_2
), f"Get object with version {version_id_2}"
@allure.title("Test S3: Get range")
def test_s3_get_range(self):
file_path = generate_file(COMPLEX_OBJ_SIZE)
file_name = self.object_key_from_file_path(file_path)
file_hash = get_file_hash(file_path)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
file_name_1 = create_file_with_content(file_path=file_path)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
with allure.step("Get first version of object"):
object_1_part_1 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
version_id_1,
range=[0, int(COMPLEX_OBJ_SIZE / 3)],
)
object_1_part_2 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
version_id_1,
range=[int(COMPLEX_OBJ_SIZE / 3) + 1, 2 * int(COMPLEX_OBJ_SIZE / 3)],
)
object_1_part_3 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
version_id_1,
range=[2 * int(COMPLEX_OBJ_SIZE / 3) + 1, COMPLEX_OBJ_SIZE],
)
con_file = concat_files([object_1_part_1, object_1_part_2, object_1_part_3])
assert get_file_hash(con_file) == file_hash, "Hashes must be the same"
with allure.step("Get second version of object"):
object_2_part_1 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name, version_id_2, range=[0, int(SIMPLE_OBJ_SIZE / 3)]
)
object_2_part_2 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
version_id_2,
range=[int(SIMPLE_OBJ_SIZE / 3) + 1, 2 * int(SIMPLE_OBJ_SIZE / 3)],
)
object_2_part_3 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
version_id_2,
range=[2 * int(SIMPLE_OBJ_SIZE / 3) + 1, COMPLEX_OBJ_SIZE],
)
con_file_1 = concat_files([object_2_part_1, object_2_part_2, object_2_part_3])
assert get_file_hash(con_file_1) == get_file_hash(
file_name_1
), "Hashes must be the same"
with allure.step("Get object"):
object_3_part_1 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name, range=[0, int(SIMPLE_OBJ_SIZE / 3)]
)
object_3_part_2 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
range=[int(SIMPLE_OBJ_SIZE / 3) + 1, 2 * int(SIMPLE_OBJ_SIZE / 3)],
)
object_3_part_3 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
range=[2 * int(SIMPLE_OBJ_SIZE / 3) + 1, COMPLEX_OBJ_SIZE],
)
con_file = concat_files([object_3_part_1, object_3_part_2, object_3_part_3])
assert get_file_hash(con_file) == get_file_hash(file_name_1), "Hashes must be the same"
@allure.title("Test S3: Copy object with metadata")
def test_s3_head_object(self):
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
file_path = generate_file(COMPLEX_OBJ_SIZE)
file_name = self.object_key_from_file_path(file_path)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(
self.s3_client, bucket, file_path, Metadata=object_metadata
)
file_name_1 = create_file_with_content(file_path=file_path)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
with allure.step("Get head of first version of object"):
response = s3_gate_object.head_object_s3(self.s3_client, bucket, file_name)
assert "LastModified" in response, "Expected LastModified field"
assert "ETag" in response, "Expected ETag field"
assert response.get("Metadata") == {}, "Expected Metadata empty"
assert (
response.get("VersionId") == version_id_2
), f"Expected VersionId is {version_id_2}"
assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"
with allure.step("Get head ob first version of object"):
response = s3_gate_object.head_object_s3(
self.s3_client, bucket, file_name, version_id=version_id_1
)
assert "LastModified" in response, "Expected LastModified field"
assert "ETag" in response, "Expected ETag field"
assert (
response.get("Metadata") == object_metadata
), f"Expected Metadata is {object_metadata}"
assert (
response.get("VersionId") == version_id_1
), f"Expected VersionId is {version_id_1}"
assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"
@allure.title("Test S3: list of object with versions")
@pytest.mark.parametrize("list_type", ["v1", "v2"])
def test_s3_list_object(self, list_type: str):
file_path_1 = generate_file(COMPLEX_OBJ_SIZE)
file_name = self.object_key_from_file_path(file_path_1)
file_path_2 = generate_file(COMPLEX_OBJ_SIZE)
file_name_2 = self.object_key_from_file_path(file_path_2)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path_1)
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path_2)
with allure.step("Get list of object"):
if list_type == "v1":
list_obj = s3_gate_object.list_objects_s3(self.s3_client, bucket)
elif list_type == "v2":
list_obj = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket)
assert len(list_obj) == 2, f"bucket have 2 objects"
assert (
list_obj.sort() == [file_name, file_name_2].sort()
), f"bucket have object key {file_name, file_name_2}"
with allure.step("Delete object"):
delete_obj = s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name)
if list_type == "v1":
list_obj_1 = s3_gate_object.list_objects_s3(
self.s3_client, bucket, full_output=True
)
elif list_type == "v2":
list_obj_1 = s3_gate_object.list_objects_s3_v2(
self.s3_client, bucket, full_output=True
)
contents = list_obj_1.get("Contents", [])
assert len(contents) == 1, f"bucket have only 1 object"
assert contents[0].get("Key") == file_name_2, f"bucket has object key {file_name_2}"
assert "DeleteMarker" in delete_obj.keys(), f"Expected delete Marker"
@allure.title("Test S3: put object")
def test_s3_put_object(self):
file_path_1 = generate_file(COMPLEX_OBJ_SIZE)
file_name = self.object_key_from_file_path(file_path_1)
object_1_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
tag_key_1 = "tag1"
tag_value_1 = uuid.uuid4()
tag_1 = f"{tag_key_1}={tag_value_1}"
object_2_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
tag_key_2 = "tag2"
tag_value_2 = uuid.uuid4()
tag_2 = f"{tag_key_2}={tag_value_2}"
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
        set_bucket_versioning(
            self.s3_client, bucket, s3_gate_bucket.VersioningStatus.SUSPENDED
        )
with allure.step("Put first object into bucket"):
s3_gate_object.put_object_s3(
self.s3_client, bucket, file_path_1, Metadata=object_1_metadata, Tagging=tag_1
)
obj_head = s3_gate_object.head_object_s3(self.s3_client, bucket, file_name)
assert obj_head.get("Metadata") == object_1_metadata, "Matadata must be the same"
got_tags = s3_gate_object.get_object_tagging(self.s3_client, bucket, file_name)
assert got_tags, f"Expected tags, got {got_tags}"
assert got_tags == [
{"Key": tag_key_1, "Value": str(tag_value_1)}
], "Tags must be the same"
with allure.step("Rewrite file into bucket"):
file_path_2 = create_file_with_content(file_path=file_path_1)
s3_gate_object.put_object_s3(
self.s3_client, bucket, file_path_2, Metadata=object_2_metadata, Tagging=tag_2
)
obj_head = s3_gate_object.head_object_s3(self.s3_client, bucket, file_name)
assert obj_head.get("Metadata") == object_2_metadata, "Matadata must be the same"
got_tags_1 = s3_gate_object.get_object_tagging(self.s3_client, bucket, file_name)
assert got_tags_1, f"Expected tags, got {got_tags_1}"
assert got_tags_1 == [
{"Key": tag_key_2, "Value": str(tag_value_2)}
], "Tags must be the same"
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
file_path_3 = generate_file(COMPLEX_OBJ_SIZE)
file_hash = get_file_hash(file_path_3)
file_name_3 = self.object_key_from_file_path(file_path_3)
object_3_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
tag_key_3 = "tag3"
tag_value_3 = uuid.uuid4()
tag_3 = f"{tag_key_3}={tag_value_3}"
with allure.step("Put third object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(
self.s3_client, bucket, file_path_3, Metadata=object_3_metadata, Tagging=tag_3
)
obj_head_3 = s3_gate_object.head_object_s3(self.s3_client, bucket, file_name_3)
assert obj_head_3.get("Metadata") == object_3_metadata, "Matadata must be the same"
got_tags_3 = s3_gate_object.get_object_tagging(self.s3_client, bucket, file_name_3)
assert got_tags_3, f"Expected tags, got {got_tags_3}"
assert got_tags_3 == [
{"Key": tag_key_3, "Value": str(tag_value_3)}
], "Tags must be the same"
with allure.step("Put new version of file into bucket"):
file_path_4 = create_file_with_content(file_path=file_path_3)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_path_4)
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
obj_versions = {
version.get("VersionId")
for version in versions
if version.get("Key") == file_name_3
}
assert obj_versions == {
version_id_1,
version_id_2,
}, f"Expected object has versions: {version_id_1, version_id_2}"
with allure.step("Get object"):
object_3 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name_3, full_output=True
)
assert (
object_3.get("VersionId") == version_id_2
), f"get object with version {version_id_2}"
object_3 = s3_gate_object.get_object_s3(self.s3_client, bucket, file_name_3)
assert get_file_hash(file_path_4) == get_file_hash(object_3), "Hashes must be the same"
with allure.step("Get first version of object"):
object_4 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name_3, version_id_1, full_output=True
)
assert (
object_4.get("VersionId") == version_id_1
), f"get object with version {version_id_1}"
object_4 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name_3, version_id_1
)
assert file_hash == get_file_hash(object_4), "Hashes must be the same"
obj_head_3 = s3_gate_object.head_object_s3(
self.s3_client, bucket, file_name_3, version_id_1
)
assert obj_head_3.get("Metadata") == object_3_metadata, "Matadata must be the same"
got_tags_3 = s3_gate_object.get_object_tagging(
self.s3_client, bucket, file_name_3, version_id_1
)
assert got_tags_3, f"Expected tags, got {got_tags_3}"
assert got_tags_3 == [
{"Key": tag_key_3, "Value": str(tag_value_3)}
], "Tags must be the same"
@pytest.fixture
def prepare_two_wallets(self, prepare_wallet_and_deposit):
self.main_wallet = prepare_wallet_and_deposit
self.main_public_key = get_wallet_public_key(self.main_wallet, "")
self.other_wallet, _, _ = init_wallet(ASSETS_DIR)
self.other_public_key = get_wallet_public_key(self.other_wallet, "")
if not FREE_STORAGE:
transfer_mainnet_gas(self.other_wallet, 31)
neofs_deposit(self.other_wallet, 30)
@allure.title("Test S3: put object with ACL")
@pytest.mark.parametrize("bucket_versioning", ["ENABLED", "SUSPENDED"])
def test_s3_put_object_acl(self, prepare_two_wallets, bucket_versioning):
file_path_1 = generate_file(COMPLEX_OBJ_SIZE)
file_name = self.object_key_from_file_path(file_path_1)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
if bucket_versioning == "ENABLED":
status = s3_gate_bucket.VersioningStatus.ENABLED
elif bucket_versioning == "SUSPENDED":
status = s3_gate_bucket.VersioningStatus.SUSPENDED
set_bucket_versioning(self.s3_client, bucket, status)
with allure.step("Put object with acl private"):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path_1, ACL="private")
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name)
obj_permission = [permission.get("Permission") for permission in obj_acl]
assert obj_permission == ["FULL_CONTROL"], "Permission for all groups is FULL_CONTROL"
object_1 = s3_gate_object.get_object_s3(self.s3_client, bucket, file_name)
assert get_file_hash(file_path_1) == get_file_hash(object_1), "Hashes must be the same"
with allure.step("Put object with acl public-read"):
file_path_2 = create_file_with_content(file_path=file_path_1)
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path_2, ACL="public-read")
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name)
obj_permission = [permission.get("Permission") for permission in obj_acl]
assert obj_permission == [
"FULL_CONTROL",
"FULL_CONTROL",
], "Permission for all groups is FULL_CONTROL"
object_2 = s3_gate_object.get_object_s3(self.s3_client, bucket, file_name)
assert get_file_hash(file_path_2) == get_file_hash(object_2), "Hashes must be the same"
with allure.step("Put object with acl public-read-write"):
file_path_3 = create_file_with_content(file_path=file_path_1)
s3_gate_object.put_object_s3(
self.s3_client, bucket, file_path_3, ACL="public-read-write"
)
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name)
obj_permission = [permission.get("Permission") for permission in obj_acl]
assert obj_permission == [
"FULL_CONTROL",
"FULL_CONTROL",
], "Permission for all groups is FULL_CONTROL"
object_3 = s3_gate_object.get_object_s3(self.s3_client, bucket, file_name)
assert get_file_hash(file_path_3) == get_file_hash(object_3), "Hashes must be the same"
with allure.step("Put object with acl authenticated-read"):
file_path_4 = create_file_with_content(file_path=file_path_1)
s3_gate_object.put_object_s3(
self.s3_client, bucket, file_path_4, ACL="authenticated-read"
)
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name)
obj_permission = [permission.get("Permission") for permission in obj_acl]
assert obj_permission == [
"FULL_CONTROL",
"FULL_CONTROL",
], "Permission for all groups is FULL_CONTROL"
object_4 = s3_gate_object.get_object_s3(self.s3_client, bucket, file_name)
assert get_file_hash(file_path_4) == get_file_hash(object_4), "Hashes must be the same"
file_path_5 = generate_file(COMPLEX_OBJ_SIZE)
file_hash = get_file_hash(file_path_5)
file_name_5 = self.object_key_from_file_path(file_path_5)
with allure.step("Put object with --grant-full-control id=mycanonicaluserid"):
file_path_6 = create_file_with_content(file_path=file_path_5)
s3_gate_object.put_object_s3(
self.s3_client,
bucket,
file_path_6,
GrantFullControl=f"'id={self.other_public_key}'",
)
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name_5)
obj_permission = [permission.get("Permission") for permission in obj_acl]
assert obj_permission == [
"FULL_CONTROL",
"FULL_CONTROL",
], "Permission for all groups is FULL_CONTROL"
object_4 = s3_gate_object.get_object_s3(self.s3_client, bucket, file_name_5)
assert get_file_hash(file_path_5) == get_file_hash(object_4), "Hashes must be the same"
with allure.step(
"Put object with --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"
):
file_path_7 = create_file_with_content(file_path=file_path_5)
s3_gate_object.put_object_s3(
self.s3_client,
bucket,
file_path_7,
GrantRead="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name_5)
obj_permission = [permission.get("Permission") for permission in obj_acl]
assert obj_permission == [
"FULL_CONTROL",
"FULL_CONTROL",
], "Permission for all groups is FULL_CONTROL"
object_7 = s3_gate_object.get_object_s3(self.s3_client, bucket, file_name_5)
assert get_file_hash(file_path_7) == get_file_hash(object_7), "Hashes must be the same"
@allure.title("Test S3: put object with lock-mode")
def test_s3_put_object_lock_mode(self):
file_path_1 = generate_file(COMPLEX_OBJ_SIZE)
file_name = self.object_key_from_file_path(file_path_1)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, True)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put object with lock-mode"):
with pytest.raises(
Exception,
match=r".*fetch time to epoch: time '0001-01-01T00:00:00Z' must be in the future.*",
):
# An error occurred (InternalError) when calling the PutObject operation (reached max retries: 2):
# fetch time to epoch: time '0001-01-01T00:00:00Z' must be in the future (after 2022-09-15T08:59:30Z)
s3_gate_object.put_object_s3(
self.s3_client, bucket, file_path_1, ObjectLockMode="COMPLIANCE"
)
with allure.step(
"Put object with lock-mode GOVERNANCE lock-retain-until-date +1day, lock-legal-hold-status"
):
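            # GOVERNANCE mode allows privileged users to shorten or remove the retention,
            # while COMPLIANCE (used below) cannot be relaxed until the retain-until date passes.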
date_obj = datetime.utcnow() + timedelta(days=1)
s3_gate_object.put_object_s3(
self.s3_client,
bucket,
file_path_1,
ObjectLockMode="GOVERNANCE",
ObjectLockRetainUntilDate=date_obj.strftime("%Y-%m-%dT%H:%M:%S"),
ObjectLockLegalHoldStatus="OFF",
)
object_4 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name, full_output=True
)
assert (
object_4.get("ObjectLockMode") == "GOVERNANCE"
), "Expected Object Lock Mode is GOVERNANCE"
assert str(date_obj.strftime("%Y-%m-%dT%H:%M:%S")) in object_4.get(
"ObjectLockRetainUntilDate"
), f'Expected Object Lock Retain Until Date is {str(date_obj.strftime("%Y-%m-%dT%H:%M:%S"))}'
assert (
object_4.get("ObjectLockLegalHoldStatus") == "OFF"
), "Expected Object Lock Legal Hold Status is OFF"
with allure.step(
"Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +3days]"
):
date_obj = datetime.utcnow() + timedelta(days=2)
file_name_1 = create_file_with_content(file_path=file_path_1)
s3_gate_object.put_object_s3(
self.s3_client,
bucket,
file_path_1,
ObjectLockMode="COMPLIANCE",
ObjectLockRetainUntilDate=date_obj,
)
object_4 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name, full_output=True
)
assert (
object_4.get("ObjectLockMode") == "GOVERNANCE"
), "Expected Object Lock Mode is GOVERNANCE"
assert str(date_obj.strftime("%Y-%m-%dT%H:%M:%S")) in object_4.get(
"ObjectLockRetainUntilDate"
), f'Expected Object Lock Retain Until Date is {str(date_obj.strftime("%Y-%m-%dT%H:%M:%S"))}'
assert (
object_4.get("ObjectLockLegalHoldStatus") == "OFF"
), "Expected Object Lock Legal Hold Status is OFF"
with allure.step(
"Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +2days]"
):
date_obj = datetime.utcnow() + timedelta(days=3)
file_name_1 = create_file_with_content(file_path=file_path_1)
s3_gate_object.put_object_s3(
self.s3_client,
bucket,
file_path_1,
ObjectLockMode="COMPLIANCE",
ObjectLockRetainUntilDate=date_obj,
ObjectLockLegalHoldStatus="ON",
)
object_4 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name, full_output=True
)
assert (
object_4.get("ObjectLockMode") == "GOVERNANCE"
), "Expected Object Lock Mode is GOVERNANCE"
assert str(date_obj.strftime("%Y-%m-%dT%H:%M:%S")) in object_4.get(
"ObjectLockRetainUntilDate"
), f'Expected Object Lock Retain Until Date is {str(date_obj.strftime("%Y-%m-%dT%H:%M:%S"))}'
assert (
object_4.get("ObjectLockLegalHoldStatus") == "ON"
), "Expected Object Lock Legal Hold Status is ON"
@allure.title("Test S3 Sync directory")
@pytest.mark.parametrize("sync_type", ["sync", "cp"])
def test_s3_sync_dir(self, sync_type):
file_path_1 = f"{os.getcwd()}/{ASSETS_DIR}/test_sync/test_file_1"
file_path_2 = f"{os.getcwd()}/{ASSETS_DIR}/test_sync/test_file_2"
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
key_to_path = {"test_file_1": file_path_1, "test_file_2": file_path_2}
if not isinstance(self.s3_client, AwsCliClient):
pytest.skip("This test is not supported with boto3 client")
create_file_with_content(file_path=file_path_1)
create_file_with_content(file_path=file_path_2)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
if sync_type == "sync":
self.s3_client.sync(
bucket_name=bucket,
dir_path=os.path.dirname(file_path_1),
ACL="public-read-write",
Metadata=object_metadata,
)
elif sync_type == "cp":
self.s3_client.cp(
bucket_name=bucket,
dir_path=os.path.dirname(file_path_1),
ACL="public-read-write",
Metadata=object_metadata,
)
with allure.step("Check objects are synced"):
objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert set(key_to_path.keys()) == set(
objects
), f"Expected all abjects saved. Got {objects}"
with allure.step("Check these are the same objects"):
for obj_key in objects:
got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, obj_key)
assert get_file_hash(got_object) == get_file_hash(
key_to_path.get(obj_key)
), "Expected hashes are the same"
obj_head = s3_gate_object.head_object_s3(self.s3_client, bucket, obj_key)
assert (
obj_head.get("Metadata") == object_metadata
), f"Metadata of object is {object_metadata}"
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, obj_key)
obj_permission = [permission.get("Permission") for permission in obj_acl]
assert obj_permission == [
"FULL_CONTROL",
"FULL_CONTROL",
], "Permission for all groups is FULL_CONTROL"

View file

@ -1,546 +0,0 @@
import logging
import os
from random import choice, choices
from time import sleep
import allure
import pytest
from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE
from epoch import tick_epoch
from python_keywords import s3_gate_bucket, s3_gate_object
from python_keywords.aws_cli_client import AwsCliClient
from python_keywords.container import list_containers
from python_keywords.utility_keywords import (generate_file, generate_file_and_file_hash,
get_file_hash)
from utility import create_file_with_content, get_file_content, split_file
logger = logging.getLogger('NeoLogger')
def pytest_generate_tests(metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", ['aws cli', 'boto3'], indirect=True)
@allure.link('https://github.com/nspcc-dev/neofs-s3-gw#neofs-s3-gateway', name='neofs-s3-gateway')
@pytest.mark.s3_gate
class TestS3Gate:
s3_client = None
@pytest.fixture(scope='class', autouse=True)
@allure.title('[Class/Autouse]: Create S3 client')
def s3_client(self, prepare_wallet_and_deposit, request):
wallet = prepare_wallet_and_deposit
s3_bearer_rules_file = f"{os.getcwd()}/robot/resources/files/s3_bearer_rules.json"
cid, bucket, access_key_id, secret_access_key, owner_private_key = \
s3_gate_bucket.init_s3_credentials(wallet, s3_bearer_rules_file=s3_bearer_rules_file)
containers_list = list_containers(wallet)
assert cid in containers_list, f'Expected cid {cid} in {containers_list}'
if request.param == 'aws cli':
try:
client = AwsCliClient(access_key_id, secret_access_key)
except Exception as err:
if 'command was not found or was not executable' in str(err):
pytest.skip('AWS CLI was not found')
else:
raise RuntimeError('Error on creating instance for AwsCliClient') from err
else:
client = s3_gate_bucket.config_s3_client(access_key_id, secret_access_key)
TestS3Gate.s3_client = client
@pytest.fixture
@allure.title('Create two buckets')
def create_buckets(self):
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
return bucket_1, bucket_2
@pytest.fixture
@allure.title('Create/delete bucket')
def bucket(self):
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
yield bucket
# Delete all objects from bucket
versioning_status = s3_gate_bucket.get_bucket_versioning_status(self.s3_client, bucket)
if versioning_status == s3_gate_bucket.VersioningStatus.ENABLED.value:
# From versioned bucket we should delete all versions of all objects
objects_versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
if objects_versions:
s3_gate_object.delete_object_versions_s3(self.s3_client, bucket, objects_versions)
else:
# From non-versioned bucket it's sufficient to delete objects by key
objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
if objects:
s3_gate_object.delete_objects_s3(self.s3_client, bucket, objects)
# Delete the bucket itself
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket)
@allure.title('Test S3 Bucket API')
def test_s3_buckets(self):
"""
Test base S3 Bucket API (Create/List/Head/Delete).
"""
file_path = generate_file()
file_name = self.object_key_from_file_path(file_path)
with allure.step('Create buckets'):
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
with allure.step('Check buckets are presented in the system'):
# We have an issue that sometimes bucket is not available in the list
# immediately after creation, so we take several attempts with sleep
# TODO: remove after https://github.com/nspcc-dev/neofs-s3-gw/issues/628 is fixed
buckets = []
for attempt in range(8):
with allure.step(f'Loading buckets list (attempt #{attempt})'):
buckets = s3_gate_bucket.list_buckets_s3(self.s3_client)
if bucket_1 in buckets and bucket_2 in buckets:
break # If both buckets are in the list, stop attempts
with allure.step(f'Buckets were not in the list, waiting before retry'):
sleep(s3_gate_bucket.S3_SYNC_WAIT_TIME)
assert bucket_1 in buckets, f'Expected bucket {bucket_1} is in the list'
assert bucket_2 in buckets, f'Expected bucket {bucket_2} is in the list'
with allure.step('Bucket must be empty'):
for bucket in (bucket_1, bucket_2):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f'Expected empty bucket, got {objects_list}'
with allure.step('Check buckets are visible with S3 head command'):
s3_gate_bucket.head_bucket(self.s3_client, bucket_1)
s3_gate_bucket.head_bucket(self.s3_client, bucket_2)
with allure.step('Check we can put/list object with S3 commands'):
s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_path)
s3_gate_object.head_object_s3(self.s3_client, bucket_1, file_name)
bucket_objects = s3_gate_object.list_objects_s3(self.s3_client, bucket_1)
assert file_name in bucket_objects, \
f'Expected file {file_name} in objects list {bucket_objects}'
with allure.step('Try to delete not empty bucket and get error'):
with pytest.raises(Exception, match=r'.*The bucket you tried to delete is not empty.*'):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_1)
s3_gate_bucket.head_bucket(self.s3_client, bucket_1)
with allure.step(f'Delete empty bucket {bucket_2}'):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_2)
tick_epoch()
with allure.step(f'Check bucket {bucket_2} deleted'):
with pytest.raises(Exception, match=r'.*Not Found.*'):
s3_gate_bucket.head_bucket(self.s3_client, bucket_2)
buckets = s3_gate_bucket.list_buckets_s3(self.s3_client)
assert bucket_1 in buckets, f'Expected bucket {bucket_1} is in the list'
assert bucket_2 not in buckets, f'Expected bucket {bucket_2} is not in the list'
@allure.title('Test S3 Object API')
@pytest.mark.sanity
@pytest.mark.parametrize('file_type', ['simple', 'large'], ids=['Simple object', 'Large object'])
def test_s3_api_object(self, file_type):
"""
Test base S3 Object API (Put/Head/List) for simple and large objects.
"""
file_path = generate_file(SIMPLE_OBJ_SIZE if file_type == 'simple' else COMPLEX_OBJ_SIZE)
file_name = self.object_key_from_file_path(file_path)
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
for bucket in (bucket_1, bucket_2):
with allure.step('Bucket must be empty'):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f'Expected empty bucket, got {objects_list}'
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
s3_gate_object.head_object_s3(self.s3_client, bucket, file_name)
bucket_objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert file_name in bucket_objects, \
f'Expected file {file_name} in objects list {bucket_objects}'
with allure.step("Check object's attributes"):
for attrs in (['ETag'], ['ObjectSize', 'StorageClass']):
s3_gate_object.get_object_attributes(self.s3_client, bucket, file_name, *attrs)
@allure.title('Test S3 Sync directory')
def test_s3_sync_dir(self, bucket):
"""
Test checks sync directory with AWS CLI utility.
"""
file_path_1 = f"{os.getcwd()}/{ASSETS_DIR}/test_sync/test_file_1"
file_path_2 = f"{os.getcwd()}/{ASSETS_DIR}/test_sync/test_file_2"
key_to_path = {'test_file_1': file_path_1, 'test_file_2': file_path_2}
if not isinstance(self.s3_client, AwsCliClient):
pytest.skip('This test is not supported with boto3 client')
create_file_with_content(file_path=file_path_1)
create_file_with_content(file_path=file_path_2)
self.s3_client.sync(bucket_name=bucket, dir_path=os.path.dirname(file_path_1))
with allure.step('Check objects are synced'):
objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
with allure.step('Check these are the same objects'):
assert set(key_to_path.keys()) == set(objects), f'Expected all abjects saved. Got {objects}'
for obj_key in objects:
got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, obj_key)
assert get_file_hash(got_object) == get_file_hash(key_to_path.get(obj_key)), \
'Expected hashes are the same'
@allure.title('Test S3 Object versioning')
def test_s3_api_versioning(self, bucket):
"""
Test checks basic versioning functionality for S3 bucket.
"""
version_1_content = 'Version 1'
version_2_content = 'Version 2'
file_name_simple = create_file_with_content(content=version_1_content)
obj_key = os.path.basename(file_name_simple)
with allure.step('Set versioning enable for bucket'):
s3_gate_bucket.get_bucket_versioning_status(self.s3_client, bucket)
s3_gate_bucket.set_bucket_versioning(self.s3_client, bucket, status=s3_gate_bucket.VersioningStatus.ENABLED)
status = s3_gate_bucket.get_bucket_versioning_status(self.s3_client, bucket)
assert status == s3_gate_bucket.VersioningStatus.ENABLED.value, f'Expected enabled status. Got {status}'
with allure.step('Put several versions of object into bucket'):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
create_file_with_content(file_path=file_name_simple, content=version_2_content)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
with allure.step('Check bucket shows all versions'):
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
obj_versions = {version.get('VersionId') for version in versions if version.get('Key') == obj_key}
assert obj_versions == {version_id_1, version_id_2}, \
f'Expected object has versions: {version_id_1, version_id_2}'
with allure.step('Show information about particular version'):
for version_id in (version_id_1, version_id_2):
response = s3_gate_object.head_object_s3(self.s3_client, bucket, obj_key, version_id=version_id)
assert 'LastModified' in response, 'Expected LastModified field'
assert 'ETag' in response, 'Expected ETag field'
assert response.get('VersionId') == version_id, f'Expected VersionId is {version_id}'
assert response.get('ContentLength') != 0, 'Expected ContentLength is not zero'
with allure.step("Check object's attributes"):
for version_id in (version_id_1, version_id_2):
got_attrs = s3_gate_object.get_object_attributes(self.s3_client, bucket, obj_key, 'ETag',
version_id=version_id)
if got_attrs:
assert got_attrs.get('VersionId') == version_id, f'Expected VersionId is {version_id}'
with allure.step('Delete object and check it was deleted'):
response = s3_gate_object.delete_object_s3(self.s3_client, bucket, obj_key)
version_id_delete = response.get('VersionId')
with pytest.raises(Exception, match=r'.*Not Found.*'):
s3_gate_object.head_object_s3(self.s3_client, bucket, obj_key)
with allure.step('Get content for all versions and check it is correct'):
for version, content in ((version_id_2, version_2_content), (version_id_1, version_1_content)):
file_name = s3_gate_object.get_object_s3(self.s3_client, bucket, obj_key, version_id=version)
got_content = get_file_content(file_name)
assert got_content == content, f'Expected object content is\n{content}\nGot\n{got_content}'
with allure.step('Restore previous object version'):
s3_gate_object.delete_object_s3(self.s3_client, bucket, obj_key, version_id=version_id_delete)
file_name = s3_gate_object.get_object_s3(self.s3_client, bucket, obj_key)
got_content = get_file_content(file_name)
assert got_content == version_2_content, \
f'Expected object content is\n{version_2_content}\nGot\n{got_content}'
@allure.title('Test S3 Object Multipart API')
def test_s3_api_multipart(self, bucket):
"""
Test checks S3 Multipart API (Create multipart upload/Abort multipart upload/List multipart upload/
Upload part/List parts/Complete multipart upload).
"""
parts_count = 3
file_name_large, _ = generate_file_and_file_hash(SIMPLE_OBJ_SIZE * 1024 * 6 * parts_count) # 5Mb - min part
# file_name_large, _ = generate_file_and_file_hash(SIMPLE_OBJ_SIZE * 1024 * 30 * parts_count) # 5Mb - min part
object_key = self.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
parts = []
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
assert not uploads, f'Expected there is no uploads in bucket {bucket}'
with allure.step('Create and abort multipart upload'):
upload_id = s3_gate_object.create_multipart_upload_s3(self.s3_client, bucket, object_key)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
assert uploads, f'Expected there one upload in bucket {bucket}'
assert uploads[0].get('Key') == object_key, f'Expected correct key {object_key} in upload {uploads}'
assert uploads[0].get('UploadId') == upload_id, f'Expected correct UploadId {upload_id} in upload {uploads}'
s3_gate_object.abort_multipart_uploads_s3(self.s3_client, bucket, object_key, upload_id)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
assert not uploads, f'Expected there is no uploads in bucket {bucket}'
with allure.step('Create new multipart upload and upload several parts'):
upload_id = s3_gate_object.create_multipart_upload_s3(self.s3_client, bucket, object_key)
for part_id, file_path in enumerate(part_files, start=1):
etag = s3_gate_object.upload_part_s3(self.s3_client, bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
with allure.step('Check all parts are visible in bucket'):
got_parts = s3_gate_object.list_parts_s3(self.s3_client, bucket, object_key, upload_id)
assert len(got_parts) == len(part_files), f'Expected {parts_count} parts, got\n{got_parts}'
s3_gate_object.complete_multipart_upload_s3(self.s3_client, bucket, object_key, upload_id, parts)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
assert not uploads, f'Expected there is no uploads in bucket {bucket}'
with allure.step('Check we can get whole object from bucket'):
got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large)
self.check_object_attributes(bucket, object_key, parts_count)
@allure.title('Test S3 Bucket tagging API')
def test_s3_api_bucket_tagging(self, bucket):
"""
Test checks S3 Bucket tagging API (Put tag/Get tag).
"""
key_value_pair = [('some-key', 'some-value'), ('some-key-2', 'some-value-2')]
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, key_value_pair)
got_tags = s3_gate_bucket.get_bucket_tagging(self.s3_client, bucket)
with allure.step('Check all tags are presented'):
assert got_tags, f'Expected tags, got {got_tags}'
expected_tags = [{'Key': key, 'Value': value} for key, value in key_value_pair]
for tag in expected_tags:
assert tag in got_tags
s3_gate_bucket.delete_bucket_tagging(self.s3_client, bucket)
tags = s3_gate_bucket.get_bucket_tagging(self.s3_client, bucket)
assert not tags, f'Expected there is no tags for bucket {bucket}, got {tags}'
@allure.title('Test S3 Object tagging API')
def test_s3_api_object_tagging(self, bucket):
"""
Test checks S3 Object tagging API (Put tag/Get tag/Update tag).
"""
key_value_pair_bucket = [('some-key', 'some-value'), ('some-key-2', 'some-value-2')]
key_value_pair_obj = [('some-key-obj', 'some-value-obj'), ('some-key--obj2', 'some-value--obj2')]
key_value_pair_obj_new = [('some-key-obj-new', 'some-value-obj-new')]
file_name_simple, _ = generate_file_and_file_hash(SIMPLE_OBJ_SIZE)
obj_key = self.object_key_from_file_path(file_name_simple)
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, key_value_pair_bucket)
s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
for tags in (key_value_pair_obj, key_value_pair_obj_new):
s3_gate_object.put_object_tagging(self.s3_client, bucket, obj_key, tags)
got_tags = s3_gate_object.get_object_tagging(self.s3_client, bucket, obj_key)
assert got_tags, f'Expected tags, got {got_tags}'
expected_tags = [{'Key': key, 'Value': value} for key, value in tags]
for tag in expected_tags:
assert tag in got_tags
s3_gate_object.delete_object_tagging(self.s3_client, bucket, obj_key)
got_tags = s3_gate_object.get_object_tagging(self.s3_client, bucket, obj_key)
assert not got_tags, f'Expected there is no tags for bucket {bucket}, got {got_tags}'
@allure.title('Test S3: Delete object & delete objects S3 API')
def test_s3_api_delete(self, create_buckets):
"""
Check delete_object and delete_objects S3 API operation. From first bucket some objects deleted one by one.
From second bucket some objects deleted all at once.
"""
max_obj_count = 20
max_delete_objects = 17
put_objects = []
file_paths = []
obj_sizes = [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE]
bucket_1, bucket_2 = create_buckets
with allure.step(f'Generate {max_obj_count} files'):
for _ in range(max_obj_count):
file_paths.append(generate_file_and_file_hash(choice(obj_sizes))[0])
for bucket in (bucket_1, bucket_2):
with allure.step(f'Bucket {bucket} must be empty as it just created'):
objects_list = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket)
assert not objects_list, f'Expected empty bucket, got {objects_list}'
for file_path in file_paths:
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
put_objects.append(self.object_key_from_file_path(file_path))
with allure.step(f'Check all objects put in bucket {bucket} successfully'):
bucket_objects = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket)
assert set(put_objects) == set(bucket_objects), \
f'Expected all objects {put_objects} in objects list {bucket_objects}'
with allure.step('Delete some objects from bucket_1 one by one'):
objects_to_delete_b1 = choices(put_objects, k=max_delete_objects)
for obj in objects_to_delete_b1:
s3_gate_object.delete_object_s3(self.s3_client, bucket_1, obj)
with allure.step('Check deleted objects are not visible in bucket bucket_1'):
bucket_objects = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket_1)
            assert set(put_objects).difference(set(objects_to_delete_b1)) == set(bucket_objects), \
                f'Expected bucket to contain only non-deleted objects, got {bucket_objects}'
self.try_to_get_object_and_got_error(bucket_1, objects_to_delete_b1)
with allure.step('Delete some objects from bucket_2 at once'):
objects_to_delete_b2 = choices(put_objects, k=max_delete_objects)
s3_gate_object.delete_objects_s3(self.s3_client, bucket_2, objects_to_delete_b2)
with allure.step('Check deleted objects are not visible in bucket bucket_2'):
objects_list = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket_2)
            assert set(put_objects).difference(set(objects_to_delete_b2)) == set(objects_list), \
                f'Expected bucket to contain only non-deleted objects, got {objects_list}'
self.try_to_get_object_and_got_error(bucket_2, objects_to_delete_b2)
@allure.title('Test S3: Copy object to the same bucket')
def test_s3_copy_same_bucket(self):
"""
Test object can be copied to the same bucket.
"""
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_name_simple = self.object_key_from_file_path(file_path_simple)
file_name_large = self.object_key_from_file_path(file_path_large)
bucket_objects = [file_name_simple, file_name_large]
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
with allure.step('Bucket must be empty'):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f'Expected empty bucket, got {objects_list}'
with allure.step('Put objects into bucket'):
for file_path in (file_path_simple, file_path_large):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
with allure.step('Copy one object into the same bucket'):
copy_obj_path = s3_gate_object.copy_object_s3(self.s3_client, bucket, file_name_simple)
bucket_objects.append(copy_obj_path)
self.check_objects_in_bucket(bucket, bucket_objects)
with allure.step('Check copied object has the same content'):
got_copied_file = s3_gate_object.get_object_s3(self.s3_client, bucket, copy_obj_path)
assert get_file_hash(file_path_simple) == get_file_hash(got_copied_file), 'Hashes must be the same'
with allure.step('Delete one object from bucket'):
s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name_simple)
bucket_objects.remove(file_name_simple)
self.check_objects_in_bucket(bucket, expected_objects=bucket_objects, unexpected_objects=[file_name_simple])
@allure.title('Test S3: Copy object to another bucket')
def test_s3_copy_to_another_bucket(self):
"""
Test object can be copied to another bucket.
"""
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_name_simple = self.object_key_from_file_path(file_path_simple)
file_name_large = self.object_key_from_file_path(file_path_large)
bucket_1_objects = [file_name_simple, file_name_large]
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
with allure.step('Buckets must be empty'):
for bucket in (bucket_1, bucket_2):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f'Expected empty bucket, got {objects_list}'
with allure.step('Put objects into one bucket'):
for file_path in (file_path_simple, file_path_large):
s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_path)
with allure.step('Copy object from first bucket into second'):
copy_obj_path_b2 = s3_gate_object.copy_object_s3(self.s3_client, bucket_1, file_name_large,
bucket_dst=bucket_2)
self.check_objects_in_bucket(bucket_1, expected_objects=bucket_1_objects)
self.check_objects_in_bucket(bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step('Check copied object has the same content'):
got_copied_file_b2 = s3_gate_object.get_object_s3(self.s3_client, bucket_2, copy_obj_path_b2)
assert get_file_hash(file_path_large) == get_file_hash(got_copied_file_b2), 'Hashes must be the same'
with allure.step('Delete one object from first bucket'):
s3_gate_object.delete_object_s3(self.s3_client, bucket_1, file_name_simple)
bucket_1_objects.remove(file_name_simple)
self.check_objects_in_bucket(bucket_1, expected_objects=bucket_1_objects)
self.check_objects_in_bucket(bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step('Delete one object from second bucket and check it is empty'):
s3_gate_object.delete_object_s3(self.s3_client, bucket_2, copy_obj_path_b2)
self.check_objects_in_bucket(bucket_2, expected_objects=[])
def check_object_attributes(self, bucket: str, object_key: str, parts_count: int):
if not isinstance(self.s3_client, AwsCliClient):
logger.warning('Attributes check is not supported for boto3 implementation')
return
with allure.step("Check object's attributes"):
obj_parts = s3_gate_object.get_object_attributes(self.s3_client, bucket, object_key, 'ObjectParts',
get_full_resp=False)
assert obj_parts.get('TotalPartsCount') == parts_count, f'Expected TotalPartsCount is {parts_count}'
            assert len(obj_parts.get('Parts')) == parts_count, f'Expected Parts count is {parts_count}'
with allure.step("Check object's attribute max-parts"):
max_parts = 2
obj_parts = s3_gate_object.get_object_attributes(self.s3_client, bucket, object_key, 'ObjectParts',
max_parts=max_parts, get_full_resp=False)
assert obj_parts.get('TotalPartsCount') == parts_count, f'Expected TotalPartsCount is {parts_count}'
            assert obj_parts.get('MaxParts') == max_parts, f'Expected MaxParts is {max_parts}'
            assert len(obj_parts.get('Parts')) == max_parts, f'Expected Parts count is {max_parts}'
with allure.step("Check object's attribute part-number-marker"):
part_number_marker = 3
obj_parts = s3_gate_object.get_object_attributes(self.s3_client, bucket, object_key, 'ObjectParts',
part_number=part_number_marker, get_full_resp=False)
assert obj_parts.get('TotalPartsCount') == parts_count, f'Expected TotalPartsCount is {parts_count}'
assert obj_parts.get(
'PartNumberMarker') == part_number_marker, f'Expected PartNumberMarker is {part_number_marker}'
            assert len(obj_parts.get('Parts')) == 1, 'Expected Parts count is 1'
@allure.step('Expected all objects are presented in the bucket')
def check_objects_in_bucket(self, bucket, expected_objects: list, unexpected_objects: list = None):
unexpected_objects = unexpected_objects or []
bucket_objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert len(bucket_objects) == len(expected_objects), f'Expected {len(expected_objects)} objects in the bucket'
for bucket_object in expected_objects:
assert bucket_object in bucket_objects, \
f'Expected object {bucket_object} in objects list {bucket_objects}'
for bucket_object in unexpected_objects:
assert bucket_object not in bucket_objects, \
f'Expected object {bucket_object} not in objects list {bucket_objects}'
    @allure.step('Try to get object and expect error')
def try_to_get_object_and_got_error(self, bucket: str, unexpected_objects: list):
for obj in unexpected_objects:
try:
s3_gate_object.get_object_s3(self.s3_client, bucket, obj)
raise AssertionError(f'Object {obj} found in bucket {bucket}')
except Exception as err:
assert 'The specified key does not exist' in str(err), f'Expected error in exception {err}'
@staticmethod
def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path)
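
For reference, two hedged sketches related to the tests above. First, the tagging payload shape that the put-bucket-tagging / put-object-tagging calls ultimately send: the TagSet layout is the standard AWS S3 one, and the helper below is purely illustrative.

def to_tagging_payload(key_value_pairs: list) -> dict:
    # [('some-key', 'some-value')] -> {'TagSet': [{'Key': 'some-key', 'Value': 'some-value'}]}
    return {'TagSet': [{'Key': key, 'Value': value} for key, value in key_value_pairs]}

Second, a minimal standalone sketch of the two deletion flows exercised by test_s3_api_delete (one-by-one vs. bulk). It assumes an already configured s3_client, two existing buckets and the same steps helpers used in the class; import paths are illustrative only.

import os

from steps import s3_gate_object


def delete_one_by_one_and_in_bulk(s3_client, bucket_1: str, bucket_2: str, file_paths: list):
    keys = [os.path.basename(path) for path in file_paths]

    # Put the same files into both buckets.
    for bucket in (bucket_1, bucket_2):
        for path in file_paths:
            s3_gate_object.put_object_s3(s3_client, bucket, path)

    # Bucket 1: delete objects one by one.
    for key in keys:
        s3_gate_object.delete_object_s3(s3_client, bucket_1, key)

    # Bucket 2: delete all objects with a single bulk call.
    s3_gate_object.delete_objects_s3(s3_client, bucket_2, keys)

    # Both buckets should now be empty.
    assert not s3_gate_object.list_objects_s3_v2(s3_client, bucket_1)
    assert not s3_gate_object.list_objects_s3_v2(s3_client, bucket_2)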

View file

@ -13,6 +13,30 @@
}
]
},
{
"operation":"HEAD",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
},
{
"operation":"DELETE",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
},
{
"operation":"SEARCH",
"action":"ALLOW",
@ -36,6 +60,30 @@
"keys":[] "keys":[]
} }
] ]
},
{
"operation":"GETRANGE",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
},
{
"operation":"GETRANGEHASH",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
}
]
}
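
The fragment above extends the eACL table so that HEAD, DELETE, GETRANGE and GETRANGEHASH are also allowed for the OTHERS role. A hedged sanity-check sketch for such a table, assuming the records sit under a top-level "records" key; the file name and expected set below are illustrative only.

import json


def allowed_operations_for_others(eacl_path: str) -> set:
    # Collect operations that are explicitly allowed for the OTHERS role.
    with open(eacl_path) as f:
        table = json.load(f)
    allowed = set()
    for record in table.get("records", []):
        if record.get("action") != "ALLOW":
            continue
        if any(target.get("role") == "OTHERS" for target in record.get("targets", [])):
            allowed.add(record.get("operation"))
    return allowed


expected = {"HEAD", "DELETE", "SEARCH", "GETRANGE", "GETRANGEHASH"}
assert expected <= allowed_operations_for_others("eacl_allow_all.json"), "Missing ALLOW rules for OTHERS"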

View file

@ -1,232 +0,0 @@
import json
import logging
import os
import allure
from cli_helpers import _cmd_run, _configure_aws_cli
from common import ASSETS_DIR, S3_GATE
logger = logging.getLogger('NeoLogger')
class AwsCliClient:
# Flags that we use for all S3 commands: disable SSL verification (as we use self-signed
# certificate in devenv) and disable automatic pagination in CLI output
common_flags = "--no-verify-ssl --no-paginate"
def __init__(self, access_key_id: str, secret_access_key: str):
self.access_key_id = access_key_id
self.secret_access_key = secret_access_key
self.config_aws_client()
def config_aws_client(self):
cmd = 'aws configure'
logger.info(f'Executing command: {cmd}')
_configure_aws_cli(cmd, self.access_key_id, self.secret_access_key)
def create_bucket(self, Bucket: str):
cmd = f'aws {self.common_flags} s3api create-bucket --bucket {Bucket} ' \
f'--endpoint {S3_GATE}'
_cmd_run(cmd, timeout=90)
def list_buckets(self) -> dict:
cmd = f'aws {self.common_flags} s3api list-buckets --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def get_bucket_versioning(self, Bucket: str) -> dict:
cmd = f'aws {self.common_flags} s3api get-bucket-versioning --bucket {Bucket} ' \
f'--endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def put_bucket_versioning(self, Bucket: str, VersioningConfiguration: dict) -> dict:
cmd = f'aws {self.common_flags} s3api put-bucket-versioning --bucket {Bucket} ' \
f'--versioning-configuration Status={VersioningConfiguration.get("Status")} ' \
f'--endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def list_objects(self, Bucket: str) -> dict:
cmd = f'aws {self.common_flags} s3api list-objects --bucket {Bucket} ' \
f'--endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def list_objects_v2(self, Bucket: str) -> dict:
cmd = f'aws {self.common_flags} s3api list-objects-v2 --bucket {Bucket} ' \
f'--endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def list_object_versions(self, Bucket: str) -> dict:
cmd = f'aws {self.common_flags} s3api list-object-versions --bucket {Bucket} ' \
f'--endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def copy_object(self, Bucket: str, CopySource: str, Key: str) -> dict:
cmd = f'aws {self.common_flags} s3api copy-object --copy-source {CopySource} ' \
f'--bucket {Bucket} --key {Key} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def head_bucket(self, Bucket: str) -> dict:
cmd = f'aws {self.common_flags} s3api head-bucket --bucket {Bucket} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def put_object(self, Body: str, Bucket: str, Key: str) -> dict:
cmd = f'aws {self.common_flags} s3api put-object --bucket {Bucket} --key {Key} ' \
f'--body {Body} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def head_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
version = f' --version-id {VersionId}' if VersionId else ''
cmd = f'aws {self.common_flags} s3api head-object --bucket {Bucket} --key {Key} ' \
f'{version} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def get_object(self, Bucket: str, Key: str, file_path: str, VersionId: str = None) -> dict:
version = f' --version-id {VersionId}' if VersionId else ''
cmd = f'aws {self.common_flags} s3api get-object --bucket {Bucket} --key {Key} ' \
f'{version} {file_path} --endpoint {S3_GATE}'
output = _cmd_run(cmd, timeout=90)
return self._to_json(output)
def delete_objects(self, Bucket: str, Delete: dict) -> dict:
file_path = f"{os.getcwd()}/{ASSETS_DIR}/delete.json"
with open(file_path, 'w') as out_file:
out_file.write(json.dumps(Delete))
logger.info(f"Input file for delete-objects: {json.dumps(Delete)}")
cmd = f'aws {self.common_flags} s3api delete-objects --bucket {Bucket} ' \
f'--delete file://{file_path} --endpoint {S3_GATE}'
output = _cmd_run(cmd, timeout=90)
return self._to_json(output)
def delete_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
version = f' --version-id {VersionId}' if VersionId else ''
cmd = f'aws {self.common_flags} s3api delete-object --bucket {Bucket} ' \
f'--key {Key} {version} --endpoint {S3_GATE}'
output = _cmd_run(cmd, timeout=90)
return self._to_json(output)
def get_object_attributes(self, bucket: str, key: str, *attributes: str, version_id: str = None,
max_parts: int = None, part_number: int = None) -> dict:
attrs = ','.join(attributes)
version = f' --version-id {version_id}' if version_id else ''
parts = f'--max-parts {max_parts}' if max_parts else ''
part_number = f'--part-number-marker {part_number}' if part_number else ''
cmd = f'aws {self.common_flags} s3api get-object-attributes --bucket {bucket} ' \
f'--key {key} {version} {parts} {part_number} --object-attributes {attrs} ' \
f'--endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def delete_bucket(self, Bucket: str) -> dict:
cmd = f'aws {self.common_flags} s3api delete-bucket --bucket {Bucket} --endpoint {S3_GATE}'
output = _cmd_run(cmd, timeout=90)
return self._to_json(output)
def get_bucket_tagging(self, Bucket: str) -> dict:
cmd = f'aws {self.common_flags} s3api get-bucket-tagging --bucket {Bucket} ' \
f'--endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def put_bucket_tagging(self, Bucket: str, Tagging: dict) -> dict:
cmd = f"aws {self.common_flags} s3api put-bucket-tagging --bucket {Bucket} " \
f"--tagging '{json.dumps(Tagging)}' --endpoint {S3_GATE}"
output = _cmd_run(cmd)
return self._to_json(output)
def delete_bucket_tagging(self, Bucket: str) -> dict:
cmd = f'aws {self.common_flags} s3api delete-bucket-tagging --bucket {Bucket} ' \
f'--endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def put_object_tagging(self, Bucket: str, Key: str, Tagging: dict) -> dict:
cmd = f"aws {self.common_flags} s3api put-object-tagging --bucket {Bucket} --key {Key} " \
f"--tagging '{json.dumps(Tagging)}' --endpoint {S3_GATE}"
output = _cmd_run(cmd)
return self._to_json(output)
def get_object_tagging(self, Bucket: str, Key: str) -> dict:
cmd = f'aws {self.common_flags} s3api get-object-tagging --bucket {Bucket} --key {Key} ' \
f'--endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def delete_object_tagging(self, Bucket: str, Key: str) -> dict:
cmd = f'aws {self.common_flags} s3api delete-object-tagging --bucket {Bucket} ' \
f'--key {Key} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
@allure.step('Sync directory S3')
def sync(self, bucket_name: str, dir_path: str) -> dict:
cmd = f'aws {self.common_flags} s3 sync {dir_path} s3://{bucket_name} ' \
f'--endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def create_multipart_upload(self, Bucket: str, Key: str) -> dict:
cmd = f'aws {self.common_flags} s3api create-multipart-upload --bucket {Bucket} ' \
f'--key {Key} --endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def list_multipart_uploads(self, Bucket: str) -> dict:
cmd = f'aws {self.common_flags} s3api list-multipart-uploads --bucket {Bucket} ' \
f'--endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def abort_multipart_upload(self, Bucket: str, Key: str, UploadId: str) -> dict:
cmd = f'aws {self.common_flags} s3api abort-multipart-upload --bucket {Bucket} ' \
f'--key {Key} --upload-id {UploadId} --endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def upload_part(self, UploadId: str, Bucket: str, Key: str, PartNumber: int, Body: str) -> dict:
cmd = f'aws {self.common_flags} s3api upload-part --bucket {Bucket} --key {Key} ' \
f'--upload-id {UploadId} --part-number {PartNumber} --body {Body} ' \
f'--endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def list_parts(self, UploadId: str, Bucket: str, Key: str) -> dict:
cmd = f'aws {self.common_flags} s3api list-parts --bucket {Bucket} --key {Key} ' \
f'--upload-id {UploadId} --endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def complete_multipart_upload(self, Bucket: str, Key: str, UploadId: str,
MultipartUpload: dict) -> dict:
file_path = f"{os.getcwd()}/{ASSETS_DIR}/parts.json"
with open(file_path, 'w') as out_file:
out_file.write(json.dumps(MultipartUpload))
logger.info(f"Input file for complete-multipart-upload: {json.dumps(MultipartUpload)}")
cmd = f'aws {self.common_flags} s3api complete-multipart-upload --bucket {Bucket} ' \
f'--key {Key} --upload-id {UploadId} --multipart-upload file://{file_path} ' \
f'--endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
@staticmethod
def _to_json(output: str) -> dict:
json_output = {}
try:
json_output = json.loads(output)
except Exception:
if '{' not in output and '}' not in output:
logger.warning(f'Could not parse json from output {output}')
return json_output
json_output = json.loads(output[output.index('{'):])
return json_output

View file

@ -1,12 +1,11 @@
#!/usr/bin/python3.8
import allure
import hashlib
import logging
import os
import tarfile
import uuid
from typing import Tuple
from typing import Optional, Tuple
import allure
import docker
@ -15,7 +14,6 @@ from cli_helpers import _cmd_run
from common import ASSETS_DIR, SIMPLE_OBJ_SIZE
logger = logging.getLogger("NeoLogger")
ROBOT_AUTO_KEYWORDS = False
def generate_file(size: int = SIMPLE_OBJ_SIZE) -> str:
@ -52,7 +50,7 @@ def generate_file_and_file_hash(size: int) -> Tuple[str, str]:
@allure.step("Get File Hash")
def get_file_hash(filename: str, len: int = None):
def get_file_hash(filename: str, len: Optional[int] = None):
    """
    This function generates hash for the specified file.
    Args:
@ -108,7 +106,7 @@ def make_up(services: list = [], config_dict: dict = {}):
            cmd = f"make up/{service}"
            _cmd_run(cmd)
    else:
        cmd = f"make up/basic;" f"make update.max_object_size val={SIMPLE_OBJ_SIZE}"
        cmd = f"make up/basic; make update.max_object_size val={SIMPLE_OBJ_SIZE}"
        _cmd_run(cmd, timeout=120)
    os.chdir(test_path)
@ -131,3 +129,22 @@ def make_down(services: list = []):
        _cmd_run(cmd, timeout=60)
    os.chdir(test_path)
@allure.step("Concatenation set of files to one file")
def concat_files(list_of_parts: list, new_file_name: Optional[str] = None) -> str:
"""
Concatenates a set of files into a single file.
Args:
        list_of_parts (list): list of files to concatenate
        new_file_name (str): file name for the generated file
Returns:
(str): the path to the generated file
"""
if not new_file_name:
new_file_name = f"{os.getcwd()}/{ASSETS_DIR}/{str(uuid.uuid4())}"
with open(new_file_name, "wb") as f:
for file in list_of_parts:
with open(file, "rb") as part_file:
f.write(part_file.read())
return new_file_name
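
A short usage sketch for concat_files, assuming it runs alongside the module's generate_file_and_file_hash helper (part count and size below are arbitrary):

parts = [generate_file_and_file_hash(1024)[0] for _ in range(3)]
whole = concat_files(parts)

# The concatenated file should contain the parts' bytes in their original order.
expected = b""
for part in parts:
    with open(part, "rb") as part_file:
        expected += part_file.read()
with open(whole, "rb") as result_file:
    assert result_file.read() == expected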