forked from TrueCloudLab/frostfs-testcases
[#312] Add new test for S3
Signed-off-by: Yulia Kovshova <y.kovshova@yadro.com>
This commit is contained in:
parent
b385c2466c
commit
ee2ed667c6
12 changed files with 2754 additions and 784 deletions
|
@ -1,232 +0,0 @@
|
|||
import json
|
||||
import logging
|
||||
import os
|
||||
|
||||
import allure
|
||||
from cli_helpers import _cmd_run, _configure_aws_cli
|
||||
from common import ASSETS_DIR, S3_GATE
|
||||
|
||||
# Module-level logger shared by AwsCliClient; 'NeoLogger' matches the name
# used across the rest of the test framework.
logger = logging.getLogger('NeoLogger')
|
||||
|
||||
|
||||
class AwsCliClient:
    """S3 client that shells out to the AWS CLI instead of using boto3.

    Method and parameter names deliberately use boto3-style CamelCase so this
    client can be used interchangeably with a boto3-based client in tests.
    Every call returns the CLI's JSON response parsed into a dict (an empty
    dict when the command printed no JSON).
    """

    # Flags that we use for all S3 commands: disable SSL verification (as we use self-signed
    # certificate in devenv) and disable automatic pagination in CLI output
    common_flags = "--no-verify-ssl --no-paginate"

    def __init__(self, access_key_id: str, secret_access_key: str):
        self.access_key_id = access_key_id
        self.secret_access_key = secret_access_key
        # Persist the credentials via `aws configure` so every subsequent
        # CLI invocation is already authenticated.
        self.config_aws_client()

    def config_aws_client(self):
        """Feed the stored credentials to the interactive `aws configure` prompt."""
        cmd = 'aws configure'
        logger.info(f'Executing command: {cmd}')
        _configure_aws_cli(cmd, self.access_key_id, self.secret_access_key)

    def create_bucket(self, Bucket: str):
        """Create a bucket; the CLI output is discarded."""
        cmd = f'aws {self.common_flags} s3api create-bucket --bucket {Bucket} ' \
              f'--endpoint {S3_GATE}'
        _cmd_run(cmd, timeout=90)

    def list_buckets(self) -> dict:
        """List all buckets owned by the configured credentials."""
        cmd = f'aws {self.common_flags} s3api list-buckets --endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def get_bucket_versioning(self, Bucket: str) -> dict:
        """Return the versioning configuration of the bucket."""
        cmd = f'aws {self.common_flags} s3api get-bucket-versioning --bucket {Bucket} ' \
              f'--endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def put_bucket_versioning(self, Bucket: str, VersioningConfiguration: dict) -> dict:
        """Set bucket versioning; only the 'Status' key of the configuration is used."""
        cmd = f'aws {self.common_flags} s3api put-bucket-versioning --bucket {Bucket} ' \
              f'--versioning-configuration Status={VersioningConfiguration.get("Status")} ' \
              f'--endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def list_objects(self, Bucket: str) -> dict:
        """List objects in the bucket (ListObjects V1)."""
        cmd = f'aws {self.common_flags} s3api list-objects --bucket {Bucket} ' \
              f'--endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def list_objects_v2(self, Bucket: str) -> dict:
        """List objects in the bucket (ListObjects V2)."""
        cmd = f'aws {self.common_flags} s3api list-objects-v2 --bucket {Bucket} ' \
              f'--endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def list_object_versions(self, Bucket: str) -> dict:
        """List all object versions and delete markers in the bucket."""
        cmd = f'aws {self.common_flags} s3api list-object-versions --bucket {Bucket} ' \
              f'--endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def copy_object(self, Bucket: str, CopySource: str, Key: str) -> dict:
        """Server-side copy of CopySource ('bucket/key') into Bucket under Key."""
        cmd = f'aws {self.common_flags} s3api copy-object --copy-source {CopySource} ' \
              f'--bucket {Bucket} --key {Key} --endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def head_bucket(self, Bucket: str) -> dict:
        """Check bucket existence/access; raises via _cmd_run on a non-zero exit."""
        cmd = f'aws {self.common_flags} s3api head-bucket --bucket {Bucket} --endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def put_object(self, Body: str, Bucket: str, Key: str) -> dict:
        """Upload the file at local path Body into the bucket under Key."""
        cmd = f'aws {self.common_flags} s3api put-object --bucket {Bucket} --key {Key} ' \
              f'--body {Body} --endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def head_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
        """Return object metadata; a specific version when VersionId is given."""
        version = f' --version-id {VersionId}' if VersionId else ''
        cmd = f'aws {self.common_flags} s3api head-object --bucket {Bucket} --key {Key} ' \
              f'{version} --endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def get_object(self, Bucket: str, Key: str, file_path: str, VersionId: str = None) -> dict:
        """Download the object (optionally a specific version) to file_path."""
        version = f' --version-id {VersionId}' if VersionId else ''
        cmd = f'aws {self.common_flags} s3api get-object --bucket {Bucket} --key {Key} ' \
              f'{version} {file_path} --endpoint {S3_GATE}'
        output = _cmd_run(cmd, timeout=90)
        return self._to_json(output)

    def delete_objects(self, Bucket: str, Delete: dict) -> dict:
        """Batch-delete objects.

        The Delete structure is written to a temp JSON file under ASSETS_DIR
        because the CLI takes it via a file:// reference.
        """
        file_path = f"{os.getcwd()}/{ASSETS_DIR}/delete.json"
        with open(file_path, 'w') as out_file:
            out_file.write(json.dumps(Delete))
        logger.info(f"Input file for delete-objects: {json.dumps(Delete)}")

        cmd = f'aws {self.common_flags} s3api delete-objects --bucket {Bucket} ' \
              f'--delete file://{file_path} --endpoint {S3_GATE}'
        output = _cmd_run(cmd, timeout=90)
        return self._to_json(output)

    def delete_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
        """Delete one object (or one version of it when VersionId is given)."""
        version = f' --version-id {VersionId}' if VersionId else ''
        cmd = f'aws {self.common_flags} s3api delete-object --bucket {Bucket} ' \
              f'--key {Key} {version} --endpoint {S3_GATE}'
        output = _cmd_run(cmd, timeout=90)
        return self._to_json(output)

    def get_object_attributes(self, bucket: str, key: str, *attributes: str, version_id: str = None,
                              max_parts: int = None, part_number: int = None) -> dict:
        """Retrieve the requested object attributes (e.g. ETag, ObjectParts)."""
        attrs = ','.join(attributes)
        version = f' --version-id {version_id}' if version_id else ''
        parts = f'--max-parts {max_parts}' if max_parts else ''
        # Fix: build the flag into a separate name instead of rebinding the
        # `part_number` parameter (the original shadowed the numeric value
        # with its own flag string).
        part_number_flag = f'--part-number-marker {part_number}' if part_number else ''
        cmd = f'aws {self.common_flags} s3api get-object-attributes --bucket {bucket} ' \
              f'--key {key} {version} {parts} {part_number_flag} --object-attributes {attrs} ' \
              f'--endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def delete_bucket(self, Bucket: str) -> dict:
        """Delete the (empty) bucket."""
        cmd = f'aws {self.common_flags} s3api delete-bucket --bucket {Bucket} --endpoint {S3_GATE}'
        output = _cmd_run(cmd, timeout=90)
        return self._to_json(output)

    def get_bucket_tagging(self, Bucket: str) -> dict:
        """Return the tag set of the bucket."""
        cmd = f'aws {self.common_flags} s3api get-bucket-tagging --bucket {Bucket} ' \
              f'--endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def put_bucket_tagging(self, Bucket: str, Tagging: dict) -> dict:
        """Replace the bucket's tag set with Tagging (passed as inline JSON)."""
        cmd = f"aws {self.common_flags} s3api put-bucket-tagging --bucket {Bucket} " \
              f"--tagging '{json.dumps(Tagging)}' --endpoint {S3_GATE}"
        output = _cmd_run(cmd)
        return self._to_json(output)

    def delete_bucket_tagging(self, Bucket: str) -> dict:
        """Remove all tags from the bucket."""
        cmd = f'aws {self.common_flags} s3api delete-bucket-tagging --bucket {Bucket} ' \
              f'--endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def put_object_tagging(self, Bucket: str, Key: str, Tagging: dict) -> dict:
        """Replace the object's tag set with Tagging (passed as inline JSON)."""
        cmd = f"aws {self.common_flags} s3api put-object-tagging --bucket {Bucket} --key {Key} " \
              f"--tagging '{json.dumps(Tagging)}' --endpoint {S3_GATE}"
        output = _cmd_run(cmd)
        return self._to_json(output)

    def get_object_tagging(self, Bucket: str, Key: str) -> dict:
        """Return the tag set of the object."""
        cmd = f'aws {self.common_flags} s3api get-object-tagging --bucket {Bucket} --key {Key} ' \
              f'--endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def delete_object_tagging(self, Bucket: str, Key: str) -> dict:
        """Remove all tags from the object."""
        cmd = f'aws {self.common_flags} s3api delete-object-tagging --bucket {Bucket} ' \
              f'--key {Key} --endpoint {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    @allure.step('Sync directory S3')
    def sync(self, bucket_name: str, dir_path: str) -> dict:
        """Recursively upload dir_path into the bucket via `aws s3 sync`."""
        cmd = f'aws {self.common_flags} s3 sync {dir_path} s3://{bucket_name} ' \
              f'--endpoint-url {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def create_multipart_upload(self, Bucket: str, Key: str) -> dict:
        """Initiate a multipart upload; the response carries the UploadId."""
        cmd = f'aws {self.common_flags} s3api create-multipart-upload --bucket {Bucket} ' \
              f'--key {Key} --endpoint-url {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def list_multipart_uploads(self, Bucket: str) -> dict:
        """List in-progress multipart uploads in the bucket."""
        cmd = f'aws {self.common_flags} s3api list-multipart-uploads --bucket {Bucket} ' \
              f'--endpoint-url {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def abort_multipart_upload(self, Bucket: str, Key: str, UploadId: str) -> dict:
        """Abort the multipart upload, discarding any uploaded parts."""
        cmd = f'aws {self.common_flags} s3api abort-multipart-upload --bucket {Bucket} ' \
              f'--key {Key} --upload-id {UploadId} --endpoint-url {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def upload_part(self, UploadId: str, Bucket: str, Key: str, PartNumber: int, Body: str) -> dict:
        """Upload one part (local file at Body) of the multipart upload."""
        cmd = f'aws {self.common_flags} s3api upload-part --bucket {Bucket} --key {Key} ' \
              f'--upload-id {UploadId} --part-number {PartNumber} --body {Body} ' \
              f'--endpoint-url {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def list_parts(self, UploadId: str, Bucket: str, Key: str) -> dict:
        """List parts already uploaded for the multipart upload."""
        cmd = f'aws {self.common_flags} s3api list-parts --bucket {Bucket} --key {Key} ' \
              f'--upload-id {UploadId} --endpoint-url {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    def complete_multipart_upload(self, Bucket: str, Key: str, UploadId: str,
                                  MultipartUpload: dict) -> dict:
        """Complete the multipart upload.

        The parts manifest is written to a temp JSON file under ASSETS_DIR
        because the CLI takes it via a file:// reference.
        """
        file_path = f"{os.getcwd()}/{ASSETS_DIR}/parts.json"
        with open(file_path, 'w') as out_file:
            out_file.write(json.dumps(MultipartUpload))
        logger.info(f"Input file for complete-multipart-upload: {json.dumps(MultipartUpload)}")

        cmd = f'aws {self.common_flags} s3api complete-multipart-upload --bucket {Bucket} ' \
              f'--key {Key} --upload-id {UploadId} --multipart-upload file://{file_path} ' \
              f'--endpoint-url {S3_GATE}'
        output = _cmd_run(cmd)
        return self._to_json(output)

    @staticmethod
    def _to_json(output: str) -> dict:
        """Parse CLI output into a dict.

        The CLI may prefix the JSON document with warnings (e.g. the TLS
        InsecureRequestWarning triggered by --no-verify-ssl), so on a parse
        failure we retry from the first '{'. Returns {} when the output
        contains no JSON at all.
        """
        json_output = {}
        try:
            json_output = json.loads(output)
        except Exception:
            # Fix: the old guard required BOTH '{' and '}' to be absent, so
            # output containing only '}' fell through to output.index('{')
            # and raised ValueError instead of being reported.
            if '{' not in output:
                logger.warning(f'Could not parse json from output {output}')
                return json_output
            json_output = json.loads(output[output.index('{'):])

        return json_output
|
|
@ -1,12 +1,11 @@
|
|||
#!/usr/bin/python3.8
|
||||
|
||||
import allure
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
import tarfile
|
||||
import uuid
|
||||
from typing import Tuple
|
||||
from typing import Optional, Tuple
|
||||
|
||||
import allure
|
||||
import docker
|
||||
|
@ -15,7 +14,6 @@ from cli_helpers import _cmd_run
|
|||
from common import ASSETS_DIR, SIMPLE_OBJ_SIZE
|
||||
|
||||
logger = logging.getLogger("NeoLogger")
|
||||
ROBOT_AUTO_KEYWORDS = False
|
||||
|
||||
|
||||
def generate_file(size: int = SIMPLE_OBJ_SIZE) -> str:
|
||||
|
@ -52,7 +50,7 @@ def generate_file_and_file_hash(size: int) -> Tuple[str, str]:
|
|||
|
||||
|
||||
@allure.step("Get File Hash")
|
||||
def get_file_hash(filename: str, len: int = None):
|
||||
def get_file_hash(filename: str, len: Optional[int] = None):
|
||||
"""
|
||||
This function generates hash for the specified file.
|
||||
Args:
|
||||
|
@ -108,7 +106,7 @@ def make_up(services: list = [], config_dict: dict = {}):
|
|||
cmd = f"make up/{service}"
|
||||
_cmd_run(cmd)
|
||||
else:
|
||||
cmd = f"make up/basic;" f"make update.max_object_size val={SIMPLE_OBJ_SIZE}"
|
||||
cmd = f"make up/basic; make update.max_object_size val={SIMPLE_OBJ_SIZE}"
|
||||
_cmd_run(cmd, timeout=120)
|
||||
|
||||
os.chdir(test_path)
|
||||
|
@ -131,3 +129,22 @@ def make_down(services: list = []):
|
|||
_cmd_run(cmd, timeout=60)
|
||||
|
||||
os.chdir(test_path)
|
||||
|
||||
|
||||
@allure.step("Concatenation set of files to one file")
def concat_files(list_of_parts: list, new_file_name: Optional[str] = None) -> str:
    """
    Concatenate a set of files into a single file.

    Args:
        list_of_parts (list): paths of the files to concatenate, in order
        new_file_name (str): path for the generated file; when omitted, a
            random name under ASSETS_DIR is used

    Returns:
        (str): the path to the generated file
    """
    if not new_file_name:
        new_file_name = f"{os.getcwd()}/{ASSETS_DIR}/{str(uuid.uuid4())}"
    with open(new_file_name, "wb") as destination:
        for part_path in list_of_parts:  # renamed from `file`: don't shadow the builtin
            with open(part_path, "rb") as part_file:
                # Stream in 1 MiB chunks instead of reading each part fully
                # into memory (parts may be large multipart-upload pieces).
                while chunk := part_file.read(1024 * 1024):
                    destination.write(chunk)
    return new_file_name
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue