diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 2e61679..dbece66 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -14,13 +14,15 @@ from frostfs_testlib.resources.common import ( S3_SYNC_WAIT_TIME, ) from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict +from frostfs_testlib.shell import CommandOptions +from frostfs_testlib.shell.local_shell import LocalShell # TODO: Refactor this code to use shell instead of _cmd_run -from frostfs_testlib.utils.cli_utils import _cmd_run, _configure_aws_cli +from frostfs_testlib.utils.cli_utils import _configure_aws_cli reporter = get_reporter() logger = logging.getLogger("NeoLogger") -LONG_TIMEOUT = 240 +command_options = CommandOptions(timeout=240) class AwsCliClient(S3ClientWrapper): @@ -34,10 +36,13 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Configure S3 client (aws cli)") def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: self.s3gate_endpoint = s3gate_endpoint + self.local_shell = LocalShell() try: _configure_aws_cli("aws configure", access_key_id, secret_access_key) - _cmd_run(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}") - _cmd_run(f"aws configure set retry_mode {RETRY_MODE}") + self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}") + self.local_shell.exec( + f"aws configure set retry_mode {RETRY_MODE}", + ) except Exception as err: raise RuntimeError("Error while configuring AwsCliClient") from err @@ -79,7 +84,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-read {grant_read}" if location_constraint: cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}" - _cmd_run(cmd) + self.local_shell.exec(cmd) sleep(S3_SYNC_WAIT_TIME) return bucket @@ -87,20 +92,20 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List buckets S3") def 
list_buckets(self) -> list[str]: cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint}" - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout buckets_json = self._to_json(output) return [bucket["Name"] for bucket in buckets_json["Buckets"]] @reporter.step_deco("Delete bucket S3") def delete_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" - _cmd_run(cmd, LONG_TIMEOUT) + self.local_shell.exec(cmd, command_options) sleep(S3_SYNC_WAIT_TIME) @reporter.step_deco("Head bucket S3") def head_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put bucket versioning status") def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: @@ -109,7 +114,7 @@ class AwsCliClient(S3ClientWrapper): f"--versioning-configuration Status={status.value} " f"--endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Get bucket versioning status") def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: @@ -117,7 +122,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Status") @@ -130,7 +135,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Get bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: @@ -138,7 +143,7 @@ class AwsCliClient(S3ClientWrapper): f"aws 
{self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("TagSet") @@ -148,7 +153,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ -158,7 +163,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("LocationConstraint") @@ -168,7 +173,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) obj_list = [obj["Key"] for obj in response.get("Contents", [])] @@ -182,7 +187,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) obj_list = [obj["Key"] for obj in response.get("Contents", [])] @@ -196,7 +201,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response if full_output else response.get("Versions", []) @@ -206,7 +211,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " f"--endpoint 
{self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response if full_output else response.get("DeleteMarkers", []) @@ -245,7 +250,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --tagging-directive {tagging_directive}" if tagging: cmd += f" --tagging {tagging}" - _cmd_run(cmd, LONG_TIMEOUT) + self.local_shell.exec(cmd, command_options) return key @reporter.step_deco("Put object S3") @@ -288,7 +293,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-full-control '{grant_full_control}'" if grant_read: cmd += f" --grant-read {grant_read}" - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) return response.get("VersionId") @@ -299,7 +304,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " f"{version} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response @@ -320,7 +325,7 @@ class AwsCliClient(S3ClientWrapper): ) if object_range: cmd += f" --range bytes={object_range[0]}-{object_range[1]}" - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response if full_output else file_path @@ -331,7 +336,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " f"{version} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ -354,7 +359,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-write {grant_write}" if grant_read: cmd += f" --grant-read {grant_read}" - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ 
-376,7 +381,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-write {grant_write}" if grant_read: cmd += f" --grant-read {grant_read}" - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Delete objects S3") def delete_objects(self, bucket: str, keys: list[str]) -> dict: @@ -390,7 +395,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) sleep(S3_SYNC_WAIT_TIME) return response @@ -402,7 +407,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-object --bucket {bucket} " f"--key {key} {version} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) @@ -429,7 +434,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) @@ -462,7 +467,7 @@ class AwsCliClient(S3ClientWrapper): f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) for attr in attributes: @@ -479,7 +484,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Policy") @@ -496,7 +501,7 @@ class 
AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} " f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: @@ -504,7 +509,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("CORSRules") @@ -514,7 +519,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: @@ -522,7 +527,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: @@ -530,7 +535,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put object retention") def put_object_retention( @@ -548,7 +553,7 @@ class AwsCliClient(S3ClientWrapper): ) if bypass_governance_retention is not None: cmd += " --bypass-governance-retention" - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put object legal hold") def put_object_legal_hold( @@ -564,7 +569,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} " f"{version} --legal-hold '{legal_hold}' 
--endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put object tagging") def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: @@ -574,7 +579,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Get object tagging") def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: @@ -583,7 +588,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " f"{version} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("TagSet") @@ -593,7 +598,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " f"--key {key} --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Sync directory S3") def sync( @@ -613,7 +618,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" {key}={value}" if acl: cmd += f" --acl {acl}" - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout return self._to_json(output) @reporter.step_deco("CP directory S3") @@ -634,7 +639,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" {key}={value}" if acl: cmd += f" --acl {acl}" - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout return self._to_json(output) @reporter.step_deco("Create multipart upload S3") @@ -643,7 +648,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " f"--key {key} --endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = 
self.local_shell.exec(cmd).stdout response = self._to_json(output) assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" @@ -656,7 +661,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Uploads") @@ -666,7 +671,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Upload part S3") def upload_part( @@ -677,7 +682,7 @@ class AwsCliClient(S3ClientWrapper): f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " f"--endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) assert response.get("ETag"), f"Expected ETag in response:\n{response}" return response["ETag"] @@ -691,7 +696,7 @@ class AwsCliClient(S3ClientWrapper): f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " f"--endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) assert response.get("CopyPartResult", []).get( "ETag" @@ -705,7 +710,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) assert response.get("Parts"), f"Expected Parts in response:\n{response}" @@ -727,7 +732,7 @@ class AwsCliClient(S3ClientWrapper): f"--key {key} --upload-id 
{upload_id} --multipart-upload file://{file_path} " f"--endpoint-url {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put object lock configuration") def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: @@ -735,7 +740,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout return self._to_json(output) @reporter.step_deco("Get object lock configuration") @@ -744,7 +749,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("ObjectLockConfiguration") diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index 8080689..2b70d6c 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -12,12 +12,13 @@ import requests from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE -from frostfs_testlib.s3.aws_cli_client import LONG_TIMEOUT +from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.shell import Shell +from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.steps.cli.object import get_object from frostfs_testlib.steps.storage_policy import get_nodes_without_object from frostfs_testlib.storage.cluster import StorageNode -from frostfs_testlib.utils.cli_utils import _cmd_run +from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils.file_utils import get_file_hash reporter = get_reporter() @@ -25,6 +26,7 
@@ reporter = get_reporter() logger = logging.getLogger("NeoLogger") ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") +local_shell = LocalShell() @reporter.step_deco("Get via HTTP Gate") @@ -51,7 +53,9 @@ def get_via_http_gate( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False) + resp = requests.get( + request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False + ) if not resp.ok: raise Exception( @@ -72,7 +76,9 @@ def get_via_http_gate( @reporter.step_deco("Get via Zip HTTP Gate") -def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300): +def get_via_zip_http_gate( + cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300 +): """ This function gets given object from HTTP gate cid: container id to get object from @@ -130,7 +136,9 @@ def get_via_http_gate_by_attribute( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname}) + resp = requests.get( + request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname} + ) if not resp.ok: raise Exception( @@ -165,7 +173,9 @@ def upload_via_http_gate( request = f"{endpoint}/upload/{cid}" files = {"upload_file": open(path, "rb")} body = {"filename": path} - resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False) + resp = requests.post( + request, files=files, data=body, headers=headers, timeout=timeout, verify=False + ) if not resp.ok: raise Exception( @@ -223,16 +233,16 @@ def upload_via_http_gate_curl( large_object = is_object_large(filepath) if large_object: # pre-clean - _cmd_run("rm pipe -f") + local_shell.exec("rm pipe -f") files = f"file=@pipe;filename={os.path.basename(filepath)}" cmd = f"mkfifo pipe;cat {filepath} > pipe & curl -k 
--no-buffer -F '{files}' {attributes} {request}" - output = _cmd_run(cmd, LONG_TIMEOUT) + output = local_shell.exec(cmd, command_options).stdout # clean up pipe - _cmd_run("rm pipe") + local_shell.exec("rm pipe") else: files = f"file=@{filepath};filename={os.path.basename(filepath)}" cmd = f"curl -k -F '{files}' {attributes} {request}" - output = _cmd_run(cmd) + output = local_shell.exec(cmd).stdout if error_pattern: match = error_pattern.casefold() in str(output).casefold() @@ -245,6 +255,7 @@ def upload_via_http_gate_curl( return oid_re.group(1) +@retry(max_attempts=3, sleep_interval=1) @reporter.step_deco("Get via HTTP Gate using Curl") def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str: """ @@ -257,8 +268,8 @@ def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> request = f"{endpoint}/get/{cid}/{oid}" file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") - cmd = f"curl -k -H \"Host: {http_hostname}\" {request} > {file_path}" - _cmd_run(cmd) + cmd = f'curl -k -H "Host: {http_hostname}" {request} > {file_path}' + local_shell.exec(cmd) return file_path @@ -271,7 +282,11 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"): @reporter.step_deco("Try to get object and expect error") def try_to_get_object_and_expect_error( - cid: str, oid: str, error_pattern: str, endpoint: str, http_hostname: str, + cid: str, + oid: str, + error_pattern: str, + endpoint: str, + http_hostname: str, ) -> None: try: get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) @@ -283,9 +298,16 @@ def try_to_get_object_and_expect_error( @reporter.step_deco("Verify object can be get using HTTP header attribute") def get_object_by_attr_and_verify_hashes( - oid: str, file_name: str, cid: str, attrs: dict, endpoint: str, http_hostname: str, + oid: str, + file_name: str, + cid: str, + attrs: dict, + endpoint: str, + http_hostname: str, ) -> None: - got_file_path_http = 
get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) + got_file_path_http = get_via_http_gate( + cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname + ) got_file_path_http_attr = get_via_http_gate_by_attribute( cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname ) @@ -326,7 +348,9 @@ def verify_object_hash( shell=shell, endpoint=random_node.get_rpc_endpoint(), ) - got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) + got_file_path_http = object_getter( + cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname + ) assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) @@ -369,10 +393,20 @@ def try_to_get_object_via_passed_request_and_expect_error( ) -> None: try: if attrs is None: - get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path, http_hostname=http_hostname) + get_via_http_gate( + cid=cid, + oid=oid, + endpoint=endpoint, + request_path=http_request_path, + http_hostname=http_hostname, + ) else: get_via_http_gate_by_attribute( - cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path, http_hostname=http_hostname + cid=cid, + attribute=attrs, + endpoint=endpoint, + request_path=http_request_path, + http_hostname=http_hostname, ) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index c18b8d8..deb8c7f 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -241,10 +241,11 @@ class ClusterStateController: @reporter.step_deco("Resume {process_name} service in {node}") def resume_service(self, process_name: str, node: ClusterNode): node.host.wait_success_resume_process(process_name) 
- if self.suspended_services.get(process_name): - self.suspended_services[process_name].append(node) - else: - self.suspended_services[process_name] = [node] + if ( + self.suspended_services.get(process_name) + and node in self.suspended_services[process_name] + ): + self.suspended_services[process_name].remove(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start suspend processes services") diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 0fa6cde..e1dfcd1 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -28,55 +28,6 @@ COLOR_GREEN = "\033[92m" COLOR_OFF = "\033[0m" -def _cmd_run(cmd: str, timeout: int = 90) -> str: - """ - Runs given shell command , in case of success returns its stdout, - in case of failure returns error message. - """ - compl_proc = None - start_time = datetime.now() - try: - logger.info(f"{COLOR_GREEN}Executing command: {cmd}{COLOR_OFF}") - start_time = datetime.utcnow() - compl_proc = subprocess.run( - cmd, - check=True, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - timeout=timeout, - shell=True, - ) - output = compl_proc.stdout - return_code = compl_proc.returncode - end_time = datetime.utcnow() - logger.info(f"{COLOR_GREEN}Output: {output}{COLOR_OFF}") - _attach_allure_log(cmd, output, return_code, start_time, end_time) - - return output - except subprocess.CalledProcessError as exc: - logger.info( - f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode} " f"\nOutput: {exc.output}" - ) - end_time = datetime.now() - return_code, cmd_output = subprocess.getstatusoutput(cmd) - _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) - - raise RuntimeError( - f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode}\n" f"Output: {exc.output}" - ) from exc - except OSError as exc: - raise RuntimeError(f"Command: {cmd}\n" f"Output: {exc.strerror}") from exc - 
except Exception as exc: - return_code, cmd_output = subprocess.getstatusoutput(cmd) - end_time = datetime.now() - _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) - logger.info( - f"Command: {cmd}\n" f"Error:\nreturn code: {return_code}\n" f"Output: {cmd_output}" - ) - raise - - def _run_with_passwd(cmd: str) -> str: child = pexpect.spawn(cmd) child.delaybeforesend = 1