From bd05aae585f9609c8c863816b51067c81a1599ce Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 6 Dec 2022 01:31:45 +0300 Subject: [PATCH] Refactor for cluster usage Signed-off-by: Andrey Berezin --- .../{steps => helpers}/aws_cli_client.py | 101 +- pytest_tests/helpers/cluster.py | 324 +++++ pytest_tests/helpers/container.py | 14 +- pytest_tests/helpers/wallet.py | 8 +- pytest_tests/steps/cluster_test_base.py | 25 + pytest_tests/steps/s3_gate_base.py | 64 +- pytest_tests/steps/s3_gate_object.py | 6 +- pytest_tests/steps/session_token.py | 8 +- pytest_tests/steps/storage_object.py | 15 +- pytest_tests/testsuites/acl/conftest.py | 55 +- .../acl/storage_group/test_storagegroup.py | 280 +++-- pytest_tests/testsuites/acl/test_acl.py | 90 +- pytest_tests/testsuites/acl/test_bearer.py | 59 +- pytest_tests/testsuites/acl/test_eacl.py | 239 ++-- .../testsuites/acl/test_eacl_filters.py | 197 ++- pytest_tests/testsuites/conftest.py | 42 +- .../testsuites/container/test_container.py | 160 ++- .../failovers/test_failover_network.py | 155 +-- .../failovers/test_failover_storage.py | 236 ++-- .../network/test_node_management.py | 926 +++++++------- .../testsuites/object/test_object_api.py | 666 +++++----- .../testsuites/object/test_object_lifetime.py | 64 +- .../testsuites/object/test_object_lock.py | 190 +-- .../testsuites/payment/test_balance.py | 19 +- .../services/s3_gate/test_s3_gate.py | 8 +- .../services/s3_gate/test_s3_object.py | 13 +- .../services/s3_gate/test_s3_policy.py | 22 +- .../testsuites/services/test_http_gate.py | 201 ++- .../test_object_session_token.py | 213 ++-- .../test_static_object_session_token.py | 1078 +++++++++-------- .../test_static_session_token_container.py | 65 +- robot/resources/lib/python_keywords/acl.py | 17 +- .../python_keywords/complex_object_actions.py | 24 +- .../lib/python_keywords/container.py | 44 +- .../lib/python_keywords/container_access.py | 59 +- robot/resources/lib/python_keywords/epoch.py | 54 +- 
.../lib/python_keywords/failover_utils.py | 38 +- .../lib/python_keywords/http_gate.py | 59 +- .../lib/python_keywords/neofs_verbs.py | 245 ++-- .../lib/python_keywords/node_management.py | 171 ++- .../lib/python_keywords/object_access.py | 38 +- .../lib/python_keywords/payment_neogo.py | 81 +- .../lib/python_keywords/storage_group.py | 35 +- .../lib/python_keywords/storage_policy.py | 60 +- .../lib/python_keywords/tombstone.py | 6 +- robot/variables/common.py | 88 -- 46 files changed, 3859 insertions(+), 2703 deletions(-) rename pytest_tests/{steps => helpers}/aws_cli_client.py (87%) create mode 100644 pytest_tests/helpers/cluster.py create mode 100644 pytest_tests/steps/cluster_test_base.py diff --git a/pytest_tests/steps/aws_cli_client.py b/pytest_tests/helpers/aws_cli_client.py similarity index 87% rename from pytest_tests/steps/aws_cli_client.py rename to pytest_tests/helpers/aws_cli_client.py index 5b256b9..213a383 100644 --- a/pytest_tests/steps/aws_cli_client.py +++ b/pytest_tests/helpers/aws_cli_client.py @@ -6,7 +6,7 @@ from typing import Optional import allure from cli_helpers import _cmd_run -from common import ASSETS_DIR, S3_GATE +from common import ASSETS_DIR logger = logging.getLogger("NeoLogger") REGULAR_TIMEOUT = 90 @@ -17,6 +17,10 @@ class AwsCliClient: # Flags that we use for all S3 commands: disable SSL verification (as we use self-signed # certificate in devenv) and disable automatic pagination in CLI output common_flags = "--no-verify-ssl --no-paginate" + s3gate_endpoint: str + + def __init__(self, s3gate_endpoint) -> None: + self.s3gate_endpoint = s3gate_endpoint def create_bucket( self, @@ -36,7 +40,7 @@ class AwsCliClient: object_lock = " --no-object-lock-enabled-for-bucket" cmd = ( f"aws {self.common_flags} s3api create-bucket --bucket {Bucket} " - f"{object_lock} --endpoint {S3_GATE}" + f"{object_lock} --endpoint {self.s3gate_endpoint}" ) if ACL: cmd += f" --acl {ACL}" @@ -51,14 +55,14 @@ class AwsCliClient: _cmd_run(cmd, REGULAR_TIMEOUT) 
def list_buckets(self) -> dict: - cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {S3_GATE}" + cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint}" output = _cmd_run(cmd) return self._to_json(output) def get_bucket_acl(self, Bucket: str) -> dict: cmd = ( f"aws {self.common_flags} s3api get-bucket-acl --bucket {Bucket} " - f"--endpoint {S3_GATE}" + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd, REGULAR_TIMEOUT) return self._to_json(output) @@ -66,7 +70,7 @@ class AwsCliClient: def get_bucket_versioning(self, Bucket: str) -> dict: cmd = ( f"aws {self.common_flags} s3api get-bucket-versioning --bucket {Bucket} " - f"--endpoint {S3_GATE}" + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd, REGULAR_TIMEOUT) return self._to_json(output) @@ -74,7 +78,7 @@ class AwsCliClient: def get_bucket_location(self, Bucket: str) -> dict: cmd = ( f"aws {self.common_flags} s3api get-bucket-location --bucket {Bucket} " - f"--endpoint {S3_GATE}" + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd, REGULAR_TIMEOUT) return self._to_json(output) @@ -83,14 +87,15 @@ class AwsCliClient: cmd = ( f"aws {self.common_flags} s3api put-bucket-versioning --bucket {Bucket} " f'--versioning-configuration Status={VersioningConfiguration.get("Status")} ' - f"--endpoint {S3_GATE}" + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) def list_objects(self, Bucket: str) -> dict: cmd = ( - f"aws {self.common_flags} s3api list-objects --bucket {Bucket} " f"--endpoint {S3_GATE}" + f"aws {self.common_flags} s3api list-objects --bucket {Bucket} " + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -98,7 +103,7 @@ class AwsCliClient: def list_objects_v2(self, Bucket: str) -> dict: cmd = ( f"aws {self.common_flags} s3api list-objects-v2 --bucket {Bucket} " - f"--endpoint {S3_GATE}" + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) 
return self._to_json(output) @@ -106,7 +111,7 @@ class AwsCliClient: def list_object_versions(self, Bucket: str) -> dict: cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {Bucket} " - f"--endpoint {S3_GATE}" + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -124,7 +129,7 @@ class AwsCliClient: ) -> dict: cmd = ( f"aws {self.common_flags} s3api copy-object --copy-source {CopySource} " - f"--bucket {Bucket} --key {Key} --endpoint {S3_GATE}" + f"--bucket {Bucket} --key {Key} --endpoint {self.s3gate_endpoint}" ) if ACL: cmd += f" --acl {ACL}" @@ -142,7 +147,7 @@ class AwsCliClient: return self._to_json(output) def head_bucket(self, Bucket: str) -> dict: - cmd = f"aws {self.common_flags} s3api head-bucket --bucket {Bucket} --endpoint {S3_GATE}" + cmd = f"aws {self.common_flags} s3api head-bucket --bucket {Bucket} --endpoint {self.s3gate_endpoint}" output = _cmd_run(cmd) return self._to_json(output) @@ -162,7 +167,7 @@ class AwsCliClient: ) -> dict: cmd = ( f"aws {self.common_flags} s3api put-object --bucket {Bucket} --key {Key} " - f"--body {Body} --endpoint {S3_GATE}" + f"--body {Body} --endpoint {self.s3gate_endpoint}" ) if Metadata: cmd += f" --metadata" @@ -189,7 +194,7 @@ class AwsCliClient: version = f" --version-id {VersionId}" if VersionId else "" cmd = ( f"aws {self.common_flags} s3api head-object --bucket {Bucket} --key {Key} " - f"{version} --endpoint {S3_GATE}" + f"{version} --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -205,7 +210,7 @@ class AwsCliClient: version = f" --version-id {VersionId}" if VersionId else "" cmd = ( f"aws {self.common_flags} s3api get-object --bucket {Bucket} --key {Key} " - f"{version} {file_path} --endpoint {S3_GATE}" + f"{version} {file_path} --endpoint {self.s3gate_endpoint}" ) if Range: cmd += f" --range {Range}" @@ -216,7 +221,7 @@ class AwsCliClient: version = f" --version-id {VersionId}" if VersionId else "" cmd = ( 
f"aws {self.common_flags} s3api get-object-acl --bucket {Bucket} --key {Key} " - f"{version} --endpoint {S3_GATE}" + f"{version} --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd, REGULAR_TIMEOUT) return self._to_json(output) @@ -231,7 +236,7 @@ class AwsCliClient: ) -> dict: cmd = ( f"aws {self.common_flags} s3api put-object-acl --bucket {Bucket} --key {Key} " - f" --endpoint {S3_GATE}" + f" --endpoint {self.s3gate_endpoint}" ) if ACL: cmd += f" --acl {ACL}" @@ -251,7 +256,7 @@ class AwsCliClient: ) -> dict: cmd = ( f"aws {self.common_flags} s3api put-bucket-acl --bucket {Bucket} " - f" --endpoint {S3_GATE}" + f" --endpoint {self.s3gate_endpoint}" ) if ACL: cmd += f" --acl {ACL}" @@ -270,7 +275,7 @@ class AwsCliClient: cmd = ( f"aws {self.common_flags} s3api delete-objects --bucket {Bucket} " - f"--delete file://{file_path} --endpoint {S3_GATE}" + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd, LONG_TIMEOUT) return self._to_json(output) @@ -279,7 +284,7 @@ class AwsCliClient: version = f" --version-id {VersionId}" if VersionId else "" cmd = ( f"aws {self.common_flags} s3api delete-object --bucket {Bucket} " - f"--key {Key} {version} --endpoint {S3_GATE}" + f"--key {Key} {version} --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd, LONG_TIMEOUT) return self._to_json(output) @@ -300,20 +305,20 @@ class AwsCliClient: cmd = ( f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} " f"--key {key} {version} {parts} {part_number} --object-attributes {attrs} " - f"--endpoint {S3_GATE}" + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) def delete_bucket(self, Bucket: str) -> dict: - cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {Bucket} --endpoint {S3_GATE}" + cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {Bucket} --endpoint {self.s3gate_endpoint}" output = _cmd_run(cmd, LONG_TIMEOUT) return self._to_json(output) def 
get_bucket_tagging(self, Bucket: str) -> dict: cmd = ( f"aws {self.common_flags} s3api get-bucket-tagging --bucket {Bucket} " - f"--endpoint {S3_GATE}" + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -321,7 +326,7 @@ class AwsCliClient: def get_bucket_policy(self, Bucket: str) -> dict: cmd = ( f"aws {self.common_flags} s3api get-bucket-policy --bucket {Bucket} " - f"--endpoint {S3_GATE}" + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -329,7 +334,7 @@ class AwsCliClient: def put_bucket_policy(self, Bucket: str, Policy: dict) -> dict: cmd = ( f"aws {self.common_flags} s3api put-bucket-policy --bucket {Bucket} " - f"--policy {json.dumps(Policy)} --endpoint {S3_GATE}" + f"--policy {json.dumps(Policy)} --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -337,7 +342,7 @@ class AwsCliClient: def get_bucket_cors(self, Bucket: str) -> dict: cmd = ( f"aws {self.common_flags} s3api get-bucket-cors --bucket {Bucket} " - f"--endpoint {S3_GATE}" + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -345,7 +350,7 @@ class AwsCliClient: def put_bucket_cors(self, Bucket: str, CORSConfiguration: dict) -> dict: cmd = ( f"aws {self.common_flags} s3api put-bucket-cors --bucket {Bucket} " - f"--cors-configuration '{json.dumps(CORSConfiguration)}' --endpoint {S3_GATE}" + f"--cors-configuration '{json.dumps(CORSConfiguration)}' --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -353,7 +358,7 @@ class AwsCliClient: def delete_bucket_cors(self, Bucket: str) -> dict: cmd = ( f"aws {self.common_flags} s3api delete-bucket-cors --bucket {Bucket} " - f"--endpoint {S3_GATE}" + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -361,7 +366,7 @@ class AwsCliClient: def put_bucket_tagging(self, Bucket: str, Tagging: dict) -> dict: cmd = ( 
f"aws {self.common_flags} s3api put-bucket-tagging --bucket {Bucket} " - f"--tagging '{json.dumps(Tagging)}' --endpoint {S3_GATE}" + f"--tagging '{json.dumps(Tagging)}' --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -369,7 +374,7 @@ class AwsCliClient: def delete_bucket_tagging(self, Bucket: str) -> dict: cmd = ( f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {Bucket} " - f"--endpoint {S3_GATE}" + f"--endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -380,7 +385,7 @@ class AwsCliClient: version = f" --version-id {VersionId}" if VersionId else "" cmd = ( f"aws {self.common_flags} s3api put-object-retention --bucket {Bucket} --key {Key} " - f"{version} --retention '{json.dumps(Retention, indent=4, sort_keys=True, default=str)}' --endpoint {S3_GATE}" + f"{version} --retention '{json.dumps(Retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -391,7 +396,7 @@ class AwsCliClient: version = f" --version-id {VersionId}" if VersionId else "" cmd = ( f"aws {self.common_flags} s3api put-object-legal-hold --bucket {Bucket} --key {Key} " - f"{version} --legal-hold '{json.dumps(LegalHold)}' --endpoint {S3_GATE}" + f"{version} --legal-hold '{json.dumps(LegalHold)}' --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -407,7 +412,7 @@ class AwsCliClient: version = f" --version-id {VersionId}" if VersionId else "" cmd = ( f"aws {self.common_flags} s3api put-object-retention --bucket {Bucket} --key {Key} " - f"{version} --retention '{json.dumps(Retention, indent=4, sort_keys=True, default=str)}' --endpoint {S3_GATE}" + f"{version} --retention '{json.dumps(Retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint}" ) if not BypassGovernanceRetention is None: cmd += " --bypass-governance-retention" @@ -420,7 +425,7 @@ class 
AwsCliClient: version = f" --version-id {VersionId}" if VersionId else "" cmd = ( f"aws {self.common_flags} s3api put-object-legal-hold --bucket {Bucket} --key {Key} " - f"{version} --legal-hold '{json.dumps(LegalHold)}' --endpoint {S3_GATE}" + f"{version} --legal-hold '{json.dumps(LegalHold)}' --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -428,7 +433,7 @@ class AwsCliClient: def put_object_tagging(self, Bucket: str, Key: str, Tagging: dict) -> dict: cmd = ( f"aws {self.common_flags} s3api put-object-tagging --bucket {Bucket} --key {Key} " - f"--tagging '{json.dumps(Tagging)}' --endpoint {S3_GATE}" + f"--tagging '{json.dumps(Tagging)}' --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -437,7 +442,7 @@ class AwsCliClient: version = f" --version-id {VersionId}" if VersionId else "" cmd = ( f"aws {self.common_flags} s3api get-object-tagging --bucket {Bucket} --key {Key} " - f"{version} --endpoint {S3_GATE}" + f"{version} --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd, REGULAR_TIMEOUT) return self._to_json(output) @@ -445,7 +450,7 @@ class AwsCliClient: def delete_object_tagging(self, Bucket: str, Key: str) -> dict: cmd = ( f"aws {self.common_flags} s3api delete-object-tagging --bucket {Bucket} " - f"--key {Key} --endpoint {S3_GATE}" + f"--key {Key} --endpoint {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -460,7 +465,7 @@ class AwsCliClient: ) -> dict: cmd = ( f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket_name} " - f"--endpoint-url {S3_GATE}" + f"--endpoint-url {self.s3gate_endpoint}" ) if Metadata: cmd += f" --metadata" @@ -481,7 +486,7 @@ class AwsCliClient: ) -> dict: cmd = ( f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket_name} " - f"--endpoint-url {S3_GATE} --recursive" + f"--endpoint-url {self.s3gate_endpoint} --recursive" ) if Metadata: cmd += f" --metadata" @@ -495,7 +500,7 @@ class AwsCliClient: def 
create_multipart_upload(self, Bucket: str, Key: str) -> dict: cmd = ( f"aws {self.common_flags} s3api create-multipart-upload --bucket {Bucket} " - f"--key {Key} --endpoint-url {S3_GATE}" + f"--key {Key} --endpoint-url {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -503,7 +508,7 @@ class AwsCliClient: def list_multipart_uploads(self, Bucket: str) -> dict: cmd = ( f"aws {self.common_flags} s3api list-multipart-uploads --bucket {Bucket} " - f"--endpoint-url {S3_GATE}" + f"--endpoint-url {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -511,7 +516,7 @@ class AwsCliClient: def abort_multipart_upload(self, Bucket: str, Key: str, UploadId: str) -> dict: cmd = ( f"aws {self.common_flags} s3api abort-multipart-upload --bucket {Bucket} " - f"--key {Key} --upload-id {UploadId} --endpoint-url {S3_GATE}" + f"--key {Key} --upload-id {UploadId} --endpoint-url {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -520,7 +525,7 @@ class AwsCliClient: cmd = ( f"aws {self.common_flags} s3api upload-part --bucket {Bucket} --key {Key} " f"--upload-id {UploadId} --part-number {PartNumber} --body {Body} " - f"--endpoint-url {S3_GATE}" + f"--endpoint-url {self.s3gate_endpoint}" ) output = _cmd_run(cmd, LONG_TIMEOUT) return self._to_json(output) @@ -531,7 +536,7 @@ class AwsCliClient: cmd = ( f"aws {self.common_flags} s3api upload-part-copy --bucket {Bucket} --key {Key} " f"--upload-id {UploadId} --part-number {PartNumber} --copy-source {CopySource} " - f"--endpoint-url {S3_GATE}" + f"--endpoint-url {self.s3gate_endpoint}" ) output = _cmd_run(cmd, LONG_TIMEOUT) return self._to_json(output) @@ -539,7 +544,7 @@ class AwsCliClient: def list_parts(self, UploadId: str, Bucket: str, Key: str) -> dict: cmd = ( f"aws {self.common_flags} s3api list-parts --bucket {Bucket} --key {Key} " - f"--upload-id {UploadId} --endpoint-url {S3_GATE}" + f"--upload-id {UploadId} --endpoint-url {self.s3gate_endpoint}" ) 
output = _cmd_run(cmd) return self._to_json(output) @@ -555,7 +560,7 @@ class AwsCliClient: cmd = ( f"aws {self.common_flags} s3api complete-multipart-upload --bucket {Bucket} " f"--key {Key} --upload-id {UploadId} --multipart-upload file://{file_path} " - f"--endpoint-url {S3_GATE}" + f"--endpoint-url {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -563,7 +568,7 @@ class AwsCliClient: def put_object_lock_configuration(self, Bucket, ObjectLockConfiguration): cmd = ( f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {Bucket} " - f"--object-lock-configuration '{json.dumps(ObjectLockConfiguration)}' --endpoint-url {S3_GATE}" + f"--object-lock-configuration '{json.dumps(ObjectLockConfiguration)}' --endpoint-url {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) @@ -571,7 +576,7 @@ class AwsCliClient: def get_object_lock_configuration(self, Bucket): cmd = ( f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {Bucket} " - f"--endpoint-url {S3_GATE}" + f"--endpoint-url {self.s3gate_endpoint}" ) output = _cmd_run(cmd) return self._to_json(output) diff --git a/pytest_tests/helpers/cluster.py b/pytest_tests/helpers/cluster.py new file mode 100644 index 0000000..f5f2555 --- /dev/null +++ b/pytest_tests/helpers/cluster.py @@ -0,0 +1,324 @@ +import random +import re +from dataclasses import dataclass +from typing import Any + +import data_formatters +from neofs_testlib.blockchain import RPCClient +from neofs_testlib.hosting import Host, Hosting +from neofs_testlib.hosting.config import ServiceConfig +from test_control import wait_for_success + + +@dataclass +class NodeBase: + """ + Represents a node of some underlying service + """ + + id: str + name: str + host: Host + + def __init__(self, id, name, host) -> None: + self.id = id + self.name = name + self.host = host + self.construct() + + def construct(self): + pass + + def __eq__(self, other): + return self.name == other.name + 
+ def __hash__(self): + return id(self.name) + + def __str__(self): + return self.label + + def __repr__(self) -> str: + return self.label + + @property + def label(self) -> str: + return self.name + + @wait_for_success(60, 1) + def start_service(self): + self.host.start_service(self.name) + + @wait_for_success(60, 1) + def stop_service(self): + self.host.stop_service(self.name) + + def get_wallet_password(self) -> str: + return self._get_attribute(_ConfigAttributes.WALLET_PASSWORD) + + def get_wallet_path(self) -> str: + return self._get_attribute( + _ConfigAttributes.LOCAL_WALLET_PATH, + _ConfigAttributes.WALLET_PATH, + ) + + def get_wallet_config_path(self): + return self._get_attribute( + _ConfigAttributes.LOCAL_WALLET_CONFIG, + _ConfigAttributes.WALLET_CONFIG, + ) + + def get_wallet_public_key(self): + storage_wallet_path = self.get_wallet_path() + storage_wallet_pass = self.get_wallet_password() + return data_formatters.get_wallet_public_key(storage_wallet_path, storage_wallet_pass) + + def _get_attribute(self, attribute_name: str, default_attribute_name: str = None) -> list[str]: + config = self.host.get_service_config(self.name) + if default_attribute_name: + return config.attributes.get( + attribute_name, config.attributes.get(default_attribute_name) + ) + else: + return config.attributes.get(attribute_name) + + def _get_service_config(self) -> ServiceConfig: + return self.host.get_service_config(self.name) + + +class InnerRingNode(NodeBase): + """ + Class represents inner ring node in a cluster + + Inner ring node is not always the same as physical host (or physical node, if you will): + It can be service running in a container or on physical host + For testing perspective, it's not relevant how it is actually running, + since neofs network will still treat it as "node" + """ + + pass + + +class S3Gate(NodeBase): + """ + Class represents S3 gateway in a cluster + """ + + def get_endpoint(self) -> str: + return 
self._get_attribute(_ConfigAttributes.ENDPOINT) + + @property + def label(self) -> str: + return f"{self.name}: {self.get_endpoint()}" + + +class HTTPGate(NodeBase): + """ + Class represents HTTP gateway in a cluster + """ + + def get_endpoint(self) -> str: + return self._get_attribute(_ConfigAttributes.ENDPOINT) + + @property + def label(self) -> str: + return f"{self.name}: {self.get_endpoint()}" + + +class MorphChain(NodeBase): + """ + Class represents side-chain aka morph-chain consensus node in a cluster + + Consensus node is not always the same as physical host (or physical node, if you will): + It can be service running in a container or on physical host + For testing perspective, it's not relevant how it is actually running, + since neofs network will still treat it as "node" + """ + + rpc_client: RPCClient = None + + def construct(self): + self.rpc_client = RPCClient(self.get_endpoint()) + + def get_endpoint(self) -> str: + return self._get_attribute(_ConfigAttributes.ENDPOINT) + + @property + def label(self) -> str: + return f"{self.name}: {self.get_endpoint()}" + + +class MainChain(NodeBase): + """ + Class represents main-chain consensus node in a cluster + + Consensus node is not always the same as physical host: + It can be service running in a container or on physical host (or physical node, if you will): + For testing perspective, it's not relevant how it is actually running, + since neofs network will still treat it as "node" + """ + + rpc_client: RPCClient = None + + def construct(self): + self.rpc_client = RPCClient(self.get_endpoint()) + + def get_endpoint(self) -> str: + return self._get_attribute(_ConfigAttributes.ENDPOINT) + + @property + def label(self) -> str: + return f"{self.name}: {self.get_endpoint()}" + + +class StorageNode(NodeBase): + """ + Class represents storage node in a storage cluster + + Storage node is not always the same as physical host: + It can be service running in a container or on physical host (or physical node, if you 
will): + For testing perspective, it's not relevant how it is actually running, + since neofs network will still treat it as "node" + """ + + def get_rpc_endpoint(self) -> str: + return self._get_attribute(_ConfigAttributes.RPC_ENDPOINT) + + def get_control_endpoint(self) -> str: + return self._get_attribute(_ConfigAttributes.CONTROL_ENDPOINT) + + def get_un_locode(self): + return self._get_attribute(_ConfigAttributes.UN_LOCODE) + + @property + def label(self) -> str: + return f"{self.name}: {self.get_rpc_endpoint()}" + + +class Cluster: + """ + This class represents a Cluster object for the whole storage based on provided hosting + """ + + default_rpc_endpoint: str + default_s3_gate_endpoint: str + + def __init__(self, hosting: Hosting) -> None: + self._hosting = hosting + self.default_rpc_endpoint = self.storage_nodes[0].get_rpc_endpoint() + self.default_s3_gate_endpoint = self.s3gates[0].get_endpoint() + self.default_http_gate_endpoint = self.http_gates[0].get_endpoint() + + @property + def hosts(self) -> list[Host]: + """ + Returns list of Hosts + """ + return self._hosting.hosts + + @property + def hosting(self) -> Hosting: + return self._hosting + + @property + def storage_nodes(self) -> list[StorageNode]: + """ + Returns list of Storage Nodes (not physical nodes) + """ + return self._get_nodes(_ServicesNames.STORAGE) + + @property + def s3gates(self) -> list[S3Gate]: + """ + Returns list of S3 gates + """ + return self._get_nodes(_ServicesNames.S3_GATE) + + @property + def http_gates(self) -> list[S3Gate]: + """ + Returns list of HTTP gates + """ + return self._get_nodes(_ServicesNames.HTTP_GATE) + + @property + def morph_chain_nodes(self) -> list[MorphChain]: + """ + Returns list of morph-chain consensus nodes (not physical nodes) + """ + return self._get_nodes(_ServicesNames.MORPH_CHAIN) + + @property + def main_chain_nodes(self) -> list[MainChain]: + """ + Returns list of main-chain consensus nodes (not physical nodes) + """ + return 
self._get_nodes(_ServicesNames.MAIN_CHAIN) + + @property + def ir_nodes(self) -> list[InnerRingNode]: + """ + Returns list of inner-ring nodes (not physical nodes) + """ + return self._get_nodes(_ServicesNames.INNER_RING) + + def _get_nodes(self, service_name) -> list[StorageNode]: + configs = self.hosting.find_service_configs(f"{service_name}\d*$") + + class_mapping: dict[str, Any] = { + _ServicesNames.STORAGE: StorageNode, + _ServicesNames.INNER_RING: InnerRingNode, + _ServicesNames.MORPH_CHAIN: MorphChain, + _ServicesNames.S3_GATE: S3Gate, + _ServicesNames.HTTP_GATE: HTTPGate, + _ServicesNames.MAIN_CHAIN: MainChain, + } + + cls = class_mapping.get(service_name) + return [ + cls( + self._get_id(config.name), + config.name, + self.hosting.get_host_by_service(config.name), + ) + for config in configs + ] + + def _get_id(self, node_name) -> str: + pattern = "\d*$" + + matches = re.search(pattern, node_name) + if matches: + return int(matches.group()) + + def get_random_storage_rpc_endpoint(self) -> str: + return random.choice(self.get_storage_rpc_endpoints()) + + def get_storage_rpc_endpoints(self) -> list[str]: + nodes = self.storage_nodes + return [node.get_rpc_endpoint() for node in nodes] + + def get_morph_endpoints(self) -> list[str]: + nodes = self.morph_chain_nodes + return [node.get_endpoint() for node in nodes] + + +class _ServicesNames: + STORAGE = "s" + S3_GATE = "s3-gate" + HTTP_GATE = "http-gate" + MORPH_CHAIN = "morph-chain" + INNER_RING = "ir" + MAIN_CHAIN = "main-chain" + + +class _ConfigAttributes: + WALLET_PASSWORD = "wallet_password" + WALLET_PATH = "wallet_path" + WALLET_CONFIG = "wallet_config" + LOCAL_WALLET_PATH = "local_wallet_path" + LOCAL_WALLET_CONFIG = "local_config_path" + RPC_ENDPOINT = "rpc_endpoint" + ENDPOINT = "endpoint" + CONTROL_ENDPOINT = "control_endpoint" + UN_LOCODE = "un_locode" diff --git a/pytest_tests/helpers/container.py b/pytest_tests/helpers/container.py index df32c29..0aac211 100644 --- 
a/pytest_tests/helpers/container.py +++ b/pytest_tests/helpers/container.py @@ -2,9 +2,10 @@ from dataclasses import dataclass from typing import Optional import allure +from cluster import Cluster from file_helper import generate_file, get_file_hash from neofs_testlib.shell import Shell -from neofs_verbs import put_object +from neofs_verbs import put_object_to_random_node from storage_object import StorageObjectInfo from wallet import WalletFile @@ -16,9 +17,15 @@ class StorageContainerInfo: class StorageContainer: - def __init__(self, storage_container_info: StorageContainerInfo, shell: Shell) -> None: + def __init__( + self, + storage_container_info: StorageContainerInfo, + shell: Shell, + cluster: Cluster, + ) -> None: self.shell = shell self.storage_container_info = storage_container_info + self.cluster = cluster def get_id(self) -> str: return self.storage_container_info.id @@ -36,12 +43,13 @@ class StorageContainer: wallet_path = self.get_wallet_path() with allure.step(f"Put object with size {size} to container {container_id}"): - object_id = put_object( + object_id = put_object_to_random_node( wallet=wallet_path, path=file_path, cid=container_id, expire_at=expire_at, shell=self.shell, + cluster=self.cluster, ) storage_object = StorageObjectInfo( diff --git a/pytest_tests/helpers/wallet.py b/pytest_tests/helpers/wallet.py index 7e20709..fa8069a 100644 --- a/pytest_tests/helpers/wallet.py +++ b/pytest_tests/helpers/wallet.py @@ -3,6 +3,7 @@ import uuid from dataclasses import dataclass from typing import Optional +from cluster import Cluster from common import FREE_STORAGE, WALLET_PASS from neofs_testlib.shell import Shell from neofs_testlib.utils.wallet import get_last_address_from_wallet, init_wallet @@ -25,9 +26,10 @@ class WalletFile: class WalletFactory: - def __init__(self, wallets_dir: str, shell: Shell) -> None: + def __init__(self, wallets_dir: str, shell: Shell, cluster: Cluster) -> None: self.shell = shell self.wallets_dir = wallets_dir + 
self.cluster = cluster def create_wallet(self, password: str = WALLET_PASS) -> WalletFile: """ @@ -40,17 +42,21 @@ class WalletFactory: """ wallet_path = os.path.join(self.wallets_dir, f"{str(uuid.uuid4())}.json") init_wallet(wallet_path, password) + if not FREE_STORAGE: + main_chain = self.cluster.main_chain_nodes[0] deposit = 30 transfer_gas( shell=self.shell, amount=deposit + 1, + main_chain=main_chain, wallet_to_path=wallet_path, wallet_to_password=password, ) deposit_gas( shell=self.shell, amount=deposit, + main_chain=main_chain, wallet_from_path=wallet_path, wallet_from_password=password, ) diff --git a/pytest_tests/steps/cluster_test_base.py b/pytest_tests/steps/cluster_test_base.py new file mode 100644 index 0000000..c5f0b32 --- /dev/null +++ b/pytest_tests/steps/cluster_test_base.py @@ -0,0 +1,25 @@ +import epoch +import pytest +from cluster import Cluster +from neofs_testlib.shell import Shell + + +# To skip adding every mandatory singleton dependency to EACH test function +class ClusterTestBase: + shell: Shell + cluster: Cluster + + @pytest.fixture(scope="session", autouse=True) + def fill_mandatory_dependencies(self, cluster: Cluster, client_shell: Shell): + ClusterTestBase.shell = client_shell + ClusterTestBase.cluster = cluster + yield + + def tick_epoch(self): + epoch.tick_epoch(self.shell, self.cluster) + + def get_epoch(self): + return epoch.get_epoch(self.shell, self.cluster) + + def ensure_fresh_epoch(self): + return epoch.ensure_fresh_epoch(self.shell, self.cluster) diff --git a/pytest_tests/steps/s3_gate_base.py b/pytest_tests/steps/s3_gate_base.py index 8b147e6..c6a3539 100644 --- a/pytest_tests/steps/s3_gate_base.py +++ b/pytest_tests/steps/s3_gate_base.py @@ -3,24 +3,25 @@ import logging import os import re import uuid -from typing import Optional +from typing import Any, Optional import allure import boto3 import pytest +import s3_gate_bucket +import s3_gate_object import urllib3 +from aws_cli_client import AwsCliClient from botocore.config 
import Config from botocore.exceptions import ClientError from cli_helpers import _cmd_run, _configure_aws_cli, _run_with_passwd -from common import NEOFS_AUTHMATE_EXEC, NEOFS_ENDPOINT, S3_GATE, S3_GATE_WALLET_PATH -from data_formatters import get_wallet_public_key -from neofs_testlib.hosting import Hosting +from cluster import Cluster +from cluster_test_base import ClusterTestBase +from common import NEOFS_AUTHMATE_EXEC from neofs_testlib.shell import Shell +from pytest import FixtureRequest from python_keywords.container import list_containers -from steps import s3_gate_bucket, s3_gate_object -from steps.aws_cli_client import AwsCliClient - # Disable warnings on self-signed certificate which the # boto library produces on requests to S3-gate in dev-env urllib3.disable_warnings() @@ -34,13 +35,15 @@ MAX_REQUEST_ATTEMPTS = 1 RETRY_MODE = "standard" -class TestS3GateBase: - s3_client = None +class TestS3GateBase(ClusterTestBase): + s3_client: Any = None @pytest.fixture(scope="class", autouse=True) @allure.title("[Class/Autouse]: Create S3 client") - def s3_client(self, prepare_wallet_and_deposit, client_shell: Shell, request, hosting: Hosting): - wallet = prepare_wallet_and_deposit + def s3_client( + self, default_wallet, client_shell: Shell, request: FixtureRequest, cluster: Cluster + ) -> Any: + wallet = default_wallet s3_bearer_rules_file = f"{os.getcwd()}/robot/resources/files/s3_bearer_rules.json" policy = None if isinstance(request.param, str) else request.param[1] ( @@ -49,14 +52,20 @@ class TestS3GateBase: access_key_id, secret_access_key, owner_private_key, - ) = init_s3_credentials(wallet, hosting, s3_bearer_rules_file=s3_bearer_rules_file) - containers_list = list_containers(wallet, shell=client_shell) + ) = init_s3_credentials(wallet, cluster, s3_bearer_rules_file=s3_bearer_rules_file) + containers_list = list_containers( + wallet, shell=client_shell, endpoint=self.cluster.default_rpc_endpoint + ) assert cid in containers_list, f"Expected cid {cid} in 
{containers_list}" if "aws cli" in request.param: - client = configure_cli_client(access_key_id, secret_access_key) + client = configure_cli_client( + access_key_id, secret_access_key, cluster.default_s3_gate_endpoint + ) else: - client = configure_boto3_client(access_key_id, secret_access_key) + client = configure_boto3_client( + access_key_id, secret_access_key, cluster.default_s3_gate_endpoint + ) TestS3GateBase.s3_client = client TestS3GateBase.wallet = wallet @@ -93,27 +102,22 @@ class TestS3GateBase: s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket) -def get_wallet_password(hosting: Hosting, s3_service_name: str) -> str: - service_config = hosting.get_service_config(s3_service_name) - return service_config.attributes.get("wallet_password") - - @allure.step("Init S3 Credentials") def init_s3_credentials( wallet_path: str, - hosting: Hosting, + cluster: Cluster, s3_bearer_rules_file: Optional[str] = None, policy: Optional[dict] = None, - s3_service_name: str = "s3-gate01", ): bucket = str(uuid.uuid4()) s3_bearer_rules = s3_bearer_rules_file or "robot/resources/files/s3_bearer_rules.json" - s3_password = get_wallet_password(hosting, s3_service_name) - gate_public_key = get_wallet_public_key(S3_GATE_WALLET_PATH, s3_password) + + s3gate_node = cluster.s3gates[0] + gate_public_key = s3gate_node.get_wallet_public_key() cmd = ( f"{NEOFS_AUTHMATE_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} " f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} " - f"--peer {NEOFS_ENDPOINT} --container-friendly-name {bucket} " + f"--peer {cluster.default_rpc_endpoint} --container-friendly-name {bucket} " f"--bearer-rules {s3_bearer_rules}" ) if policy: @@ -148,9 +152,9 @@ def init_s3_credentials( @allure.step("Configure S3 client (boto3)") -def configure_boto3_client(access_key_id: str, secret_access_key: str): +def configure_boto3_client(access_key_id: str, secret_access_key: str, s3gate_endpoint: str): try: - session = 
boto3.session.Session() + session = boto3.Session() config = Config( retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, @@ -163,7 +167,7 @@ def configure_boto3_client(access_key_id: str, secret_access_key: str): aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, config=config, - endpoint_url=S3_GATE, + endpoint_url=s3gate_endpoint, verify=False, ) return s3_client @@ -175,9 +179,9 @@ def configure_boto3_client(access_key_id: str, secret_access_key: str): @allure.step("Configure S3 client (aws cli)") -def configure_cli_client(access_key_id: str, secret_access_key: str): +def configure_cli_client(access_key_id: str, secret_access_key: str, s3gate_endpoint: str): try: - client = AwsCliClient() + client = AwsCliClient(s3gate_endpoint) _configure_aws_cli("aws configure", access_key_id, secret_access_key) _cmd_run(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}") _cmd_run(f"aws configure set retry_mode {RETRY_MODE}") diff --git a/pytest_tests/steps/s3_gate_object.py b/pytest_tests/steps/s3_gate_object.py index e982b26..1266bce 100644 --- a/pytest_tests/steps/s3_gate_object.py +++ b/pytest_tests/steps/s3_gate_object.py @@ -1,18 +1,16 @@ import logging import os import uuid -from enum import Enum from time import sleep from typing import Optional import allure import pytest import urllib3 +from aws_cli_client import AwsCliClient from botocore.exceptions import ClientError from cli_helpers import log_command_execution - -from steps.aws_cli_client import AwsCliClient -from steps.s3_gate_bucket import S3_SYNC_WAIT_TIME +from s3_gate_bucket import S3_SYNC_WAIT_TIME ########################################################## # Disabling warnings on self-signed certificate which the diff --git a/pytest_tests/steps/session_token.py b/pytest_tests/steps/session_token.py index 33390d8..a05bc4d 100644 --- a/pytest_tests/steps/session_token.py +++ b/pytest_tests/steps/session_token.py @@ -9,7 +9,7 @@ from typing import Any, Optional import allure import 
json_transformers -from common import ASSETS_DIR, NEOFS_CLI_EXEC, NEOFS_ENDPOINT, WALLET_CONFIG +from common import ASSETS_DIR, NEOFS_CLI_EXEC, WALLET_CONFIG from data_formatters import get_wallet_public_key from json_transformers import encode_for_json from neofs_testlib.cli import NeofsCli @@ -180,7 +180,7 @@ def generate_object_session_token( ) -@allure.step("Get signed token for object session") +@allure.step("Get signed token for container session") def get_container_signed_token( owner_wallet: WalletFile, user_wallet: WalletFile, @@ -190,7 +190,7 @@ def get_container_signed_token( lifetime: Optional[Lifetime] = None, ) -> str: """ - Returns signed token file path for static object session + Returns signed token file path for static container session """ session_token_file = generate_container_session_token( owner_wallet=owner_wallet, @@ -235,7 +235,7 @@ def create_session_token( owner: str, wallet_path: str, wallet_password: str, - rpc_endpoint: str = NEOFS_ENDPOINT, + rpc_endpoint: str, ) -> str: """ Create session token for an object. diff --git a/pytest_tests/steps/storage_object.py b/pytest_tests/steps/storage_object.py index 6568ab6..e4693aa 100644 --- a/pytest_tests/steps/storage_object.py +++ b/pytest_tests/steps/storage_object.py @@ -3,6 +3,7 @@ from time import sleep import allure import pytest +from cluster import Cluster from epoch import tick_epoch from grpc_responses import OBJECT_ALREADY_REMOVED from neofs_testlib.shell import Shell @@ -16,7 +17,9 @@ CLEANUP_TIMEOUT = 10 @allure.step("Delete Objects") -def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell) -> None: +def delete_objects( + storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster +) -> None: """ Deletes given storage objects. 
@@ -28,7 +31,11 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell) -> No with allure.step("Delete objects"): for storage_object in storage_objects: storage_object.tombstone = delete_object( - storage_object.wallet_file_path, storage_object.cid, storage_object.oid, shell + storage_object.wallet_file_path, + storage_object.cid, + storage_object.oid, + shell=shell, + endpoint=cluster.default_rpc_endpoint, ) verify_head_tombstone( wallet_path=storage_object.wallet_file_path, @@ -36,9 +43,10 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell) -> No oid_ts=storage_object.tombstone, oid=storage_object.oid, shell=shell, + endpoint=cluster.default_rpc_endpoint, ) - tick_epoch(shell=shell) + tick_epoch(shell, cluster) sleep(CLEANUP_TIMEOUT) with allure.step("Get objects and check errors"): @@ -49,4 +57,5 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell) -> No storage_object.cid, storage_object.oid, shell=shell, + endpoint=cluster.default_rpc_endpoint, ) diff --git a/pytest_tests/testsuites/acl/conftest.py b/pytest_tests/testsuites/acl/conftest.py index 304a3d9..1670ccb 100644 --- a/pytest_tests/testsuites/acl/conftest.py +++ b/pytest_tests/testsuites/acl/conftest.py @@ -1,24 +1,18 @@ import os import uuid from dataclasses import dataclass -from typing import Dict, List, Optional +from typing import Optional import allure import pytest -from common import ( - ASSETS_DIR, - IR_WALLET_CONFIG, - IR_WALLET_PATH, - STORAGE_WALLET_CONFIG, - STORAGE_WALLET_PATH, - WALLET_CONFIG, - WALLET_PASS, -) +from cluster import Cluster +from common import WALLET_CONFIG, WALLET_PASS from file_helper import generate_file +from neofs_testlib.shell import Shell from neofs_testlib.utils.wallet import init_wallet from python_keywords.acl import EACLRole from python_keywords.container import create_container -from python_keywords.neofs_verbs import put_object +from python_keywords.neofs_verbs import 
put_object_to_random_node from wellknown_acl import PUBLIC_ACL OBJECT_COUNT = 5 @@ -32,35 +26,42 @@ class Wallet: @dataclass class Wallets: - wallets: Dict[EACLRole, List[Wallet]] + wallets: dict[EACLRole, list[Wallet]] def get_wallet(self, role: EACLRole = EACLRole.USER) -> Wallet: return self.wallets[role][0] - def get_wallets_list(self, role: EACLRole = EACLRole.USER) -> List[Wallet]: + def get_wallets_list(self, role: EACLRole = EACLRole.USER) -> list[Wallet]: return self.wallets[role] @pytest.fixture(scope="module") -def wallets(prepare_wallet_and_deposit): +def wallets(default_wallet, temp_directory, cluster: Cluster) -> Wallets: other_wallets_paths = [ - os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json") for _ in range(2) + os.path.join(temp_directory, f"{str(uuid.uuid4())}.json") for _ in range(2) ] for other_wallet_path in other_wallets_paths: init_wallet(other_wallet_path, WALLET_PASS) + ir_node = cluster.ir_nodes[0] + storage_node = cluster.storage_nodes[0] + + ir_wallet_path = ir_node.get_wallet_path() + ir_wallet_config = ir_node.get_wallet_config_path() + + storage_wallet_path = storage_node.get_wallet_path() + storage_wallet_config = storage_node.get_wallet_config_path() + yield Wallets( wallets={ - EACLRole.USER: [ - Wallet(wallet_path=prepare_wallet_and_deposit, config_path=WALLET_CONFIG) - ], + EACLRole.USER: [Wallet(wallet_path=default_wallet, config_path=WALLET_CONFIG)], EACLRole.OTHERS: [ Wallet(wallet_path=other_wallet_path, config_path=WALLET_CONFIG) for other_wallet_path in other_wallets_paths ], EACLRole.SYSTEM: [ - Wallet(wallet_path=IR_WALLET_PATH, config_path=IR_WALLET_CONFIG), - Wallet(wallet_path=STORAGE_WALLET_PATH, config_path=STORAGE_WALLET_CONFIG), + Wallet(wallet_path=ir_wallet_path, config_path=ir_wallet_config), + Wallet(wallet_path=storage_wallet_path, config_path=storage_wallet_config), ], } ) @@ -72,19 +73,27 @@ def file_path(): @pytest.fixture(scope="function") -def eacl_container_with_objects(wallets, 
client_shell, file_path): +def eacl_container_with_objects( + wallets: Wallets, client_shell: Shell, cluster: Cluster, file_path: str +): user_wallet = wallets.get_wallet() with allure.step("Create eACL public container"): - cid = create_container(user_wallet.wallet_path, basic_acl=PUBLIC_ACL, shell=client_shell) + cid = create_container( + user_wallet.wallet_path, + basic_acl=PUBLIC_ACL, + shell=client_shell, + endpoint=cluster.default_rpc_endpoint, + ) with allure.step("Add test objects to container"): objects_oids = [ - put_object( + put_object_to_random_node( user_wallet.wallet_path, file_path, cid, attributes={"key1": "val1", "key": val, "key2": "abc"}, shell=client_shell, + cluster=cluster, ) for val in range(OBJECT_COUNT) ] diff --git a/pytest_tests/testsuites/acl/storage_group/test_storagegroup.py b/pytest_tests/testsuites/acl/storage_group/test_storagegroup.py index 467ce9c..7f2a9a1 100644 --- a/pytest_tests/testsuites/acl/storage_group/test_storagegroup.py +++ b/pytest_tests/testsuites/acl/storage_group/test_storagegroup.py @@ -5,20 +5,10 @@ from typing import Optional import allure import pytest -from common import ( - ASSETS_DIR, - COMPLEX_OBJ_SIZE, - FREE_STORAGE, - IR_WALLET_CONFIG, - IR_WALLET_PASS, - IR_WALLET_PATH, - SIMPLE_OBJ_SIZE, - WALLET_PASS, -) -from epoch import tick_epoch +from cluster_test_base import ClusterTestBase +from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, FREE_STORAGE, SIMPLE_OBJ_SIZE, WALLET_PASS from file_helper import generate_file from grpc_responses import OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND -from neofs_testlib.shell import Shell from neofs_testlib.utils.wallet import init_wallet from python_keywords.acl import ( EACLAccess, @@ -30,7 +20,7 @@ from python_keywords.acl import ( set_eacl, ) from python_keywords.container import create_container -from python_keywords.neofs_verbs import put_object +from python_keywords.neofs_verbs import put_object_to_random_node from python_keywords.payment_neogo import deposit_gas, 
transfer_gas from python_keywords.storage_group import ( delete_storagegroup, @@ -53,53 +43,59 @@ deposit = 30 @pytest.mark.sanity @pytest.mark.acl @pytest.mark.storage_group -class TestStorageGroup: +class TestStorageGroup(ClusterTestBase): @pytest.fixture(autouse=True) - def prepare_two_wallets(self, prepare_wallet_and_deposit, client_shell): - self.main_wallet = prepare_wallet_and_deposit + def prepare_two_wallets(self, default_wallet): + self.main_wallet = default_wallet self.other_wallet = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json") init_wallet(self.other_wallet, WALLET_PASS) if not FREE_STORAGE: + main_chain = self.cluster.main_chain_nodes[0] deposit = 30 transfer_gas( - shell=client_shell, + shell=self.shell, amount=deposit + 1, + main_chain=main_chain, wallet_to_path=self.other_wallet, wallet_to_password=WALLET_PASS, ) deposit_gas( - shell=client_shell, + shell=self.shell, amount=deposit, + main_chain=main_chain, wallet_from_path=self.other_wallet, wallet_from_password=WALLET_PASS, ) @allure.title("Test Storage Group in Private Container") - def test_storagegroup_basic_private_container(self, client_shell, object_size): - cid = create_container(self.main_wallet, shell=client_shell) + def test_storagegroup_basic_private_container(self, object_size): + cid = create_container( + self.main_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) file_path = generate_file(object_size) - oid = put_object(self.main_wallet, file_path, cid, shell=client_shell) + oid = put_object_to_random_node(self.main_wallet, file_path, cid, self.shell, self.cluster) objects = [oid] storage_group = put_storagegroup( - shell=client_shell, wallet=self.main_wallet, cid=cid, objects=objects + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wallet=self.main_wallet, + cid=cid, + objects=objects, ) self.expect_success_for_storagegroup_operations( - shell=client_shell, wallet=self.main_wallet, cid=cid, obj_list=objects, 
object_size=object_size, ) self.expect_failure_for_storagegroup_operations( - shell=client_shell, wallet=self.other_wallet, cid=cid, obj_list=objects, gid=storage_group, ) self.storagegroup_operations_by_system_ro_container( - shell=client_shell, wallet=self.main_wallet, cid=cid, obj_list=objects, @@ -107,27 +103,31 @@ class TestStorageGroup: ) @allure.title("Test Storage Group in Public Container") - def test_storagegroup_basic_public_container(self, client_shell, object_size): - cid = create_container(self.main_wallet, basic_acl="public-read-write", shell=client_shell) + def test_storagegroup_basic_public_container(self, object_size): + cid = create_container( + self.main_wallet, + basic_acl="public-read-write", + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + ) file_path = generate_file(object_size) - oid = put_object(self.main_wallet, file_path, cid, shell=client_shell) + oid = put_object_to_random_node( + self.main_wallet, file_path, cid, shell=self.shell, cluster=self.cluster + ) objects = [oid] self.expect_success_for_storagegroup_operations( - shell=client_shell, wallet=self.main_wallet, cid=cid, obj_list=objects, object_size=object_size, ) self.expect_success_for_storagegroup_operations( - shell=client_shell, wallet=self.other_wallet, cid=cid, obj_list=objects, object_size=object_size, ) self.storagegroup_operations_by_system_ro_container( - shell=client_shell, wallet=self.main_wallet, cid=cid, obj_list=objects, @@ -135,20 +135,25 @@ class TestStorageGroup: ) @allure.title("Test Storage Group in Read-Only Container") - def test_storagegroup_basic_ro_container(self, client_shell, object_size): - cid = create_container(self.main_wallet, basic_acl="public-read", shell=client_shell) + def test_storagegroup_basic_ro_container(self, object_size): + cid = create_container( + self.main_wallet, + basic_acl="public-read", + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + ) file_path = generate_file(object_size) - oid = 
put_object(self.main_wallet, file_path, cid, shell=client_shell) + oid = put_object_to_random_node( + self.main_wallet, file_path, cid, shell=self.shell, cluster=self.cluster + ) objects = [oid] self.expect_success_for_storagegroup_operations( - shell=client_shell, wallet=self.main_wallet, cid=cid, obj_list=objects, object_size=object_size, ) self.storagegroup_operations_by_other_ro_container( - shell=client_shell, owner_wallet=self.main_wallet, other_wallet=self.other_wallet, cid=cid, @@ -156,7 +161,6 @@ class TestStorageGroup: object_size=object_size, ) self.storagegroup_operations_by_system_ro_container( - shell=client_shell, wallet=self.main_wallet, cid=cid, obj_list=objects, @@ -164,21 +168,27 @@ class TestStorageGroup: ) @allure.title("Test Storage Group with Bearer Allow") - def test_storagegroup_bearer_allow(self, client_shell, object_size): + def test_storagegroup_bearer_allow(self, object_size): cid = create_container( - self.main_wallet, basic_acl="eacl-public-read-write", shell=client_shell + self.main_wallet, + basic_acl="eacl-public-read-write", + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) file_path = generate_file(object_size) - oid = put_object(self.main_wallet, file_path, cid, shell=client_shell) + oid = put_object_to_random_node( + self.main_wallet, file_path, cid, shell=self.shell, cluster=self.cluster + ) objects = [oid] self.expect_success_for_storagegroup_operations( - shell=client_shell, wallet=self.main_wallet, cid=cid, obj_list=objects, object_size=object_size, ) - storage_group = put_storagegroup(client_shell, self.main_wallet, cid, objects) + storage_group = put_storagegroup( + self.shell, self.cluster.default_rpc_endpoint, self.main_wallet, cid, objects + ) eacl_deny = [ EACLRule(access=EACLAccess.DENY, role=role, operation=op) for op in EACLOperation @@ -187,11 +197,12 @@ class TestStorageGroup: set_eacl( self.main_wallet, cid, - create_eacl(cid, eacl_deny, shell=client_shell), - shell=client_shell, + 
create_eacl(cid, eacl_deny, shell=self.shell), + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) self.expect_failure_for_storagegroup_operations( - client_shell, self.main_wallet, cid, objects, storage_group + self.main_wallet, cid, objects, storage_group ) bearer_file = form_bearertoken_file( self.main_wallet, @@ -200,10 +211,10 @@ class TestStorageGroup: EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.USER) for op in EACLOperation ], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) self.expect_success_for_storagegroup_operations( - shell=client_shell, wallet=self.main_wallet, cid=cid, obj_list=objects, @@ -212,24 +223,38 @@ class TestStorageGroup: ) @allure.title("Test to check Storage Group lifetime") - def test_storagegroup_lifetime(self, client_shell, object_size): - cid = create_container(self.main_wallet, shell=client_shell) + def test_storagegroup_lifetime(self, object_size): + cid = create_container( + self.main_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) file_path = generate_file(object_size) - oid = put_object(self.main_wallet, file_path, cid, shell=client_shell) + oid = put_object_to_random_node( + self.main_wallet, file_path, cid, shell=self.shell, cluster=self.cluster + ) objects = [oid] - storage_group = put_storagegroup(client_shell, self.main_wallet, cid, objects, lifetime=1) + storage_group = put_storagegroup( + self.shell, + self.cluster.default_rpc_endpoint, + self.main_wallet, + cid, + objects, + lifetime=1, + ) with allure.step("Tick two epochs"): for _ in range(2): - tick_epoch(shell=client_shell) + self.tick_epoch() with pytest.raises(Exception, match=OBJECT_NOT_FOUND): get_storagegroup( - shell=client_shell, wallet=self.main_wallet, cid=cid, gid=storage_group + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wallet=self.main_wallet, + cid=cid, + gid=storage_group, ) - @staticmethod @allure.step("Run Storage Group 
Operations And Expect Success") def expect_success_for_storagegroup_operations( - shell: Shell, + self, wallet: str, cid: str, obj_list: list, @@ -241,12 +266,20 @@ class TestStorageGroup: Put, List, Get and Delete the Storage Group which contains the Object. """ - storage_group = put_storagegroup(shell, wallet, cid, obj_list, bearer) + storage_group = put_storagegroup( + self.shell, self.cluster.default_rpc_endpoint, wallet, cid, obj_list, bearer + ) verify_list_storage_group( - shell=shell, wallet=wallet, cid=cid, gid=storage_group, bearer=bearer + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wallet=wallet, + cid=cid, + gid=storage_group, + bearer=bearer, ) verify_get_storage_group( - shell=shell, + shell=self.shell, + cluster=self.cluster, wallet=wallet, cid=cid, gid=storage_group, @@ -254,12 +287,18 @@ class TestStorageGroup: object_size=object_size, bearer=bearer, ) - delete_storagegroup(shell=shell, wallet=wallet, cid=cid, gid=storage_group, bearer=bearer) + delete_storagegroup( + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wallet=wallet, + cid=cid, + gid=storage_group, + bearer=bearer, + ) - @staticmethod @allure.step("Run Storage Group Operations And Expect Failure") def expect_failure_for_storagegroup_operations( - shell: Shell, wallet: str, cid: str, obj_list: list, gid: str + self, wallet: str, cid: str, obj_list: list, gid: str ): """ This func verifies if the Object's owner isn't allowed to @@ -267,30 +306,64 @@ class TestStorageGroup: the Object. 
""" with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): - put_storagegroup(shell=shell, wallet=wallet, cid=cid, objects=obj_list) + put_storagegroup( + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wallet=wallet, + cid=cid, + objects=obj_list, + ) with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): - list_storagegroup(shell=shell, wallet=wallet, cid=cid) + list_storagegroup( + shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, wallet=wallet, cid=cid + ) with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): - get_storagegroup(shell=shell, wallet=wallet, cid=cid, gid=gid) + get_storagegroup( + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wallet=wallet, + cid=cid, + gid=gid, + ) with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): - delete_storagegroup(shell=shell, wallet=wallet, cid=cid, gid=gid) + delete_storagegroup( + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wallet=wallet, + cid=cid, + gid=gid, + ) - @staticmethod @allure.step("Run Storage Group Operations On Other's Behalf In RO Container") def storagegroup_operations_by_other_ro_container( - shell: Shell, + self, owner_wallet: str, other_wallet: str, cid: str, obj_list: list, object_size: int, ): - storage_group = put_storagegroup(shell, owner_wallet, cid, obj_list) + storage_group = put_storagegroup( + self.shell, self.cluster.default_rpc_endpoint, owner_wallet, cid, obj_list + ) with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): - put_storagegroup(shell=shell, wallet=other_wallet, cid=cid, objects=obj_list) - verify_list_storage_group(shell=shell, wallet=other_wallet, cid=cid, gid=storage_group) + put_storagegroup( + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wallet=other_wallet, + cid=cid, + objects=obj_list, + ) + verify_list_storage_group( + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wallet=other_wallet, + cid=cid, + gid=storage_group, + ) 
verify_get_storage_group( - shell=shell, + shell=self.shell, + cluster=self.cluster, wallet=other_wallet, cid=cid, gid=storage_group, @@ -298,56 +371,81 @@ class TestStorageGroup: object_size=object_size, ) with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): - delete_storagegroup(shell=shell, wallet=other_wallet, cid=cid, gid=storage_group) + delete_storagegroup( + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wallet=other_wallet, + cid=cid, + gid=storage_group, + ) - @staticmethod @allure.step("Run Storage Group Operations On Systems's Behalf In RO Container") def storagegroup_operations_by_system_ro_container( - shell: Shell, wallet: str, cid: str, obj_list: list, object_size: int + self, wallet: str, cid: str, obj_list: list, object_size: int ): """ In this func we create a Storage Group on Inner Ring's key behalf and include an Object created on behalf of some user. We expect that System key is granted to make all operations except PUT and DELETE. """ + ir_node = self.cluster.ir_nodes[0] + ir_wallet_path = ir_node.get_wallet_path() + ir_wallet_password = ir_node.get_wallet_password() + ir_wallet_config = ir_node.get_wallet_config_path() + if not FREE_STORAGE: + main_chain = self.cluster.main_chain_nodes[0] deposit = 30 transfer_gas( - shell=shell, + shell=self.shell, amount=deposit + 1, - wallet_to_path=IR_WALLET_PATH, - wallet_to_password=IR_WALLET_PASS, + main_chain=main_chain, + wallet_to_path=ir_wallet_path, + wallet_to_password=ir_wallet_password, ) deposit_gas( - shell=shell, + shell=self.shell, amount=deposit, - wallet_from_path=IR_WALLET_PATH, - wallet_from_password=IR_WALLET_PASS, + main_chain=main_chain, + wallet_from_path=ir_wallet_path, + wallet_from_password=ir_wallet_password, ) - storage_group = put_storagegroup(shell, wallet, cid, obj_list) + storage_group = put_storagegroup( + self.shell, self.cluster.default_rpc_endpoint, wallet, cid, obj_list + ) with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): - 
put_storagegroup(shell, IR_WALLET_PATH, cid, obj_list, wallet_config=IR_WALLET_CONFIG) + put_storagegroup( + self.shell, + self.cluster.default_rpc_endpoint, + ir_wallet_path, + cid, + obj_list, + wallet_config=ir_wallet_config, + ) verify_list_storage_group( - shell=shell, - wallet=IR_WALLET_PATH, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wallet=ir_wallet_path, cid=cid, gid=storage_group, - wallet_config=IR_WALLET_CONFIG, + wallet_config=ir_wallet_config, ) verify_get_storage_group( - shell=shell, - wallet=IR_WALLET_PATH, + shell=self.shell, + cluster=self.cluster, + wallet=ir_wallet_path, cid=cid, gid=storage_group, obj_list=obj_list, object_size=object_size, - wallet_config=IR_WALLET_CONFIG, + wallet_config=ir_wallet_config, ) with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): delete_storagegroup( - shell=shell, - wallet=IR_WALLET_PATH, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wallet=ir_wallet_path, cid=cid, gid=storage_group, - wallet_config=IR_WALLET_CONFIG, + wallet_config=ir_wallet_config, ) diff --git a/pytest_tests/testsuites/acl/test_acl.py b/pytest_tests/testsuites/acl/test_acl.py index 541457e..dd61cb3 100644 --- a/pytest_tests/testsuites/acl/test_acl.py +++ b/pytest_tests/testsuites/acl/test_acl.py @@ -1,5 +1,6 @@ import allure import pytest +from cluster_test_base import ClusterTestBase from python_keywords.acl import EACLRole from python_keywords.container import create_container from python_keywords.container_access import ( @@ -7,7 +8,7 @@ from python_keywords.container_access import ( check_no_access_to_container, check_read_only_container, ) -from python_keywords.neofs_verbs import put_object +from python_keywords.neofs_verbs import put_object_to_random_node from wellknown_acl import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F @@ -15,13 +16,16 @@ from wellknown_acl import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F @pytest.mark.smoke @pytest.mark.acl @pytest.mark.acl_basic -class 
TestACLBasic: +class TestACLBasic(ClusterTestBase): @pytest.fixture(scope="function") - def public_container(self, client_shell, wallets): + def public_container(self, wallets): user_wallet = wallets.get_wallet() with allure.step("Create public container"): cid_public = create_container( - user_wallet.wallet_path, basic_acl=PUBLIC_ACL_F, shell=client_shell + user_wallet.wallet_path, + basic_acl=PUBLIC_ACL_F, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) yield cid_public @@ -30,11 +34,14 @@ class TestACLBasic: # delete_container(user_wallet.wallet_path, cid_public) @pytest.fixture(scope="function") - def private_container(self, client_shell, wallets): + def private_container(self, wallets): user_wallet = wallets.get_wallet() with allure.step("Create private container"): cid_private = create_container( - user_wallet.wallet_path, basic_acl=PRIVATE_ACL_F, shell=client_shell + user_wallet.wallet_path, + basic_acl=PRIVATE_ACL_F, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) yield cid_private @@ -43,11 +50,14 @@ class TestACLBasic: # delete_container(user_wallet.wallet_path, cid_private) @pytest.fixture(scope="function") - def read_only_container(self, client_shell, wallets): + def read_only_container(self, wallets): user_wallet = wallets.get_wallet() with allure.step("Create public readonly container"): cid_read_only = create_container( - user_wallet.wallet_path, basic_acl=READONLY_ACL_F, shell=client_shell + user_wallet.wallet_path, + basic_acl=READONLY_ACL_F, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) yield cid_read_only @@ -56,7 +66,7 @@ class TestACLBasic: # delete_container(user_wallet.wallet_path, cid_read_only) @allure.title("Test basic ACL on public container") - def test_basic_acl_public(self, wallets, client_shell, public_container, file_path): + def test_basic_acl_public(self, wallets, public_container, file_path): """ Test basic ACL set during public container creation. 
""" @@ -67,30 +77,42 @@ class TestACLBasic: with allure.step("Add test objects to container"): # We create new objects for each wallet because check_full_access_to_container # deletes the object - owner_object_oid = put_object( + owner_object_oid = put_object_to_random_node( user_wallet.wallet_path, file_path, cid, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, attributes={"created": "owner"}, ) - other_object_oid = put_object( + other_object_oid = put_object_to_random_node( other_wallet.wallet_path, file_path, cid, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, attributes={"created": "other"}, ) with allure.step(f"Check {desc} has full access to public container"): check_full_access_to_container( - wallet.wallet_path, cid, owner_object_oid, file_path, shell=client_shell + wallet.wallet_path, + cid, + owner_object_oid, + file_path, + shell=self.shell, + cluster=self.cluster, ) check_full_access_to_container( - wallet.wallet_path, cid, other_object_oid, file_path, shell=client_shell + wallet.wallet_path, + cid, + other_object_oid, + file_path, + shell=self.shell, + cluster=self.cluster, ) @allure.title("Test basic ACL on private container") - def test_basic_acl_private(self, wallets, client_shell, private_container, file_path): + def test_basic_acl_private(self, wallets, private_container, file_path): """ Test basic ACL set during private container creation. 
""" @@ -98,19 +120,29 @@ class TestACLBasic: other_wallet = wallets.get_wallet(role=EACLRole.OTHERS) cid = private_container with allure.step("Add test objects to container"): - owner_object_oid = put_object( - user_wallet.wallet_path, file_path, cid, shell=client_shell + owner_object_oid = put_object_to_random_node( + user_wallet.wallet_path, file_path, cid, shell=self.shell, cluster=self.cluster ) with allure.step("Check only owner has full access to private container"): with allure.step("Check no one except owner has access to operations with container"): check_no_access_to_container( - other_wallet.wallet_path, cid, owner_object_oid, file_path, shell=client_shell + other_wallet.wallet_path, + cid, + owner_object_oid, + file_path, + shell=self.shell, + cluster=self.cluster, ) with allure.step("Check owner has full access to private container"): check_full_access_to_container( - user_wallet.wallet_path, cid, owner_object_oid, file_path, shell=client_shell + user_wallet.wallet_path, + cid, + owner_object_oid, + file_path, + shell=self.shell, + cluster=self.cluster, ) @allure.title("Test basic ACL on readonly container") @@ -123,14 +155,26 @@ class TestACLBasic: cid = read_only_container with allure.step("Add test objects to container"): - object_oid = put_object(user_wallet.wallet_path, file_path, cid, shell=client_shell) + object_oid = put_object_to_random_node( + user_wallet.wallet_path, file_path, cid, shell=client_shell, cluster=self.cluster + ) with allure.step("Check other has read-only access to operations with container"): check_read_only_container( - other_wallet.wallet_path, cid, object_oid, file_path, shell=client_shell + other_wallet.wallet_path, + cid, + object_oid, + file_path, + shell=client_shell, + cluster=self.cluster, ) with allure.step("Check owner has full access to public container"): check_full_access_to_container( - user_wallet.wallet_path, cid, object_oid, file_path, shell=client_shell + user_wallet.wallet_path, + cid, + object_oid, + 
file_path, + shell=client_shell, + cluster=self.cluster, ) diff --git a/pytest_tests/testsuites/acl/test_bearer.py b/pytest_tests/testsuites/acl/test_bearer.py index 42a50e8..262ce96 100644 --- a/pytest_tests/testsuites/acl/test_bearer.py +++ b/pytest_tests/testsuites/acl/test_bearer.py @@ -1,5 +1,6 @@ import allure import pytest +from cluster_test_base import ClusterTestBase from python_keywords.acl import ( EACLAccess, EACLOperation, @@ -20,15 +21,14 @@ from python_keywords.container_access import ( @pytest.mark.sanity @pytest.mark.acl @pytest.mark.acl_bearer -class TestACLBearer: +class TestACLBearer(ClusterTestBase): @pytest.mark.parametrize("role", [EACLRole.USER, EACLRole.OTHERS]) - def test_bearer_token_operations( - self, wallets, client_shell, eacl_container_with_objects, role - ): + def test_bearer_token_operations(self, wallets, eacl_container_with_objects, role): allure.dynamic.title(f"Testcase to validate NeoFS operations with {role.value} BearerToken") cid, objects_oids, file_path = eacl_container_with_objects user_wallet = wallets.get_wallet() deny_wallet = wallets.get_wallet(role) + endpoint = self.cluster.default_rpc_endpoint with allure.step(f"Check {role.value} has full access to container without bearer token"): check_full_access_to_container( @@ -37,15 +37,16 @@ class TestACLBearer: objects_oids.pop(), file_path, wallet_config=deny_wallet.config_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) with allure.step(f"Set deny all operations for {role.value} via eACL"): eacl = [ EACLRule(access=EACLAccess.DENY, role=role, operation=op) for op in EACLOperation ] - eacl_file = create_eacl(cid, eacl, shell=client_shell) - set_eacl(user_wallet.wallet_path, cid, eacl_file, shell=client_shell) + eacl_file = create_eacl(cid, eacl, shell=self.shell) + set_eacl(user_wallet.wallet_path, cid, eacl_file, shell=self.shell, endpoint=endpoint) wait_for_cache_expired() with allure.step(f"Create bearer token for {role.value} with all 
operations allowed"): @@ -56,7 +57,8 @@ class TestACLBearer: EACLRule(operation=op, access=EACLAccess.ALLOW, role=role) for op in EACLOperation ], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) with allure.step( @@ -68,7 +70,8 @@ class TestACLBearer: objects_oids.pop(), file_path, wallet_config=deny_wallet.config_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) with allure.step( @@ -81,15 +84,16 @@ class TestACLBearer: file_path, bearer=bearer, wallet_config=deny_wallet.config_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) with allure.step(f"Set allow all operations for {role.value} via eACL"): eacl = [ EACLRule(access=EACLAccess.ALLOW, role=role, operation=op) for op in EACLOperation ] - eacl_file = create_eacl(cid, eacl, shell=client_shell) - set_eacl(user_wallet.wallet_path, cid, eacl_file, shell=client_shell) + eacl_file = create_eacl(cid, eacl, shell=self.shell) + set_eacl(user_wallet.wallet_path, cid, eacl_file, shell=self.shell, endpoint=endpoint) wait_for_cache_expired() with allure.step( @@ -101,13 +105,13 @@ class TestACLBearer: objects_oids.pop(), file_path, wallet_config=deny_wallet.config_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) @allure.title("BearerToken Operations for compound Operations") - def test_bearer_token_compound_operations( - self, wallets, client_shell, eacl_container_with_objects - ): + def test_bearer_token_compound_operations(self, wallets, eacl_container_with_objects): + endpoint = self.cluster.default_rpc_endpoint cid, objects_oids, file_path = eacl_container_with_objects user_wallet = wallets.get_wallet() other_wallet = wallets.get_wallet(role=EACLRole.OTHERS) @@ -153,8 +157,9 @@ class TestACLBearer: set_eacl( user_wallet.wallet_path, cid, - eacl_table_path=create_eacl(cid, eacl_deny, shell=client_shell), - shell=client_shell, + eacl_table_path=create_eacl(cid, eacl_deny, shell=self.shell), + 
shell=self.shell, + endpoint=endpoint, ) wait_for_cache_expired() @@ -166,7 +171,8 @@ class TestACLBearer: file_path, deny_operations=deny_map[EACLRole.USER], wallet_config=user_wallet.config_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) check_custom_access_to_container( other_wallet.wallet_path, @@ -175,7 +181,8 @@ class TestACLBearer: file_path, deny_operations=deny_map[EACLRole.OTHERS], wallet_config=other_wallet.config_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) with allure.step("Check rule consistency using bearer token"): @@ -186,7 +193,8 @@ class TestACLBearer: EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.USER) for op in bearer_map[EACLRole.USER] ], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) bearer_other = form_bearertoken_file( @@ -196,7 +204,8 @@ class TestACLBearer: EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS) for op in bearer_map[EACLRole.OTHERS] ], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) check_custom_access_to_container( @@ -207,7 +216,8 @@ class TestACLBearer: deny_operations=deny_map_with_bearer[EACLRole.USER], bearer=bearer_user, wallet_config=user_wallet.config_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) check_custom_access_to_container( other_wallet.wallet_path, @@ -217,5 +227,6 @@ class TestACLBearer: deny_operations=deny_map_with_bearer[EACLRole.OTHERS], bearer=bearer_other, wallet_config=other_wallet.config_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) diff --git a/pytest_tests/testsuites/acl/test_eacl.py b/pytest_tests/testsuites/acl/test_eacl.py index c2262f4..3ab5655 100644 --- a/pytest_tests/testsuites/acl/test_eacl.py +++ b/pytest_tests/testsuites/acl/test_eacl.py @@ -1,8 +1,7 @@ import allure import pytest -from common import NEOFS_NETMAP_DICT -from failover_utils import 
wait_object_replication_on_nodes -from neofs_testlib.hosting import Hosting +from cluster_test_base import ClusterTestBase +from failover_utils import wait_object_replication from neofs_testlib.shell import Shell from python_keywords.acl import ( EACLAccess, @@ -18,7 +17,7 @@ from python_keywords.container_access import ( check_full_access_to_container, check_no_access_to_container, ) -from python_keywords.neofs_verbs import put_object +from python_keywords.neofs_verbs import put_object_to_random_node from python_keywords.node_management import drop_object from python_keywords.object_access import ( can_delete_object, @@ -35,36 +34,39 @@ from wellknown_acl import PUBLIC_ACL @pytest.mark.sanity @pytest.mark.acl @pytest.mark.acl_extended -class TestEACLContainer: - NODE_COUNT = len(NEOFS_NETMAP_DICT.keys()) - +class TestEACLContainer(ClusterTestBase): @pytest.fixture(scope="function") - def eacl_full_placement_container_with_object( - self, wallets, file_path, client_shell: Shell - ) -> str: + def eacl_full_placement_container_with_object(self, wallets, file_path) -> str: user_wallet = wallets.get_wallet() + storage_nodes = self.cluster.storage_nodes + node_count = len(storage_nodes) with allure.step("Create eACL public container with full placement rule"): - full_placement_rule = ( - f"REP {self.NODE_COUNT} IN X CBF 1 SELECT {self.NODE_COUNT} FROM * AS X" - ) + full_placement_rule = f"REP {node_count} IN X CBF 1 SELECT {node_count} FROM * AS X" cid = create_container( wallet=user_wallet.wallet_path, rule=full_placement_rule, basic_acl=PUBLIC_ACL, - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) with allure.step("Add test object to container"): - oid = put_object(user_wallet.wallet_path, file_path, cid, shell=client_shell) - wait_object_replication_on_nodes( - user_wallet.wallet_path, cid, oid, self.NODE_COUNT, shell=client_shell + oid = put_object_to_random_node( + user_wallet.wallet_path, file_path, cid, shell=self.shell, 
cluster=self.cluster + ) + wait_object_replication( + cid, + oid, + node_count, + shell=self.shell, + nodes=storage_nodes, ) yield cid, oid, file_path @pytest.mark.parametrize("deny_role", [EACLRole.USER, EACLRole.OTHERS]) def test_extended_acl_deny_all_operations( - self, wallets, client_shell, eacl_container_with_objects, deny_role + self, wallets, eacl_container_with_objects, deny_role ): user_wallet = wallets.get_wallet() other_wallet = wallets.get_wallet(EACLRole.OTHERS) @@ -83,8 +85,9 @@ class TestEACLContainer: set_eacl( user_wallet.wallet_path, cid, - create_eacl(cid, eacl_deny, shell=client_shell), - shell=client_shell, + create_eacl(cid, eacl_deny, shell=self.shell), + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) wait_for_cache_expired() @@ -97,7 +100,8 @@ class TestEACLContainer: cid, object_oids[0], file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) with allure.step( @@ -108,7 +112,8 @@ class TestEACLContainer: cid, object_oids.pop(), file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) with allure.step(f"Allow all operations for {deny_role_str} via eACL"): @@ -119,30 +124,33 @@ class TestEACLContainer: set_eacl( user_wallet.wallet_path, cid, - create_eacl(cid, eacl_deny, shell=client_shell), - shell=client_shell, + create_eacl(cid, eacl_deny, shell=self.shell), + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) wait_for_cache_expired() - with allure.step(f"Check all have full access to eACL public container"): + with allure.step("Check all have full access to eACL public container"): check_full_access_to_container( user_wallet.wallet_path, cid, object_oids.pop(), file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) check_full_access_to_container( other_wallet.wallet_path, cid, object_oids.pop(), file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) @allure.title("Testcase to allow NeoFS operations for only one 
other pubkey.") def test_extended_acl_deny_all_operations_exclude_pubkey( - self, wallets, client_shell, eacl_container_with_objects + self, wallets, eacl_container_with_objects ): user_wallet = wallets.get_wallet() other_wallet, other_wallet_allow = wallets.get_wallets_list(EACLRole.OTHERS)[0:2] @@ -164,8 +172,9 @@ class TestEACLContainer: set_eacl( user_wallet.wallet_path, cid, - create_eacl(cid, eacl, shell=client_shell), - shell=client_shell, + create_eacl(cid, eacl, shell=self.shell), + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) wait_for_cache_expired() @@ -176,7 +185,8 @@ class TestEACLContainer: cid, object_oids[0], file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) with allure.step("Check owner has full access to public container"): @@ -185,7 +195,8 @@ class TestEACLContainer: cid, object_oids.pop(), file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) with allure.step("Check allowed other has full access to public container"): @@ -194,20 +205,20 @@ class TestEACLContainer: cid, object_oids.pop(), file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) @allure.title("Testcase to validate NeoFS replication with eACL deny rules.") def test_extended_acl_deny_replication( self, wallets, - client_shell, - hosting: Hosting, eacl_full_placement_container_with_object, - file_path, ): user_wallet = wallets.get_wallet() cid, oid, file_path = eacl_full_placement_container_with_object + storage_nodes = self.cluster.storage_nodes + storage_node = self.cluster.storage_nodes[0] with allure.step("Deny all operations for user via eACL"): eacl_deny = [ @@ -221,40 +232,48 @@ class TestEACLContainer: set_eacl( user_wallet.wallet_path, cid, - create_eacl(cid, eacl_deny, shell=client_shell), - shell=client_shell, + create_eacl(cid, eacl_deny, shell=self.shell), + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) wait_for_cache_expired() with allure.step("Drop 
object to check replication"): - drop_object(hosting, node_name=[*NEOFS_NETMAP_DICT][0], cid=cid, oid=oid) + drop_object(storage_node, cid=cid, oid=oid) - storage_wallet_path = NEOFS_NETMAP_DICT[[*NEOFS_NETMAP_DICT][0]]["wallet_path"] + storage_wallet_path = storage_node.get_wallet_path() with allure.step("Wait for dropped object replicated"): - wait_object_replication_on_nodes( - storage_wallet_path, cid, oid, self.NODE_COUNT, shell=client_shell + wait_object_replication( + cid, + oid, + len(storage_nodes), + self.shell, + storage_nodes, ) @allure.title("Testcase to validate NeoFS system operations with extended ACL") - def test_extended_actions_system(self, wallets, client_shell, eacl_container_with_objects): + def test_extended_actions_system(self, wallets, eacl_container_with_objects): user_wallet = wallets.get_wallet() ir_wallet, storage_wallet = wallets.get_wallets_list(role=EACLRole.SYSTEM)[:2] cid, object_oids, file_path = eacl_container_with_objects + endpoint = self.cluster.default_rpc_endpoint with allure.step("Check IR and STORAGE rules compliance"): assert not can_put_object( ir_wallet.wallet_path, cid, file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, wallet_config=ir_wallet.config_path, ) assert can_put_object( storage_wallet.wallet_path, cid, file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, wallet_config=storage_wallet.config_path, ) @@ -263,7 +282,8 @@ class TestEACLContainer: cid, object_oids[0], file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, wallet_config=ir_wallet.config_path, ) assert can_get_object( @@ -271,7 +291,8 @@ class TestEACLContainer: cid, object_oids[0], file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, wallet_config=storage_wallet.config_path, ) @@ -279,28 +300,32 @@ class TestEACLContainer: ir_wallet.wallet_path, cid, object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, 
wallet_config=ir_wallet.config_path, ) assert can_get_head_object( storage_wallet.wallet_path, cid, object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) assert can_search_object( ir_wallet.wallet_path, cid, - shell=client_shell, + shell=self.shell, + endpoint=endpoint, oid=object_oids[0], wallet_config=ir_wallet.config_path, ) assert can_search_object( storage_wallet.wallet_path, cid, - shell=client_shell, + shell=self.shell, + endpoint=endpoint, oid=object_oids[0], wallet_config=storage_wallet.config_path, ) @@ -310,7 +335,8 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=ir_wallet.config_path, ) with pytest.raises(AssertionError): @@ -318,7 +344,8 @@ class TestEACLContainer: wallet=storage_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) @@ -326,7 +353,8 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=ir_wallet.config_path, ) @@ -334,7 +362,8 @@ class TestEACLContainer: wallet=storage_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) @@ -343,7 +372,8 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=ir_wallet.config_path, ) with pytest.raises(AssertionError): @@ -351,7 +381,8 @@ class TestEACLContainer: wallet=storage_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) @@ -365,9 +396,10 @@ class TestEACLContainer: 
EACLRule(access=EACLAccess.DENY, role=EACLRole.SYSTEM, operation=op) for op in EACLOperation ], - shell=client_shell, + shell=self.shell, ), - shell=client_shell, + shell=self.shell, + endpoint=endpoint, ) wait_for_cache_expired() @@ -376,14 +408,16 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, file_name=file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, wallet_config=ir_wallet.config_path, ) assert not can_put_object( wallet=storage_wallet.wallet_path, cid=cid, file_name=file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, wallet_config=storage_wallet.config_path, ) @@ -393,7 +427,8 @@ class TestEACLContainer: cid=cid, oid=object_oids[0], file_name=file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, wallet_config=ir_wallet.config_path, ) with pytest.raises(AssertionError): @@ -402,7 +437,8 @@ class TestEACLContainer: cid=cid, oid=object_oids[0], file_name=file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, wallet_config=storage_wallet.config_path, ) @@ -411,7 +447,8 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=ir_wallet.config_path, ) with pytest.raises(AssertionError): @@ -419,7 +456,8 @@ class TestEACLContainer: wallet=storage_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) @@ -427,7 +465,8 @@ class TestEACLContainer: assert can_search_object( wallet=ir_wallet.wallet_path, cid=cid, - shell=client_shell, + shell=self.shell, + endpoint=endpoint, oid=object_oids[0], wallet_config=ir_wallet.config_path, ) @@ -435,7 +474,8 @@ class TestEACLContainer: assert can_search_object( wallet=storage_wallet.wallet_path, cid=cid, - shell=client_shell, + shell=self.shell, + endpoint=endpoint, oid=object_oids[0], 
wallet_config=storage_wallet.config_path, ) @@ -445,7 +485,8 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=ir_wallet.config_path, ) with pytest.raises(AssertionError): @@ -453,7 +494,8 @@ class TestEACLContainer: wallet=storage_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) @@ -462,7 +504,8 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=ir_wallet.config_path, ) with pytest.raises(AssertionError): @@ -470,7 +513,8 @@ class TestEACLContainer: wallet=storage_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) @@ -479,7 +523,8 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=ir_wallet.config_path, ) with pytest.raises(AssertionError): @@ -487,7 +532,8 @@ class TestEACLContainer: wallet=storage_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) @@ -501,9 +547,10 @@ class TestEACLContainer: EACLRule(access=EACLAccess.ALLOW, role=EACLRole.SYSTEM, operation=op) for op in EACLOperation ], - shell=client_shell, + shell=self.shell, ), - shell=client_shell, + shell=self.shell, + endpoint=endpoint, ) wait_for_cache_expired() @@ -512,14 +559,16 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, file_name=file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, wallet_config=ir_wallet.config_path, ) assert can_put_object( wallet=storage_wallet.wallet_path, cid=cid, file_name=file_path, - 
shell=client_shell, + shell=self.shell, + cluster=self.cluster, wallet_config=storage_wallet.config_path, ) @@ -528,7 +577,8 @@ class TestEACLContainer: cid=cid, oid=object_oids[0], file_name=file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, wallet_config=ir_wallet.config_path, ) assert can_get_object( @@ -536,7 +586,8 @@ class TestEACLContainer: cid=cid, oid=object_oids[0], file_name=file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, wallet_config=storage_wallet.config_path, ) @@ -544,29 +595,33 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=ir_wallet.config_path, ) assert can_get_head_object( wallet=storage_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) assert can_search_object( wallet=ir_wallet.wallet_path, cid=cid, - shell=client_shell, + shell=self.shell, oid=object_oids[0], + endpoint=endpoint, wallet_config=ir_wallet.config_path, ) assert can_search_object( wallet=storage_wallet.wallet_path, cid=cid, - shell=client_shell, + shell=self.shell, oid=object_oids[0], + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) @@ -575,7 +630,8 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=ir_wallet.config_path, ) with pytest.raises(AssertionError): @@ -583,7 +639,8 @@ class TestEACLContainer: wallet=storage_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) @@ -591,7 +648,8 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=ir_wallet.config_path, ) @@ 
-599,7 +657,8 @@ class TestEACLContainer: wallet=storage_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) @@ -608,7 +667,8 @@ class TestEACLContainer: wallet=ir_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=ir_wallet.config_path, ) with pytest.raises(AssertionError): @@ -616,6 +676,7 @@ class TestEACLContainer: wallet=storage_wallet.wallet_path, cid=cid, oid=object_oids[0], - shell=client_shell, + shell=self.shell, + endpoint=endpoint, wallet_config=storage_wallet.config_path, ) diff --git a/pytest_tests/testsuites/acl/test_eacl_filters.py b/pytest_tests/testsuites/acl/test_eacl_filters.py index 3983153..95ac02d 100644 --- a/pytest_tests/testsuites/acl/test_eacl_filters.py +++ b/pytest_tests/testsuites/acl/test_eacl_filters.py @@ -1,5 +1,6 @@ import allure import pytest +from cluster_test_base import ClusterTestBase from python_keywords.acl import ( EACLAccess, EACLFilter, @@ -19,7 +20,7 @@ from python_keywords.container_access import ( check_full_access_to_container, check_no_access_to_container, ) -from python_keywords.neofs_verbs import put_object +from python_keywords.neofs_verbs import put_object_to_random_node from python_keywords.object_access import can_get_head_object, can_get_object, can_put_object from wellknown_acl import PUBLIC_ACL @@ -27,7 +28,7 @@ from wellknown_acl import PUBLIC_ACL @pytest.mark.sanity @pytest.mark.acl @pytest.mark.acl_filters -class TestEACLFilters: +class TestEACLFilters(ClusterTestBase): # SPEC: https://github.com/nspcc-dev/neofs-spec/blob/master/01-arch/07-acl.md ATTRIBUTE = {"check_key": "check_value"} OTHER_ATTRIBUTE = {"check_key": "other_value"} @@ -67,52 +68,66 @@ class TestEACLFilters: ] @pytest.fixture(scope="function") - def eacl_container_with_objects(self, wallets, client_shell, file_path): + def eacl_container_with_objects(self, 
wallets, file_path): user_wallet = wallets.get_wallet() with allure.step("Create eACL public container"): cid = create_container( - user_wallet.wallet_path, basic_acl=PUBLIC_ACL, shell=client_shell + user_wallet.wallet_path, + basic_acl=PUBLIC_ACL, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) with allure.step("Add test objects to container"): objects_with_header = [ - put_object( + put_object_to_random_node( user_wallet.wallet_path, file_path, cid, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, attributes={**self.SET_HEADERS, "key": val}, ) for val in range(self.OBJECT_COUNT) ] objects_with_other_header = [ - put_object( + put_object_to_random_node( user_wallet.wallet_path, file_path, cid, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, attributes={**self.OTHER_HEADERS, "key": val}, ) for val in range(self.OBJECT_COUNT) ] objects_without_header = [ - put_object(user_wallet.wallet_path, file_path, cid, shell=client_shell) + put_object_to_random_node( + user_wallet.wallet_path, + file_path, + cid, + shell=self.shell, + cluster=self.cluster, + ) for _ in range(self.OBJECT_COUNT) ] yield cid, objects_with_header, objects_with_other_header, objects_without_header, file_path with allure.step("Delete eACL public container"): - delete_container(user_wallet.wallet_path, cid, shell=client_shell) + delete_container( + user_wallet.wallet_path, + cid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + ) @pytest.mark.parametrize( "match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL] ) - def test_extended_acl_filters_request( - self, wallets, client_shell, eacl_container_with_objects, match_type - ): + def test_extended_acl_filters_request(self, wallets, eacl_container_with_objects, match_type): allure.dynamic.title(f"Validate NeoFS operations with request filter: {match_type.name}") user_wallet = wallets.get_wallet() other_wallet = wallets.get_wallet(EACLRole.OTHERS) @@ -139,8 
+154,9 @@ class TestEACLFilters: set_eacl( user_wallet.wallet_path, cid, - create_eacl(cid, eacl_deny, shell=client_shell), - shell=client_shell, + create_eacl(cid, eacl_deny, shell=self.shell), + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) wait_for_cache_expired() @@ -163,7 +179,12 @@ class TestEACLFilters: ): with allure.step("Check other has full access when sending request without headers"): check_full_access_to_container( - other_wallet.wallet_path, cid, oid.pop(), file_path, shell=client_shell + other_wallet.wallet_path, + cid, + oid.pop(), + file_path, + shell=self.shell, + cluster=self.cluster, ) with allure.step( @@ -174,7 +195,8 @@ class TestEACLFilters: cid, oid.pop(), file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, xhdr=allow_headers, ) @@ -184,7 +206,8 @@ class TestEACLFilters: cid, oid.pop(), file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, xhdr=deny_headers, ) @@ -199,14 +222,16 @@ class TestEACLFilters: EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS) for op in EACLOperation ], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) check_full_access_to_container( other_wallet.wallet_path, cid, oid.pop(), file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, xhdr=deny_headers, bearer=bearer_other, ) @@ -215,7 +240,7 @@ class TestEACLFilters: "match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL] ) def test_extended_acl_deny_filters_object( - self, wallets, client_shell, eacl_container_with_objects, match_type + self, wallets, eacl_container_with_objects, match_type ): allure.dynamic.title( f"Validate NeoFS operations with deny user headers filter: {match_type.name}" @@ -245,8 +270,9 @@ class TestEACLFilters: set_eacl( user_wallet.wallet_path, cid, - create_eacl(cid, eacl_deny, shell=client_shell), - shell=client_shell, + create_eacl(cid, eacl_deny, shell=self.shell), + 
shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) wait_for_cache_expired() @@ -271,7 +297,8 @@ class TestEACLFilters: cid, objs_without_header.pop(), file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, xhdr=xhdr, ) @@ -281,7 +308,8 @@ class TestEACLFilters: cid, allow_objects.pop(), file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, xhdr=xhdr, ) @@ -291,7 +319,8 @@ class TestEACLFilters: other_wallet.wallet_path, cid, deny_objects[0], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, xhdr=xhdr, ) with pytest.raises(AssertionError): @@ -300,7 +329,8 @@ class TestEACLFilters: cid, deny_objects[0], file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, xhdr=xhdr, ) @@ -318,14 +348,16 @@ class TestEACLFilters: ) for op in EACLOperation ], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) check_full_access_to_container( other_wallet.wallet_path, cid, deny_objects.pop(), file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, xhdr=xhdr, bearer=bearer_other, ) @@ -338,10 +370,13 @@ class TestEACLFilters: other_wallet.wallet_path, cid, file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, attributes=allow_attribute, ) - assert can_put_object(other_wallet.wallet_path, cid, file_path, shell=client_shell) + assert can_put_object( + other_wallet.wallet_path, cid, file_path, shell=self.shell, cluster=self.cluster + ) deny_attribute = ( self.ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.OTHER_ATTRIBUTE @@ -352,7 +387,8 @@ class TestEACLFilters: other_wallet.wallet_path, cid, file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, attributes=deny_attribute, ) @@ -369,13 +405,15 @@ class TestEACLFilters: role=EACLRole.OTHERS, ) ], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) assert 
can_put_object( other_wallet.wallet_path, cid, file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, attributes=deny_attribute, bearer=bearer_other_for_put, ) @@ -384,7 +422,7 @@ class TestEACLFilters: "match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL] ) def test_extended_acl_allow_filters_object( - self, wallets, client_shell, eacl_container_with_objects, match_type + self, wallets, eacl_container_with_objects, match_type ): allure.dynamic.title( "Testcase to validate NeoFS operation with allow eACL user headers filters:" @@ -420,8 +458,9 @@ class TestEACLFilters: set_eacl( user_wallet.wallet_path, cid, - create_eacl(cid, eacl, shell=client_shell), - shell=client_shell, + create_eacl(cid, eacl, shell=self.shell), + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) wait_for_cache_expired() @@ -439,13 +478,26 @@ class TestEACLFilters: with allure.step(f"Check other cannot get and put objects without attributes"): oid = objects_without_header.pop() with pytest.raises(AssertionError): - assert can_get_head_object(other_wallet.wallet_path, cid, oid, shell=client_shell) - with pytest.raises(AssertionError): - assert can_get_object( - other_wallet.wallet_path, cid, oid, file_path, shell=client_shell + assert can_get_head_object( + other_wallet.wallet_path, + cid, + oid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) with pytest.raises(AssertionError): - assert can_put_object(other_wallet.wallet_path, cid, file_path, shell=client_shell) + assert can_get_object( + other_wallet.wallet_path, + cid, + oid, + file_path, + shell=self.shell, + cluster=self.cluster, + ) + with pytest.raises(AssertionError): + assert can_put_object( + other_wallet.wallet_path, cid, file_path, shell=self.shell, cluster=self.cluster + ) with allure.step( "Check other can get and put objects without attributes and using bearer token" @@ -461,13 +513,15 @@ class TestEACLFilters: ) for op in EACLOperation ], - 
shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) assert can_get_head_object( other_wallet.wallet_path, cid, objects_without_header[0], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, bearer=bearer_other, ) assert can_get_object( @@ -475,37 +529,62 @@ class TestEACLFilters: cid, objects_without_header[0], file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, bearer=bearer_other, ) assert can_put_object( other_wallet.wallet_path, cid, file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, bearer=bearer_other, ) with allure.step(f"Check other can get objects with attributes matching the filter"): oid = allow_objects.pop() - assert can_get_head_object(other_wallet.wallet_path, cid, oid, shell=client_shell) - assert can_get_object(other_wallet.wallet_path, cid, oid, file_path, shell=client_shell) + assert can_get_head_object( + other_wallet.wallet_path, + cid, + oid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + ) + assert can_get_object( + other_wallet.wallet_path, + cid, + oid, + file_path, + shell=self.shell, + cluster=self.cluster, + ) assert can_put_object( other_wallet.wallet_path, cid, file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, attributes=allow_attribute, ) with allure.step("Check other cannot get objects without attributes matching the filter"): with pytest.raises(AssertionError): assert can_get_head_object( - other_wallet.wallet_path, cid, deny_objects[0], shell=client_shell + other_wallet.wallet_path, + cid, + deny_objects[0], + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) with pytest.raises(AssertionError): assert can_get_object( - other_wallet.wallet_path, cid, deny_objects[0], file_path, shell=client_shell + other_wallet.wallet_path, + cid, + deny_objects[0], + file_path, + shell=self.shell, + cluster=self.cluster, ) with pytest.raises(AssertionError): assert 
can_put_object( @@ -513,7 +592,8 @@ class TestEACLFilters: cid, file_path, attributes=deny_attribute, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, ) with allure.step( @@ -522,21 +602,28 @@ class TestEACLFilters: ): oid = deny_objects.pop() assert can_get_head_object( - other_wallet.wallet_path, cid, oid, shell=client_shell, bearer=bearer_other + other_wallet.wallet_path, + cid, + oid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + bearer=bearer_other, ) assert can_get_object( other_wallet.wallet_path, cid, oid, file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, bearer=bearer_other, ) assert can_put_object( other_wallet.wallet_path, cid, file_path, - shell=client_shell, + shell=self.shell, + cluster=self.cluster, attributes=deny_attribute, bearer=bearer_other, ) diff --git a/pytest_tests/testsuites/conftest.py b/pytest_tests/testsuites/conftest.py index ebd8ca6..869bd71 100644 --- a/pytest_tests/testsuites/conftest.py +++ b/pytest_tests/testsuites/conftest.py @@ -9,6 +9,7 @@ import allure import pytest import yaml from binary_version_helper import get_local_binaries_versions, get_remote_binaries_versions +from cluster import Cluster from common import ( ASSETS_DIR, BACKGROUND_LOAD_MAX_TIME, @@ -20,7 +21,6 @@ from common import ( LOAD_NODE_SSH_PRIVATE_KEY_PATH, LOAD_NODE_SSH_USER, LOAD_NODES, - NEOFS_NETMAP_DICT, STORAGE_NODE_SERVICE_NAME_REGEX, WALLET_PASS, ) @@ -33,8 +33,9 @@ from neofs_testlib.shell import LocalShell, Shell from neofs_testlib.utils.wallet import init_wallet from payment_neogo import deposit_gas, transfer_gas from pytest import FixtureRequest -from python_keywords.node_management import node_healthcheck -from wallet import WalletFactory +from python_keywords.node_management import storage_node_healthcheck + +from helpers.wallet import WalletFactory logger = logging.getLogger("NeoLogger") @@ -66,6 +67,7 @@ def hosting(configure_testlib) -> Hosting: hosting_instance = Hosting() 
hosting_instance.configure(hosting_config) + yield hosting_instance @@ -81,8 +83,13 @@ def require_multiple_hosts(hosting: Hosting): @pytest.fixture(scope="session") -def wallet_factory(prepare_tmp_dir: str, client_shell: Shell) -> WalletFactory: - return WalletFactory(prepare_tmp_dir, client_shell) +def wallet_factory(temp_directory: str, client_shell: Shell, cluster: Cluster) -> WalletFactory: + return WalletFactory(temp_directory, client_shell, cluster) + + +@pytest.fixture(scope="session") +def cluster(hosting: Hosting) -> Cluster: + yield Cluster(hosting) @pytest.fixture(scope="session", autouse=True) @@ -97,7 +104,7 @@ def check_binary_versions(request, hosting: Hosting, client_shell: Shell): @pytest.fixture(scope="session") @allure.title("Prepare tmp directory") -def prepare_tmp_dir(): +def temp_directory(): with allure.step("Prepare tmp directory"): full_path = os.path.join(os.getcwd(), ASSETS_DIR) shutil.rmtree(full_path, ignore_errors=True) @@ -111,7 +118,7 @@ def prepare_tmp_dir(): @pytest.fixture(scope="function", autouse=True) @allure.title("Analyze logs") -def analyze_logs(prepare_tmp_dir: str, hosting: Hosting, request: FixtureRequest): +def analyze_logs(temp_directory: str, hosting: Hosting, request: FixtureRequest): start_time = datetime.utcnow() yield end_time = datetime.utcnow() @@ -123,39 +130,39 @@ def analyze_logs(prepare_tmp_dir: str, hosting: Hosting, request: FixtureRequest # Test name may exceed os NAME_MAX (255 bytes), so we use test start datetime instead start_time_str = start_time.strftime("%Y_%m_%d_%H_%M_%S_%f") - logs_dir = os.path.join(prepare_tmp_dir, f"logs_{start_time_str}") + logs_dir = os.path.join(temp_directory, f"logs_{start_time_str}") dump_logs(hosting, logs_dir, start_time, end_time) check_logs(logs_dir) @pytest.fixture(scope="session", autouse=True) @allure.title("Collect logs") -def collect_logs(prepare_tmp_dir, hosting: Hosting): +def collect_logs(temp_directory, hosting: Hosting): start_time = datetime.utcnow() yield 
end_time = datetime.utcnow() # Dump logs to temp directory (because they might be too large to keep in RAM) - logs_dir = os.path.join(prepare_tmp_dir, "logs") + logs_dir = os.path.join(temp_directory, "logs") dump_logs(hosting, logs_dir, start_time, end_time) attach_logs(logs_dir) @pytest.fixture(scope="session", autouse=True) @allure.title("Run health check for all storage nodes") -def run_health_check(collect_logs, hosting: Hosting): +def run_health_check(collect_logs, cluster: Cluster): failed_nodes = [] - for node_name in NEOFS_NETMAP_DICT.keys(): - health_check = node_healthcheck(hosting, node_name) + for node in cluster.storage_nodes: + health_check = storage_node_healthcheck(node) if health_check.health_status != "READY" or health_check.network_status != "ONLINE": - failed_nodes.append(node_name) + failed_nodes.append(node) if failed_nodes: raise AssertionError(f"Nodes {failed_nodes} are not healthy") @pytest.fixture(scope="session") -def background_grpc_load(client_shell, prepare_wallet_and_deposit): +def background_grpc_load(client_shell, default_wallet): registry_file = os.path.join("/tmp/", f"{str(uuid.uuid4())}.bolt") prepare_file = os.path.join("/tmp/", f"{str(uuid.uuid4())}.json") allure.dynamic.title( @@ -221,21 +228,24 @@ def background_grpc_load(client_shell, prepare_wallet_and_deposit): @pytest.fixture(scope="session") @allure.title("Prepare wallet and deposit") -def prepare_wallet_and_deposit(client_shell, prepare_tmp_dir): +def default_wallet(client_shell: Shell, temp_directory: str, cluster: Cluster): wallet_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json") init_wallet(wallet_path, WALLET_PASS) allure.attach.file(wallet_path, os.path.basename(wallet_path), allure.attachment_type.JSON) if not FREE_STORAGE: + main_chain = cluster.main_chain_nodes[0] deposit = 30 transfer_gas( shell=client_shell, amount=deposit + 1, + main_chain=main_chain, wallet_to_path=wallet_path, wallet_to_password=WALLET_PASS, ) deposit_gas( 
shell=client_shell, + main_chain=main_chain, amount=deposit, wallet_from_path=wallet_path, wallet_from_password=WALLET_PASS, diff --git a/pytest_tests/testsuites/container/test_container.py b/pytest_tests/testsuites/container/test_container.py index 581d34f..b201fff 100644 --- a/pytest_tests/testsuites/container/test_container.py +++ b/pytest_tests/testsuites/container/test_container.py @@ -14,84 +14,114 @@ from python_keywords.container import ( from utility import placement_policy_from_container from wellknown_acl import PRIVATE_ACL_F +from steps.cluster_test_base import ClusterTestBase + -@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"]) -@pytest.mark.sanity -@pytest.mark.smoke @pytest.mark.container -def test_container_creation(client_shell, prepare_wallet_and_deposit, name): - scenario_title = f"with name {name}" if name else "without name" - allure.dynamic.title(f"User can create container {scenario_title}") +@pytest.mark.sanity +@pytest.mark.container +class TestContainer(ClusterTestBase): + @pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"]) + @pytest.mark.smoke + def test_container_creation(self, default_wallet, name): + scenario_title = f"with name {name}" if name else "without name" + allure.dynamic.title(f"User can create container {scenario_title}") - wallet = prepare_wallet_and_deposit - with open(wallet) as file: - json_wallet = json.load(file) + wallet = default_wallet + with open(wallet) as file: + json_wallet = json.load(file) - placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X" - cid = create_container(wallet, rule=placement_rule, name=name, shell=client_shell) + placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X" + cid = create_container( + wallet, + rule=placement_rule, + name=name, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + ) - containers = list_containers(wallet, shell=client_shell) - assert cid in containers, 
f"Expected container {cid} in containers: {containers}" + containers = list_containers( + wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) + assert cid in containers, f"Expected container {cid} in containers: {containers}" - container_info: str = get_container(wallet, cid, json_mode=False, shell=client_shell) - container_info = container_info.casefold() # To ignore case when comparing with expected values + container_info: str = get_container( + wallet, + cid, + json_mode=False, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + ) + container_info = ( + container_info.casefold() + ) # To ignore case when comparing with expected values - info_to_check = { - f"basic ACL: {PRIVATE_ACL_F} (private)", - f"owner ID: {json_wallet.get('accounts')[0].get('address')}", - f"container ID: {cid}", - } - if name: - info_to_check.add(f"Name={name}") + info_to_check = { + f"basic ACL: {PRIVATE_ACL_F} (private)", + f"owner ID: {json_wallet.get('accounts')[0].get('address')}", + f"container ID: {cid}", + } + if name: + info_to_check.add(f"Name={name}") - with allure.step("Check container has correct information"): - expected_policy = placement_rule.casefold() - actual_policy = placement_policy_from_container(container_info) - assert ( - actual_policy == expected_policy - ), f"Expected policy\n{expected_policy} but got policy\n{actual_policy}" - - for info in info_to_check: - expected_info = info.casefold() + with allure.step("Check container has correct information"): + expected_policy = placement_rule.casefold() + actual_policy = placement_policy_from_container(container_info) assert ( - expected_info in container_info - ), f"Expected {expected_info} in container info:\n{container_info}" + actual_policy == expected_policy + ), f"Expected policy\n{expected_policy} but got policy\n{actual_policy}" - with allure.step("Delete container and check it was deleted"): - delete_container(wallet, cid, shell=client_shell) - 
tick_epoch(shell=client_shell) - wait_for_container_deletion(wallet, cid, shell=client_shell) + for info in info_to_check: + expected_info = info.casefold() + assert ( + expected_info in container_info + ), f"Expected {expected_info} in container info:\n{container_info}" + with allure.step("Delete container and check it was deleted"): + delete_container( + wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) + tick_epoch(self.shell, self.cluster) + wait_for_container_deletion( + wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) -@allure.title("Parallel container creation and deletion") -@pytest.mark.sanity -@pytest.mark.container -def test_container_creation_deletion_parallel(client_shell, prepare_wallet_and_deposit): - containers_count = 3 - wallet = prepare_wallet_and_deposit - placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X" + @allure.title("Parallel container creation and deletion") + def test_container_creation_deletion_parallel(self, default_wallet): + containers_count = 3 + wallet = default_wallet + placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X" - cids: list[str] = [] - with allure.step(f"Create {containers_count} containers"): - for _ in range(containers_count): - cids.append( - create_container( - wallet, - rule=placement_rule, - await_mode=False, - shell=client_shell, - wait_for_creation=False, + cids: list[str] = [] + with allure.step(f"Create {containers_count} containers"): + for _ in range(containers_count): + cids.append( + create_container( + wallet, + rule=placement_rule, + await_mode=False, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wait_for_creation=False, + ) ) - ) - with allure.step(f"Wait for containers occur in container list"): - for cid in cids: - wait_for_container_creation( - wallet, cid, sleep_interval=containers_count, shell=client_shell - ) + with allure.step(f"Wait for containers occur in container list"): + for cid in cids: + 
wait_for_container_creation( + wallet, + cid, + sleep_interval=containers_count, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + ) - with allure.step("Delete containers and check they were deleted"): - for cid in cids: - delete_container(wallet, cid, shell=client_shell) - tick_epoch(shell=client_shell) - wait_for_container_deletion(wallet, cid, shell=client_shell) + with allure.step("Delete containers and check they were deleted"): + for cid in cids: + delete_container( + wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) + tick_epoch(self.shell, self.cluster) + wait_for_container_deletion( + wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) diff --git a/pytest_tests/testsuites/failovers/test_failover_network.py b/pytest_tests/testsuites/failovers/test_failover_network.py index 98aed8f..1ee52b8 100644 --- a/pytest_tests/testsuites/failovers/test_failover_network.py +++ b/pytest_tests/testsuites/failovers/test_failover_network.py @@ -4,98 +4,107 @@ from time import sleep import allure import pytest -from failover_utils import wait_all_storage_node_returned, wait_object_replication_on_nodes +from cluster import StorageNode +from failover_utils import wait_all_storage_nodes_returned, wait_object_replication from file_helper import generate_file, get_file_hash from iptables_helper import IpTablesHelper -from neofs_testlib.hosting import Hosting from python_keywords.container import create_container -from python_keywords.neofs_verbs import get_object, put_object +from python_keywords.neofs_verbs import get_object, put_object_to_random_node from wellknown_acl import PUBLIC_ACL +from steps.cluster_test_base import ClusterTestBase + logger = logging.getLogger("NeoLogger") STORAGE_NODE_COMMUNICATION_PORT = "8080" STORAGE_NODE_COMMUNICATION_PORT_TLS = "8082" PORTS_TO_BLOCK = [STORAGE_NODE_COMMUNICATION_PORT, STORAGE_NODE_COMMUNICATION_PORT_TLS] -blocked_hosts = [] +blocked_nodes: list[StorageNode] = 
[] -@pytest.fixture(autouse=True) -@allure.step("Restore network") -def restore_network(hosting: Hosting): - yield - - not_empty = len(blocked_hosts) != 0 - for host_address in list(blocked_hosts): - with allure.step(f"Restore network at host {host_address}"): - host = hosting.get_host_by_address(host_address) - IpTablesHelper.restore_input_traffic_to_port(host.get_shell(), PORTS_TO_BLOCK) - blocked_hosts.remove(host) - if not_empty: - wait_all_storage_node_returned(hosting) - - -@allure.title("Block Storage node traffic") @pytest.mark.failover @pytest.mark.failover_network -def test_block_storage_node_traffic( - prepare_wallet_and_deposit, client_shell, require_multiple_hosts, hosting: Hosting -): - """ - Block storage nodes traffic using iptables and wait for replication for objects. - """ - wallet = prepare_wallet_and_deposit - placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" - wakeup_node_timeout = 10 # timeout to let nodes detect that traffic has blocked - nodes_to_block_count = 2 +class TestFailoverNetwork(ClusterTestBase): + @pytest.fixture(autouse=True) + @allure.step("Restore network") + def restore_network(self): + yield - source_file_path = generate_file() - cid = create_container(wallet, shell=client_shell, rule=placement_rule, basic_acl=PUBLIC_ACL) - oid = put_object(wallet, source_file_path, cid, shell=client_shell) + not_empty = len(blocked_nodes) != 0 + for node in list(blocked_nodes): + with allure.step(f"Restore network at host for {node.label}"): + IpTablesHelper.restore_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK) + blocked_nodes.remove(node) + if not_empty: + wait_all_storage_nodes_returned(self.cluster) - # TODO: we need to refactor wait_object_replication_on_nodes so that it returns - # storage node names rather than endpoints - node_endpoints = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell) + @allure.title("Block Storage node traffic") + def test_block_storage_node_traffic(self, 
default_wallet, require_multiple_hosts): + """ + Block storage nodes traffic using iptables and wait for replication for objects. + """ + wallet = default_wallet + placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" + wakeup_node_timeout = 10 # timeout to let nodes detect that traffic has blocked + nodes_to_block_count = 2 - logger.info(f"Nodes are {node_endpoints}") - node_endpoints_to_block = node_endpoints - if nodes_to_block_count > len(node_endpoints): - # TODO: the intent of this logic is not clear, need to revisit - node_endpoints_to_block = choices(node_endpoints, k=2) + source_file_path = generate_file() + cid = create_container( + wallet, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + rule=placement_rule, + basic_acl=PUBLIC_ACL, + ) + oid = put_object_to_random_node( + wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster + ) - excluded_nodes = [] - for node_endpoint in node_endpoints_to_block: - host_address = node_endpoint.split(":")[0] - host = hosting.get_host_by_address(host_address) + nodes = wait_object_replication( + cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes + ) - with allure.step(f"Block incoming traffic at host {host_address} on port {PORTS_TO_BLOCK}"): - blocked_hosts.append(host_address) - excluded_nodes.append(node_endpoint) - IpTablesHelper.drop_input_traffic_to_port(host.get_shell(), PORTS_TO_BLOCK) - sleep(wakeup_node_timeout) + logger.info(f"Nodes are {nodes}") + nodes_to_block = nodes + if nodes_to_block_count > len(nodes): + # TODO: the intent of this logic is not clear, need to revisit + nodes_to_block = choices(nodes, k=2) - with allure.step(f"Check object is not stored on node {node_endpoint}"): - new_nodes = wait_object_replication_on_nodes( - wallet, cid, oid, 2, shell=client_shell, excluded_nodes=excluded_nodes - ) - assert node_endpoint not in new_nodes + excluded_nodes = [] + for node in nodes_to_block: + with allure.step(f"Block incoming traffic at node {node} on 
port {PORTS_TO_BLOCK}"): + blocked_nodes.append(node) + excluded_nodes.append(node) + IpTablesHelper.drop_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK) + sleep(wakeup_node_timeout) + + with allure.step(f"Check object is not stored on node {node}"): + new_nodes = wait_object_replication( + cid, + oid, + 2, + shell=self.shell, + nodes=list(set(self.cluster.storage_nodes) - set(excluded_nodes)), + ) + assert node not in new_nodes + + with allure.step(f"Check object data is not corrupted"): + got_file_path = get_object( + wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell + ) + assert get_file_hash(source_file_path) == get_file_hash(got_file_path) + + for node in nodes_to_block: + with allure.step(f"Unblock incoming traffic at host {node} on port {PORTS_TO_BLOCK}"): + IpTablesHelper.restore_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK) + blocked_nodes.remove(node) + sleep(wakeup_node_timeout) with allure.step(f"Check object data is not corrupted"): - got_file_path = get_object(wallet, cid, oid, endpoint=new_nodes[0], shell=client_shell) + new_nodes = wait_object_replication( + cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes + ) + + got_file_path = get_object( + wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint() + ) assert get_file_hash(source_file_path) == get_file_hash(got_file_path) - - for node_endpoint in node_endpoints_to_block: - host_address = node_endpoint.split(":")[0] - host = hosting.get_host_by_address(host_address) - - with allure.step( - f"Unblock incoming traffic at host {host_address} on port {PORTS_TO_BLOCK}" - ): - IpTablesHelper.restore_input_traffic_to_port(host.get_shell(), PORTS_TO_BLOCK) - blocked_hosts.remove(host_address) - sleep(wakeup_node_timeout) - - with allure.step(f"Check object data is not corrupted"): - new_nodes = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell) - - got_file_path = get_object(wallet, cid, oid, 
shell=client_shell, endpoint=new_nodes[0]) - assert get_file_hash(source_file_path) == get_file_hash(got_file_path) diff --git a/pytest_tests/testsuites/failovers/test_failover_storage.py b/pytest_tests/testsuites/failovers/test_failover_storage.py index 08eb95d..0ec153f 100644 --- a/pytest_tests/testsuites/failovers/test_failover_storage.py +++ b/pytest_tests/testsuites/failovers/test_failover_storage.py @@ -2,23 +2,26 @@ import logging import allure import pytest -from failover_utils import wait_all_storage_node_returned, wait_object_replication_on_nodes +from cluster import Cluster, StorageNode +from failover_utils import wait_all_storage_nodes_returned, wait_object_replication from file_helper import generate_file, get_file_hash -from neofs_testlib.hosting import Host, Hosting +from neofs_testlib.hosting import Host from neofs_testlib.shell import CommandOptions from python_keywords.container import create_container -from python_keywords.neofs_verbs import get_object, put_object +from python_keywords.neofs_verbs import get_object, put_object_to_random_node from wellknown_acl import PUBLIC_ACL +from steps.cluster_test_base import ClusterTestBase + logger = logging.getLogger("NeoLogger") -stopped_hosts = [] +stopped_nodes: list[StorageNode] = [] @pytest.fixture(scope="function", autouse=True) @allure.step("Return all stopped hosts") -def after_run_return_all_stopped_hosts(hosting: Hosting): +def after_run_return_all_stopped_hosts(cluster: Cluster): yield - return_stopped_hosts(hosting) + return_stopped_hosts(cluster) def panic_reboot_host(host: Host) -> None: @@ -29,112 +32,147 @@ def panic_reboot_host(host: Host) -> None: shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options) -def return_stopped_hosts(hosting: Hosting) -> None: - for host_address in list(stopped_hosts): - with allure.step(f"Start host {host_address}"): - host = hosting.get_host_by_address(host_address) - host.start_host() - stopped_hosts.remove(host_address) +def 
return_stopped_hosts(cluster: Cluster) -> None: + for node in list(stopped_nodes): + with allure.step(f"Start host {node}"): + node.host.start_host() + stopped_nodes.remove(node) - wait_all_storage_node_returned(hosting) + wait_all_storage_nodes_returned(cluster) -@allure.title("Lose and return storage node's host") -@pytest.mark.parametrize("hard_reboot", [True, False]) @pytest.mark.failover -@pytest.mark.failover_reboot -def test_lose_storage_node_host( - prepare_wallet_and_deposit, - client_shell, - hosting: Hosting, - hard_reboot: bool, - require_multiple_hosts, -): - wallet = prepare_wallet_and_deposit - placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" - source_file_path = generate_file() - cid = create_container(wallet, shell=client_shell, rule=placement_rule, basic_acl=PUBLIC_ACL) - oid = put_object(wallet, source_file_path, cid, shell=client_shell) - node_endpoints = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell) - - for node_endpoint in node_endpoints: - host_address = node_endpoint.split(":")[0] - host = hosting.get_host_by_address(host_address) - stopped_hosts.append(host.config.address) - - with allure.step(f"Stop host {host_address}"): - host.stop_host("hard" if hard_reboot else "soft") - - new_nodes = wait_object_replication_on_nodes( - wallet, cid, oid, 2, shell=client_shell, excluded_nodes=[node_endpoint] +class TestFailoverStorage(ClusterTestBase): + @allure.title("Lose and return storage node's host") + @pytest.mark.parametrize("hard_reboot", [True, False]) + @pytest.mark.failover_reboot + def test_lose_storage_node_host( + self, + default_wallet, + hard_reboot: bool, + require_multiple_hosts, + ): + wallet = default_wallet + placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" + source_file_path = generate_file() + cid = create_container( + wallet, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + rule=placement_rule, + basic_acl=PUBLIC_ACL, + ) + oid = put_object_to_random_node( + wallet, 
source_file_path, cid, shell=self.shell, cluster=self.cluster + ) + nodes = wait_object_replication( + cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes ) - assert all(old_node not in new_nodes for old_node in node_endpoints) - with allure.step("Check object data is not corrupted"): - got_file_path = get_object(wallet, cid, oid, endpoint=new_nodes[0], shell=client_shell) - assert get_file_hash(source_file_path) == get_file_hash(got_file_path) + for node in nodes: + stopped_nodes.append(node) - with allure.step(f"Return all hosts"): - return_stopped_hosts(hosting) + with allure.step(f"Stop host {node}"): + node.host.stop_host("hard" if hard_reboot else "soft") - with allure.step("Check object data is not corrupted"): - new_nodes = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell) - got_file_path = get_object(wallet, cid, oid, shell=client_shell, endpoint=new_nodes[0]) - assert get_file_hash(source_file_path) == get_file_hash(got_file_path) + new_nodes = wait_object_replication( + cid, + oid, + 2, + shell=self.shell, + nodes=list(set(self.cluster.storage_nodes) - {node}), + ) + assert all(old_node not in new_nodes for old_node in nodes) + with allure.step("Check object data is not corrupted"): + got_file_path = get_object( + wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell + ) + assert get_file_hash(source_file_path) == get_file_hash(got_file_path) -@allure.title("Panic storage node's host") -@pytest.mark.parametrize("sequence", [True, False]) -@pytest.mark.failover -@pytest.mark.failover_panic -def test_panic_storage_node_host( - prepare_wallet_and_deposit, - client_shell, - hosting: Hosting, - require_multiple_hosts, - sequence: bool, -): - wallet = prepare_wallet_and_deposit - placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" - source_file_path = generate_file() - cid = create_container(wallet, shell=client_shell, rule=placement_rule, basic_acl=PUBLIC_ACL) - oid = put_object(wallet, 
source_file_path, cid, shell=client_shell) + with allure.step(f"Return all hosts"): + return_stopped_hosts(self.cluster) - node_endpoints = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell) - allure.attach( - "\n".join(node_endpoints), - "Current nodes with object", - allure.attachment_type.TEXT, - ) + with allure.step("Check object data is not corrupted"): + new_nodes = wait_object_replication( + cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes + ) + got_file_path = get_object( + wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint() + ) + assert get_file_hash(source_file_path) == get_file_hash(got_file_path) - new_nodes: list[str] = [] - for node_endpoint in node_endpoints: - host_address = node_endpoint.split(":")[0] + @allure.title("Panic storage node's host") + @pytest.mark.parametrize("sequence", [True, False]) + @pytest.mark.failover_panic + def test_panic_storage_node_host( + self, + default_wallet, + require_multiple_hosts, + sequence: bool, + ): + wallet = default_wallet + placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" + source_file_path = generate_file() + cid = create_container( + wallet, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + rule=placement_rule, + basic_acl=PUBLIC_ACL, + ) + oid = put_object_to_random_node( + wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster + ) - with allure.step(f"Hard reboot host {node_endpoint} via magic SysRq option"): - host = hosting.get_host_by_address(host_address) - panic_reboot_host(host) - if sequence: - try: - new_nodes = wait_object_replication_on_nodes( - wallet, cid, oid, 2, shell=client_shell, excluded_nodes=[node_endpoint] - ) - except AssertionError: - new_nodes = wait_object_replication_on_nodes( - wallet, cid, oid, 2, shell=client_shell - ) - - allure.attach( - "\n".join(new_nodes), - f"Nodes with object after {node_endpoint} fail", - allure.attachment_type.TEXT, - ) - - if not sequence: - 
new_nodes = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell) + nodes = wait_object_replication( + cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes + ) allure.attach( - "\n".join(new_nodes), "Nodes with object after nodes fail", allure.attachment_type.TEXT + "\n".join(nodes), + "Current nodes with object", + allure.attachment_type.TEXT, ) - got_file_path = get_object(wallet, cid, oid, shell=client_shell, endpoint=new_nodes[0]) - assert get_file_hash(source_file_path) == get_file_hash(got_file_path) + new_nodes: list[StorageNode] = [] + for node in nodes: + with allure.step(f"Hard reboot host {node} via magic SysRq option"): + panic_reboot_host(node.host) + if sequence: + try: + new_nodes = wait_object_replication( + cid, + oid, + 2, + shell=self.shell, + nodes=list(set(self.cluster.storage_nodes) - {node}), + ) + except AssertionError: + new_nodes = wait_object_replication( + cid, + oid, + 2, + shell=self.shell, + nodes=self.cluster.storage_nodes, + ) + + allure.attach( + "\n".join(new_nodes), + f"Nodes with object after {node} fail", + allure.attachment_type.TEXT, + ) + + if not sequence: + new_nodes = wait_object_replication( + cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes + ) + allure.attach( + "\n".join(new_nodes), + "Nodes with object after nodes fail", + allure.attachment_type.TEXT, + ) + + got_file_path = get_object( + wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint() + ) + assert get_file_hash(source_file_path) == get_file_hash(got_file_path) diff --git a/pytest_tests/testsuites/network/test_node_management.py b/pytest_tests/testsuites/network/test_node_management.py index e615e79..18f6dee 100644 --- a/pytest_tests/testsuites/network/test_node_management.py +++ b/pytest_tests/testsuites/network/test_node_management.py @@ -1,541 +1,531 @@ import logging -from random import choice +import random from time import sleep -from typing import Optional +from typing import Optional, 
Tuple import allure import pytest -from common import ( - COMPLEX_OBJ_SIZE, - MORPH_BLOCK_TIME, - NEOFS_CONTRACT_CACHE_TIMEOUT, - NEOFS_NETMAP_DICT, - STORAGE_RPC_ENDPOINT_1, - STORAGE_WALLET_PASS, -) -from data_formatters import get_wallet_public_key +from cluster import StorageNode +from cluster_test_base import ClusterTestBase +from common import COMPLEX_OBJ_SIZE, MORPH_BLOCK_TIME, NEOFS_CONTRACT_CACHE_TIMEOUT from epoch import tick_epoch from file_helper import generate_file from grpc_responses import OBJECT_NOT_FOUND, error_matches_status -from neofs_testlib.hosting import Hosting -from neofs_testlib.shell import Shell from python_keywords.container import create_container, get_container -from python_keywords.failover_utils import wait_object_replication_on_nodes -from python_keywords.neofs_verbs import delete_object, get_object, head_object, put_object +from python_keywords.failover_utils import wait_object_replication +from python_keywords.neofs_verbs import ( + delete_object, + get_object, + get_object_from_random_node, + head_object, + put_object, + put_object_to_random_node, +) from python_keywords.node_management import ( check_node_in_map, delete_node_data, drop_object, exclude_node_from_network_map, - get_locode, + get_locode_from_random_node, get_netmap_snapshot, include_node_to_network_map, - node_healthcheck, - node_set_status, node_shard_list, node_shard_set_mode, - start_nodes, - stop_nodes, + start_storage_nodes, + storage_node_healthcheck, + storage_node_set_status, ) from storage_policy import get_nodes_with_object, get_simple_object_copies from utility import parse_time, placement_policy_from_container, wait_for_gc_pass_on_storage_nodes from wellknown_acl import PUBLIC_ACL logger = logging.getLogger("NeoLogger") -check_nodes = [] - - -@pytest.fixture -@allure.title("Create container and pick the node with data") -def create_container_and_pick_node(prepare_wallet_and_deposit, client_shell, hosting: Hosting): - wallet = 
prepare_wallet_and_deposit - file_path = generate_file() - placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" - - cid = create_container(wallet, shell=client_shell, rule=placement_rule, basic_acl=PUBLIC_ACL) - oid = put_object(wallet, file_path, cid, shell=client_shell) - - nodes = get_nodes_with_object(wallet, cid, oid, shell=client_shell) - assert len(nodes) == 1 - node = nodes[0] - - node_name = choice( - [node_name for node_name, params in NEOFS_NETMAP_DICT.items() if params.get("rpc") == node] - ) - - yield cid, node_name - - shards = node_shard_list(hosting, node_name) - assert shards - - for shard in shards: - node_shard_set_mode(hosting, node_name, shard, "read-write") - - node_shard_list(hosting, node_name) - - -@pytest.fixture -def after_run_start_all_nodes(hosting: Hosting): - yield - try: - start_nodes(hosting, list(NEOFS_NETMAP_DICT.keys())) - except Exception as err: - logger.error(f"Node start fails with error:\n{err}") - - -@pytest.fixture -def return_nodes_after_test_run(client_shell: Shell, hosting: Hosting): - yield - return_nodes(client_shell, hosting) - - -@allure.step("Tick epoch with retries") -def tick_epoch_with_retries(shell: Shell, attempts: int = 3, timeout: int = 3): - for __attempt in range(attempts): - try: - tick_epoch(shell=shell) - except RuntimeError: - sleep(timeout) - continue - return - raise - - -@allure.step("Return node to cluster") -def return_nodes(shell: Shell, hosting: Hosting, alive_node: Optional[str] = None) -> None: - for node in list(check_nodes): - with allure.step(f"Start node {node}"): - host = hosting.get_host_by_service(node) - host.start_service(node) - with allure.step(f"Waiting status ready for node {node}"): - wait_for_node_to_be_ready(hosting, node) - - # We need to wait for node to establish notifications from morph-chain - # Otherwise it will hang up when we will try to set status - sleep(parse_time(MORPH_BLOCK_TIME)) - - with allure.step(f"Move node {node} to online state"): - 
node_set_status(hosting, node, status="online", retries=2) - - check_nodes.remove(node) - sleep(parse_time(MORPH_BLOCK_TIME)) - tick_epoch_with_retries(attempts=3) - check_node_in_map(node, shell=shell, alive_node=alive_node) +check_nodes: list[StorageNode] = [] @allure.title("Add one node to cluster") @pytest.mark.add_nodes @pytest.mark.node_mgmt -def test_add_nodes( - prepare_tmp_dir, - client_shell, - prepare_wallet_and_deposit, - return_nodes_after_test_run, - hosting: Hosting, -): - wallet = prepare_wallet_and_deposit - placement_rule_3 = "REP 3 IN X CBF 1 SELECT 3 FROM * AS X" - placement_rule_4 = "REP 4 IN X CBF 1 SELECT 4 FROM * AS X" - source_file_path = generate_file() +class TestNodeManagement(ClusterTestBase): + @pytest.fixture + @allure.title("Create container and pick the node with data") + def create_container_and_pick_node(self, default_wallet: str) -> Tuple[str, StorageNode]: + default_wallet + file_path = generate_file() + placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" + endpoint = self.cluster.default_rpc_endpoint - additional_node = choice( - [ - node - for node, node_config in NEOFS_NETMAP_DICT.items() - if node_config.get("rpc") != STORAGE_RPC_ENDPOINT_1 - ] - ) - alive_node = choice([node for node in NEOFS_NETMAP_DICT if node != additional_node]) - - check_node_in_map(additional_node, shell=client_shell, alive_node=alive_node) - - # Add node to recovery list before messing with it - check_nodes.append(additional_node) - exclude_node_from_network_map(hosting, additional_node, alive_node, shell=client_shell) - delete_node_data(hosting, additional_node) - - cid = create_container(wallet, rule=placement_rule_3, basic_acl=PUBLIC_ACL, shell=client_shell) - oid = put_object( - wallet, - source_file_path, - cid, - endpoint=NEOFS_NETMAP_DICT[alive_node].get("rpc"), - shell=client_shell, - ) - wait_object_replication_on_nodes(wallet, cid, oid, 3, shell=client_shell) - - return_nodes(shell=client_shell, hosting=hosting, alive_node=alive_node) 
- - with allure.step("Check data could be replicated to new node"): - random_node = choice( - [node for node in NEOFS_NETMAP_DICT if node not in (additional_node, alive_node)] - ) - exclude_node_from_network_map(hosting, random_node, alive_node, shell=client_shell) - - wait_object_replication_on_nodes( - wallet, cid, oid, 3, excluded_nodes=[random_node], shell=client_shell - ) - include_node_to_network_map(hosting, random_node, alive_node, shell=client_shell) - wait_object_replication_on_nodes(wallet, cid, oid, 3, shell=client_shell) - - with allure.step("Check container could be created with new node"): cid = create_container( - wallet, rule=placement_rule_4, basic_acl=PUBLIC_ACL, shell=client_shell + default_wallet, + shell=self.shell, + endpoint=endpoint, + rule=placement_rule, + basic_acl=PUBLIC_ACL, + ) + oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, self.cluster) + + nodes = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) + assert len(nodes) == 1 + node = nodes[0] + + yield cid, node + + shards = node_shard_list(node) + assert shards + + for shard in shards: + node_shard_set_mode(node, shard, "read-write") + + node_shard_list(node) + + @allure.step("Tick epoch with retries") + def tick_epoch_with_retries(self, attempts: int = 3, timeout: int = 3): + for attempt in range(attempts): + try: + self.tick_epoch() + except RuntimeError: + sleep(timeout) + if attempt >= attempts - 1: + raise + continue + return + + @pytest.fixture + def after_run_start_all_nodes(self): + yield + self.return_nodes() + + @pytest.fixture + def return_nodes_after_test_run(self): + yield + self.return_nodes() + + @allure.step("Return node to cluster") + def return_nodes(self, alive_node: Optional[StorageNode] = None) -> None: + for node in list(check_nodes): + with allure.step(f"Start node {node}"): + node.start_service() + with allure.step(f"Waiting status ready for node {node}"): + self.wait_for_node_to_be_ready(node) + + 
# We need to wait for node to establish notifications from morph-chain + # Otherwise it will hang up when we will try to set status + sleep(parse_time(MORPH_BLOCK_TIME)) + + with allure.step(f"Move node {node} to online state"): + storage_node_set_status(node, status="online", retries=2) + + check_nodes.remove(node) + sleep(parse_time(MORPH_BLOCK_TIME)) + self.tick_epoch_with_retries(3) + check_node_in_map(node, shell=self.shell, alive_node=alive_node) + + @allure.title("Add one node to cluster") + @pytest.mark.add_nodes + def test_add_nodes( + self, + default_wallet, + return_nodes_after_test_run, + ): + wallet = default_wallet + placement_rule_3 = "REP 3 IN X CBF 1 SELECT 3 FROM * AS X" + placement_rule_4 = "REP 4 IN X CBF 1 SELECT 4 FROM * AS X" + source_file_path = generate_file() + + storage_nodes = self.cluster.storage_nodes + random_node = random.choice(storage_nodes[1:]) + alive_node = random.choice( + [storage_node for storage_node in storage_nodes if storage_node.id != random_node.id] + ) + + check_node_in_map(random_node, shell=self.shell, alive_node=alive_node) + + # Add node to recovery list before messing with it + check_nodes.append(random_node) + exclude_node_from_network_map( + random_node, alive_node, shell=self.shell, cluster=self.cluster + ) + delete_node_data(random_node) + + cid = create_container( + wallet, + rule=placement_rule_3, + basic_acl=PUBLIC_ACL, + shell=self.shell, + endpoint=alive_node.get_rpc_endpoint(), ) oid = put_object( wallet, source_file_path, cid, - endpoint=NEOFS_NETMAP_DICT[alive_node].get("rpc"), - shell=client_shell, + shell=self.shell, + endpoint=alive_node.get_rpc_endpoint(), ) - wait_object_replication_on_nodes(wallet, cid, oid, 4, shell=client_shell) + wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes) + self.return_nodes(alive_node) -@allure.title("Control Operations with storage nodes") -@pytest.mark.node_mgmt -def test_nodes_management(prepare_tmp_dir, client_shell, hosting: Hosting): - 
""" - This test checks base control operations with storage nodes (healthcheck, netmap-snapshot, set-status). - """ - random_node = choice(list(NEOFS_NETMAP_DICT)) - alive_node = choice([node for node in NEOFS_NETMAP_DICT if node != random_node]) + with allure.step("Check data could be replicated to new node"): + random_node = random.choice(list(set(storage_nodes) - {random_node, alive_node})) + # Add node to recovery list before messing with it + check_nodes.append(random_node) + exclude_node_from_network_map( + random_node, alive_node, shell=self.shell, cluster=self.cluster + ) - # Calculate public key that identifies node in netmap - random_node_wallet_path = NEOFS_NETMAP_DICT[random_node]["wallet_path"] - random_node_netmap_key = get_wallet_public_key(random_node_wallet_path, STORAGE_WALLET_PASS) + wait_object_replication( + cid, + oid, + 3, + shell=self.shell, + nodes=list(set(storage_nodes) - {random_node}), + ) + include_node_to_network_map( + random_node, alive_node, shell=self.shell, cluster=self.cluster + ) + wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes) - with allure.step("Check node {random_node} is in netmap"): - snapshot = get_netmap_snapshot(node_name=alive_node, shell=client_shell) - assert random_node_netmap_key in snapshot, f"Expected node {random_node} in netmap" + with allure.step("Check container could be created with new node"): + cid = create_container( + wallet, + rule=placement_rule_4, + basic_acl=PUBLIC_ACL, + shell=self.shell, + endpoint=alive_node.get_rpc_endpoint(), + ) + oid = put_object( + wallet, + source_file_path, + cid, + shell=self.shell, + endpoint=alive_node.get_rpc_endpoint(), + ) + wait_object_replication(cid, oid, 4, shell=self.shell, nodes=storage_nodes) - with allure.step("Run health check for all storage nodes"): - for node_name in NEOFS_NETMAP_DICT.keys(): - health_check = node_healthcheck(hosting, node_name) + @allure.title("Control Operations with storage nodes") + @pytest.mark.node_mgmt + 
def test_nodes_management(self, temp_directory): + """ + This test checks base control operations with storage nodes (healthcheck, netmap-snapshot, set-status). + """ + + storage_nodes = self.cluster.storage_nodes + random_node = random.choice(storage_nodes) + alive_node = random.choice(list(set(storage_nodes) - {random_node})) + + # Calculate public key that identifies node in netmap + random_node_netmap_key = random_node.get_wallet_public_key() + + with allure.step(f"Check node ({random_node}) is in netmap"): + snapshot = get_netmap_snapshot(node=alive_node, shell=self.shell) + assert ( + random_node_netmap_key in snapshot + ), f"Expected node {random_node} to be in netmap" + + with allure.step("Run health check for all storage nodes"): + for node in self.cluster.storage_nodes: + health_check = storage_node_healthcheck(node) + assert ( + health_check.health_status == "READY" + and health_check.network_status == "ONLINE" + ) + + with allure.step(f"Move node ({random_node}) to offline state"): + storage_node_set_status(random_node, status="offline") + + sleep(parse_time(MORPH_BLOCK_TIME)) + tick_epoch(self.shell, self.cluster) + + with allure.step(f"Check node {random_node} went to offline"): + health_check = storage_node_healthcheck(random_node) + assert ( + health_check.health_status == "READY" and health_check.network_status == "OFFLINE" + ) + snapshot = get_netmap_snapshot(node=alive_node, shell=self.shell) + assert ( + random_node_netmap_key not in snapshot + ), f"Expected node {random_node} not in netmap" + + with allure.step(f"Check node {random_node} went to online"): + storage_node_set_status(random_node, status="online") + + sleep(parse_time(MORPH_BLOCK_TIME)) + tick_epoch(self.shell, self.cluster) + + with allure.step(f"Check node {random_node} went to online"): + health_check = storage_node_healthcheck(random_node) assert health_check.health_status == "READY" and health_check.network_status == "ONLINE" + snapshot = get_netmap_snapshot(node=alive_node, 
shell=self.shell) + assert random_node_netmap_key in snapshot, f"Expected node {random_node} in netmap" - with allure.step(f"Move node {random_node} to offline state"): - node_set_status(hosting, random_node, status="offline") - - sleep(parse_time(MORPH_BLOCK_TIME)) - tick_epoch(shell=client_shell) - - with allure.step(f"Check node {random_node} went to offline"): - health_check = node_healthcheck(hosting, random_node) - assert health_check.health_status == "READY" and health_check.network_status == "OFFLINE" - snapshot = get_netmap_snapshot(node_name=alive_node, shell=client_shell) - assert random_node_netmap_key not in snapshot, f"Expected node {random_node} not in netmap" - - with allure.step(f"Check node {random_node} went to online"): - node_set_status(hosting, random_node, status="online") - - sleep(parse_time(MORPH_BLOCK_TIME)) - tick_epoch(shell=client_shell) - - with allure.step(f"Check node {random_node} went to online"): - health_check = node_healthcheck(hosting, random_node) - assert health_check.health_status == "READY" and health_check.network_status == "ONLINE" - snapshot = get_netmap_snapshot(node_name=alive_node, shell=client_shell) - assert random_node_netmap_key in snapshot, f"Expected node {random_node} in netmap" - - -@pytest.mark.parametrize( - "placement_rule,expected_copies", - [ - ("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", 2), - ("REP 2 IN X CBF 1 SELECT 2 FROM * AS X", 2), - ("REP 3 IN X CBF 1 SELECT 3 FROM * AS X", 3), - ("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", 1), - ("REP 1 IN X CBF 2 SELECT 1 FROM * AS X", 1), - ("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4), - ("REP 2 IN X CBF 1 SELECT 4 FROM * AS X", 2), - ], -) -@pytest.mark.node_mgmt -@allure.title("Test object copies based on placement policy") -def test_placement_policy( - prepare_wallet_and_deposit, placement_rule, expected_copies, client_shell: Shell -): - """ - This test checks object's copies based on container's placement policy. 
- """ - wallet = prepare_wallet_and_deposit - file_path = generate_file() - validate_object_copies(wallet, placement_rule, file_path, expected_copies, shell=client_shell) - - -@pytest.mark.parametrize( - "placement_rule,expected_copies,nodes", - [ - ("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4, ["s01", "s02", "s03", "s04"]), - ( - "REP 1 IN LOC_PLACE CBF 1 SELECT 1 FROM LOC_SW AS LOC_PLACE FILTER Country EQ Sweden AS LOC_SW", - 1, - ["s03"], - ), - ("REP 1 CBF 1 SELECT 1 FROM LOC_SPB FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB", 1, ["s02"]), - ( - "REP 1 IN LOC_SPB_PLACE REP 1 IN LOC_MSK_PLACE CBF 1 SELECT 1 FROM LOC_SPB AS LOC_SPB_PLACE " - "SELECT 1 FROM LOC_MSK AS LOC_MSK_PLACE " - "FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB FILTER 'UN-LOCODE' EQ 'RU MOW' AS LOC_MSK", - 2, - ["s01", "s02"], - ), - ( - "REP 4 CBF 1 SELECT 4 FROM LOC_EU FILTER Continent EQ Europe AS LOC_EU", - 4, - ["s01", "s02", "s03", "s04"], - ), - ( - "REP 1 CBF 1 SELECT 1 FROM LOC_SPB " - "FILTER 'UN-LOCODE' NE 'RU MOW' AND 'UN-LOCODE' NE 'SE STO' AND 'UN-LOCODE' NE 'FI HEL' AS LOC_SPB", - 1, - ["s02"], - ), - ( - "REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER SubDivCode NE 'AB' AND SubDivCode NE '18' AS LOC_RU", - 2, - ["s01", "s02"], - ), - ( - "REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER Country EQ 'Russia' AS LOC_RU", - 2, - ["s01", "s02"], - ), - ( - "REP 2 CBF 1 SELECT 2 FROM LOC_EU FILTER Country NE 'Russia' AS LOC_EU", - 2, - ["s03", "s04"], - ), - ], -) -@pytest.mark.node_mgmt -@allure.title("Test object copies and storage nodes based on placement policy") -def test_placement_policy_with_nodes( - prepare_wallet_and_deposit, placement_rule, expected_copies, nodes, client_shell: Shell -): - """ - Based on container's placement policy check that storage nodes are piked correctly and object has - correct copies amount. 
- """ - wallet = prepare_wallet_and_deposit - file_path = generate_file() - cid, oid, found_nodes = validate_object_copies( - wallet, placement_rule, file_path, expected_copies, shell=client_shell + @pytest.mark.parametrize( + "placement_rule,expected_copies", + [ + ("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", 2), + ("REP 2 IN X CBF 1 SELECT 2 FROM * AS X", 2), + ("REP 3 IN X CBF 1 SELECT 3 FROM * AS X", 3), + ("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", 1), + ("REP 1 IN X CBF 2 SELECT 1 FROM * AS X", 1), + ("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4), + ("REP 2 IN X CBF 1 SELECT 4 FROM * AS X", 2), + ], ) - expected_nodes = [NEOFS_NETMAP_DICT[node_name].get("rpc") for node_name in nodes] - assert set(found_nodes) == set( - expected_nodes - ), f"Expected nodes {expected_nodes}, got {found_nodes}" + @pytest.mark.node_mgmt + @allure.title("Test object copies based on placement policy") + def test_placement_policy(self, default_wallet, placement_rule, expected_copies): + """ + This test checks object's copies based on container's placement policy. + """ + wallet = default_wallet + file_path = generate_file() + self.validate_object_copies(wallet, placement_rule, file_path, expected_copies) - -@pytest.mark.parametrize( - "placement_rule,expected_copies", - [ - ("REP 2 IN X CBF 2 SELECT 6 FROM * AS X", 2), - ], -) -@pytest.mark.node_mgmt -@allure.title("Negative cases for placement policy") -def test_placement_policy_negative( - prepare_wallet_and_deposit, placement_rule, expected_copies, client_shell: Shell -): - """ - Negative test for placement policy. 
- """ - wallet = prepare_wallet_and_deposit - file_path = generate_file() - with pytest.raises(RuntimeError, match=".*not enough nodes to SELECT from.*"): - validate_object_copies( - wallet, placement_rule, file_path, expected_copies, shell=client_shell + @pytest.mark.parametrize( + "placement_rule,expected_copies,expected_nodes_id", + [ + ("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4, {1, 2, 3, 4}), + ( + "REP 1 IN LOC_PLACE CBF 1 SELECT 1 FROM LOC_SW AS LOC_PLACE FILTER Country EQ Sweden AS LOC_SW", + 1, + {3}, + ), + ( + "REP 1 CBF 1 SELECT 1 FROM LOC_SPB FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB", + 1, + {2}, + ), + ( + "REP 1 IN LOC_SPB_PLACE REP 1 IN LOC_MSK_PLACE CBF 1 SELECT 1 FROM LOC_SPB AS LOC_SPB_PLACE " + "SELECT 1 FROM LOC_MSK AS LOC_MSK_PLACE " + "FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB FILTER 'UN-LOCODE' EQ 'RU MOW' AS LOC_MSK", + 2, + {1, 2}, + ), + ( + "REP 4 CBF 1 SELECT 4 FROM LOC_EU FILTER Continent EQ Europe AS LOC_EU", + 4, + {1, 2, 3, 4}, + ), + ( + "REP 1 CBF 1 SELECT 1 FROM LOC_SPB " + "FILTER 'UN-LOCODE' NE 'RU MOW' AND 'UN-LOCODE' NE 'SE STO' AND 'UN-LOCODE' NE 'FI HEL' AS LOC_SPB", + 1, + {2}, + ), + ( + "REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER SubDivCode NE 'AB' AND SubDivCode NE '18' AS LOC_RU", + 2, + {1, 2}, + ), + ( + "REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER Country EQ 'Russia' AS LOC_RU", + 2, + {1, 2}, + ), + ( + "REP 2 CBF 1 SELECT 2 FROM LOC_EU FILTER Country NE 'Russia' AS LOC_EU", + 2, + {3, 4}, + ), + ], + ) + @pytest.mark.node_mgmt + @allure.title("Test object copies and storage nodes based on placement policy") + def test_placement_policy_with_nodes( + self, default_wallet, placement_rule, expected_copies, expected_nodes_id: set[int] + ): + """ + Based on container's placement policy check that storage nodes are piked correctly and object has + correct copies amount. 
+ """ + wallet = default_wallet + file_path = generate_file() + cid, oid, found_nodes = self.validate_object_copies( + wallet, placement_rule, file_path, expected_copies ) + assert ( + found_nodes == expected_nodes_id + ), f"Expected nodes {expected_nodes_id}, got {found_nodes}" -@pytest.mark.skip(reason="We cover this scenario in failover tests") -@pytest.mark.sanity -@pytest.mark.node_mgmt -@allure.title("NeoFS object replication on node failover") -def test_replication( - prepare_wallet_and_deposit, client_shell: Shell, after_run_start_all_nodes, hosting: Hosting -): - """ - Test checks object replication on storage not failover and come back. - """ - wallet = prepare_wallet_and_deposit - file_path = generate_file() - expected_nodes_count = 2 - - cid = create_container(wallet, basic_acl=PUBLIC_ACL) - oid = put_object(wallet, file_path, cid) - - nodes = get_nodes_with_object(wallet, cid, oid) - assert ( - len(nodes) == expected_nodes_count - ), f"Expected {expected_nodes_count} copies, got {len(nodes)}" - - node_names = [name for name, config in NEOFS_NETMAP_DICT.items() if config.get("rpc") in nodes] - stopped_nodes = stop_nodes(hosting, 1, node_names) - - wait_for_expected_object_copies(client_shell, wallet, cid, oid) - - start_nodes(hosting, stopped_nodes) - tick_epoch(shell=client_shell) - - for node_name in node_names: - wait_for_node_go_online(hosting, node_name) - - wait_for_expected_object_copies(client_shell, wallet, cid, oid) - - -@pytest.mark.node_mgmt -@allure.title("NeoFS object could be dropped using control command") -def test_drop_object(prepare_wallet_and_deposit, client_shell: Shell, hosting: Hosting): - """ - Test checks object could be dropped using `neofs-cli control drop-objects` command. 
- """ - wallet = prepare_wallet_and_deposit - file_path_simple, file_path_complex = generate_file(), generate_file(COMPLEX_OBJ_SIZE) - - locode = get_locode() - rule = f"REP 1 CBF 1 SELECT 1 FROM * FILTER 'UN-LOCODE' EQ '{locode}' AS LOC" - cid = create_container(wallet, rule=rule, shell=client_shell) - oid_simple = put_object(wallet, file_path_simple, cid, shell=client_shell) - oid_complex = put_object(wallet, file_path_complex, cid, shell=client_shell) - - for oid in (oid_simple, oid_complex): - get_object(wallet, cid, oid, client_shell) - head_object(wallet, cid, oid, client_shell) - - nodes = get_nodes_with_object(wallet, cid, oid_simple, shell=client_shell) - node_name = choice( - [name for name, config in NEOFS_NETMAP_DICT.items() if config.get("rpc") in nodes] + @pytest.mark.parametrize( + "placement_rule,expected_copies", + [ + ("REP 2 IN X CBF 2 SELECT 6 FROM * AS X", 2), + ], ) + @pytest.mark.node_mgmt + @allure.title("Negative cases for placement policy") + def test_placement_policy_negative(self, default_wallet, placement_rule, expected_copies): + """ + Negative test for placement policy. + """ + wallet = default_wallet + file_path = generate_file() + with pytest.raises(RuntimeError, match=".*not enough nodes to SELECT from.*"): + self.validate_object_copies(wallet, placement_rule, file_path, expected_copies) - for oid in (oid_simple, oid_complex): - with allure.step(f"Drop object {oid}"): - get_object(wallet, cid, oid, shell=client_shell) - head_object(wallet, cid, oid, shell=client_shell) - drop_object(hosting, node_name, cid, oid) - wait_for_obj_dropped(wallet, cid, oid, client_shell, get_object) - wait_for_obj_dropped(wallet, cid, oid, client_shell, head_object) + @pytest.mark.node_mgmt + @allure.title("NeoFS object could be dropped using control command") + def test_drop_object(self, default_wallet): + """ + Test checks object could be dropped using `neofs-cli control drop-objects` command. 
+ """ + wallet = default_wallet + endpoint = self.cluster.default_rpc_endpoint + file_path_simple, file_path_complex = generate_file(), generate_file(COMPLEX_OBJ_SIZE) + locode = get_locode_from_random_node(self.cluster) + rule = f"REP 1 CBF 1 SELECT 1 FROM * FILTER 'UN-LOCODE' EQ '{locode}' AS LOC" + cid = create_container(wallet, rule=rule, shell=self.shell, endpoint=endpoint) + oid_simple = put_object_to_random_node( + wallet, file_path_simple, cid, shell=self.shell, cluster=self.cluster + ) + oid_complex = put_object_to_random_node( + wallet, file_path_complex, cid, shell=self.shell, cluster=self.cluster + ) -@pytest.mark.node_mgmt -@pytest.mark.skip(reason="Need to clarify scenario") -@allure.title("Control Operations with storage nodes") -def test_shards( - prepare_wallet_and_deposit, - create_container_and_pick_node, - client_shell: Shell, - hosting: Hosting, -): - wallet = prepare_wallet_and_deposit - file_path = generate_file() + for oid in (oid_simple, oid_complex): + get_object_from_random_node(wallet, cid, oid, shell=self.shell, cluster=self.cluster) + head_object(wallet, cid, oid, shell=self.shell, endpoint=endpoint) - cid, node_name = create_container_and_pick_node - original_oid = put_object(wallet, file_path, cid, shell=client_shell) + nodes_with_object = get_nodes_with_object( + cid, oid_simple, shell=self.shell, nodes=self.cluster.storage_nodes + ) + random_node = random.choice(nodes_with_object) - # for mode in ('read-only', 'degraded'): - for mode in ("degraded",): - shards = node_shard_list(hosting, node_name) - assert shards + for oid in (oid_simple, oid_complex): + with allure.step(f"Drop object {oid}"): + get_object_from_random_node( + wallet, cid, oid, shell=self.shell, cluster=self.cluster + ) + head_object(wallet, cid, oid, shell=self.shell, endpoint=endpoint) + drop_object(random_node, cid, oid) + self.wait_for_obj_dropped(wallet, cid, oid, endpoint, get_object) + self.wait_for_obj_dropped(wallet, cid, oid, endpoint, head_object) - for 
shard in shards: - node_shard_set_mode(hosting, node_name, shard, mode) + @pytest.mark.node_mgmt + @pytest.mark.skip(reason="Need to clarify scenario") + @allure.title("Control Operations with storage nodes") + def test_shards( + self, + default_wallet, + create_container_and_pick_node, + ): + wallet = default_wallet + file_path = generate_file() - shards = node_shard_list(hosting, node_name) - assert shards + cid, node = create_container_and_pick_node + original_oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster) - with pytest.raises(RuntimeError): - put_object(wallet, file_path, cid, shell=client_shell) + # for mode in ('read-only', 'degraded'): + for mode in ("degraded",): + shards = node_shard_list(node) + assert shards - with pytest.raises(RuntimeError): - delete_object(wallet, cid, original_oid, shell=client_shell) + for shard in shards: + node_shard_set_mode(node, shard, mode) - get_object(wallet, cid, original_oid, shell=client_shell) + shards = node_shard_list(node) + assert shards - for shard in shards: - node_shard_set_mode(hosting, node_name, shard, "read-write") + with pytest.raises(RuntimeError): + put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster) - shards = node_shard_list(hosting, node_name) - assert shards + with pytest.raises(RuntimeError): + delete_object( + wallet, cid, original_oid, self.shell, self.cluster.default_rpc_endpoint + ) - oid = put_object(wallet, file_path, cid, shell=client_shell) - delete_object(wallet, cid, oid, shell=client_shell) + get_object_from_random_node(wallet, cid, original_oid, self.shell, self.cluster) + for shard in shards: + node_shard_set_mode(node, shard, "read-write") -@allure.step("Validate object has {expected_copies} copies") -def validate_object_copies( - wallet: str, placement_rule: str, file_path: str, expected_copies: int, shell: Shell -): - cid = create_container(wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=shell) - got_policy = 
placement_policy_from_container( - get_container(wallet, cid, json_mode=False, shell=shell) - ) - assert got_policy == placement_rule.replace( - "'", "" - ), f"Expected \n{placement_rule} and got policy \n{got_policy} are the same" - oid = put_object(wallet, file_path, cid, shell=shell) - nodes = get_nodes_with_object(wallet, cid, oid, shell=shell) - assert len(nodes) == expected_copies, f"Expected {expected_copies} copies, got {len(nodes)}" - return cid, oid, nodes + shards = node_shard_list(node) + assert shards + oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster) + delete_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint) -@allure.step("Wait for node {node_name} goes online") -def wait_for_node_go_online(hosting: Hosting, node_name: str) -> None: - timeout, attempts = 5, 20 - for _ in range(attempts): - try: - health_check = node_healthcheck(hosting, node_name) - assert health_check.health_status == "READY" and health_check.network_status == "ONLINE" - return - except Exception as err: - logger.warning(f"Node {node_name} is not online:\n{err}") - sleep(timeout) - raise AssertionError( - f"Node {node_name} hasn't gone to the READY and ONLINE state after {timeout * attempts} second" - ) + @allure.step("Validate object has {expected_copies} copies") + def validate_object_copies( + self, wallet: str, placement_rule: str, file_path: str, expected_copies: int + ) -> set[int]: + endpoint = self.cluster.default_rpc_endpoint + cid = create_container( + wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint + ) + got_policy = placement_policy_from_container( + get_container(wallet, cid, json_mode=False, shell=self.shell, endpoint=endpoint) + ) + assert got_policy == placement_rule.replace( + "'", "" + ), f"Expected \n{placement_rule} and got policy \n{got_policy} are the same" + oid = put_object_to_random_node( + wallet, file_path, cid, shell=self.shell, cluster=self.cluster + ) + nodes = 
get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes) + nodes_id = {node.id for node in nodes} + assert len(nodes) == expected_copies, f"Expected {expected_copies} copies, got {len(nodes)}" + return cid, oid, nodes_id - -@allure.step("Wait for node {node_name} is ready") -def wait_for_node_to_be_ready(hosting: Hosting, node_name: str) -> None: - timeout, attempts = 30, 6 - for _ in range(attempts): - try: - health_check = node_healthcheck(hosting, node_name) - if health_check.health_status == "READY": + @allure.step("Wait for node {node} goes online") + def wait_for_node_go_online(self, node: StorageNode) -> None: + timeout, attempts = 5, 20 + for _ in range(attempts): + try: + health_check = storage_node_healthcheck(node) + assert ( + health_check.health_status == "READY" + and health_check.network_status == "ONLINE" + ) return - except Exception as err: - logger.warning(f"Node {node_name} is not ready:\n{err}") - sleep(timeout) - raise AssertionError( - f"Node {node_name} hasn't gone to the READY state after {timeout * attempts} seconds" - ) + except Exception as err: + logger.warning(f"Node {node} is not online:\n{err}") + sleep(timeout) + raise AssertionError( + f"Node {node} hasn't gone to the READY and ONLINE state after {timeout * attempts} second" + ) + @allure.step("Wait for node {node} is ready") + def wait_for_node_to_be_ready(self, node: StorageNode) -> None: + timeout, attempts = 30, 6 + for _ in range(attempts): + try: + health_check = storage_node_healthcheck(node) + if health_check.health_status == "READY": + return + except Exception as err: + logger.warning(f"Node {node} is not ready:\n{err}") + sleep(timeout) + raise AssertionError( + f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds" + ) -@allure.step("Wait for {expected_copies} object copies in the wallet") -def wait_for_expected_object_copies( - shell: Shell, wallet: str, cid: str, oid: str, expected_copies: int = 2 -) -> None: - for 
i in range(2): - copies = get_simple_object_copies(wallet, cid, oid) - if copies == expected_copies: - break - tick_epoch(shell=shell) - sleep(parse_time(NEOFS_CONTRACT_CACHE_TIMEOUT)) - else: - raise AssertionError(f"There are no {expected_copies} copies during time") + @allure.step("Wait for {expected_copies} object copies in the wallet") + def wait_for_expected_object_copies( + self, wallet: str, cid: str, oid: str, expected_copies: int = 2 + ) -> None: + nodes = self.cluster.storage_nodes + for _ in range(2): + copies = get_simple_object_copies(wallet, cid, oid, self.shell, nodes) + if copies == expected_copies: + break + tick_epoch(self.shell, self.cluster) + sleep(parse_time(NEOFS_CONTRACT_CACHE_TIMEOUT)) + else: + raise AssertionError(f"There are no {expected_copies} copies during time") + @allure.step("Wait for object to be dropped") + def wait_for_obj_dropped(self, wallet: str, cid: str, oid: str, endpoint: str, checker) -> None: + for _ in range(3): + try: + checker(wallet, cid, oid, shell=self.shell, endpoint=endpoint) + wait_for_gc_pass_on_storage_nodes() + except Exception as err: + if error_matches_status(err, OBJECT_NOT_FOUND): + return + raise AssertionError(f'Expected "{OBJECT_NOT_FOUND}" error, got\n{err}') -@allure.step("Wait for object to be dropped") -def wait_for_obj_dropped(wallet: str, cid: str, oid: str, shell: Shell, checker) -> None: - for _ in range(3): - try: - checker(wallet, cid, oid, shell=shell) - wait_for_gc_pass_on_storage_nodes() - except Exception as err: - if error_matches_status(err, OBJECT_NOT_FOUND): - return - raise AssertionError(f'Expected "{OBJECT_NOT_FOUND}" error, got\n{err}') - - raise AssertionError(f"Object {oid} was not dropped from node") + raise AssertionError(f"Object {oid} was not dropped from node") diff --git a/pytest_tests/testsuites/object/test_object_api.py b/pytest_tests/testsuites/object/test_object_api.py index bcbafab..a22247c 100755 --- a/pytest_tests/testsuites/object/test_object_api.py +++ 
b/pytest_tests/testsuites/object/test_object_api.py @@ -4,6 +4,7 @@ import sys import allure import pytest +from cluster import Cluster from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE from container import create_container from file_helper import generate_file, get_file_content, get_file_hash @@ -12,16 +13,17 @@ from neofs_testlib.shell import Shell from pytest import FixtureRequest from python_keywords.neofs_verbs import ( get_netmap_netinfo, - get_object, + get_object_from_random_node, get_range, get_range_hash, head_object, - put_object, + put_object_to_random_node, search_object, ) from python_keywords.storage_policy import get_complex_object_copies, get_simple_object_copies from helpers.storage_object_info import StorageObjectInfo +from steps.cluster_test_base import ClusterTestBase from steps.storage_object import delete_objects logger = logging.getLogger("NeoLogger") @@ -87,11 +89,11 @@ def generate_ranges(file_size: int, max_object_size: int) -> list[(int, int)]: scope="module", ) def storage_objects( - prepare_wallet_and_deposit: str, client_shell: Shell, request: FixtureRequest + default_wallet: str, client_shell: Shell, cluster: Cluster, request: FixtureRequest ) -> list[StorageObjectInfo]: - wallet = prepare_wallet_and_deposit + wallet = default_wallet # Separate containers for complex/simple objects to avoid side-effects - cid = create_container(wallet, shell=client_shell) + cid = create_container(wallet, shell=client_shell, endpoint=cluster.default_rpc_endpoint) file_path = generate_file(request.param) file_hash = get_file_hash(file_path) @@ -101,11 +103,12 @@ def storage_objects( with allure.step("Put objects"): # We need to upload objects multiple times with different attributes for attributes in OBJECT_ATTRIBUTES: - storage_object_id = put_object( + storage_object_id = put_object_to_random_node( wallet=wallet, path=file_path, cid=cid, shell=client_shell, + cluster=cluster, attributes=attributes, ) @@ -121,357 +124,390 @@ def storage_objects( 
yield storage_objects # Teardown after all tests done with current param - delete_objects(storage_objects, client_shell) + delete_objects(storage_objects, client_shell, cluster) -@allure.title("Validate object storage policy by native API") @pytest.mark.sanity @pytest.mark.grpc_api -def test_object_storage_policies( - client_shell: Shell, request: FixtureRequest, storage_objects: list[StorageObjectInfo] -): - """ - Validate object storage policy - """ - allure.dynamic.title( - f"Validate object storage policy by native API for {request.node.callspec.id}" - ) +class TestObjectApi(ClusterTestBase): + @allure.title("Validate object storage policy by native API") + def test_object_storage_policies( + self, request: FixtureRequest, storage_objects: list[StorageObjectInfo] + ): + """ + Validate object storage policy + """ + allure.dynamic.title( + f"Validate object storage policy by native API for {request.node.callspec.id}" + ) - with allure.step("Validate storage policy for objects"): - for storage_object in storage_objects: - if storage_object.size == SIMPLE_OBJ_SIZE: - copies = get_simple_object_copies( + with allure.step("Validate storage policy for objects"): + for storage_object in storage_objects: + if storage_object.size == SIMPLE_OBJ_SIZE: + copies = get_simple_object_copies( + storage_object.wallet_file_path, + storage_object.cid, + storage_object.oid, + shell=self.shell, + nodes=self.cluster.storage_nodes, + ) + else: + copies = get_complex_object_copies( + storage_object.wallet_file_path, + storage_object.cid, + storage_object.oid, + shell=self.shell, + nodes=self.cluster.storage_nodes, + ) + assert copies == 2, "Expected 2 copies" + + @allure.title("Validate get object native API") + def test_get_object_api( + self, request: FixtureRequest, storage_objects: list[StorageObjectInfo] + ): + """ + Validate get object native API + """ + allure.dynamic.title(f"Validate get object native API for {request.node.callspec.id}") + + with allure.step("Get objects and 
compare hashes"): + for storage_object in storage_objects: + file_path = get_object_from_random_node( storage_object.wallet_file_path, storage_object.cid, storage_object.oid, - shell=client_shell, + self.shell, + cluster=self.cluster, ) - else: - copies = get_complex_object_copies( - storage_object.wallet_file_path, - storage_object.cid, - storage_object.oid, - shell=client_shell, - ) - assert copies == 2, "Expected 2 copies" + file_hash = get_file_hash(file_path) + assert storage_object.file_hash == file_hash + @allure.title("Validate head object native API") + def test_head_object_api( + self, request: FixtureRequest, storage_objects: list[StorageObjectInfo] + ): + """ + Validate head object native API + """ + allure.dynamic.title(f"Validate head object by native API for {request.node.callspec.id}") -@allure.title("Validate get object native API") -@pytest.mark.sanity -@pytest.mark.grpc_api -def test_get_object_api( - client_shell: Shell, request: FixtureRequest, storage_objects: list[StorageObjectInfo] -): - """ - Validate get object native API - """ - allure.dynamic.title(f"Validate get object native API for {request.node.callspec.id}") + storage_object_1 = storage_objects[0] + storage_object_2 = storage_objects[1] - with allure.step("Get objects and compare hashes"): - for storage_object in storage_objects: - file_path = get_object( - storage_object.wallet_file_path, - storage_object.cid, - storage_object.oid, - client_shell, + with allure.step("Head object and validate"): + head_object( + storage_object_1.wallet_file_path, + storage_object_1.cid, + storage_object_1.oid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) - file_hash = get_file_hash(file_path) - assert storage_object.file_hash == file_hash + head_info = head_object( + storage_object_2.wallet_file_path, + storage_object_2.cid, + storage_object_2.oid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + ) + self.check_header_is_presented(head_info, 
storage_object_2.attributes) + @allure.title("Validate object search by native API") + def test_search_object_api( + self, request: FixtureRequest, storage_objects: list[StorageObjectInfo] + ): + """ + Validate object search by native API + """ + allure.dynamic.title(f"Validate object search by native API for {request.node.callspec.id}") -@allure.title("Validate head object native API") -@pytest.mark.sanity -@pytest.mark.grpc_api -def test_head_object_api( - client_shell: Shell, request: FixtureRequest, storage_objects: list[StorageObjectInfo] -): - """ - Validate head object native API - """ - allure.dynamic.title(f"Validate head object by native API for {request.node.callspec.id}") + oids = [storage_object.oid for storage_object in storage_objects] + wallet = storage_objects[0].wallet_file_path + cid = storage_objects[0].cid - storage_object_1 = storage_objects[0] - storage_object_2 = storage_objects[1] + test_table = [ + (OBJECT_ATTRIBUTES[1], oids[1:2]), + (OBJECT_ATTRIBUTES[2], oids[2:3]), + (COMMON_ATTRIBUTE, oids[1:3]), + ] - with allure.step("Head object and validate"): - head_object( - storage_object_1.wallet_file_path, - storage_object_1.cid, - storage_object_1.oid, - shell=client_shell, - ) - head_info = head_object( - storage_object_2.wallet_file_path, - storage_object_2.cid, - storage_object_2.oid, - shell=client_shell, - ) - check_header_is_presented(head_info, storage_object_2.attributes) - - -@allure.title("Validate object search by native API") -@pytest.mark.sanity -@pytest.mark.grpc_api -def test_search_object_api( - client_shell: Shell, request: FixtureRequest, storage_objects: list[StorageObjectInfo] -): - """ - Validate object search by native API - """ - allure.dynamic.title(f"Validate object search by native API for {request.node.callspec.id}") - - oids = [storage_object.oid for storage_object in storage_objects] - wallet = storage_objects[0].wallet_file_path - cid = storage_objects[0].cid - - test_table = [ - (OBJECT_ATTRIBUTES[1], 
oids[1:2]), - (OBJECT_ATTRIBUTES[2], oids[2:3]), - (COMMON_ATTRIBUTE, oids[1:3]), - ] - - with allure.step("Search objects"): - # Search with no attributes - result = search_object( - wallet, cid, shell=client_shell, expected_objects_list=oids, root=True - ) - assert sorted(oids) == sorted(result) - - # search by test table - for filter, expected_oids in test_table: + with allure.step("Search objects"): + # Search with no attributes result = search_object( wallet, cid, - shell=client_shell, - filters=filter, - expected_objects_list=expected_oids, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + expected_objects_list=oids, root=True, ) - assert sorted(expected_oids) == sorted(result) + assert sorted(oids) == sorted(result) + # search by test table + for filter, expected_oids in test_table: + result = search_object( + wallet, + cid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + filters=filter, + expected_objects_list=expected_oids, + root=True, + ) + assert sorted(expected_oids) == sorted(result) -@allure.title("Validate object search with removed items") -@pytest.mark.sanity -@pytest.mark.grpc_api -@pytest.mark.parametrize( - "object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"] -) -def test_object_search_should_return_tombstone_items( - prepare_wallet_and_deposit: str, client_shell: Shell, request: FixtureRequest, object_size: int -): - """ - Validate object search with removed items - """ - allure.dynamic.title( - f"Validate object search with removed items for {request.node.callspec.id}" + @allure.title("Validate object search with removed items") + @pytest.mark.parametrize( + "object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"] ) - - wallet = prepare_wallet_and_deposit - cid = create_container(wallet, shell=client_shell) - - with allure.step("Upload file"): - file_path = generate_file(object_size) - file_hash = get_file_hash(file_path) - - 
storage_object = StorageObjectInfo( - cid=cid, - oid=put_object(wallet, file_path, cid, shell=client_shell), - size=object_size, - wallet_file_path=wallet, - file_path=file_path, - file_hash=file_hash, + def test_object_search_should_return_tombstone_items( + self, default_wallet: str, request: FixtureRequest, object_size: int + ): + """ + Validate object search with removed items + """ + allure.dynamic.title( + f"Validate object search with removed items for {request.node.callspec.id}" ) - with allure.step("Search object"): - # Root Search object should return root object oid - result = search_object(wallet, cid, shell=client_shell, root=True) - assert result == [storage_object.oid] + wallet = default_wallet + cid = create_container(wallet, self.shell, self.cluster.default_rpc_endpoint) - with allure.step("Delete file"): - delete_objects([storage_object], client_shell) + with allure.step("Upload file"): + file_path = generate_file(object_size) + file_hash = get_file_hash(file_path) - with allure.step("Search deleted object with --root"): - # Root Search object should return nothing - result = search_object(wallet, cid, shell=client_shell, root=True) - assert len(result) == 0 + storage_object = StorageObjectInfo( + cid=cid, + oid=put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster), + size=object_size, + wallet_file_path=wallet, + file_path=file_path, + file_hash=file_hash, + ) - with allure.step("Search deleted object with --phy should return only tombstones"): - # Physical Search object should return only tombstones - result = search_object(wallet, cid, shell=client_shell, phy=True) - assert ( - storage_object.tombstone in result - ), f"Search result should contain tombstone of removed object" - assert ( - storage_object.oid not in result - ), f"Search result should not contain ObjectId of removed object" - for tombstone_oid in result: - header = head_object(wallet, cid, tombstone_oid, shell=client_shell)["header"] - object_type = 
header["objectType"] + with allure.step("Search object"): + # Root Search object should return root object oid + result = search_object( + wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True + ) + assert result == [storage_object.oid] + + with allure.step("Delete file"): + delete_objects([storage_object], self.shell, self.cluster) + + with allure.step("Search deleted object with --root"): + # Root Search object should return nothing + result = search_object( + wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True + ) + assert len(result) == 0 + + with allure.step("Search deleted object with --phy should return only tombstones"): + # Physical Search object should return only tombstones + result = search_object( + wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, phy=True + ) assert ( - object_type == "TOMBSTONE" - ), f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}" - - -@allure.title("Validate native object API get_range_hash") -@pytest.mark.sanity -@pytest.mark.grpc_api -def test_object_get_range_hash( - client_shell: Shell, request: FixtureRequest, storage_objects: list[StorageObjectInfo] -): - """ - Validate get_range_hash for object by common gRPC API - """ - allure.dynamic.title( - f"Validate native get_range_hash object API for {request.node.callspec.id}" - ) - - wallet = storage_objects[0].wallet_file_path - cid = storage_objects[0].cid - oids = [storage_object.oid for storage_object in storage_objects[:2]] - file_path = storage_objects[0].file_path - net_info = get_netmap_netinfo(wallet, client_shell) - max_object_size = net_info["maximum_object_size"] - - file_ranges_to_test = generate_ranges(storage_objects[0].size, max_object_size) - logging.info(f"Ranges used in test {file_ranges_to_test}") - - for range_start, range_end in file_ranges_to_test: - range_len = range_end - range_start - range_cut = f"{range_start}:{range_len}" - 
with allure.step(f"Get range hash ({range_cut})"): - for oid in oids: - range_hash = get_range_hash( - wallet, cid, oid, shell=client_shell, range_cut=range_cut - ) + storage_object.tombstone in result + ), "Search result should contain tombstone of removed object" + assert ( + storage_object.oid not in result + ), "Search result should not contain ObjectId of removed object" + for tombstone_oid in result: + header = head_object( + wallet, + cid, + tombstone_oid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + )["header"] + object_type = header["objectType"] assert ( - get_file_hash(file_path, range_len, range_start) == range_hash - ), f"Expected range hash to match {range_cut} slice of file payload" + object_type == "TOMBSTONE" + ), f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}" + @allure.title("Validate native object API get_range_hash") + @pytest.mark.sanity + @pytest.mark.grpc_api + def test_object_get_range_hash( + self, request: FixtureRequest, storage_objects: list[StorageObjectInfo] + ): + """ + Validate get_range_hash for object by common gRPC API + """ + allure.dynamic.title( + f"Validate native get_range_hash object API for {request.node.callspec.id}" + ) -@allure.title("Validate native object API get_range") -@pytest.mark.sanity -@pytest.mark.grpc_api -def test_object_get_range( - client_shell: Shell, request: FixtureRequest, storage_objects: list[StorageObjectInfo] -): - """ - Validate get_range for object by common gRPC API - """ - allure.dynamic.title(f"Validate native get_range object API for {request.node.callspec.id}") + wallet = storage_objects[0].wallet_file_path + cid = storage_objects[0].cid + oids = [storage_object.oid for storage_object in storage_objects[:2]] + file_path = storage_objects[0].file_path + net_info = get_netmap_netinfo( + wallet, self.shell, endpoint=self.cluster.default_rpc_endpoint + ) + max_object_size = net_info["maximum_object_size"] - wallet = 
storage_objects[0].wallet_file_path - cid = storage_objects[0].cid - oids = [storage_object.oid for storage_object in storage_objects[:2]] - file_path = storage_objects[0].file_path - net_info = get_netmap_netinfo(wallet, client_shell) - max_object_size = net_info["maximum_object_size"] + file_ranges_to_test = generate_ranges(storage_objects[0].size, max_object_size) + logging.info(f"Ranges used in test {file_ranges_to_test}") - file_ranges_to_test = generate_ranges(storage_objects[0].size, max_object_size) - logging.info(f"Ranges used in test {file_ranges_to_test}") - - for range_start, range_end in file_ranges_to_test: - range_len = range_end - range_start - range_cut = f"{range_start}:{range_len}" - with allure.step(f"Get range ({range_cut})"): - for oid in oids: - _, range_content = get_range( - wallet, cid, oid, shell=client_shell, range_cut=range_cut - ) - assert ( - get_file_content( - file_path, content_len=range_len, mode="rb", offset=range_start + for range_start, range_end in file_ranges_to_test: + range_len = range_end - range_start + range_cut = f"{range_start}:{range_len}" + with allure.step(f"Get range hash ({range_cut})"): + for oid in oids: + range_hash = get_range_hash( + wallet, + cid, + oid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + range_cut=range_cut, ) - == range_content - ), f"Expected range content to match {range_cut} slice of file payload" + assert ( + get_file_hash(file_path, range_len, range_start) == range_hash + ), f"Expected range hash to match {range_cut} slice of file payload" + @allure.title("Validate native object API get_range") + @pytest.mark.sanity + @pytest.mark.grpc_api + def test_object_get_range( + self, request: FixtureRequest, storage_objects: list[StorageObjectInfo] + ): + """ + Validate get_range for object by common gRPC API + """ + allure.dynamic.title(f"Validate native get_range object API for {request.node.callspec.id}") -@allure.title("Validate native object API get_range negative cases") 
-@pytest.mark.sanity -@pytest.mark.grpc_api -def test_object_get_range_negatives( - client_shell: Shell, - request: FixtureRequest, - storage_objects: list[StorageObjectInfo], -): - """ - Validate get_range negative for object by common gRPC API - """ - allure.dynamic.title( - f"Validate native get_range negative object API for {request.node.callspec.id}" - ) + wallet = storage_objects[0].wallet_file_path + cid = storage_objects[0].cid + oids = [storage_object.oid for storage_object in storage_objects[:2]] + file_path = storage_objects[0].file_path + net_info = get_netmap_netinfo( + wallet, self.shell, endpoint=self.cluster.default_rpc_endpoint + ) + max_object_size = net_info["maximum_object_size"] - wallet = storage_objects[0].wallet_file_path - cid = storage_objects[0].cid - oids = [storage_object.oid for storage_object in storage_objects[:2]] - file_size = storage_objects[0].size + file_ranges_to_test = generate_ranges(storage_objects[0].size, max_object_size) + logging.info(f"Ranges used in test {file_ranges_to_test}") - assert ( - RANGE_MIN_LEN < file_size - ), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})" + for range_start, range_end in file_ranges_to_test: + range_len = range_end - range_start + range_cut = f"{range_start}:{range_len}" + with allure.step(f"Get range ({range_cut})"): + for oid in oids: + _, range_content = get_range( + wallet, + cid, + oid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + range_cut=range_cut, + ) + assert ( + get_file_content( + file_path, content_len=range_len, mode="rb", offset=range_start + ) + == range_content + ), f"Expected range content to match {range_cut} slice of file payload" - file_ranges_to_test = [ - # Offset is bigger than the file size, the length is small. - (file_size + 1, RANGE_MIN_LEN), - # Offset is ok, but offset+length is too big. - (file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2), - # Offset is ok, and length is very-very big (e.g. 
MaxUint64) so that offset+length is wrapped and still "valid". - (RANGE_MIN_LEN, sys.maxsize * 2 + 1), - ] + @allure.title("Validate native object API get_range negative cases") + @pytest.mark.sanity + @pytest.mark.grpc_api + def test_object_get_range_negatives( + self, + request: FixtureRequest, + storage_objects: list[StorageObjectInfo], + ): + """ + Validate get_range negative for object by common gRPC API + """ + allure.dynamic.title( + f"Validate native get_range negative object API for {request.node.callspec.id}" + ) - for range_start, range_len in file_ranges_to_test: - range_cut = f"{range_start}:{range_len}" - with allure.step(f"Get range ({range_cut})"): - for oid in oids: - with pytest.raises(Exception, match=OUT_OF_RANGE): - get_range(wallet, cid, oid, shell=client_shell, range_cut=range_cut) + wallet = storage_objects[0].wallet_file_path + cid = storage_objects[0].cid + oids = [storage_object.oid for storage_object in storage_objects[:2]] + file_size = storage_objects[0].size - -@allure.title("Validate native object API get_range_hash negative cases") -@pytest.mark.sanity -@pytest.mark.grpc_api -def test_object_get_range_hash_negatives( - client_shell: Shell, - request: FixtureRequest, - storage_objects: list[StorageObjectInfo], -): - """ - Validate get_range_hash negative for object by common gRPC API - """ - allure.dynamic.title( - f"Validate native get_range_hash negative object API for {request.node.callspec.id}" - ) - - wallet = storage_objects[0].wallet_file_path - cid = storage_objects[0].cid - oids = [storage_object.oid for storage_object in storage_objects[:2]] - file_size = storage_objects[0].size - - assert ( - RANGE_MIN_LEN < file_size - ), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})" - - file_ranges_to_test = [ - # Offset is bigger than the file size, the length is small. - (file_size + 1, RANGE_MIN_LEN), - # Offset is ok, but offset+length is too big. 
- (file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2), - # Offset is ok, and length is very-very big (e.g. MaxUint64) so that offset+length is wrapped and still "valid". - (RANGE_MIN_LEN, sys.maxsize * 2 + 1), - ] - - for range_start, range_len in file_ranges_to_test: - range_cut = f"{range_start}:{range_len}" - with allure.step(f"Get range ({range_cut})"): - for oid in oids: - with pytest.raises(Exception, match=OUT_OF_RANGE): - get_range_hash(wallet, cid, oid, shell=client_shell, range_cut=range_cut) - - -def check_header_is_presented(head_info: dict, object_header: dict) -> None: - for key_to_check, val_to_check in object_header.items(): assert ( - key_to_check in head_info["header"]["attributes"] - ), f"Key {key_to_check} is found in {head_object}" - assert head_info["header"]["attributes"].get(key_to_check) == str( - val_to_check - ), f"Value {val_to_check} is equal" + RANGE_MIN_LEN < file_size + ), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})" + + file_ranges_to_test = [ + # Offset is bigger than the file size, the length is small. + (file_size + 1, RANGE_MIN_LEN), + # Offset is ok, but offset+length is too big. + (file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2), + # Offset is ok, and length is very-very big (e.g. MaxUint64) so that offset+length is wrapped and still "valid". 
+ (RANGE_MIN_LEN, sys.maxsize * 2 + 1), + ] + + for range_start, range_len in file_ranges_to_test: + range_cut = f"{range_start}:{range_len}" + with allure.step(f"Get range ({range_cut})"): + for oid in oids: + with pytest.raises(Exception, match=OUT_OF_RANGE): + get_range( + wallet, + cid, + oid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + range_cut=range_cut, + ) + + @allure.title("Validate native object API get_range_hash negative cases") + def test_object_get_range_hash_negatives( + self, + request: FixtureRequest, + storage_objects: list[StorageObjectInfo], + ): + """ + Validate get_range_hash negative for object by common gRPC API + """ + allure.dynamic.title( + f"Validate native get_range_hash negative object API for {request.node.callspec.id}" + ) + + wallet = storage_objects[0].wallet_file_path + cid = storage_objects[0].cid + oids = [storage_object.oid for storage_object in storage_objects[:2]] + file_size = storage_objects[0].size + + assert ( + RANGE_MIN_LEN < file_size + ), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})" + + file_ranges_to_test = [ + # Offset is bigger than the file size, the length is small. + (file_size + 1, RANGE_MIN_LEN), + # Offset is ok, but offset+length is too big. + (file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2), + # Offset is ok, and length is very-very big (e.g. MaxUint64) so that offset+length is wrapped and still "valid". 
+ (RANGE_MIN_LEN, sys.maxsize * 2 + 1), + ] + + for range_start, range_len in file_ranges_to_test: + range_cut = f"{range_start}:{range_len}" + with allure.step(f"Get range ({range_cut})"): + for oid in oids: + with pytest.raises(Exception, match=OUT_OF_RANGE): + get_range_hash( + wallet, + cid, + oid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + range_cut=range_cut, + ) + + def check_header_is_presented(self, head_info: dict, object_header: dict) -> None: + for key_to_check, val_to_check in object_header.items(): + assert ( + key_to_check in head_info["header"]["attributes"] + ), f"Key {key_to_check} is found in {head_object}" + assert head_info["header"]["attributes"].get(key_to_check) == str( + val_to_check + ), f"Value {val_to_check} is equal" diff --git a/pytest_tests/testsuites/object/test_object_lifetime.py b/pytest_tests/testsuites/object/test_object_lifetime.py index be87e16..81475c6 100644 --- a/pytest_tests/testsuites/object/test_object_lifetime.py +++ b/pytest_tests/testsuites/object/test_object_lifetime.py @@ -7,46 +7,52 @@ from container import create_container from epoch import get_epoch, tick_epoch from file_helper import generate_file, get_file_hash from grpc_responses import OBJECT_NOT_FOUND -from neofs_testlib.shell import Shell from pytest import FixtureRequest -from python_keywords.neofs_verbs import get_object, put_object +from python_keywords.neofs_verbs import get_object_from_random_node, put_object_to_random_node from utility import wait_for_gc_pass_on_storage_nodes +from steps.cluster_test_base import ClusterTestBase + logger = logging.getLogger("NeoLogger") -@allure.title("Test object life time") @pytest.mark.sanity @pytest.mark.grpc_api -@pytest.mark.parametrize( - "object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"] -) -def test_object_api_lifetime( - prepare_wallet_and_deposit: str, client_shell: Shell, request: FixtureRequest, object_size: int -): - """ - Test object 
deleted after expiration epoch. - """ - wallet = prepare_wallet_and_deposit - cid = create_container(wallet, shell=client_shell) +class ObjectApiLifetimeTest(ClusterTestBase): + @allure.title("Test object life time") + @pytest.mark.parametrize( + "object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"] + ) + def test_object_api_lifetime( + self, default_wallet: str, request: FixtureRequest, object_size: int + ): + """ + Test object deleted after expiration epoch. + """ - allure.dynamic.title(f"Test object life time for {request.node.callspec.id}") + allure.dynamic.title(f"Test object life time for {request.node.callspec.id}") - file_path = generate_file(object_size) - file_hash = get_file_hash(file_path) - epoch = get_epoch(shell=client_shell) + wallet = default_wallet + endpoint = self.cluster.default_rpc_endpoint + cid = create_container(wallet, self.shell, endpoint) - oid = put_object(wallet, file_path, cid, shell=client_shell, expire_at=epoch + 1) - got_file = get_object(wallet, cid, oid, shell=client_shell) - assert get_file_hash(got_file) == file_hash + file_path = generate_file(object_size) + file_hash = get_file_hash(file_path) + epoch = get_epoch(self.shell, self.cluster) - with allure.step("Tick two epochs"): - for _ in range(2): - tick_epoch(shell=client_shell) + oid = put_object_to_random_node( + wallet, file_path, cid, self.shell, self.cluster, expire_at=epoch + 1 + ) + got_file = get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster) + assert get_file_hash(got_file) == file_hash - # Wait for GC, because object with expiration is counted as alive until GC removes it - wait_for_gc_pass_on_storage_nodes() + with allure.step("Tick two epochs"): + for _ in range(2): + tick_epoch(self.shell, self.cluster) - with allure.step("Check object deleted because it expires-on epoch"): - with pytest.raises(Exception, match=OBJECT_NOT_FOUND): - get_object(wallet, cid, oid, shell=client_shell) + # Wait for GC, because 
object with expiration is counted as alive until GC removes it + wait_for_gc_pass_on_storage_nodes() + + with allure.step("Check object deleted because it expires-on epoch"): + with pytest.raises(Exception, match=OBJECT_NOT_FOUND): + get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster) diff --git a/pytest_tests/testsuites/object/test_object_lock.py b/pytest_tests/testsuites/object/test_object_lock.py index 590c499..b4379be 100755 --- a/pytest_tests/testsuites/object/test_object_lock.py +++ b/pytest_tests/testsuites/object/test_object_lock.py @@ -3,6 +3,8 @@ import re import allure import pytest +from cluster import Cluster +from cluster_test_base import ClusterTestBase from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE, STORAGE_GC_TIME from complex_object_actions import get_link_object from container import create_container @@ -22,9 +24,11 @@ from python_keywords.neofs_verbs import delete_object, head_object, lock_object from test_control import expect_not_raises, wait_for_success from utility import parse_time, wait_for_gc_pass_on_storage_nodes +import steps from helpers.container import StorageContainer, StorageContainerInfo from helpers.storage_object_info import LockObjectInfo, StorageObjectInfo from helpers.wallet import WalletFactory, WalletFile +from steps.cluster_test_base import ClusterTestBase from steps.storage_object import delete_objects logger = logging.getLogger("NeoLogger") @@ -33,25 +37,6 @@ FIXTURE_LOCK_LIFETIME = 5 FIXTURE_OBJECT_LIFETIME = 10 -def get_storage_object_chunks(storage_object: StorageObjectInfo, shell: Shell): - with allure.step(f"Get complex object chunks (f{storage_object.oid})"): - split_object_id = get_link_object( - storage_object.wallet_file_path, - storage_object.cid, - storage_object.oid, - shell, - is_direct=False, - ) - head = head_object( - storage_object.wallet_file_path, storage_object.cid, split_object_id, shell - ) - - chunks_object_ids = [] - if "split" in head["header"] and "children" in 
head["header"]["split"]: - chunks_object_ids = head["header"]["split"]["children"] - return chunks_object_ids - - @pytest.fixture( scope="module", ) @@ -64,9 +49,11 @@ def user_wallet(wallet_factory: WalletFactory): @pytest.fixture( scope="module", ) -def user_container(user_wallet: WalletFile, client_shell: Shell): - container_id = create_container(user_wallet.path, shell=client_shell) - return StorageContainer(StorageContainerInfo(container_id, user_wallet), client_shell) +def user_container(user_wallet: WalletFile, client_shell: Shell, cluster: Cluster): + container_id = create_container( + user_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint + ) + return StorageContainer(StorageContainerInfo(container_id, user_wallet), client_shell, cluster) @pytest.fixture( @@ -75,10 +62,11 @@ def user_container(user_wallet: WalletFile, client_shell: Shell): def locked_storage_object( user_container: StorageContainer, client_shell: Shell, + cluster: Cluster, request: FixtureRequest, ): - with allure.step(f"Creating locked object"): - current_epoch = ensure_fresh_epoch(client_shell) + with allure.step("Creating locked object"): + current_epoch = ensure_fresh_epoch(client_shell, cluster) expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME storage_object = user_container.generate_object( @@ -89,6 +77,7 @@ def locked_storage_object( storage_object.cid, storage_object.oid, client_shell, + cluster.default_rpc_endpoint, lifetime=FIXTURE_LOCK_LIFETIME, ) storage_object.locks = [ @@ -99,20 +88,21 @@ def locked_storage_object( yield storage_object - with allure.step(f"Delete created locked object"): - current_epoch = get_epoch(client_shell) + with allure.step("Delete created locked object"): + current_epoch = get_epoch(client_shell, cluster) epoch_diff = expiration_epoch - current_epoch + 1 if epoch_diff > 0: with allure.step(f"Tick {epoch_diff} epochs"): for _ in range(epoch_diff): - tick_epoch(client_shell) + tick_epoch(client_shell, cluster) try: 
delete_object( storage_object.wallet_file_path, storage_object.cid, storage_object.oid, client_shell, + cluster.default_rpc_endpoint, ) except Exception as ex: ex_message = str(ex) @@ -126,7 +116,30 @@ def locked_storage_object( @pytest.mark.sanity @pytest.mark.grpc_object_lock -class TestObjectLockWithGrpc: +class TestObjectLockWithGrpc(ClusterTestBase): + def get_storage_object_chunks(self, storage_object: StorageObjectInfo): + with allure.step(f"Get complex object chunks (f{storage_object.oid})"): + split_object_id = get_link_object( + storage_object.wallet_file_path, + storage_object.cid, + storage_object.oid, + self.shell, + self.cluster.storage_nodes, + is_direct=False, + ) + head = head_object( + storage_object.wallet_file_path, + storage_object.cid, + split_object_id, + self.shell, + self.cluster.default_rpc_endpoint, + ) + + chunks_object_ids = [] + if "split" in head["header"] and "children" in head["header"]["split"]: + chunks_object_ids = head["header"]["split"]["children"] + return chunks_object_ids + @allure.title("Locked object should be protected from deletion") @pytest.mark.parametrize( "locked_storage_object", @@ -136,7 +149,6 @@ class TestObjectLockWithGrpc: ) def test_locked_object_cannot_be_deleted( self, - client_shell: Shell, request: FixtureRequest, locked_storage_object: StorageObjectInfo, ): @@ -152,7 +164,8 @@ class TestObjectLockWithGrpc: locked_storage_object.wallet_file_path, locked_storage_object.cid, locked_storage_object.oid, - client_shell, + self.shell, + self.cluster.default_rpc_endpoint, ) @allure.title("Lock object itself should be protected from deletion") @@ -160,7 +173,6 @@ class TestObjectLockWithGrpc: @pytest.mark.parametrize("locked_storage_object", [SIMPLE_OBJ_SIZE], indirect=True) def test_lock_object_itself_cannot_be_deleted( self, - client_shell: Shell, locked_storage_object: StorageObjectInfo, ): """ @@ -171,14 +183,19 @@ class TestObjectLockWithGrpc: wallet_path = locked_storage_object.wallet_file_path with 
pytest.raises(Exception, match=LOCK_OBJECT_REMOVAL): - delete_object(wallet_path, lock_object.cid, lock_object.oid, client_shell) + delete_object( + wallet_path, + lock_object.cid, + lock_object.oid, + self.shell, + self.cluster.default_rpc_endpoint, + ) @allure.title("Lock object itself cannot be locked") # We operate with only lock object here so no complex object needed in this test @pytest.mark.parametrize("locked_storage_object", [SIMPLE_OBJ_SIZE], indirect=True) def test_lock_object_cannot_be_locked( self, - client_shell: Shell, locked_storage_object: StorageObjectInfo, ): """ @@ -189,7 +206,14 @@ class TestObjectLockWithGrpc: wallet_path = locked_storage_object.wallet_file_path with pytest.raises(Exception, match=LOCK_NON_REGULAR_OBJECT): - lock_object(wallet_path, lock_object_info.cid, lock_object_info.oid, client_shell, 1) + lock_object( + wallet_path, + lock_object_info.cid, + lock_object_info.oid, + self.shell, + self.cluster.default_rpc_endpoint, + 1, + ) @allure.title("Cannot lock object without lifetime and expire_at fields") # We operate with only lock object here so no complex object needed in this test @@ -207,7 +231,6 @@ class TestObjectLockWithGrpc: ) def test_cannot_lock_object_without_lifetime( self, - client_shell: Shell, locked_storage_object: StorageObjectInfo, wrong_lifetime: int, wrong_expire_at: int, @@ -228,7 +251,8 @@ class TestObjectLockWithGrpc: wallet_path, lock_object_info.cid, lock_object_info.oid, - client_shell, + self.shell, + self.cluster.default_rpc_endpoint, lifetime=wrong_lifetime, expire_at=wrong_expire_at, ) @@ -239,7 +263,6 @@ class TestObjectLockWithGrpc: ) def test_expired_object_should_be_deleted_after_locks_are_expired( self, - client_shell: Shell, request: FixtureRequest, user_container: StorageContainer, object_size: int, @@ -251,7 +274,7 @@ class TestObjectLockWithGrpc: f"Expired object should be deleted after locks are expired for {request.node.callspec.id}" ) - current_epoch = ensure_fresh_epoch(client_shell) + 
current_epoch = self.ensure_fresh_epoch() storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1) with allure.step("Lock object for couple epochs"): @@ -259,20 +282,22 @@ class TestObjectLockWithGrpc: storage_object.wallet_file_path, storage_object.cid, storage_object.oid, - client_shell, + self.shell, + self.cluster.default_rpc_endpoint, lifetime=3, ) lock_object( storage_object.wallet_file_path, storage_object.cid, storage_object.oid, - client_shell, + self.shell, + self.cluster.default_rpc_endpoint, expire_at=current_epoch + 3, ) with allure.step("Check object is not deleted at expiration time"): - tick_epoch(client_shell) - tick_epoch(client_shell) + self.tick_epoch() + self.tick_epoch() # Must wait to ensure object is not deleted wait_for_gc_pass_on_storage_nodes() with expect_not_raises(): @@ -280,7 +305,8 @@ class TestObjectLockWithGrpc: storage_object.wallet_file_path, storage_object.cid, storage_object.oid, - client_shell, + self.shell, + self.cluster.default_rpc_endpoint, ) @wait_for_success(parse_time(STORAGE_GC_TIME)) @@ -290,11 +316,12 @@ class TestObjectLockWithGrpc: storage_object.wallet_file_path, storage_object.cid, storage_object.oid, - client_shell, + self.shell, + self.cluster.default_rpc_endpoint, ) with allure.step("Wait for object to be deleted after third epoch"): - tick_epoch(client_shell) + self.tick_epoch() check_object_not_found() @allure.title("Should be possible to lock multiple objects at once") @@ -305,7 +332,6 @@ class TestObjectLockWithGrpc: ) def test_should_be_possible_to_lock_multiple_objects_at_once( self, - client_shell: Shell, request: FixtureRequest, user_container: StorageContainer, object_size: int, @@ -317,7 +343,7 @@ class TestObjectLockWithGrpc: f"Should be possible to lock multiple objects at once for {request.node.callspec.id}" ) - current_epoch = ensure_fresh_epoch(client_shell) + current_epoch = ensure_fresh_epoch(self.shell, self.cluster) storage_objects: list[StorageObjectInfo] = 
[] with allure.step("Generate three objects"): @@ -330,7 +356,8 @@ class TestObjectLockWithGrpc: storage_objects[0].wallet_file_path, storage_objects[0].cid, ",".join([storage_object.oid for storage_object in storage_objects]), - client_shell, + self.shell, + self.cluster.default_rpc_endpoint, expire_at=current_epoch + 1, ) @@ -341,15 +368,16 @@ class TestObjectLockWithGrpc: storage_object.wallet_file_path, storage_object.cid, storage_object.oid, - client_shell, + self.shell, + self.cluster.default_rpc_endpoint, ) with allure.step("Tick two epochs"): - tick_epoch(client_shell) - tick_epoch(client_shell) + self.tick_epoch() + self.tick_epoch() with expect_not_raises(): - delete_objects(storage_objects, client_shell) + delete_objects(storage_objects, self.shell, self.cluster) @allure.title("Already outdated lock should not be applied") @pytest.mark.parametrize( @@ -359,7 +387,6 @@ class TestObjectLockWithGrpc: ) def test_already_outdated_lock_should_not_be_applied( self, - client_shell: Shell, request: FixtureRequest, user_container: StorageContainer, object_size: int, @@ -371,7 +398,7 @@ class TestObjectLockWithGrpc: f"Already outdated lock should not be applied for {request.node.callspec.id}" ) - current_epoch = ensure_fresh_epoch(client_shell) + current_epoch = self.ensure_fresh_epoch() storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1) @@ -386,7 +413,8 @@ class TestObjectLockWithGrpc: storage_object.wallet_file_path, storage_object.cid, storage_object.oid, - client_shell, + self.shell, + self.cluster.default_rpc_endpoint, expire_at=expiration_epoch, ) @@ -399,7 +427,6 @@ class TestObjectLockWithGrpc: @expect_not_raises() def test_after_lock_expiration_with_lifetime_user_should_be_able_to_delete_object( self, - client_shell: Shell, request: FixtureRequest, user_container: StorageContainer, object_size: int, @@ -411,22 +438,27 @@ class TestObjectLockWithGrpc: f"After lock expiration with lifetime user should be able to delete 
object for {request.node.callspec.id}" ) - current_epoch = ensure_fresh_epoch(client_shell) + current_epoch = self.ensure_fresh_epoch() storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1) lock_object( storage_object.wallet_file_path, storage_object.cid, storage_object.oid, - client_shell, + self.shell, + self.cluster.default_rpc_endpoint, lifetime=1, ) - tick_epoch(client_shell) - - delete_object( - storage_object.wallet_file_path, storage_object.cid, storage_object.oid, client_shell - ) + self.tick_epoch() + with expect_not_raises(): + delete_object( + storage_object.wallet_file_path, + storage_object.cid, + storage_object.oid, + self.shell, + self.cluster.default_rpc_endpoint, + ) @allure.title("After lock expiration with expire_at user should be able to delete object") @pytest.mark.parametrize( @@ -437,7 +469,6 @@ class TestObjectLockWithGrpc: @expect_not_raises() def test_after_lock_expiration_with_expire_at_user_should_be_able_to_delete_object( self, - client_shell: Shell, request: FixtureRequest, user_container: StorageContainer, object_size: int, @@ -449,7 +480,7 @@ class TestObjectLockWithGrpc: f"After lock expiration with expire_at user should be able to delete object for {request.node.callspec.id}" ) - current_epoch = ensure_fresh_epoch(client_shell) + current_epoch = self.ensure_fresh_epoch() storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 5) @@ -457,15 +488,21 @@ class TestObjectLockWithGrpc: storage_object.wallet_file_path, storage_object.cid, storage_object.oid, - client_shell, + self.shell, + endpoint=self.cluster.default_rpc_endpoint, expire_at=current_epoch + 1, ) - tick_epoch(client_shell) + self.tick_epoch() - delete_object( - storage_object.wallet_file_path, storage_object.cid, storage_object.oid, client_shell - ) + with expect_not_raises(): + delete_object( + storage_object.wallet_file_path, + storage_object.cid, + storage_object.oid, + self.shell, + 
self.cluster.default_rpc_endpoint, + ) @allure.title("Complex object chunks should also be protected from deletion") @pytest.mark.parametrize( @@ -476,14 +513,13 @@ class TestObjectLockWithGrpc: ) def test_complex_object_chunks_should_also_be_protected_from_deletion( self, - client_shell: Shell, locked_storage_object: StorageObjectInfo, ): """ Complex object chunks should also be protected from deletion """ - chunk_object_ids = get_storage_object_chunks(locked_storage_object, client_shell) + chunk_object_ids = self.get_storage_object_chunks(locked_storage_object) for chunk_object_id in chunk_object_ids: with allure.step(f"Try to delete chunk object {chunk_object_id}"): with pytest.raises(Exception, match=OBJECT_IS_LOCKED): @@ -491,7 +527,8 @@ class TestObjectLockWithGrpc: locked_storage_object.wallet_file_path, locked_storage_object.cid, chunk_object_id, - client_shell, + self.shell, + self.cluster.default_rpc_endpoint, ) @allure.title("Link object of complex object should also be protected from deletion") @@ -503,7 +540,6 @@ class TestObjectLockWithGrpc: ) def test_link_object_of_complex_object_should_also_be_protected_from_deletion( self, - client_shell: Shell, locked_storage_object: StorageObjectInfo, ): """ @@ -514,7 +550,8 @@ class TestObjectLockWithGrpc: locked_storage_object.wallet_file_path, locked_storage_object.cid, locked_storage_object.oid, - client_shell, + self.shell, + self.cluster.storage_nodes, is_direct=False, ) with allure.step(f"Try to delete link object {link_object_id}"): @@ -523,5 +560,6 @@ class TestObjectLockWithGrpc: locked_storage_object.wallet_file_path, locked_storage_object.cid, link_object_id, - client_shell, + self.shell, + self.cluster.default_rpc_endpoint, ) diff --git a/pytest_tests/testsuites/payment/test_balance.py b/pytest_tests/testsuites/payment/test_balance.py index ff4f6e4..6bebc9c 100644 --- a/pytest_tests/testsuites/payment/test_balance.py +++ b/pytest_tests/testsuites/payment/test_balance.py @@ -4,7 +4,8 @@ import os 
import allure import pytest import yaml -from common import FREE_STORAGE, NEOFS_CLI_EXEC, NEOFS_ENDPOINT, WALLET_CONFIG +from cluster_test_base import ClusterTestBase +from common import FREE_STORAGE, NEOFS_CLI_EXEC, WALLET_CONFIG from neofs_testlib.cli import NeofsCli from neofs_testlib.shell import CommandResult, Shell from wallet import WalletFactory, WalletFile @@ -16,7 +17,7 @@ DEPOSIT_AMOUNT = 30 @pytest.mark.sanity @pytest.mark.payments @pytest.mark.skipif(FREE_STORAGE, reason="Test only works on public network with paid storage") -class TestBalanceAccounting: +class TestBalanceAccounting(ClusterTestBase): @pytest.fixture(scope="class") def main_wallet(self, wallet_factory: WalletFactory) -> WalletFile: return wallet_factory.create_wallet() @@ -61,7 +62,7 @@ class TestBalanceAccounting: def test_balance_wallet_address(self, main_wallet: WalletFile, cli: NeofsCli): result = cli.accounting.balance( wallet=main_wallet.path, - rpc_endpoint=NEOFS_ENDPOINT, + rpc_endpoint=self.cluster.default_rpc_endpoint, address=main_wallet.get_address(), ) @@ -69,7 +70,9 @@ class TestBalanceAccounting: @allure.title("Test balance request with wallet only") def test_balance_wallet(self, main_wallet: WalletFile, cli: NeofsCli): - result = cli.accounting.balance(wallet=main_wallet.path, rpc_endpoint=NEOFS_ENDPOINT) + result = cli.accounting.balance( + wallet=main_wallet.path, rpc_endpoint=self.cluster.default_rpc_endpoint + ) self.check_amount(result) @allure.title("Test balance request with wallet and wrong address") @@ -79,14 +82,16 @@ class TestBalanceAccounting: with pytest.raises(Exception, match="address option must be specified and valid"): cli.accounting.balance( wallet=main_wallet.path, - rpc_endpoint=NEOFS_ENDPOINT, + rpc_endpoint=self.cluster.default_rpc_endpoint, address=other_wallet.get_address(), ) @allure.title("Test balance request with config file") - def test_balance_api(self, prepare_tmp_dir: str, main_wallet: WalletFile, client_shell: Shell): + def 
test_balance_api(self, temp_directory: str, main_wallet: WalletFile, client_shell: Shell): config_file = self.write_api_config( - config_dir=prepare_tmp_dir, endpoint=NEOFS_ENDPOINT, wallet=main_wallet.path + config_dir=temp_directory, + endpoint=self.cluster.default_rpc_endpoint, + wallet=main_wallet.path, ) logger.info(f"Config with API endpoint: {config_file}") diff --git a/pytest_tests/testsuites/services/s3_gate/test_s3_gate.py b/pytest_tests/testsuites/services/s3_gate/test_s3_gate.py index 7401768..61dd264 100644 --- a/pytest_tests/testsuites/services/s3_gate/test_s3_gate.py +++ b/pytest_tests/testsuites/services/s3_gate/test_s3_gate.py @@ -4,6 +4,7 @@ from random import choice, choices import allure import pytest +from aws_cli_client import AwsCliClient from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE from epoch import tick_epoch from file_helper import ( @@ -22,7 +23,6 @@ from s3_helper import ( ) from steps import s3_gate_bucket, s3_gate_object -from steps.aws_cli_client import AwsCliClient from steps.s3_gate_base import TestS3GateBase logger = logging.getLogger("NeoLogger") @@ -39,7 +39,7 @@ def pytest_generate_tests(metafunc): @pytest.mark.s3_gate_base class TestS3Gate(TestS3GateBase): @allure.title("Test S3 Bucket API") - def test_s3_buckets(self, client_shell): + def test_s3_buckets(self): """ Test base S3 Bucket API (Create/List/Head/Delete). 
""" @@ -83,7 +83,7 @@ class TestS3Gate(TestS3GateBase): with allure.step(f"Delete empty bucket {bucket_2}"): s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_2) - tick_epoch(shell=client_shell) + tick_epoch(self.shell, self.cluster) with allure.step(f"Check bucket {bucket_2} deleted"): with pytest.raises(Exception, match=r".*Not Found.*"): @@ -99,7 +99,7 @@ class TestS3Gate(TestS3GateBase): with allure.step(f"Delete bucket {bucket_1}"): s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_1) - tick_epoch(shell=client_shell) + tick_epoch(self.shell, self.cluster) with allure.step(f"Check bucket {bucket_1} deleted"): with pytest.raises(Exception, match=r".*Not Found.*"): diff --git a/pytest_tests/testsuites/services/s3_gate/test_s3_object.py b/pytest_tests/testsuites/services/s3_gate/test_s3_object.py index 19f9450..5d265d6 100644 --- a/pytest_tests/testsuites/services/s3_gate/test_s3_object.py +++ b/pytest_tests/testsuites/services/s3_gate/test_s3_object.py @@ -6,6 +6,7 @@ from random import choices, sample import allure import pytest +from aws_cli_client import AwsCliClient from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, FREE_STORAGE, SIMPLE_OBJ_SIZE, WALLET_PASS from data_formatters import get_wallet_public_key from file_helper import concat_files, generate_file, generate_file_with_content, get_file_hash @@ -14,7 +15,6 @@ from python_keywords.payment_neogo import deposit_gas, transfer_gas from s3_helper import assert_object_lock_mode, check_objects_in_bucket, set_bucket_versioning from steps import s3_gate_bucket, s3_gate_object -from steps.aws_cli_client import AwsCliClient from steps.s3_gate_base import TestS3GateBase @@ -653,23 +653,26 @@ class TestS3GateObject(TestS3GateBase): ], "Tags must be the same" @pytest.fixture - def prepare_two_wallets(self, prepare_wallet_and_deposit, client_shell): - self.main_wallet = prepare_wallet_and_deposit + def prepare_two_wallets(self, default_wallet, client_shell): + self.main_wallet = default_wallet 
self.main_public_key = get_wallet_public_key(self.main_wallet, WALLET_PASS) self.other_wallet = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json") init_wallet(self.other_wallet, WALLET_PASS) self.other_public_key = get_wallet_public_key(self.other_wallet, WALLET_PASS) if not FREE_STORAGE: + main_chain = self.cluster.main_chain_nodes[0] deposit = 30 transfer_gas( shell=client_shell, amount=deposit + 1, + main_chain=main_chain, wallet_to_path=self.other_wallet, wallet_to_password=WALLET_PASS, ) deposit_gas( shell=client_shell, + main_chain=main_chain, amount=deposit, wallet_from_path=self.other_wallet, wallet_from_password=WALLET_PASS, @@ -906,9 +909,9 @@ class TestS3GateObject(TestS3GateBase): # ], "Permission for all groups is FULL_CONTROL" @allure.title("Test S3 Put 10 nested level object") - def test_s3_put_10_folder(self, bucket, prepare_tmp_dir): + def test_s3_put_10_folder(self, bucket, temp_directory): path = "/".join(["".join(choices(string.ascii_letters, k=3)) for _ in range(10)]) - file_path_1 = os.path.join(prepare_tmp_dir, path, "test_file_1") + file_path_1 = os.path.join(temp_directory, path, "test_file_1") generate_file_with_content(file_path=file_path_1) file_name = self.object_key_from_file_path(file_path_1) objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket) diff --git a/pytest_tests/testsuites/services/s3_gate/test_s3_policy.py b/pytest_tests/testsuites/services/s3_gate/test_s3_policy.py index 08e9c29..a70f947 100644 --- a/pytest_tests/testsuites/services/s3_gate/test_s3_policy.py +++ b/pytest_tests/testsuites/services/s3_gate/test_s3_policy.py @@ -35,7 +35,7 @@ def pytest_generate_tests(metafunc): @pytest.mark.s3_gate class TestS3GatePolicy(TestS3GateBase): @allure.title("Test S3: Verify bucket creation with retention policy applied") - def test_s3_bucket_location(self, client_shell): + def test_s3_bucket_location(self): file_path_1 = generate_file() file_name_1 = object_key_from_file_path(file_path_1) file_path_2 
= generate_file() @@ -72,14 +72,26 @@ class TestS3GatePolicy(TestS3GateBase): assert bucket_loc_2 == "rep-3" with allure.step("Check object policy"): - cid_1 = search_container_by_name(self.wallet, bucket_1, shell=client_shell) + cid_1 = search_container_by_name( + self.wallet, bucket_1, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) copies_1 = get_simple_object_copies( - wallet=self.wallet, cid=cid_1, oid=version_id_1, shell=client_shell + wallet=self.wallet, + cid=cid_1, + oid=version_id_1, + shell=self.shell, + nodes=self.cluster.storage_nodes, ) assert copies_1 == 1 - cid_2 = search_container_by_name(self.wallet, bucket_2, shell=client_shell) + cid_2 = search_container_by_name( + self.wallet, bucket_2, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) copies_2 = get_simple_object_copies( - wallet=self.wallet, cid=cid_2, oid=version_id_2, shell=client_shell + wallet=self.wallet, + cid=cid_2, + oid=version_id_2, + shell=self.shell, + nodes=self.cluster.storage_nodes, ) assert copies_2 == 3 diff --git a/pytest_tests/testsuites/services/test_http_gate.py b/pytest_tests/testsuites/services/test_http_gate.py index f89ac8a..0b3b9f0 100644 --- a/pytest_tests/testsuites/services/test_http_gate.py +++ b/pytest_tests/testsuites/services/test_http_gate.py @@ -1,6 +1,6 @@ import logging import os -from random import choice +import random from time import sleep import allure @@ -9,7 +9,6 @@ from common import COMPLEX_OBJ_SIZE from container import create_container from epoch import get_epoch, tick_epoch from file_helper import generate_file, get_file_hash -from neofs_testlib.shell import Shell from python_keywords.http_gate import ( get_via_http_curl, get_via_http_gate, @@ -18,11 +17,13 @@ from python_keywords.http_gate import ( upload_via_http_gate, upload_via_http_gate_curl, ) -from python_keywords.neofs_verbs import get_object, put_object +from python_keywords.neofs_verbs import get_object, put_object_to_random_node from 
python_keywords.storage_policy import get_nodes_without_object from utility import wait_for_gc_pass_on_storage_nodes from wellknown_acl import PUBLIC_ACL +from steps.cluster_test_base import ClusterTestBase + logger = logging.getLogger("NeoLogger") OBJECT_NOT_FOUND_ERROR = "not found" @@ -39,17 +40,17 @@ OBJECT_UPLOAD_DELAY = 10 @allure.link("https://github.com/nspcc-dev/neofs-http-gw#downloading", name="downloading") @pytest.mark.sanity @pytest.mark.http_gate -class TestHttpGate: +class TestHttpGate(ClusterTestBase): PLACEMENT_RULE_1 = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" PLACEMENT_RULE_2 = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" @pytest.fixture(scope="class", autouse=True) @allure.title("[Class/Autouse]: Prepare wallet and deposit") - def prepare_wallet(self, prepare_wallet_and_deposit): - TestHttpGate.wallet = prepare_wallet_and_deposit + def prepare_wallet(self, default_wallet): + TestHttpGate.wallet = default_wallet @allure.title("Test Put over gRPC, Get over HTTP") - def test_put_grpc_get_http(self, client_shell): + def test_put_grpc_get_http(self): """ Test that object can be put using gRPC interface and get using HTTP. @@ -65,26 +66,38 @@ class TestHttpGate: Hashes must be the same. 
""" cid = create_container( - self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_1, basic_acl=PUBLIC_ACL + self.wallet, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + rule=self.PLACEMENT_RULE_1, + basic_acl=PUBLIC_ACL, ) file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE) with allure.step("Put objects using gRPC"): - oid_simple = put_object( - wallet=self.wallet, path=file_path_simple, cid=cid, shell=client_shell + oid_simple = put_object_to_random_node( + wallet=self.wallet, + path=file_path_simple, + cid=cid, + shell=self.shell, + cluster=self.cluster, ) - oid_large = put_object( - wallet=self.wallet, path=file_path_large, cid=cid, shell=client_shell + oid_large = put_object_to_random_node( + wallet=self.wallet, + path=file_path_large, + cid=cid, + shell=self.shell, + cluster=self.cluster, ) for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)): - self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid, shell=client_shell) + self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid) @allure.link("https://github.com/nspcc-dev/neofs-http-gw#uploading", name="uploading") @allure.link("https://github.com/nspcc-dev/neofs-http-gw#downloading", name="downloading") @allure.title("Test Put over HTTP, Get over HTTP") @pytest.mark.smoke - def test_put_http_get_http(self, client_shell): + def test_put_http_get_http(self): """ Test that object can be put and get using HTTP interface. @@ -98,16 +111,24 @@ class TestHttpGate: Hashes must be the same. 
""" cid = create_container( - self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_2, basic_acl=PUBLIC_ACL + self.wallet, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + rule=self.PLACEMENT_RULE_2, + basic_acl=PUBLIC_ACL, ) file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE) with allure.step("Put objects using HTTP"): - oid_simple = upload_via_http_gate(cid=cid, path=file_path_simple) - oid_large = upload_via_http_gate(cid=cid, path=file_path_large) + oid_simple = upload_via_http_gate( + cid=cid, path=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint + ) + oid_large = upload_via_http_gate( + cid=cid, path=file_path_large, endpoint=self.cluster.default_http_gate_endpoint + ) for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)): - self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid, shell=client_shell) + self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid) @allure.link( "https://github.com/nspcc-dev/neofs-http-gw#by-attributes", name="download by attributes" @@ -122,7 +143,7 @@ class TestHttpGate: ], ids=["simple", "hyphen", "percent"], ) - def test_put_http_get_http_with_headers(self, client_shell, attributes: dict): + def test_put_http_get_http_with_headers(self, attributes: dict): """ Test that object can be downloaded using different attributes in HTTP header. @@ -136,43 +157,63 @@ class TestHttpGate: Hashes must be the same. 
""" cid = create_container( - self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_2, basic_acl=PUBLIC_ACL + self.wallet, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + rule=self.PLACEMENT_RULE_2, + basic_acl=PUBLIC_ACL, ) file_path = generate_file() with allure.step("Put objects using HTTP with attribute"): headers = self._attr_into_header(attributes) - oid = upload_via_http_gate(cid=cid, path=file_path, headers=headers) + oid = upload_via_http_gate( + cid=cid, + path=file_path, + headers=headers, + endpoint=self.cluster.default_http_gate_endpoint, + ) sleep(OBJECT_UPLOAD_DELAY) self.get_object_by_attr_and_verify_hashes(oid, file_path, cid, attributes) @allure.title("Test Expiration-Epoch in HTTP header") - def test_expiration_epoch_in_http(self, client_shell): + def test_expiration_epoch_in_http(self): + endpoint = self.cluster.default_rpc_endpoint + http_endpoint = self.cluster.default_http_gate_endpoint + cid = create_container( - self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_2, basic_acl=PUBLIC_ACL + self.wallet, + shell=self.shell, + endpoint=endpoint, + rule=self.PLACEMENT_RULE_2, + basic_acl=PUBLIC_ACL, ) file_path = generate_file() oids = [] - curr_epoch = get_epoch(client_shell) + curr_epoch = get_epoch(self.shell, self.cluster) epochs = (curr_epoch, curr_epoch + 1, curr_epoch + 2, curr_epoch + 100) for epoch in epochs: headers = {"X-Attribute-Neofs-Expiration-Epoch": str(epoch)} with allure.step("Put objects using HTTP with attribute Expiration-Epoch"): - oids.append(upload_via_http_gate(cid=cid, path=file_path, headers=headers)) + oids.append( + upload_via_http_gate( + cid=cid, path=file_path, headers=headers, endpoint=http_endpoint + ) + ) assert len(oids) == len(epochs), "Expected all objects have been put successfully" with allure.step("All objects can be get"): for oid in oids: - get_via_http_gate(cid=cid, oid=oid) + get_via_http_gate(cid=cid, oid=oid, endpoint=http_endpoint) for expired_objects, 
not_expired_objects in [(oids[:1], oids[1:]), (oids[:2], oids[2:])]: - tick_epoch(shell=client_shell) + tick_epoch(self.shell, self.cluster) # Wait for GC, because object with expiration is counted as alive until GC removes it wait_for_gc_pass_on_storage_nodes() @@ -184,12 +225,16 @@ class TestHttpGate: with allure.step("Other objects can be get"): for oid in not_expired_objects: - get_via_http_gate(cid=cid, oid=oid) + get_via_http_gate(cid=cid, oid=oid, endpoint=http_endpoint) @allure.title("Test Zip in HTTP header") - def test_zip_in_http(self, client_shell): + def test_zip_in_http(self): cid = create_container( - self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_2, basic_acl=PUBLIC_ACL + self.wallet, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + rule=self.PLACEMENT_RULE_2, + basic_acl=PUBLIC_ACL, ) file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE) common_prefix = "my_files" @@ -197,12 +242,24 @@ class TestHttpGate: headers1 = {"X-Attribute-FilePath": f"{common_prefix}/file1"} headers2 = {"X-Attribute-FilePath": f"{common_prefix}/file2"} - upload_via_http_gate(cid=cid, path=file_path_simple, headers=headers1) - upload_via_http_gate(cid=cid, path=file_path_large, headers=headers2) + upload_via_http_gate( + cid=cid, + path=file_path_simple, + headers=headers1, + endpoint=self.cluster.default_http_gate_endpoint, + ) + upload_via_http_gate( + cid=cid, + path=file_path_large, + headers=headers2, + endpoint=self.cluster.default_http_gate_endpoint, + ) sleep(OBJECT_UPLOAD_DELAY) - dir_path = get_via_zip_http_gate(cid=cid, prefix=common_prefix) + dir_path = get_via_zip_http_gate( + cid=cid, prefix=common_prefix, endpoint=self.cluster.default_http_gate_endpoint + ) with allure.step("Verify hashes"): assert get_file_hash(f"{dir_path}/file1") == get_file_hash(file_path_simple) @@ -210,45 +267,63 @@ class TestHttpGate: @pytest.mark.long @allure.title("Test Put over HTTP/Curl, Get over HTTP/Curl for large 
object") - def test_put_http_get_http_large_file(self, client_shell): + def test_put_http_get_http_large_file(self): """ This test checks upload and download using curl with 'large' object. Large is object with size up to 20Mb. """ cid = create_container( - self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_2, basic_acl=PUBLIC_ACL + self.wallet, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + rule=self.PLACEMENT_RULE_2, + basic_acl=PUBLIC_ACL, ) obj_size = int(os.getenv("BIG_OBJ_SIZE", COMPLEX_OBJ_SIZE)) file_path = generate_file(obj_size) with allure.step("Put objects using HTTP"): - oid_gate = upload_via_http_gate(cid=cid, path=file_path) - oid_curl = upload_via_http_gate_curl(cid=cid, filepath=file_path, large_object=True) + oid_gate = upload_via_http_gate( + cid=cid, path=file_path, endpoint=self.cluster.default_http_gate_endpoint + ) + oid_curl = upload_via_http_gate_curl( + cid=cid, + filepath=file_path, + large_object=True, + endpoint=self.cluster.default_http_gate_endpoint, + ) - self.get_object_and_verify_hashes(oid_gate, file_path, self.wallet, cid, shell=client_shell) + self.get_object_and_verify_hashes(oid_gate, file_path, self.wallet, cid) self.get_object_and_verify_hashes( oid_curl, file_path, self.wallet, cid, - shell=client_shell, object_getter=get_via_http_curl, ) @allure.title("Test Put/Get over HTTP using Curl utility") - def test_put_http_get_http_curl(self, client_shell): + def test_put_http_get_http_curl(self): """ Test checks upload and download over HTTP using curl utility. 
""" cid = create_container( - self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_2, basic_acl=PUBLIC_ACL + self.wallet, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + rule=self.PLACEMENT_RULE_2, + basic_acl=PUBLIC_ACL, ) file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE) with allure.step("Put objects using curl utility"): - oid_simple = upload_via_http_gate_curl(cid=cid, filepath=file_path_simple) - oid_large = upload_via_http_gate_curl(cid=cid, filepath=file_path_large) + oid_simple = upload_via_http_gate_curl( + cid=cid, filepath=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint + ) + oid_large = upload_via_http_gate_curl( + cid=cid, filepath=file_path_large, endpoint=self.cluster.default_http_gate_endpoint + ) for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)): self.get_object_and_verify_hashes( @@ -256,45 +331,57 @@ class TestHttpGate: file_path, self.wallet, cid, - shell=client_shell, object_getter=get_via_http_curl, ) - @staticmethod @allure.step("Try to get object and expect error") - def try_to_get_object_and_expect_error(cid: str, oid: str, error_pattern: str) -> None: + def try_to_get_object_and_expect_error(self, cid: str, oid: str, error_pattern: str) -> None: try: - get_via_http_gate(cid=cid, oid=oid) + get_via_http_gate(cid=cid, oid=oid, endpoint=self.cluster.default_http_gate_endpoint) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: match = error_pattern.casefold() in str(err).casefold() assert match, f"Expected {err} to match {error_pattern}" - @staticmethod @allure.step("Verify object can be get using HTTP header attribute") def get_object_by_attr_and_verify_hashes( - oid: str, file_name: str, cid: str, attrs: dict + self, oid: str, file_name: str, cid: str, attrs: dict ) -> None: - got_file_path_http = get_via_http_gate(cid=cid, oid=oid) - got_file_path_http_attr = 
get_via_http_gate_by_attribute(cid=cid, attribute=attrs) + got_file_path_http = get_via_http_gate( + cid=cid, oid=oid, endpoint=self.cluster.default_http_gate_endpoint + ) + got_file_path_http_attr = get_via_http_gate_by_attribute( + cid=cid, attribute=attrs, endpoint=self.cluster.default_http_gate_endpoint + ) TestHttpGate._assert_hashes_are_equal( file_name, got_file_path_http, got_file_path_http_attr ) - @staticmethod @allure.step("Verify object can be get using HTTP") def get_object_and_verify_hashes( - oid: str, file_name: str, wallet: str, cid: str, shell: Shell, object_getter=None + self, oid: str, file_name: str, wallet: str, cid: str, object_getter=None ) -> None: - nodes = get_nodes_without_object(wallet=wallet, cid=cid, oid=oid, shell=shell) - random_node = choice(nodes) + nodes = get_nodes_without_object( + wallet=wallet, + cid=cid, + oid=oid, + shell=self.shell, + nodes=self.cluster.storage_nodes, + ) + random_node = random.choice(nodes) object_getter = object_getter or get_via_http_gate got_file_path = get_object( - wallet=wallet, cid=cid, oid=oid, shell=shell, endpoint=random_node + wallet=wallet, + cid=cid, + oid=oid, + shell=self.shell, + endpoint=random_node.get_rpc_endpoint(), + ) + got_file_path_http = object_getter( + cid=cid, oid=oid, endpoint=self.cluster.default_http_gate_endpoint ) - got_file_path_http = object_getter(cid=cid, oid=oid) TestHttpGate._assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) diff --git a/pytest_tests/testsuites/session_token/test_object_session_token.py b/pytest_tests/testsuites/session_token/test_object_session_token.py index a1a3569..13f91d4 100644 --- a/pytest_tests/testsuites/session_token/test_object_session_token.py +++ b/pytest_tests/testsuites/session_token/test_object_session_token.py @@ -2,131 +2,146 @@ import random import allure import pytest -from common import COMPLEX_OBJ_SIZE, NEOFS_NETMAP_DICT, SIMPLE_OBJ_SIZE, WALLET_PASS +from cluster_test_base import ClusterTestBase +from common 
import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE, WALLET_PASS from file_helper import generate_file from grpc_responses import SESSION_NOT_FOUND -from neofs_testlib.shell import Shell from neofs_testlib.utils.wallet import get_last_address_from_wallet from python_keywords.container import create_container -from python_keywords.neofs_verbs import delete_object, put_object +from python_keywords.neofs_verbs import delete_object, put_object, put_object_to_random_node from steps.session_token import create_session_token -@allure.title("Test Object Operations with Session Token") @pytest.mark.sanity @pytest.mark.session_token -@pytest.mark.parametrize( - "object_size", - [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], - ids=["simple object", "complex object"], -) -def test_object_session_token(prepare_wallet_and_deposit, client_shell: Shell, object_size): - """ - Test how operations over objects are executed with a session token +class TestDynamicObjectSession(ClusterTestBase): + @allure.title("Test Object Operations with Session Token") + @pytest.mark.parametrize( + "object_size", + [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], + ids=["simple object", "complex object"], + ) + def test_object_session_token(self, default_wallet, object_size): + """ + Test how operations over objects are executed with a session token - Steps: - 1. Create a private container - 2. Obj operation requests to the node which IS NOT in the container but granted - with a session token - 3. Obj operation requests to the node which IS in the container and NOT granted - with a session token - 4. Obj operation requests to the node which IS NOT in the container and NOT granted - with a session token - """ + Steps: + 1. Create a private container + 2. Obj operation requests to the node which IS NOT in the container but granted + with a session token + 3. Obj operation requests to the node which IS in the container and NOT granted + with a session token + 4. 
Obj operation requests to the node which IS NOT in the container and NOT granted + with a session token + """ - with allure.step("Init wallet"): - wallet = prepare_wallet_and_deposit - address = get_last_address_from_wallet(wallet, "") + with allure.step("Init wallet"): + wallet = default_wallet + address = get_last_address_from_wallet(wallet, "") - with allure.step("Nodes Settlements"): - ( - session_token_node_name, - container_node_name, - noncontainer_node_name, - ) = random.sample(list(NEOFS_NETMAP_DICT.keys()), 3) - session_token_node = NEOFS_NETMAP_DICT[session_token_node_name]["rpc"] - container_node = NEOFS_NETMAP_DICT[container_node_name]["rpc"] - noncontainer_node = NEOFS_NETMAP_DICT[noncontainer_node_name]["rpc"] + with allure.step("Nodes Settlements"): + ( + session_token_node, + container_node, + non_container_node, + ) = random.sample(self.cluster.storage_nodes, 3) - with allure.step("Create Session Token"): - session_token = create_session_token( - shell=client_shell, - owner=address, - wallet_path=wallet, - wallet_password=WALLET_PASS, - rpc_endpoint=session_token_node, - ) + with allure.step("Create Session Token"): + session_token = create_session_token( + shell=self.shell, + owner=address, + wallet_path=wallet, + wallet_password=WALLET_PASS, + rpc_endpoint=session_token_node.get_rpc_endpoint(), + ) - with allure.step("Create Private Container"): - un_locode = NEOFS_NETMAP_DICT[container_node_name]["UN-LOCODE"] - locode = "SPB" if un_locode == "RU LED" else un_locode.split()[1] - placement_policy = ( - f"REP 1 IN LOC_{locode}_PLACE CBF 1 SELECT 1 FROM LOC_{locode} " - f'AS LOC_{locode}_PLACE FILTER "UN-LOCODE" ' - f'EQ "{un_locode}" AS LOC_{locode}' - ) - cid = create_container(wallet, shell=client_shell, rule=placement_policy) + with allure.step("Create Private Container"): + un_locode = container_node.get_un_locode() + locode = "SPB" if un_locode == "RU LED" else un_locode.split()[1] + placement_policy = ( + f"REP 1 IN LOC_{locode}_PLACE CBF 1 
SELECT 1 FROM LOC_{locode} " + f'AS LOC_{locode}_PLACE FILTER "UN-LOCODE" ' + f'EQ "{un_locode}" AS LOC_{locode}' + ) + cid = create_container( + wallet, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + rule=placement_policy, + ) - with allure.step("Put Objects"): - file_path = generate_file(object_size) - oid = put_object(wallet=wallet, path=file_path, cid=cid, shell=client_shell) - oid_delete = put_object(wallet=wallet, path=file_path, cid=cid, shell=client_shell) + with allure.step("Put Objects"): + file_path = generate_file(object_size) + oid = put_object_to_random_node( + wallet=wallet, + path=file_path, + cid=cid, + shell=self.shell, + cluster=self.cluster, + ) + oid_delete = put_object_to_random_node( + wallet=wallet, + path=file_path, + cid=cid, + shell=self.shell, + cluster=self.cluster, + ) - with allure.step("Node not in container but granted a session token"): - put_object( - wallet=wallet, - path=file_path, - cid=cid, - shell=client_shell, - endpoint=session_token_node, - session=session_token, - ) - delete_object( - wallet=wallet, - cid=cid, - oid=oid_delete, - shell=client_shell, - endpoint=session_token_node, - session=session_token, - ) - - with allure.step("Node in container and not granted a session token"): - with pytest.raises(Exception, match=SESSION_NOT_FOUND): + with allure.step("Node not in container but granted a session token"): put_object( wallet=wallet, path=file_path, cid=cid, - shell=client_shell, - endpoint=container_node, + shell=self.shell, + endpoint=session_token_node.get_rpc_endpoint(), session=session_token, ) - with pytest.raises(Exception, match=SESSION_NOT_FOUND): delete_object( wallet=wallet, cid=cid, - oid=oid, - shell=client_shell, - endpoint=container_node, + oid=oid_delete, + shell=self.shell, + endpoint=session_token_node.get_rpc_endpoint(), session=session_token, ) - with allure.step("Node not in container and not granted a session token"): - with pytest.raises(Exception, match=SESSION_NOT_FOUND): - 
put_object( - wallet=wallet, - path=file_path, - cid=cid, - shell=client_shell, - endpoint=noncontainer_node, - session=session_token, - ) - with pytest.raises(Exception, match=SESSION_NOT_FOUND): - delete_object( - wallet=wallet, - cid=cid, - oid=oid, - shell=client_shell, - endpoint=noncontainer_node, - session=session_token, - ) + with allure.step("Node in container and not granted a session token"): + with pytest.raises(Exception, match=SESSION_NOT_FOUND): + put_object( + wallet=wallet, + path=file_path, + cid=cid, + shell=self.shell, + endpoint=container_node.get_rpc_endpoint(), + session=session_token, + ) + with pytest.raises(Exception, match=SESSION_NOT_FOUND): + delete_object( + wallet=wallet, + cid=cid, + oid=oid, + shell=self.shell, + endpoint=container_node.get_rpc_endpoint(), + session=session_token, + ) + + with allure.step("Node not in container and not granted a session token"): + with pytest.raises(Exception, match=SESSION_NOT_FOUND): + put_object( + wallet=wallet, + path=file_path, + cid=cid, + shell=self.shell, + endpoint=non_container_node.get_rpc_endpoint(), + session=session_token, + ) + with pytest.raises(Exception, match=SESSION_NOT_FOUND): + delete_object( + wallet=wallet, + cid=cid, + oid=oid, + shell=self.shell, + endpoint=non_container_node.get_rpc_endpoint(), + session=session_token, + ) diff --git a/pytest_tests/testsuites/session_token/test_static_object_session_token.py b/pytest_tests/testsuites/session_token/test_static_object_session_token.py index 3f6ce97..d7c145b 100644 --- a/pytest_tests/testsuites/session_token/test_static_object_session_token.py +++ b/pytest_tests/testsuites/session_token/test_static_object_session_token.py @@ -2,8 +2,10 @@ import logging import allure import pytest +from cluster import Cluster +from cluster_test_base import ClusterTestBase from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE -from epoch import ensure_fresh_epoch, tick_epoch +from epoch import ensure_fresh_epoch from file_helper import 
generate_file from grpc_responses import MALFORMED_REQUEST, OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND from neofs_testlib.shell import Shell @@ -13,10 +15,11 @@ from python_keywords.neofs_verbs import ( delete_object, get_netmap_netinfo, get_object, + get_object_from_random_node, get_range, get_range_hash, head_object, - put_object, + put_object_to_random_node, search_object, ) from wallet import WalletFile @@ -42,23 +45,29 @@ RANGE_OFFSET_FOR_COMPLEX_OBJECT = 200 @pytest.fixture(scope="module") -def storage_containers(owner_wallet: WalletFile, client_shell: Shell) -> list[str]: - # Separate containers for complex/simple objects to avoid side-effects - cid = create_container(owner_wallet.path, shell=client_shell) - other_cid = create_container(owner_wallet.path, shell=client_shell) +def storage_containers( + owner_wallet: WalletFile, client_shell: Shell, cluster: Cluster +) -> list[str]: + cid = create_container( + owner_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint + ) + other_cid = create_container( + owner_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint + ) yield [cid, other_cid] @pytest.fixture( params=[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"], - # Scope session to upload/delete each files set only once + # Scope module to upload/delete each files set only once scope="module", ) def storage_objects( owner_wallet: WalletFile, client_shell: Shell, storage_containers: list[str], + cluster: Cluster, request: FixtureRequest, ) -> list[StorageObjectInfo]: @@ -68,11 +77,12 @@ def storage_objects( with allure.step("Put objects"): # upload couple objects for _ in range(3): - storage_object_id = put_object( + storage_object_id = put_object_to_random_node( wallet=owner_wallet.path, path=file_path, cid=storage_containers[0], shell=client_shell, + cluster=cluster, ) storage_object = StorageObjectInfo(storage_containers[0], storage_object_id) @@ -84,18 +94,18 @@ def storage_objects( yield 
storage_objects # Teardown after all tests done with current param - delete_objects(storage_objects, client_shell) + delete_objects(storage_objects, client_shell, cluster) @allure.step("Get ranges for test") -def get_ranges(storage_object: StorageObjectInfo, shell: Shell) -> list[str]: +def get_ranges(storage_object: StorageObjectInfo, shell: Shell, endpoint: str) -> list[str]: """ Returns ranges to test range/hash methods via static session """ object_size = storage_object.size if object_size == COMPLEX_OBJ_SIZE: - net_info = get_netmap_netinfo(storage_object.wallet_file_path, shell) + net_info = get_netmap_netinfo(storage_object.wallet_file_path, shell, endpoint) max_object_size = net_info["maximum_object_size"] # make sure to test multiple parts of complex object assert object_size >= max_object_size + RANGE_OFFSET_FOR_COMPLEX_OBJECT @@ -116,7 +126,7 @@ def static_sessions( storage_containers: list[str], storage_objects: list[StorageObjectInfo], client_shell: Shell, - prepare_tmp_dir: str, + temp_directory: str, ) -> dict[ObjectVerb, str]: """ Returns dict with static session token file paths for all verbs with default lifetime with @@ -130,545 +140,589 @@ def static_sessions( storage_objects[0:2], verb, client_shell, - prepare_tmp_dir, + temp_directory, ) for verb in ObjectVerb } -@allure.title("Validate static session with read operations") @pytest.mark.static_session -@pytest.mark.parametrize( - "method_under_test,verb", - [ - (head_object, ObjectVerb.HEAD), - (get_object, ObjectVerb.GET), - ], -) -def test_static_session_read( - user_wallet: WalletFile, - client_shell: Shell, - storage_objects: list[StorageObjectInfo], - static_sessions: dict[ObjectVerb, str], - method_under_test, - verb: ObjectVerb, - request: FixtureRequest, -): - """ - Validate static session with read operations - """ - allure.dynamic.title( - f"Validate static session with read operations for {request.node.callspec.id}" +class TestObjectStaticSession(ClusterTestBase): + 
@allure.title("Validate static session with read operations") + @pytest.mark.parametrize( + "method_under_test,verb", + [ + (head_object, ObjectVerb.HEAD), + (get_object, ObjectVerb.GET), + ], ) - - for storage_object in storage_objects[0:2]: - method_under_test( - user_wallet.path, - storage_object.cid, - storage_object.oid, - client_shell, - session=static_sessions[verb], + def test_static_session_read( + self, + user_wallet: WalletFile, + storage_objects: list[StorageObjectInfo], + static_sessions: list[str], + method_under_test, + verb: str, + request: FixtureRequest, + ): + """ + Validate static session with read operations + """ + allure.dynamic.title( + f"Validate static session with read operations for {request.node.callspec.id}" ) + for node in self.cluster.storage_nodes: + for storage_object in storage_objects[0:2]: + method_under_test( + user_wallet.path, + storage_object.cid, + storage_object.oid, + shell=self.shell, + endpoint=node.get_rpc_endpoint(), + session=static_sessions[verb], + ) -@allure.title("Validate static session with range operations") -@pytest.mark.static_session -@pytest.mark.parametrize( - "method_under_test,verb", - [(get_range, ObjectVerb.RANGE), (get_range_hash, ObjectVerb.RANGEHASH)], -) -def test_static_session_range( - user_wallet: WalletFile, - client_shell: Shell, - storage_objects: list[StorageObjectInfo], - static_sessions: dict[ObjectVerb, str], - method_under_test, - verb: ObjectVerb, - request: FixtureRequest, -): - """ - Validate static session with range operations - """ - allure.dynamic.title( - f"Validate static session with range operations for {request.node.callspec.id}" + @allure.title("Validate static session with range operations") + @pytest.mark.static_session + @pytest.mark.parametrize( + "method_under_test,verb", + [(get_range, ObjectVerb.RANGE), (get_range_hash, ObjectVerb.RANGEHASH)], ) - storage_object = storage_objects[0] - ranges_to_test = get_ranges(storage_object, client_shell) + def 
test_static_session_range( + self, + user_wallet: WalletFile, + storage_objects: list[StorageObjectInfo], + static_sessions: list[str], + method_under_test, + verb: str, + request: FixtureRequest, + ): + """ + Validate static session with range operations + """ + allure.dynamic.title( + f"Validate static session with range operations for {request.node.callspec.id}" + ) + storage_object = storage_objects[0] + ranges_to_test = get_ranges(storage_object, self.shell, self.cluster.default_rpc_endpoint) - for range_to_test in ranges_to_test: - with allure.step(f"Check range {range_to_test}"): - method_under_test( + for range_to_test in ranges_to_test: + with allure.step(f"Check range {range_to_test}"): + method_under_test( + user_wallet.path, + storage_object.cid, + storage_object.oid, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + session=static_sessions[verb], + range_cut=range_to_test, + ) + + @allure.title("Validate static session with search operation") + @pytest.mark.static_session + @pytest.mark.xfail + # (see https://github.com/nspcc-dev/neofs-node/issues/2030) + def test_static_session_search( + self, + user_wallet: WalletFile, + storage_objects: list[StorageObjectInfo], + static_sessions: list[str], + request: FixtureRequest, + ): + """ + Validate static session with search operations + """ + allure.dynamic.title(f"Validate static session with search for {request.node.callspec.id}") + + cid = storage_objects[0].cid + expected_object_ids = [storage_object.oid for storage_object in storage_objects[0:2]] + actual_object_ids = search_object( + user_wallet.path, + cid, + self.shell, + endpoint=self.cluster.default_rpc_endpoint, + session=static_sessions[ObjectVerb.SEARCH], + root=True, + ) + assert expected_object_ids == actual_object_ids + + @allure.title("Validate static session with object id not in session") + @pytest.mark.static_session + def test_static_session_unrelated_object( + self, + user_wallet: WalletFile, + storage_objects: 
list[StorageObjectInfo], + static_sessions: list[str], + request: FixtureRequest, + ): + """ + Validate static session with object id not in session + """ + allure.dynamic.title( + f"Validate static session with object id not in session for {request.node.callspec.id}" + ) + with pytest.raises(Exception, match=UNRELATED_OBJECT): + head_object( + user_wallet.path, + storage_objects[2].cid, + storage_objects[2].oid, + self.shell, + self.cluster.default_rpc_endpoint, + session=static_sessions[ObjectVerb.HEAD], + ) + + @allure.title("Validate static session with user id not in session") + @pytest.mark.static_session + def test_static_session_head_unrelated_user( + self, + stranger_wallet: WalletFile, + storage_objects: list[StorageObjectInfo], + static_sessions: list[str], + request: FixtureRequest, + ): + """ + Validate static session with user id not in session + """ + allure.dynamic.title( + f"Validate static session with user id not in session for {request.node.callspec.id}" + ) + storage_object = storage_objects[0] + + with pytest.raises(Exception, match=UNRELATED_KEY): + head_object( + stranger_wallet.path, + storage_object.cid, + storage_object.oid, + self.shell, + self.cluster.default_rpc_endpoint, + session=static_sessions[ObjectVerb.HEAD], + ) + + @allure.title("Validate static session with wrong verb in session") + @pytest.mark.static_session + def test_static_session_head_wrong_verb( + self, + user_wallet: WalletFile, + storage_objects: list[StorageObjectInfo], + static_sessions: list[str], + request: FixtureRequest, + ): + """ + Validate static session with wrong verb in session + """ + allure.dynamic.title( + f"Validate static session with wrong verb in session for {request.node.callspec.id}" + ) + storage_object = storage_objects[0] + + with pytest.raises(Exception, match=WRONG_VERB): + get_object( user_wallet.path, storage_object.cid, storage_object.oid, - shell=client_shell, - session=static_sessions[verb], - range_cut=range_to_test, + self.shell, + 
self.cluster.default_rpc_endpoint, + session=static_sessions[ObjectVerb.HEAD], ) + @allure.title("Validate static session with container id not in session") + @pytest.mark.static_session + def test_static_session_unrelated_container( + self, + user_wallet: WalletFile, + storage_objects: list[StorageObjectInfo], + storage_containers: list[str], + static_sessions: list[str], + request: FixtureRequest, + ): + """ + Validate static session with container id not in session + """ + allure.dynamic.title( + f"Validate static session with container id not in session for {request.node.callspec.id}" + ) + storage_object = storage_objects[0] -@allure.title("Validate static session with search operation") -@pytest.mark.static_session -@pytest.mark.xfail -# (see https://github.com/nspcc-dev/neofs-node/issues/2030) -def test_static_session_search( - user_wallet: WalletFile, - client_shell: Shell, - storage_objects: list[StorageObjectInfo], - static_sessions: dict[ObjectVerb, str], - request: FixtureRequest, -): - """ - Validate static session with search operations - """ - allure.dynamic.title(f"Validate static session with search for {request.node.callspec.id}") + with pytest.raises(Exception, match=UNRELATED_CONTAINER): + get_object_from_random_node( + user_wallet.path, + storage_containers[1], + storage_object.oid, + self.shell, + self.cluster, + session=static_sessions[ObjectVerb.GET], + ) - cid = storage_objects[0].cid - expected_object_ids = [storage_object.oid for storage_object in storage_objects[0:2]] - actual_object_ids = search_object( - user_wallet.path, cid, client_shell, session=static_sessions[ObjectVerb.SEARCH], root=True - ) - assert expected_object_ids == actual_object_ids + @allure.title("Validate static session which signed by another wallet") + @pytest.mark.static_session + def test_static_session_signed_by_other( + self, + owner_wallet: WalletFile, + user_wallet: WalletFile, + stranger_wallet: WalletFile, + storage_containers: list[int], + storage_objects: 
list[StorageObjectInfo], + temp_directory: str, + request: FixtureRequest, + ): + """ + Validate static session which signed by another wallet + """ + allure.dynamic.title( + f"Validate static session which signed by another wallet for {request.node.callspec.id}" + ) + storage_object = storage_objects[0] + session_token_file = generate_object_session_token( + owner_wallet, + user_wallet, + [storage_object.oid], + storage_containers[0], + ObjectVerb.HEAD, + temp_directory, + ) + signed_token_file = sign_session_token(self.shell, session_token_file, stranger_wallet) + with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): + head_object( + user_wallet.path, + storage_object.cid, + storage_object.oid, + self.shell, + self.cluster.default_rpc_endpoint, + session=signed_token_file, + ) + + @allure.title("Validate static session which signed for another container") + @pytest.mark.static_session + def test_static_session_signed_for_other_container( + self, + owner_wallet: WalletFile, + user_wallet: WalletFile, + storage_containers: list[str], + storage_objects: list[StorageObjectInfo], + temp_directory: str, + request: FixtureRequest, + ): + """ + Validate static session which signed for another container + """ + allure.dynamic.title( + f"Validate static session which signed for another container for {request.node.callspec.id}" + ) + storage_object = storage_objects[0] + container = storage_containers[1] + + session_token_file = generate_object_session_token( + owner_wallet, + user_wallet, + [storage_object.oid], + container, + ObjectVerb.HEAD, + temp_directory, + ) + signed_token_file = sign_session_token(self.shell, session_token_file, owner_wallet) + with pytest.raises(Exception, match=OBJECT_NOT_FOUND): + head_object( + user_wallet.path, + container, + storage_object.oid, + self.shell, + self.cluster.default_rpc_endpoint, + session=signed_token_file, + ) + + @allure.title("Validate static session which wasn't signed") + @pytest.mark.static_session + def 
test_static_session_without_sign( + self, + owner_wallet: WalletFile, + user_wallet: WalletFile, + storage_containers: list[str], + storage_objects: list[StorageObjectInfo], + temp_directory: str, + request: FixtureRequest, + ): + """ + Validate static session which wasn't signed + """ + allure.dynamic.title( + f"Validate static session which wasn't signed for {request.node.callspec.id}" + ) + storage_object = storage_objects[0] + + session_token_file = generate_object_session_token( + owner_wallet, + user_wallet, + [storage_object.oid], + storage_containers[0], + ObjectVerb.HEAD, + temp_directory, + ) + with pytest.raises(Exception, match=INVALID_SIGNATURE): + head_object( + user_wallet.path, + storage_object.cid, + storage_object.oid, + self.shell, + self.cluster.default_rpc_endpoint, + session=session_token_file, + ) + + @allure.title("Validate static session which expires at next epoch") + @pytest.mark.static_session + def test_static_session_expiration_at_next( + self, + owner_wallet: WalletFile, + user_wallet: WalletFile, + storage_containers: list[str], + storage_objects: list[StorageObjectInfo], + temp_directory: str, + request: FixtureRequest, + ): + """ + Validate static session which expires at next epoch + """ + allure.dynamic.title( + f"Validate static session which expires at next epoch for {request.node.callspec.id}" + ) + epoch = ensure_fresh_epoch(self.shell, self.cluster) + + container = storage_containers[0] + object_id = storage_objects[0].oid + expiration = Lifetime(epoch + 1, epoch, epoch) + + token_expire_at_next_epoch = get_object_signed_token( + owner_wallet, + user_wallet, + container, + storage_objects, + ObjectVerb.HEAD, + self.shell, + temp_directory, + expiration, + ) -@allure.title("Validate static session with object id not in session") -@pytest.mark.static_session -def test_static_session_unrelated_object( - user_wallet: WalletFile, - client_shell: Shell, - storage_objects: list[StorageObjectInfo], - static_sessions: 
dict[ObjectVerb, str], - request: FixtureRequest, -): - """ - Validate static session with object id not in session - """ - allure.dynamic.title( - f"Validate static session with object id not in session for {request.node.callspec.id}" - ) - with pytest.raises(Exception, match=UNRELATED_OBJECT): head_object( user_wallet.path, - storage_objects[2].cid, - storage_objects[2].oid, - client_shell, - session=static_sessions[ObjectVerb.HEAD], + container, + object_id, + self.shell, + self.cluster.default_rpc_endpoint, + session=token_expire_at_next_epoch, ) + self.tick_epoch() -@allure.title("Validate static session with user id not in session") -@pytest.mark.static_session -def test_static_session_head_unrelated_user( - stranger_wallet: WalletFile, - client_shell: Shell, - storage_objects: list[StorageObjectInfo], - static_sessions: dict[ObjectVerb, str], - request: FixtureRequest, -): - """ - Validate static session with user id not in session - """ - allure.dynamic.title( - f"Validate static session with user id not in session for {request.node.callspec.id}" - ) - storage_object = storage_objects[0] + with pytest.raises(Exception, match=MALFORMED_REQUEST): + head_object( + user_wallet.path, + container, + object_id, + self.shell, + self.cluster.default_rpc_endpoint, + session=token_expire_at_next_epoch, + ) - with pytest.raises(Exception, match=UNRELATED_KEY): - head_object( - stranger_wallet.path, - storage_object.cid, - storage_object.oid, - client_shell, - session=static_sessions[ObjectVerb.HEAD], + @allure.title("Validate static session which is valid starting from next epoch") + @pytest.mark.static_session + def test_static_session_start_at_next( + self, + owner_wallet: WalletFile, + user_wallet: WalletFile, + storage_containers: list[str], + storage_objects: list[StorageObjectInfo], + temp_directory: str, + request: FixtureRequest, + ): + """ + Validate static session which is valid starting from next epoch + """ + allure.dynamic.title( + f"Validate static 
session which is valid starting from next epoch for {request.node.callspec.id}" + ) + epoch = ensure_fresh_epoch(self.shell, self.cluster) + + container = storage_containers[0] + object_id = storage_objects[0].oid + expiration = Lifetime(epoch + 2, epoch + 1, epoch) + + token_start_at_next_epoch = get_object_signed_token( + owner_wallet, + user_wallet, + container, + storage_objects, + ObjectVerb.HEAD, + self.shell, + temp_directory, + expiration, ) + with pytest.raises(Exception, match=MALFORMED_REQUEST): + head_object( + user_wallet.path, + container, + object_id, + self.shell, + self.cluster.default_rpc_endpoint, + session=token_start_at_next_epoch, + ) -@allure.title("Validate static session with wrong verb in session") -@pytest.mark.static_session -def test_static_session_head_wrong_verb( - user_wallet: WalletFile, - client_shell: Shell, - storage_objects: list[StorageObjectInfo], - static_sessions: dict[ObjectVerb, str], - request: FixtureRequest, -): - """ - Validate static session with wrong verb in session - """ - allure.dynamic.title( - f"Validate static session with wrong verb in session for {request.node.callspec.id}" - ) - storage_object = storage_objects[0] - - with pytest.raises(Exception, match=WRONG_VERB): - get_object( - user_wallet.path, - storage_object.cid, - storage_object.oid, - client_shell, - session=static_sessions[ObjectVerb.HEAD], - ) - - -@allure.title("Validate static session with container id not in session") -@pytest.mark.static_session -def test_static_session_unrelated_container( - owner_wallet: WalletFile, - user_wallet: WalletFile, - client_shell: Shell, - storage_containers: list[str], - storage_objects: list[StorageObjectInfo], - static_sessions: dict[ObjectVerb, str], - request: FixtureRequest, -): - """ - Validate static session with container id not in session - """ - allure.dynamic.title( - f"Validate static session with container id not in session for {request.node.callspec.id}" - ) - storage_object = storage_objects[0] - 
- with pytest.raises(Exception, match=UNRELATED_CONTAINER): - get_object( - user_wallet.path, - storage_containers[1], - storage_object.oid, - client_shell, - session=static_sessions[ObjectVerb.GET], - ) - - -@allure.title("Validate static session which signed by another wallet") -@pytest.mark.static_session -def test_static_session_signed_by_other( - owner_wallet: WalletFile, - user_wallet: WalletFile, - stranger_wallet: WalletFile, - client_shell: Shell, - storage_containers: list[str], - storage_objects: list[StorageObjectInfo], - prepare_tmp_dir: str, - request: FixtureRequest, -): - """ - Validate static session which signed by another wallet - """ - allure.dynamic.title( - f"Validate static session which signed by another wallet for {request.node.callspec.id}" - ) - storage_object = storage_objects[0] - - session_token_file = generate_object_session_token( - owner_wallet, - user_wallet, - [storage_object.oid], - storage_containers[0], - ObjectVerb.HEAD, - prepare_tmp_dir, - ) - signed_token_file = sign_session_token(client_shell, session_token_file, stranger_wallet) - with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): + self.tick_epoch() head_object( user_wallet.path, - storage_object.cid, - storage_object.oid, - client_shell, - session=signed_token_file, + container, + object_id, + self.shell, + self.cluster.default_rpc_endpoint, + session=token_start_at_next_epoch, ) + self.tick_epoch() + with pytest.raises(Exception, match=MALFORMED_REQUEST): + head_object( + user_wallet.path, + container, + object_id, + self.shell, + self.cluster.default_rpc_endpoint, + session=token_start_at_next_epoch, + ) -@allure.title("Validate static session which signed for another container") -@pytest.mark.static_session -def test_static_session_signed_for_other_container( - owner_wallet: WalletFile, - user_wallet: WalletFile, - client_shell: Shell, - storage_containers: list[str], - storage_objects: list[StorageObjectInfo], - prepare_tmp_dir: str, - request: 
FixtureRequest, -): - """ - Validate static session which signed for another container - """ - allure.dynamic.title( - f"Validate static session which signed for another container for {request.node.callspec.id}" - ) - storage_object = storage_objects[0] - container = storage_containers[1] + @allure.title("Validate static session which is already expired") + @pytest.mark.static_session + def test_static_session_already_expired( + self, + owner_wallet: WalletFile, + user_wallet: WalletFile, + storage_containers: list[str], + storage_objects: list[StorageObjectInfo], + temp_directory: str, + request: FixtureRequest, + ): + """ + Validate static session which is already expired + """ + allure.dynamic.title( + f"Validate static session which is already expired for {request.node.callspec.id}" + ) + epoch = ensure_fresh_epoch(self.shell, self.cluster) - session_token_file = generate_object_session_token( - owner_wallet, user_wallet, [storage_object.oid], container, ObjectVerb.HEAD, prepare_tmp_dir - ) - signed_token_file = sign_session_token(client_shell, session_token_file, owner_wallet) - with pytest.raises(Exception, match=OBJECT_NOT_FOUND): - head_object( - user_wallet.path, container, storage_object.oid, client_shell, session=signed_token_file + container = storage_containers[0] + object_id = storage_objects[0].oid + expiration = Lifetime(epoch - 1, epoch - 2, epoch - 2) + + token_already_expired = get_object_signed_token( + owner_wallet, + user_wallet, + container, + storage_objects, + ObjectVerb.HEAD, + self.shell, + temp_directory, + expiration, ) + with pytest.raises(Exception, match=MALFORMED_REQUEST): + head_object( + user_wallet.path, + container, + object_id, + self.shell, + self.cluster.default_rpc_endpoint, + session=token_already_expired, + ) -@allure.title("Validate static session which wasn't signed") -@pytest.mark.static_session -def test_static_session_without_sign( - owner_wallet: WalletFile, - user_wallet: WalletFile, - client_shell: Shell, - 
storage_containers: list[str], - storage_objects: list[StorageObjectInfo], - prepare_tmp_dir: str, - request: FixtureRequest, -): - """ - Validate static session which wasn't signed - """ - allure.dynamic.title( - f"Validate static session which wasn't signed for {request.node.callspec.id}" - ) - storage_object = storage_objects[0] + @allure.title("Delete verb should be restricted for static session") + def test_static_session_delete_verb( + self, + user_wallet: WalletFile, + storage_objects: list[StorageObjectInfo], + static_sessions: list[str], + request: FixtureRequest, + ): + """ + Delete verb should be restricted for static session + """ + allure.dynamic.title( + f"Delete verb should be restricted for static session for {request.node.callspec.id}" + ) + storage_object = storage_objects[0] + with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): + delete_object( + user_wallet.path, + storage_object.cid, + storage_object.oid, + self.shell, + endpoint=self.cluster.default_rpc_endpoint, + session=static_sessions[ObjectVerb.DELETE], + ) - session_token_file = generate_object_session_token( - owner_wallet, - user_wallet, - [storage_object.oid], - storage_containers[0], - ObjectVerb.HEAD, - prepare_tmp_dir, - ) - with pytest.raises(Exception, match=INVALID_SIGNATURE): - head_object( - user_wallet.path, - storage_object.cid, - storage_object.oid, - client_shell, - session=session_token_file, + @allure.title("Put verb should be restricted for static session") + def test_static_session_put_verb( + self, + user_wallet: WalletFile, + storage_objects: list[StorageObjectInfo], + static_sessions: list[str], + request: FixtureRequest, + ): + """ + Put verb should be restricted for static session + """ + allure.dynamic.title( + f"Put verb should be restricted for static session for {request.node.callspec.id}" + ) + storage_object = storage_objects[0] + with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): + put_object_to_random_node( + user_wallet.path, + 
storage_object.file_path, + storage_object.cid, + self.shell, + self.cluster, + session=static_sessions[ObjectVerb.PUT], + ) + + @allure.title("Validate static session which is issued in future epoch") + @pytest.mark.static_session + def test_static_session_invalid_issued_epoch( + self, + owner_wallet: WalletFile, + user_wallet: WalletFile, + storage_containers: list[str], + storage_objects: list[StorageObjectInfo], + temp_directory: str, + request: FixtureRequest, + ): + """ + Validate static session which is issued in future epoch + """ + allure.dynamic.title( + f"Validate static session which is issued in future epoch for {request.node.callspec.id}" + ) + epoch = ensure_fresh_epoch(self.shell, self.cluster) + + container = storage_containers[0] + object_id = storage_objects[0].oid + expiration = Lifetime(epoch + 10, 0, epoch + 1) + + token_invalid_issue_time = get_object_signed_token( + owner_wallet, + user_wallet, + container, + storage_objects, + ObjectVerb.HEAD, + self.shell, + temp_directory, + expiration, ) - -@allure.title("Validate static session which expires at next epoch") -@pytest.mark.static_session -def test_static_session_expiration_at_next( - owner_wallet: WalletFile, - user_wallet: WalletFile, - client_shell: Shell, - storage_containers: list[str], - storage_objects: list[StorageObjectInfo], - prepare_tmp_dir: str, - request: FixtureRequest, -): - """ - Validate static session which expires at next epoch - """ - allure.dynamic.title( - f"Validate static session which expires at next epoch for {request.node.callspec.id}" - ) - epoch = ensure_fresh_epoch(client_shell) - - container = storage_containers[0] - object_id = storage_objects[0].oid - expiration = Lifetime(epoch + 1, epoch, epoch) - - token_expire_at_next_epoch = get_object_signed_token( - owner_wallet, - user_wallet, - container, - storage_objects, - ObjectVerb.HEAD, - client_shell, - prepare_tmp_dir, - expiration, - ) - - head_object( - user_wallet.path, container, object_id, 
client_shell, session=token_expire_at_next_epoch - ) - - tick_epoch(client_shell) - - with pytest.raises(Exception, match=MALFORMED_REQUEST): - head_object( - user_wallet.path, container, object_id, client_shell, session=token_expire_at_next_epoch - ) - - -@allure.title("Validate static session which is valid starting from next epoch") -@pytest.mark.static_session -def test_static_session_start_at_next( - owner_wallet: WalletFile, - user_wallet: WalletFile, - client_shell: Shell, - storage_containers: list[str], - storage_objects: list[StorageObjectInfo], - prepare_tmp_dir: str, - request: FixtureRequest, -): - """ - Validate static session which is valid starting from next epoch - """ - allure.dynamic.title( - "Validate static session which is valid starting from next epoch " - f"for {request.node.callspec.id}" - ) - epoch = ensure_fresh_epoch(client_shell) - - container = storage_containers[0] - object_id = storage_objects[0].oid - expiration = Lifetime(epoch + 2, epoch + 1, epoch) - - token_start_at_next_epoch = get_object_signed_token( - owner_wallet, - user_wallet, - container, - storage_objects, - ObjectVerb.HEAD, - client_shell, - prepare_tmp_dir, - expiration, - ) - - with pytest.raises(Exception, match=MALFORMED_REQUEST): - head_object( - user_wallet.path, container, object_id, client_shell, session=token_start_at_next_epoch - ) - - tick_epoch(client_shell) - head_object( - user_wallet.path, container, object_id, client_shell, session=token_start_at_next_epoch - ) - - tick_epoch(client_shell) - with pytest.raises(Exception, match=MALFORMED_REQUEST): - head_object( - user_wallet.path, container, object_id, client_shell, session=token_start_at_next_epoch - ) - - -@allure.title("Validate static session which is already expired") -@pytest.mark.static_session -def test_static_session_already_expired( - owner_wallet: WalletFile, - user_wallet: WalletFile, - client_shell: Shell, - storage_containers: list[str], - storage_objects: list[StorageObjectInfo], - 
prepare_tmp_dir: str, - request: FixtureRequest, -): - """ - Validate static session which is already expired - """ - allure.dynamic.title( - f"Validate static session which is already expired for {request.node.callspec.id}" - ) - epoch = ensure_fresh_epoch(client_shell) - - container = storage_containers[0] - object_id = storage_objects[0].oid - expiration = Lifetime(epoch - 1, epoch - 2, epoch - 2) - - token_already_expired = get_object_signed_token( - owner_wallet, - user_wallet, - container, - storage_objects, - ObjectVerb.HEAD, - client_shell, - prepare_tmp_dir, - expiration, - ) - - with pytest.raises(Exception, match=MALFORMED_REQUEST): - head_object( - user_wallet.path, container, object_id, client_shell, session=token_already_expired - ) - - -@allure.title("Delete verb should be restricted for static session") -def test_static_session_delete_verb( - user_wallet: WalletFile, - client_shell: Shell, - storage_containers: list[str], - storage_objects: list[StorageObjectInfo], - static_sessions: dict[ObjectVerb, str], - request: FixtureRequest, -): - """ - Delete verb should be restricted for static session - """ - allure.dynamic.title( - f"Delete verb should be restricted for static session for {request.node.callspec.id}" - ) - storage_object = storage_objects[0] - with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): - delete_object( - user_wallet.path, - storage_object.cid, - storage_object.oid, - client_shell, - session=static_sessions[ObjectVerb.DELETE], - ) - - -@allure.title("Put verb should be restricted for static session") -def test_static_session_put_verb( - user_wallet: WalletFile, - client_shell: Shell, - storage_objects: list[StorageObjectInfo], - static_sessions: dict[ObjectVerb, str], - request: FixtureRequest, -): - """ - Put verb should be restricted for static session - """ - allure.dynamic.title( - f"Put verb should be restricted for static session for {request.node.callspec.id}" - ) - storage_object = storage_objects[0] - with 
pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): - put_object( - user_wallet.path, - storage_object.file_path, - storage_object.cid, - client_shell, - session=static_sessions[ObjectVerb.PUT], - ) - - -@allure.title("Validate static session which is issued in future epoch") -@pytest.mark.static_session -def test_static_session_invalid_issued_epoch( - owner_wallet: WalletFile, - user_wallet: WalletFile, - client_shell: Shell, - storage_containers: list[str], - storage_objects: list[StorageObjectInfo], - prepare_tmp_dir: str, - request: FixtureRequest, -): - """ - Validate static session which is issued in future epoch - """ - allure.dynamic.title( - f"Validate static session which is issued in future epoch for {request.node.callspec.id}" - ) - epoch = ensure_fresh_epoch(client_shell) - - container = storage_containers[0] - object_id = storage_objects[0].oid - expiration = Lifetime(epoch + 10, 0, epoch + 1) - - token_invalid_issue_time = get_object_signed_token( - owner_wallet, - user_wallet, - container, - storage_objects, - ObjectVerb.HEAD, - client_shell, - prepare_tmp_dir, - expiration, - ) - - with pytest.raises(Exception, match=MALFORMED_REQUEST): - head_object( - user_wallet.path, container, object_id, client_shell, session=token_invalid_issue_time - ) + with pytest.raises(Exception, match=MALFORMED_REQUEST): + head_object( + user_wallet.path, + container, + object_id, + self.shell, + self.cluster.default_rpc_endpoint, + session=token_invalid_issue_time, + ) diff --git a/pytest_tests/testsuites/session_token/test_static_session_token_container.py b/pytest_tests/testsuites/session_token/test_static_session_token_container.py index 4592155..d1ba879 100644 --- a/pytest_tests/testsuites/session_token/test_static_session_token_container.py +++ b/pytest_tests/testsuites/session_token/test_static_session_token_container.py @@ -21,31 +21,31 @@ from python_keywords.object_access import can_put_object from wallet import WalletFile from wellknown_acl import 
PUBLIC_ACL +from steps.cluster_test_base import ClusterTestBase from steps.session_token import ContainerVerb, get_container_signed_token -class TestSessionTokenContainer: +class TestSessionTokenContainer(ClusterTestBase): @pytest.fixture(scope="module") def static_sessions( self, owner_wallet: WalletFile, user_wallet: WalletFile, client_shell: Shell, - prepare_tmp_dir: str, + temp_directory: str, ) -> dict[ContainerVerb, str]: """ Returns dict with static session token file paths for all verbs with default lifetime """ return { verb: get_container_signed_token( - owner_wallet, user_wallet, verb, client_shell, prepare_tmp_dir + owner_wallet, user_wallet, verb, client_shell, temp_directory ) for verb in ContainerVerb } def test_static_session_token_container_create( self, - client_shell: Shell, owner_wallet: WalletFile, user_wallet: WalletFile, static_sessions: dict[ContainerVerb, str], @@ -57,21 +57,26 @@ class TestSessionTokenContainer: cid = create_container( user_wallet.path, session_token=static_sessions[ContainerVerb.CREATE], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, wait_for_creation=False, ) - container_info: dict[str, str] = get_container(owner_wallet.path, cid, shell=client_shell) + container_info: dict[str, str] = get_container( + owner_wallet.path, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) assert container_info["ownerID"] == owner_wallet.get_address() - assert cid not in list_containers(user_wallet.path, shell=client_shell) - assert cid in list_containers(owner_wallet.path, shell=client_shell) + assert cid not in list_containers( + user_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) + assert cid in list_containers( + owner_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) @pytest.mark.skip("Failed with timeout") def test_static_session_token_container_create_with_other_verb( self, - client_shell: Shell, - owner_wallet: 
WalletFile, user_wallet: WalletFile, static_sessions: dict[ContainerVerb, str], ): @@ -84,15 +89,14 @@ class TestSessionTokenContainer: create_container( user_wallet.path, session_token=static_sessions[verb], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, wait_for_creation=False, ) @pytest.mark.skip("Failed with timeout") def test_static_session_token_container_create_with_other_wallet( self, - client_shell: Shell, - owner_wallet: WalletFile, stranger_wallet: WalletFile, static_sessions: dict[ContainerVerb, str], ): @@ -104,13 +108,13 @@ class TestSessionTokenContainer: create_container( stranger_wallet.path, session_token=static_sessions[ContainerVerb.CREATE], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, wait_for_creation=False, ) def test_static_session_token_container_delete( self, - client_shell: Shell, owner_wallet: WalletFile, user_wallet: WalletFile, static_sessions: dict[ContainerVerb, str], @@ -119,20 +123,27 @@ class TestSessionTokenContainer: Validate static session with delete operation """ with allure.step("Create container"): - cid = create_container(owner_wallet.path, shell=client_shell, wait_for_creation=False) + cid = create_container( + owner_wallet.path, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + wait_for_creation=False, + ) with allure.step("Delete container with static session token"): delete_container( wallet=user_wallet.path, cid=cid, session_token=static_sessions[ContainerVerb.DELETE], - shell=client_shell, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, ) - assert cid not in list_containers(owner_wallet.path, shell=client_shell) + assert cid not in list_containers( + owner_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint + ) def test_static_session_token_container_set_eacl( self, - client_shell: Shell, owner_wallet: WalletFile, user_wallet: WalletFile, stranger_wallet: WalletFile, @@ -142,9 
+153,14 @@ class TestSessionTokenContainer: Validate static session with set eacl operation """ with allure.step("Create container"): - cid = create_container(owner_wallet.path, basic_acl=PUBLIC_ACL, shell=client_shell) + cid = create_container( + owner_wallet.path, + basic_acl=PUBLIC_ACL, + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, + ) file_path = generate_file() - assert can_put_object(stranger_wallet.path, cid, file_path, client_shell) + assert can_put_object(stranger_wallet.path, cid, file_path, self.shell, self.cluster) with allure.step(f"Deny all operations for other via eACL"): eacl_deny = [ @@ -154,10 +170,11 @@ class TestSessionTokenContainer: set_eacl( user_wallet.path, cid, - create_eacl(cid, eacl_deny, shell=client_shell), - shell=client_shell, + create_eacl(cid, eacl_deny, shell=self.shell), + shell=self.shell, + endpoint=self.cluster.default_rpc_endpoint, session_token=static_sessions[ContainerVerb.SETEACL], ) wait_for_cache_expired() - assert not can_put_object(stranger_wallet.path, cid, file_path, client_shell) + assert not can_put_object(stranger_wallet.path, cid, file_path, self.shell, self.cluster) diff --git a/robot/resources/lib/python_keywords/acl.py b/robot/resources/lib/python_keywords/acl.py index 3164b7f..78f9ed9 100644 --- a/robot/resources/lib/python_keywords/acl.py +++ b/robot/resources/lib/python_keywords/acl.py @@ -10,7 +10,7 @@ from typing import Any, Dict, List, Optional, Union import allure import base58 -from common import ASSETS_DIR, NEOFS_CLI_EXEC, NEOFS_ENDPOINT, WALLET_CONFIG +from common import ASSETS_DIR, NEOFS_CLI_EXEC, WALLET_CONFIG from data_formatters import get_wallet_public_key from neofs_testlib.cli import NeofsCli from neofs_testlib.shell import Shell @@ -116,10 +116,10 @@ class EACLRule: @allure.title("Get extended ACL") -def get_eacl(wallet_path: str, cid: str, shell: Shell) -> Optional[str]: +def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]: cli = 
NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG) try: - result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=NEOFS_ENDPOINT, cid=cid) + result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid) except RuntimeError as exc: logger.info("Extended ACL table is not set for this container") logger.info(f"Got exception while getting eacl: {exc}") @@ -135,12 +135,13 @@ def set_eacl( cid: str, eacl_table_path: str, shell: Shell, + endpoint: str, session_token: Optional[str] = None, ) -> None: cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG) cli.container.set_eacl( wallet=wallet_path, - rpc_endpoint=NEOFS_ENDPOINT, + rpc_endpoint=endpoint, cid=cid, table=eacl_table_path, await_mode=True, @@ -166,7 +167,11 @@ def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str: def form_bearertoken_file( - wif: str, cid: str, eacl_rule_list: List[Union[EACLRule, EACLPubKey]], shell: Shell + wif: str, + cid: str, + eacl_rule_list: List[Union[EACLRule, EACLPubKey]], + shell: Shell, + endpoint: str, ) -> str: """ This function fetches eACL for given on behalf of , @@ -176,7 +181,7 @@ def form_bearertoken_file( enc_cid = _encode_cid_for_eacl(cid) file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - eacl = get_eacl(wif, cid, shell=shell) + eacl = get_eacl(wif, cid, shell, endpoint) json_eacl = dict() if eacl: eacl = eacl.replace("eACL: ", "").split("Signature")[0] diff --git a/robot/resources/lib/python_keywords/complex_object_actions.py b/robot/resources/lib/python_keywords/complex_object_actions.py index 391686c..5547ef9 100644 --- a/robot/resources/lib/python_keywords/complex_object_actions.py +++ b/robot/resources/lib/python_keywords/complex_object_actions.py @@ -15,7 +15,8 @@ from typing import Optional import allure import neofs_verbs -from common import NEOFS_NETMAP, WALLET_CONFIG +from cluster import StorageNode +from common import WALLET_CONFIG from neofs_testlib.shell import Shell logger = 
logging.getLogger("NeoLogger") @@ -27,6 +28,7 @@ def get_link_object( cid: str, oid: str, shell: Shell, + nodes: list[StorageNode], bearer: str = "", wallet_config: str = WALLET_CONFIG, is_direct: bool = True, @@ -38,6 +40,7 @@ def get_link_object( cid (str): Container ID which stores the Large Object oid (str): Large Object ID shell: executor for cli command + nodes: list of nodes to do search on bearer (optional, str): path to Bearer token file wallet_config (optional, str): path to the neofs-cli config file is_direct: send request directly to the node or not; this flag @@ -47,14 +50,15 @@ def get_link_object( When no Link Object ID is found after all Storage Nodes polling, the function throws an error. """ - for node in NEOFS_NETMAP: + for node in nodes: + endpoint = node.get_rpc_endpoint() try: resp = neofs_verbs.head_object( wallet, cid, oid, shell=shell, - endpoint=node, + endpoint=endpoint, is_raw=True, is_direct=is_direct, bearer=bearer, @@ -63,13 +67,15 @@ def get_link_object( if resp["link"]: return resp["link"] except Exception: - logger.info(f"No Link Object found on {node}; continue") + logger.info(f"No Link Object found on {endpoint}; continue") logger.error(f"No Link Object for {cid}/{oid} found among all Storage Nodes") return None @allure.step("Get Last Object") -def get_last_object(wallet: str, cid: str, oid: str, shell: Shell) -> Optional[str]: +def get_last_object( + wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> Optional[str]: """ Args: wallet (str): path to the wallet on whose behalf the Storage Nodes @@ -77,19 +83,21 @@ def get_last_object(wallet: str, cid: str, oid: str, shell: Shell) -> Optional[s cid (str): Container ID which stores the Large Object oid (str): Large Object ID shell: executor for cli command + nodes: list of nodes to do search on Returns: (str): Last Object ID When no Last Object ID is found after all Storage Nodes polling, the function throws an error. 
""" - for node in NEOFS_NETMAP: + for node in nodes: + endpoint = node.get_rpc_endpoint() try: resp = neofs_verbs.head_object( - wallet, cid, oid, shell=shell, endpoint=node, is_raw=True, is_direct=True + wallet, cid, oid, shell=shell, endpoint=endpoint, is_raw=True, is_direct=True ) if resp["lastPart"]: return resp["lastPart"] except Exception: - logger.info(f"No Last Object found on {node}; continue") + logger.info(f"No Last Object found on {endpoint}; continue") logger.error(f"No Last Object for {cid}/{oid} found among all Storage Nodes") return None diff --git a/robot/resources/lib/python_keywords/container.py b/robot/resources/lib/python_keywords/container.py index 0b749ae..a8783a0 100644 --- a/robot/resources/lib/python_keywords/container.py +++ b/robot/resources/lib/python_keywords/container.py @@ -11,7 +11,7 @@ from typing import Optional, Union import allure import json_transformers -from common import NEOFS_CLI_EXEC, NEOFS_ENDPOINT, WALLET_CONFIG +from common import NEOFS_CLI_EXEC, WALLET_CONFIG from neofs_testlib.cli import NeofsCli from neofs_testlib.shell import Shell @@ -24,6 +24,7 @@ DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" def create_container( wallet: str, shell: Shell, + endpoint: str, rule: str = DEFAULT_PLACEMENT_RULE, basic_acl: str = "", attributes: Optional[dict] = None, @@ -49,6 +50,7 @@ def create_container( the session token; this parameter makes sense when paired with `session_token` shell: executor for cli command + endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key options (optional, dict): any other options to pass to the call name (optional, str): container name attribute await_mode (bool): block execution until container is persisted @@ -60,7 +62,7 @@ def create_container( cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG) result = cli.container.create( - rpc_endpoint=NEOFS_ENDPOINT, + rpc_endpoint=endpoint, wallet=session_wallet if session_wallet else wallet, policy=rule, 
basic_acl=basic_acl, @@ -76,16 +78,16 @@ def create_container( logger.info("Container created; waiting until it is persisted in the sidechain") if wait_for_creation: - wait_for_container_creation(wallet, cid, shell=shell) + wait_for_container_creation(wallet, cid, shell, endpoint) return cid def wait_for_container_creation( - wallet: str, cid: str, shell: Shell, attempts: int = 15, sleep_interval: int = 1 + wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1 ): for _ in range(attempts): - containers = list_containers(wallet, shell=shell) + containers = list_containers(wallet, shell, endpoint) if cid in containers: return logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") @@ -96,11 +98,11 @@ def wait_for_container_creation( def wait_for_container_deletion( - wallet: str, cid: str, shell: Shell, attempts: int = 30, sleep_interval: int = 1 + wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1 ): for _ in range(attempts): try: - get_container(wallet, cid, shell=shell) + get_container(wallet, cid, shell=shell, endpoint=endpoint) sleep(sleep_interval) continue except Exception as err: @@ -111,18 +113,19 @@ def wait_for_container_deletion( @allure.step("List Containers") -def list_containers(wallet: str, shell: Shell) -> list[str]: +def list_containers(wallet: str, shell: Shell, endpoint: str) -> list[str]: """ A wrapper for `neofs-cli container list` call. It returns all the available containers for the given wallet. 
Args: wallet (str): a wallet on whose behalf we list the containers shell: executor for cli command + endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key Returns: (list): list of containers """ cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG) - result = cli.container.list(rpc_endpoint=NEOFS_ENDPOINT, wallet=wallet) + result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet) logger.info(f"Containers: \n{result}") return result.stdout.split() @@ -132,6 +135,7 @@ def get_container( wallet: str, cid: str, shell: Shell, + endpoint: str, json_mode: bool = True, ) -> Union[dict, str]: """ @@ -141,14 +145,14 @@ def get_container( wallet (str): path to a wallet on whose behalf we get the container cid (str): ID of the container to get shell: executor for cli command + endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key json_mode (bool): return container in JSON format Returns: (dict, str): dict of container attributes """ + cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG) - result = cli.container.get( - rpc_endpoint=NEOFS_ENDPOINT, wallet=wallet, cid=cid, json_mode=json_mode - ) + result = cli.container.get(rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode) if not json_mode: return result.stdout @@ -166,7 +170,12 @@ def get_container( # TODO: make the error message about a non-found container more user-friendly # https://github.com/nspcc-dev/neofs-contract/issues/121 def delete_container( - wallet: str, cid: str, shell: Shell, force: bool = False, session_token: Optional[str] = None + wallet: str, + cid: str, + shell: Shell, + endpoint: str, + force: bool = False, + session_token: Optional[str] = None, ) -> None: """ A wrapper for `neofs-cli container delete` call. 
@@ -174,6 +183,7 @@ def delete_container( wallet (str): path to a wallet on whose behalf we delete the container cid (str): ID of the container to delete shell: executor for cli command + endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key force (bool): do not check whether container contains locks and remove immediately session_token: a path to session token file This function doesn't return anything. @@ -181,7 +191,7 @@ def delete_container( cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG) cli.container.delete( - wallet=wallet, cid=cid, rpc_endpoint=NEOFS_ENDPOINT, force=force, session=session_token + wallet=wallet, cid=cid, rpc_endpoint=endpoint, force=force, session=session_token ) @@ -212,10 +222,10 @@ def _parse_cid(output: str) -> str: @allure.step("Search container by name") -def search_container_by_name(wallet: str, name: str, shell: Shell): - list_cids = list_containers(wallet, shell) +def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str): + list_cids = list_containers(wallet, shell, endpoint) for cid in list_cids: - cont_info = get_container(wallet, cid, shell, True) + cont_info = get_container(wallet, cid, shell, endpoint, True) if cont_info.get("attributes").get("Name", None) == name: return cid return None diff --git a/robot/resources/lib/python_keywords/container_access.py b/robot/resources/lib/python_keywords/container_access.py index 79f64e4..1e09406 100644 --- a/robot/resources/lib/python_keywords/container_access.py +++ b/robot/resources/lib/python_keywords/container_access.py @@ -1,6 +1,7 @@ from typing import List, Optional from acl import EACLOperation +from cluster import Cluster from neofs_testlib.shell import Shell from python_keywords.object_access import ( can_delete_object, @@ -19,17 +20,21 @@ def check_full_access_to_container( oid: str, file_name: str, shell: Shell, + cluster: Cluster, bearer: Optional[str] = None, wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, 
): - assert can_put_object(wallet, cid, file_name, shell, bearer, wallet_config, xhdr) - assert can_get_head_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) - assert can_get_range_of_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) - assert can_get_range_hash_of_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) - assert can_search_object(wallet, cid, shell, oid, bearer, wallet_config, xhdr) - assert can_get_object(wallet, cid, oid, file_name, shell, bearer, wallet_config, xhdr) - assert can_delete_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) + endpoint = cluster.default_rpc_endpoint + assert can_put_object(wallet, cid, file_name, shell, cluster, bearer, wallet_config, xhdr) + assert can_get_head_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr) + assert can_get_range_of_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr) + assert can_get_range_hash_of_object( + wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr + ) + assert can_search_object(wallet, cid, shell, endpoint, oid, bearer, wallet_config, xhdr) + assert can_get_object(wallet, cid, oid, file_name, shell, cluster, bearer, wallet_config, xhdr) + assert can_delete_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr) def check_no_access_to_container( @@ -38,17 +43,25 @@ def check_no_access_to_container( oid: str, file_name: str, shell: Shell, + cluster: Cluster, bearer: Optional[str] = None, wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, ): - assert not can_put_object(wallet, cid, file_name, shell, bearer, wallet_config, xhdr) - assert not can_get_head_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) - assert not can_get_range_of_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) - assert not can_get_range_hash_of_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) - assert not can_search_object(wallet, cid, shell, oid, bearer, 
wallet_config, xhdr) - assert not can_get_object(wallet, cid, oid, file_name, shell, bearer, wallet_config, xhdr) - assert not can_delete_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) + endpoint = cluster.default_rpc_endpoint + assert not can_put_object(wallet, cid, file_name, shell, cluster, bearer, wallet_config, xhdr) + assert not can_get_head_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr) + assert not can_get_range_of_object( + wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr + ) + assert not can_get_range_hash_of_object( + wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr + ) + assert not can_search_object(wallet, cid, shell, endpoint, oid, bearer, wallet_config, xhdr) + assert not can_get_object( + wallet, cid, oid, file_name, shell, cluster, bearer, wallet_config, xhdr + ) + assert not can_delete_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr) def check_custom_access_to_container( @@ -57,42 +70,44 @@ def check_custom_access_to_container( oid: str, file_name: str, shell: Shell, + cluster: Cluster, deny_operations: Optional[List[EACLOperation]] = None, ignore_operations: Optional[List[EACLOperation]] = None, bearer: Optional[str] = None, wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, ): + endpoint = cluster.default_rpc_endpoint deny_operations = [op.value for op in deny_operations or []] ignore_operations = [op.value for op in ignore_operations or []] checks: dict = {} if EACLOperation.PUT.value not in ignore_operations: checks[EACLOperation.PUT.value] = can_put_object( - wallet, cid, file_name, shell, bearer, wallet_config, xhdr + wallet, cid, file_name, shell, cluster, bearer, wallet_config, xhdr ) if EACLOperation.HEAD.value not in ignore_operations: checks[EACLOperation.HEAD.value] = can_get_head_object( - wallet, cid, oid, shell, bearer, wallet_config, xhdr + wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr ) if 
EACLOperation.GET_RANGE.value not in ignore_operations: checks[EACLOperation.GET_RANGE.value] = can_get_range_of_object( - wallet, cid, oid, shell, bearer, wallet_config, xhdr + wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr ) if EACLOperation.GET_RANGE_HASH.value not in ignore_operations: checks[EACLOperation.GET_RANGE_HASH.value] = can_get_range_hash_of_object( - wallet, cid, oid, shell, bearer, wallet_config, xhdr + wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr ) if EACLOperation.SEARCH.value not in ignore_operations: checks[EACLOperation.SEARCH.value] = can_search_object( - wallet, cid, shell, oid, bearer, wallet_config, xhdr + wallet, cid, shell, endpoint, oid, bearer, wallet_config, xhdr ) if EACLOperation.GET.value not in ignore_operations: checks[EACLOperation.GET.value] = can_get_object( - wallet, cid, oid, file_name, shell, bearer, wallet_config, xhdr + wallet, cid, oid, file_name, shell, cluster, bearer, wallet_config, xhdr ) if EACLOperation.DELETE.value not in ignore_operations: checks[EACLOperation.DELETE.value] = can_delete_object( - wallet, cid, oid, shell, bearer, wallet_config, xhdr + wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr ) failed_checks = [ @@ -114,6 +129,7 @@ def check_read_only_container( oid: str, file_name: str, shell: Shell, + cluster: Cluster, bearer: Optional[str] = None, wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, @@ -128,4 +144,5 @@ def check_read_only_container( wallet_config=wallet_config, xhdr=xhdr, shell=shell, + cluster=cluster, ) diff --git a/robot/resources/lib/python_keywords/epoch.py b/robot/resources/lib/python_keywords/epoch.py index 9a760bb..07affef 100644 --- a/robot/resources/lib/python_keywords/epoch.py +++ b/robot/resources/lib/python_keywords/epoch.py @@ -3,15 +3,8 @@ import logging from time import sleep import allure -from common import ( - IR_WALLET_PASS, - IR_WALLET_PATH, - MAINNET_BLOCK_TIME, - MORPH_ENDPOINT, - NEOFS_ADM_CONFIG_PATH, - 
NEOFS_ADM_EXEC, - NEOGO_EXECUTABLE, -) +from cluster import Cluster +from common import MAINNET_BLOCK_TIME, NEOFS_ADM_CONFIG_PATH, NEOFS_ADM_EXEC, NEOGO_EXECUTABLE from neofs_testlib.cli import NeofsAdm, NeoGo from neofs_testlib.shell import Shell from neofs_testlib.utils.wallet import get_last_address_from_wallet @@ -22,28 +15,32 @@ logger = logging.getLogger("NeoLogger") @allure.step("Ensure fresh epoch") -def ensure_fresh_epoch(shell: Shell) -> int: +def ensure_fresh_epoch(shell: Shell, cluster: Cluster) -> int: # ensure new fresh epoch to avoid epoch switch during test session - current_epoch = get_epoch(shell) - tick_epoch(shell) - epoch = get_epoch(shell) + current_epoch = get_epoch(shell, cluster) + tick_epoch(shell, cluster) + epoch = get_epoch(shell, cluster) assert epoch > current_epoch, "Epoch wasn't ticked" return epoch @allure.step("Get Epoch") -def get_epoch(shell: Shell): +def get_epoch(shell: Shell, cluster: Cluster): + morph_chain = cluster.morph_chain_nodes[0] + morph_endpoint = morph_chain.get_endpoint() + neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE) out = neogo.contract.testinvokefunction( - scripthash=get_contract_hash("netmap.neofs", shell=shell), + scripthash=get_contract_hash(morph_chain, "netmap.neofs", shell=shell), method="epoch", - rpc_endpoint=MORPH_ENDPOINT, + rpc_endpoint=morph_endpoint, ) return int(json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"]) @allure.step("Tick Epoch") -def tick_epoch(shell: Shell): +def tick_epoch(shell: Shell, cluster: Cluster): + if NEOFS_ADM_EXEC and NEOFS_ADM_CONFIG_PATH: # If neofs-adm is available, then we tick epoch with it (to be consistent with UAT tests) neofsadm = NeofsAdm( @@ -52,21 +49,30 @@ def tick_epoch(shell: Shell): neofsadm.morph.force_new_epoch() return - # Otherwise we tick epoch using transaction - cur_epoch = get_epoch(shell) + # Use first node by default - ir_address = get_last_address_from_wallet(IR_WALLET_PATH, IR_WALLET_PASS) + # Otherwise we tick epoch 
using transaction + cur_epoch = get_epoch(shell, cluster) + + ir_node = cluster.ir_nodes[0] + # In case if no local_wallet_path is provided, we use wallet_path + ir_wallet_path = ir_node.get_wallet_path() + ir_wallet_pass = ir_node.get_wallet_password() + ir_address = get_last_address_from_wallet(ir_wallet_path, ir_wallet_pass) + + morph_chain = cluster.morph_chain_nodes[0] + morph_endpoint = morph_chain.get_endpoint() neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) neogo.contract.invokefunction( - wallet=IR_WALLET_PATH, - wallet_password=IR_WALLET_PASS, - scripthash=get_contract_hash("netmap.neofs", shell=shell), + wallet=ir_wallet_path, + wallet_password=ir_wallet_pass, + scripthash=get_contract_hash(morph_chain, "netmap.neofs", shell=shell), method="newEpoch", arguments=f"int:{cur_epoch + 1}", multisig_hash=f"{ir_address}:Global", address=ir_address, - rpc_endpoint=MORPH_ENDPOINT, + rpc_endpoint=morph_endpoint, force=True, gas=1, ) diff --git a/robot/resources/lib/python_keywords/failover_utils.py b/robot/resources/lib/python_keywords/failover_utils.py index 109beaa..118a772 100644 --- a/robot/resources/lib/python_keywords/failover_utils.py +++ b/robot/resources/lib/python_keywords/failover_utils.py @@ -1,55 +1,51 @@ import logging from time import sleep -from typing import Optional import allure -from common import NEOFS_NETMAP_DICT -from neofs_testlib.hosting import Hosting +from cluster import Cluster, StorageNode from neofs_testlib.shell import Shell -from python_keywords.node_management import node_healthcheck +from python_keywords.node_management import storage_node_healthcheck from storage_policy import get_nodes_with_object logger = logging.getLogger("NeoLogger") @allure.step("Wait for object replication") -def wait_object_replication_on_nodes( - wallet: str, +def wait_object_replication( cid: str, oid: str, expected_copies: int, shell: Shell, - excluded_nodes: Optional[list[str]] = None, -) -> list[str]: - excluded_nodes = excluded_nodes or [] + 
nodes: list[StorageNode], +) -> list[StorageNode]: sleep_interval, attempts = 15, 20 - nodes = [] - for __attempt in range(attempts): - nodes = get_nodes_with_object(wallet, cid, oid, shell=shell, skip_nodes=excluded_nodes) - if len(nodes) >= expected_copies: - return nodes + nodes_with_object = [] + for _ in range(attempts): + nodes_with_object = get_nodes_with_object(cid, oid, shell=shell, nodes=nodes) + if len(nodes_with_object) >= expected_copies: + return nodes_with_object sleep(sleep_interval) raise AssertionError( - f"Expected {expected_copies} copies of object, but found {len(nodes)}. " + f"Expected {expected_copies} copies of object, but found {len(nodes_with_object)}. " f"Waiting time {sleep_interval * attempts}" ) -@allure.step("Wait for storage node returned to cluster") -def wait_all_storage_node_returned(hosting: Hosting) -> None: +@allure.step("Wait for storage nodes returned to cluster") +def wait_all_storage_nodes_returned(cluster: Cluster) -> None: sleep_interval, attempts = 15, 20 for __attempt in range(attempts): - if is_all_storage_node_returned(hosting): + if is_all_storage_nodes_returned(cluster): return sleep(sleep_interval) raise AssertionError("Storage node(s) is broken") -def is_all_storage_node_returned(hosting: Hosting) -> bool: +def is_all_storage_nodes_returned(cluster: Cluster) -> bool: with allure.step("Run health check for all storage nodes"): - for node_name in NEOFS_NETMAP_DICT.keys(): + for node in cluster.storage_nodes: try: - health_check = node_healthcheck(hosting, node_name) + health_check = storage_node_healthcheck(node) except Exception as err: logger.warning(f"Node healthcheck fails with error {err}") return False diff --git a/robot/resources/lib/python_keywords/http_gate.py b/robot/resources/lib/python_keywords/http_gate.py index fa462eb..f376afe 100644 --- a/robot/resources/lib/python_keywords/http_gate.py +++ b/robot/resources/lib/python_keywords/http_gate.py @@ -9,7 +9,6 @@ from urllib.parse import quote_plus import 
allure import requests from cli_helpers import _cmd_run -from common import HTTP_GATE logger = logging.getLogger("NeoLogger") @@ -17,13 +16,14 @@ ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") @allure.step("Get via HTTP Gate") -def get_via_http_gate(cid: str, oid: str): +def get_via_http_gate(cid: str, oid: str, endpoint: str): """ This function gets given object from HTTP gate - :param cid: CID to get object from - :param oid: object OID + cid: container id to get object from + oid: object ID + endpoint: http gate endpoint """ - request = f"{HTTP_GATE}/get/{cid}/{oid}" + request = f"{endpoint}/get/{cid}/{oid}" resp = requests.get(request, stream=True) if not resp.ok: @@ -44,13 +44,14 @@ def get_via_http_gate(cid: str, oid: str): @allure.step("Get via Zip HTTP Gate") -def get_via_zip_http_gate(cid: str, prefix: str): +def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str): """ This function gets given object from HTTP gate - :param cid: CID to get object from - :param prefix: common prefix + cid: container id to get object from + prefix: common prefix + endpoint: http gate endpoint """ - request = f"{HTTP_GATE}/zip/{cid}/{prefix}" + request = f"{endpoint}/zip/{cid}/{prefix}" resp = requests.get(request, stream=True) if not resp.ok: @@ -75,15 +76,16 @@ def get_via_zip_http_gate(cid: str, prefix: str): @allure.step("Get via HTTP Gate by attribute") -def get_via_http_gate_by_attribute(cid: str, attribute: dict): +def get_via_http_gate_by_attribute(cid: str, attribute: dict, endpoint: str): """ This function gets given object from HTTP gate - :param cid: CID to get object from - :param attribute: attribute name: attribute value pair + cid: CID to get object from + attribute: attribute {name: attribute} value pair + endpoint: http gate endpoint """ attr_name = list(attribute.keys())[0] attr_value = quote_plus(str(attribute.get(attr_name))) - request = f"{HTTP_GATE}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" + request = 
f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" resp = requests.get(request, stream=True) if not resp.ok: @@ -104,14 +106,15 @@ def get_via_http_gate_by_attribute(cid: str, attribute: dict): @allure.step("Upload via HTTP Gate") -def upload_via_http_gate(cid: str, path: str, headers: dict = None) -> str: +def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: dict = None) -> str: """ This function upload given object through HTTP gate - :param cid: CID to get object from - :param path: File path to upload - :param headers: Object header + cid: CID to get object from + path: File path to upload + endpoint: http gate endpoint + headers: Object header """ - request = f"{HTTP_GATE}/upload/{cid}" + request = f"{endpoint}/upload/{cid}" files = {"upload_file": open(path, "rb")} body = {"filename": path} resp = requests.post(request, files=files, data=body, headers=headers) @@ -134,15 +137,16 @@ def upload_via_http_gate(cid: str, path: str, headers: dict = None) -> str: @allure.step("Upload via HTTP Gate using Curl") def upload_via_http_gate_curl( - cid: str, filepath: str, large_object=False, headers: dict = None + cid: str, filepath: str, endpoint: str, large_object=False, headers: dict = None ) -> str: """ This function upload given object through HTTP gate using curl utility. 
- :param cid: CID to get object from - :param filepath: File path to upload - :param headers: Object header + cid: CID to get object from + filepath: File path to upload + headers: Object header + endpoint: http gate endpoint """ - request = f"{HTTP_GATE}/upload/{cid}" + request = f"{endpoint}/upload/{cid}" files = f"file=@{filepath};filename={os.path.basename(filepath)}" cmd = f"curl -F '{files}' {request}" if large_object: @@ -156,13 +160,14 @@ def upload_via_http_gate_curl( @allure.step("Get via HTTP Gate using Curl") -def get_via_http_curl(cid: str, oid: str) -> str: +def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str: """ This function gets given object from HTTP gate using curl utility. - :param cid: CID to get object from - :param oid: object OID + cid: CID to get object from + oid: object OID + endpoint: http gate endpoint """ - request = f"{HTTP_GATE}/get/{cid}/{oid}" + request = f"{endpoint}/get/{cid}/{oid}" file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") cmd = f"curl {request} > {file_path}" diff --git a/robot/resources/lib/python_keywords/neofs_verbs.py b/robot/resources/lib/python_keywords/neofs_verbs.py index fd17e61..2501dc4 100644 --- a/robot/resources/lib/python_keywords/neofs_verbs.py +++ b/robot/resources/lib/python_keywords/neofs_verbs.py @@ -1,29 +1,77 @@ import json import logging import os -import random import re import uuid from typing import Any, Optional import allure import json_transformers -from common import ASSETS_DIR, NEOFS_CLI_EXEC, NEOFS_ENDPOINT, NEOFS_NETMAP, WALLET_CONFIG +from cluster import Cluster +from common import ASSETS_DIR, NEOFS_CLI_EXEC, WALLET_CONFIG from neofs_testlib.cli import NeofsCli from neofs_testlib.shell import Shell logger = logging.getLogger("NeoLogger") -@allure.step("Get object") +@allure.step("Get object from random node") +def get_object_from_random_node( + wallet: str, + cid: str, + oid: str, + shell: Shell, + cluster: Cluster, + bearer: Optional[str] 
= None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + wallet_config: Optional[str] = None, + no_progress: bool = True, + session: Optional[str] = None, +) -> str: + """ + GET from NeoFS random storage node + + Args: + wallet: wallet on whose behalf GET is done + cid: ID of Container where we get the Object from + oid: Object ID + shell: executor for cli command + bearer (optional, str): path to Bearer Token file, appends to `--bearer` key + write_object (optional, str): path to downloaded file, appends to `--file` key + cluster: cluster under test + wallet_config(optional, str): path to the wallet config + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + Returns: + (str): path to downloaded file + """ + endpoint = cluster.get_random_storage_rpc_endpoint() + return get_object( + wallet, + cid, + oid, + shell, + endpoint, + bearer, + write_object, + xhdr, + wallet_config, + no_progress, + session, + ) + + +@allure.step("Get object from {endpoint}") def get_object( wallet: str, cid: str, oid: str, shell: Shell, + endpoint: str = None, bearer: Optional[str] = None, - write_object: str = "", - endpoint: str = "", + write_object: Optional[str] = None, xhdr: Optional[dict] = None, wallet_config: Optional[str] = None, no_progress: bool = True, @@ -37,9 +85,9 @@ def get_object( cid (str): ID of Container where we get the Object from oid (str): Object ID shell: executor for cli command - bearer (optional, str): path to Bearer Token file, appends to `--bearer` key - write_object (optional, str): path to downloaded file, appends to `--file` key - endpoint (optional, str): NeoFS endpoint to send request to, appends to `--rpc-endpoint` key + bearer: path to Bearer Token file, appends to `--bearer` key + write_object: path to downloaded file, appends to `--file` 
key + endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key wallet_config(optional, str): path to the wallet config no_progress(optional, bool): do not show progress bar xhdr (optional, dict): Request X-Headers in form of Key=Value @@ -52,12 +100,9 @@ def get_object( write_object = str(uuid.uuid4()) file_path = os.path.join(ASSETS_DIR, write_object) - if not endpoint: - endpoint = random.sample(NEOFS_NETMAP, 1)[0] - cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) cli.object.get( - rpc_endpoint=endpoint or NEOFS_ENDPOINT, + rpc_endpoint=endpoint, wallet=wallet, cid=cid, oid=oid, @@ -71,15 +116,15 @@ def get_object( return file_path -@allure.step("Get Range Hash") +@allure.step("Get Range Hash from {endpoint}") def get_range_hash( wallet: str, cid: str, oid: str, range_cut: str, shell: Shell, + endpoint: str, bearer: Optional[str] = None, - endpoint: Optional[str] = None, wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, session: Optional[str] = None, @@ -102,10 +147,9 @@ def get_range_hash( Returns: None """ - cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) result = cli.object.hash( - rpc_endpoint=endpoint or NEOFS_ENDPOINT, + rpc_endpoint=endpoint, wallet=wallet, cid=cid, oid=oid, @@ -119,16 +163,69 @@ def get_range_hash( return result.stdout.split(":")[1].strip() -@allure.step("Put object") +@allure.step("Put object to random node") +def put_object_to_random_node( + wallet: str, + path: str, + cid: str, + shell: Shell, + cluster: Cluster, + bearer: Optional[str] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + wallet_config: Optional[str] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, +): + """ + PUT of given file to a random storage node. 
+ + Args: + wallet: wallet on whose behalf PUT is done + path: path to file to be PUT + cid: ID of Container where we get the Object from + shell: executor for cli command + cluster: cluster under test + bearer: path to Bearer Token file, appends to `--bearer` key + attributes: User attributes in form of Key1=Value1,Key2=Value2 + cluster: cluster under test + wallet_config: path to the wallet config + no_progress: do not show progress bar + expire_at: Last epoch in the life of the object + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + Returns: + ID of uploaded Object + """ + + endpoint = cluster.get_random_storage_rpc_endpoint() + return put_object( + wallet, + path, + cid, + shell, + endpoint, + bearer, + attributes, + xhdr, + wallet_config, + expire_at, + no_progress, + session, + ) + + +@allure.step("Put object at {endpoint} in container {cid}") def put_object( wallet: str, path: str, cid: str, shell: Shell, + endpoint: str, bearer: Optional[str] = None, attributes: Optional[dict] = None, xhdr: Optional[dict] = None, - endpoint: Optional[str] = None, wallet_config: Optional[str] = None, expire_at: Optional[int] = None, no_progress: bool = True, @@ -138,25 +235,21 @@ def put_object( PUT of given file. 
Args: - wallet (str): wallet on whose behalf PUT is done - path (str): path to file to be PUT - cid (str): ID of Container where we get the Object from + wallet: wallet on whose behalf PUT is done + path: path to file to be PUT + cid: ID of Container where we get the Object from shell: executor for cli command - bearer (optional, str): path to Bearer Token file, appends to `--bearer` key - attributes (optional, str): User attributes in form of Key1=Value1,Key2=Value2 - endpoint(optional, str): NeoFS endpoint to send request to - wallet_config(optional, str): path to the wallet config - no_progress(optional, bool): do not show progress bar - expire_at (optional, int): Last epoch in the life of the object - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token + bearer: path to Bearer Token file, appends to `--bearer` key + attributes: User attributes in form of Key1=Value1,Key2=Value2 + endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key + wallet_config: path to the wallet config + no_progress: do not show progress bar + expire_at: Last epoch in the life of the object + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token Returns: (str): ID of uploaded Object """ - if not endpoint: - endpoint = random.sample(NEOFS_NETMAP, 1)[0] - if not endpoint: - logger.info(f"---DEB:\n{NEOFS_NETMAP}") cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) result = cli.object.put( @@ -178,13 +271,13 @@ def put_object( return oid.strip() -@allure.step("Delete object") +@allure.step("Delete object {cid}/{oid} from {endpoint}") def delete_object( wallet: str, cid: str, oid: str, shell: Shell, - endpoint: Optional[str] = None, + endpoint: str = None, bearer: str = "", wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, @@ -194,21 +287,22 @@ def delete_object( DELETE an Object. 
Args: - wallet (str): wallet on whose behalf DELETE is done - cid (str): ID of Container where we get the Object from - oid (str): ID of Object we are going to delete + wallet: wallet on whose behalf DELETE is done + cid: ID of Container where we get the Object from + oid: ID of Object we are going to delete shell: executor for cli command - bearer (optional, str): path to Bearer Token file, appends to `--bearer` key - endpoint (optional, str): NeoFS endpoint to send request to, appends to `--rpc-endpoint` key - wallet_config(optional, str): path to the wallet config - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token + bearer: path to Bearer Token file, appends to `--bearer` key + endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key + wallet_config: path to the wallet config + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token Returns: (str): Tombstone ID """ + cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) result = cli.object.delete( - rpc_endpoint=endpoint or NEOFS_ENDPOINT, + rpc_endpoint=endpoint, wallet=wallet, cid=cid, oid=oid, @@ -229,7 +323,7 @@ def get_range( oid: str, range_cut: str, shell: Shell, - endpoint: Optional[str] = None, + endpoint: str = None, wallet_config: Optional[str] = None, bearer: str = "", xhdr: Optional[dict] = None, @@ -239,16 +333,16 @@ def get_range( GETRANGE an Object. 
Args: - wallet (str): wallet on whose behalf GETRANGE is done - cid (str): ID of Container where we get the Object from - oid (str): ID of Object we are going to request - range_cut (str): range to take data from in the form offset:length + wallet: wallet on whose behalf GETRANGE is done + cid: ID of Container where we get the Object from + oid: ID of Object we are going to request + range_cut: range to take data from in the form offset:length shell: executor for cli command - endpoint (optional, str): NeoFS endpoint to send request to, appends to `--rpc-endpoint` key - bearer (optional, str): path to Bearer Token file, appends to `--bearer` key - wallet_config(optional, str): path to the wallet config - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token + endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key + bearer: path to Bearer Token file, appends to `--bearer` key + wallet_config: path to the wallet config + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token Returns: (str, bytes) - path to the file with range content and content of this file as bytes """ @@ -256,7 +350,7 @@ def get_range( cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) cli.object.range( - rpc_endpoint=endpoint or NEOFS_ENDPOINT, + rpc_endpoint=endpoint, wallet=wallet, cid=cid, oid=oid, @@ -278,9 +372,9 @@ def lock_object( cid: str, oid: str, shell: Shell, + endpoint: str, lifetime: Optional[int] = None, expire_at: Optional[int] = None, - endpoint: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, session: Optional[str] = None, @@ -298,7 +392,8 @@ def lock_object( oid: Object ID. lifetime: Lock lifetime. expire_at: Lock expiration epoch. - endpoint: Remote node address. 
+ shell: executor for cli command + endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. @@ -310,7 +405,7 @@ def lock_object( cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) result = cli.object.lock( - rpc_endpoint=endpoint or NEOFS_ENDPOINT, + rpc_endpoint=endpoint, lifetime=lifetime, expire_at=expire_at, address=address, @@ -334,8 +429,8 @@ def search_object( wallet: str, cid: str, shell: Shell, + endpoint: str, bearer: str = "", - endpoint: Optional[str] = None, filters: Optional[dict] = None, expected_objects_list: Optional[list] = None, wallet_config: Optional[str] = None, @@ -348,26 +443,26 @@ def search_object( SEARCH an Object. Args: - wallet (str): wallet on whose behalf SEARCH is done - cid (str): ID of Container where we get the Object from + wallet: wallet on whose behalf SEARCH is done + cid: ID of Container where we get the Object from shell: executor for cli command - bearer (optional, str): path to Bearer Token file, appends to `--bearer` key - endpoint (optional, str): NeoFS endpoint to send request to, appends to `--rpc-endpoint` key - filters (optional, dict): key=value pairs to filter Objects - expected_objects_list (optional, list): a list of ObjectIDs to compare found Objects with - wallet_config(optional, str): path to the wallet config - xhdr (optional, dict): Request X-Headers in form of Key=Value - session (optional, dict): path to a JSON-encoded container session token + bearer: path to Bearer Token file, appends to `--bearer` key + endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key + filters: key=value pairs to filter Objects + expected_objects_list: a list of ObjectIDs to compare found Objects with + wallet_config: path to the wallet config + xhdr: Request X-Headers in form of Key=Value + session: 
path to a JSON-encoded container session token phy: Search physically stored objects. root: Search for user objects. Returns: - (list): list of found ObjectIDs + list of found ObjectIDs """ cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) result = cli.object.search( - rpc_endpoint=endpoint or NEOFS_ENDPOINT, + rpc_endpoint=endpoint, wallet=wallet, cid=cid, bearer=bearer, @@ -401,8 +496,8 @@ def search_object( def get_netmap_netinfo( wallet: str, shell: Shell, + endpoint: str, wallet_config: Optional[str] = None, - endpoint: Optional[str] = None, address: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -411,7 +506,7 @@ def get_netmap_netinfo( Get netmap netinfo output from node Args: - wallet (str): wallet on whose behalf SEARCH is done + wallet (str): wallet on whose behalf request is done shell: executor for cli command endpoint (optional, str): NeoFS endpoint to send request to, appends to `--rpc-endpoint` key address: Address of wallet account @@ -426,7 +521,7 @@ def get_netmap_netinfo( cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) output = cli.netmap.netinfo( wallet=wallet, - rpc_endpoint=endpoint or NEOFS_ENDPOINT, + rpc_endpoint=endpoint, address=address, ttl=ttl, xhdr=xhdr, @@ -452,9 +547,9 @@ def head_object( cid: str, oid: str, shell: Shell, + endpoint: str, bearer: str = "", xhdr: Optional[dict] = None, - endpoint: Optional[str] = None, json_output: bool = True, is_raw: bool = False, is_direct: bool = False, @@ -489,7 +584,7 @@ def head_object( cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) result = cli.object.head( - rpc_endpoint=endpoint or NEOFS_ENDPOINT, + rpc_endpoint=endpoint, wallet=wallet, cid=cid, oid=oid, diff --git a/robot/resources/lib/python_keywords/node_management.py b/robot/resources/lib/python_keywords/node_management.py index 8c032a1..828735f 100644 --- a/robot/resources/lib/python_keywords/node_management.py +++ 
b/robot/resources/lib/python_keywords/node_management.py @@ -6,17 +6,10 @@ from dataclasses import dataclass from typing import Optional import allure -from common import ( - MORPH_BLOCK_TIME, - NEOFS_CLI_EXEC, - NEOFS_NETMAP_DICT, - STORAGE_WALLET_CONFIG, - STORAGE_WALLET_PASS, -) -from data_formatters import get_wallet_public_key +from cluster import Cluster, StorageNode +from common import MORPH_BLOCK_TIME, NEOFS_CLI_EXEC from epoch import tick_epoch from neofs_testlib.cli import NeofsCli -from neofs_testlib.hosting import Hosting from neofs_testlib.shell import Shell from utility import parse_time @@ -39,183 +32,189 @@ class HealthStatus: return HealthStatus(network, health) -@allure.step("Stop storage nodes") -def stop_nodes(hosting: Hosting, number: int, nodes: list[str]) -> list[str]: +@allure.step("Stop random storage nodes") +def stop_random_storage_nodes(number: int, nodes: list[StorageNode]) -> list[StorageNode]: """ Shuts down the given number of randomly selected storage nodes. Args: - number (int): the number of nodes to shut down - nodes (list): the list of nodes for possible shut down + number: the number of storage nodes to stop + nodes: the list of storage nodes to stop Returns: - (list): the list of nodes that were shut down + the list of nodes that were stopped """ nodes_to_stop = random.sample(nodes, number) for node in nodes_to_stop: - host = hosting.get_host_by_service(node) - host.stop_service(node) + node.stop_service() return nodes_to_stop -@allure.step("Start storage nodes") -def start_nodes(hosting: Hosting, nodes: list[str]) -> None: +@allure.step("Start storage node") +def start_storage_nodes(nodes: list[StorageNode]) -> None: """ The function starts specified storage nodes. 
Args: - nodes (list): the list of nodes to start + nodes: the list of nodes to start """ for node in nodes: - host = hosting.get_host_by_service(node) - host.start_service(node) + node.start_service() -@allure.step("Get Locode") -def get_locode() -> str: - endpoint_values = random.choice(list(NEOFS_NETMAP_DICT.values())) - locode = endpoint_values["UN-LOCODE"] - logger.info(f"Random locode chosen: {locode}") +@allure.step("Get Locode from random storage node") +def get_locode_from_random_node(cluster: Cluster) -> str: + node = random.choice(cluster.storage_nodes) + locode = node.get_un_locode() + logger.info(f"Chosen '{locode}' locode from node {node}") return locode -@allure.step("Healthcheck for node {node_name}") -def node_healthcheck(hosting: Hosting, node_name: str) -> HealthStatus: +@allure.step("Healthcheck for storage node {node}") +def storage_node_healthcheck(node: StorageNode) -> HealthStatus: """ - The function returns node's health status. + The function returns storage node's health status. Args: - node_name str: node name for which health status should be retrieved. + node: storage node for which health status should be retrieved. Returns: health status as HealthStatus object. """ command = "control healthcheck" - output = _run_control_command_with_retries(hosting, node_name, command) + output = _run_control_command_with_retries(node, command) return HealthStatus.from_stdout(output) -@allure.step("Set status for node {node_name}") -def node_set_status(hosting: Hosting, node_name: str, status: str, retries: int = 0) -> None: +@allure.step("Set status for {node}") +def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None: """ The function sets particular status for given node. Args: - node_name: node name for which status should be set. + node: node for which status should be set. status: online or offline. 
retries (optional, int): number of retry attempts if it didn't work from the first time """ command = f"control set-status --status {status}" - _run_control_command_with_retries(hosting, node_name, command, retries) + _run_control_command_with_retries(node, command, retries) @allure.step("Get netmap snapshot") -def get_netmap_snapshot(node_name: str, shell: Shell) -> str: +def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: """ The function returns string representation of netmap snapshot. Args: - node_name str: node name from which netmap snapshot should be requested. + node: node from which netmap snapshot should be requested. Returns: string representation of netmap """ - node_info = NEOFS_NETMAP_DICT[node_name] - cli = NeofsCli(shell, NEOFS_CLI_EXEC, config_file=STORAGE_WALLET_CONFIG) + + storage_wallet_config = node.get_wallet_config_path() + storage_wallet_path = node.get_wallet_path() + + cli = NeofsCli(shell, NEOFS_CLI_EXEC, config_file=storage_wallet_config) return cli.netmap.snapshot( - rpc_endpoint=node_info["rpc"], - wallet=node_info["wallet_path"], + rpc_endpoint=node.get_rpc_endpoint(), + wallet=storage_wallet_path, ).stdout -@allure.step("Get shard list for node {node_name}") -def node_shard_list(hosting: Hosting, node_name: str) -> list[str]: +@allure.step("Get shard list for {node}") +def node_shard_list(node: StorageNode) -> list[str]: """ - The function returns list of shards for specified node. + The function returns list of shards for specified storage node. Args: - node_name str: node name for which shards should be returned. + node: node for which shards should be returned. Returns: list of shards. 
""" command = "control shards list" - output = _run_control_command_with_retries(hosting, node_name, command) + output = _run_control_command_with_retries(node, command) return re.findall(r"Shard (.*):", output) -@allure.step("Shard set for node {node_name}") -def node_shard_set_mode(hosting: Hosting, node_name: str, shard: str, mode: str) -> str: +@allure.step("Shard set for {node}") +def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str: """ The function sets mode for specified shard. Args: - node_name str: node name on which shard mode should be set. + node: node on which shard mode should be set. """ command = f"control shards set-mode --id {shard} --mode {mode}" - return _run_control_command_with_retries(hosting, node_name, command) + return _run_control_command_with_retries(node, command) -@allure.step("Drop object from node {node_name}") -def drop_object(hosting: Hosting, node_name: str, cid: str, oid: str) -> str: +@allure.step("Drop object from {node}") +def drop_object(node: StorageNode, cid: str, oid: str) -> str: """ The function drops object from specified node. Args: - node_name str: node name from which object should be dropped. + node_id str: node from which object should be dropped. 
""" command = f"control drop-objects -o {cid}/{oid}" - return _run_control_command_with_retries(hosting, node_name, command) + return _run_control_command_with_retries(node, command) -@allure.step("Delete data of node {node_name}") -def delete_node_data(hosting: Hosting, node_name: str) -> None: - host = hosting.get_host_by_service(node_name) - host.stop_service(node_name) - host.delete_storage_node_data(node_name) +@allure.step("Delete data from host for node {node}") +def delete_node_data(node: StorageNode) -> None: + node.stop_service() + node.host.delete_storage_node_data(node.name) time.sleep(parse_time(MORPH_BLOCK_TIME)) @allure.step("Exclude node {node_to_exclude} from network map") def exclude_node_from_network_map( - hosting: Hosting, node_to_exclude: str, alive_node: str, shell: Shell + node_to_exclude: StorageNode, + alive_node: StorageNode, + shell: Shell, + cluster: Cluster, ) -> None: - node_wallet_path = NEOFS_NETMAP_DICT[node_to_exclude]["wallet_path"] - node_netmap_key = get_wallet_public_key(node_wallet_path, STORAGE_WALLET_PASS) + node_netmap_key = node_to_exclude.get_wallet_public_key() - node_set_status(hosting, node_to_exclude, status="offline") + storage_node_set_status(node_to_exclude, status="offline") time.sleep(parse_time(MORPH_BLOCK_TIME)) - tick_epoch(shell=shell) + tick_epoch(shell, cluster) - snapshot = get_netmap_snapshot(node_name=alive_node, shell=shell) + snapshot = get_netmap_snapshot(node=alive_node, shell=shell) assert ( node_netmap_key not in snapshot - ), f"Expected node with key {node_netmap_key} not in network map" + ), f"Expected node with key {node_netmap_key} to be absent in network map" @allure.step("Include node {node_to_include} into network map") def include_node_to_network_map( - hosting: Hosting, node_to_include: str, alive_node: str, shell: Shell + node_to_include: StorageNode, + alive_node: StorageNode, + shell: Shell, + cluster: Cluster, ) -> None: - node_set_status(hosting, node_to_include, status="online") + 
storage_node_set_status(node_to_include, status="online") # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch. - # First sleep can be ommited afer https://github.com/nspcc-dev/neofs-node/issues/1790 complete. + # First sleep can be omitted after https://github.com/nspcc-dev/neofs-node/issues/1790 complete. time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) - tick_epoch(shell=shell) + tick_epoch(shell, cluster) time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) check_node_in_map(node_to_include, shell, alive_node) -@allure.step("Check node {node_name} in network map") -def check_node_in_map(node_name: str, shell: Shell, alive_node: Optional[str] = None) -> None: - alive_node = alive_node or node_name - node_wallet_path = NEOFS_NETMAP_DICT[node_name]["wallet_path"] - node_netmap_key = get_wallet_public_key(node_wallet_path, STORAGE_WALLET_PASS) +@allure.step("Check node {node} in network map") +def check_node_in_map( + node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None +) -> None: + alive_node = alive_node or node - logger.info(f"Node {node_name} netmap key: {node_netmap_key}") + node_netmap_key = node.get_wallet_public_key() + logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") - snapshot = get_netmap_snapshot(node_name=alive_node, shell=shell) - assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} in network map" + snapshot = get_netmap_snapshot(alive_node, shell) + assert ( + node_netmap_key in snapshot + ), f"Expected node with key {node_netmap_key} to be in network map" -def _run_control_command_with_retries( - hosting: Hosting, node_name: str, command: str, retries: int = 0 -) -> str: +def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str: for attempt in range(1 + retries): # original attempt + specified retries try: - return _run_control_command(hosting, node_name, command) + return _run_control_command(node, command) 
except AssertionError as err: if attempt < retries: logger.warning(f"Command {command} failed with error {err} and will be retried") @@ -223,16 +222,16 @@ def _run_control_command_with_retries( raise AssertionError(f"Command {command} failed with error {err}") from err -def _run_control_command(hosting: Hosting, service_name: str, command: str) -> None: - host = hosting.get_host_by_service(service_name) +def _run_control_command(node: StorageNode, command: str) -> None: + host = node.host - service_config = host.get_service_config(service_name) + service_config = host.get_service_config(node.name) wallet_path = service_config.attributes["wallet_path"] wallet_password = service_config.attributes["wallet_password"] control_endpoint = service_config.attributes["control_endpoint"] shell = host.get_shell() - wallet_config_path = f"/tmp/{service_name}-config.yaml" + wallet_config_path = f"/tmp/{node.name}-config.yaml" wallet_config = f'password: "{wallet_password}"' shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") diff --git a/robot/resources/lib/python_keywords/object_access.py b/robot/resources/lib/python_keywords/object_access.py index d2de3a4..79e8ba8 100644 --- a/robot/resources/lib/python_keywords/object_access.py +++ b/robot/resources/lib/python_keywords/object_access.py @@ -1,16 +1,17 @@ from typing import Optional import allure +from cluster import Cluster from file_helper import get_file_hash from grpc_responses import OBJECT_ACCESS_DENIED, error_matches_status from neofs_testlib.shell import Shell from python_keywords.neofs_verbs import ( delete_object, - get_object, + get_object_from_random_node, get_range, get_range_hash, head_object, - put_object, + put_object_to_random_node, search_object, ) @@ -23,13 +24,14 @@ def can_get_object( oid: str, file_name: str, shell: Shell, + cluster: Cluster, bearer: Optional[str] = None, wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, ) -> bool: with allure.step("Try get object from container"): 
try: - got_file_path = get_object( + got_file_path = get_object_from_random_node( wallet, cid, oid, @@ -37,6 +39,7 @@ def can_get_object( wallet_config=wallet_config, xhdr=xhdr, shell=shell, + cluster=cluster, ) except OPERATION_ERROR_TYPE as err: assert error_matches_status( @@ -52,6 +55,7 @@ def can_put_object( cid: str, file_name: str, shell: Shell, + cluster: Cluster, bearer: Optional[str] = None, wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, @@ -59,7 +63,7 @@ def can_put_object( ) -> bool: with allure.step("Try put object to container"): try: - put_object( + put_object_to_random_node( wallet, file_name, cid, @@ -68,6 +72,7 @@ def can_put_object( xhdr=xhdr, attributes=attributes, shell=shell, + cluster=cluster, ) except OPERATION_ERROR_TYPE as err: assert error_matches_status( @@ -82,6 +87,7 @@ def can_delete_object( cid: str, oid: str, shell: Shell, + endpoint: str, bearer: Optional[str] = None, wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, @@ -89,7 +95,14 @@ def can_delete_object( with allure.step("Try delete object from container"): try: delete_object( - wallet, cid, oid, bearer=bearer, wallet_config=wallet_config, xhdr=xhdr, shell=shell + wallet, + cid, + oid, + bearer=bearer, + wallet_config=wallet_config, + xhdr=xhdr, + shell=shell, + endpoint=endpoint, ) except OPERATION_ERROR_TYPE as err: assert error_matches_status( @@ -104,6 +117,7 @@ def can_get_head_object( cid: str, oid: str, shell: Shell, + endpoint: str, bearer: Optional[str] = None, wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, @@ -118,6 +132,7 @@ def can_get_head_object( wallet_config=wallet_config, xhdr=xhdr, shell=shell, + endpoint=endpoint, ) except OPERATION_ERROR_TYPE as err: assert error_matches_status( @@ -132,6 +147,7 @@ def can_get_range_of_object( cid: str, oid: str, shell: Shell, + endpoint: str, bearer: Optional[str] = None, wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, @@ -147,6 +163,7 @@ def 
can_get_range_of_object( wallet_config=wallet_config, xhdr=xhdr, shell=shell, + endpoint=endpoint, ) except OPERATION_ERROR_TYPE as err: assert error_matches_status( @@ -161,6 +178,7 @@ def can_get_range_hash_of_object( cid: str, oid: str, shell: Shell, + endpoint: str, bearer: Optional[str] = None, wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, @@ -176,6 +194,7 @@ def can_get_range_hash_of_object( wallet_config=wallet_config, xhdr=xhdr, shell=shell, + endpoint=endpoint, ) except OPERATION_ERROR_TYPE as err: assert error_matches_status( @@ -189,6 +208,7 @@ def can_search_object( wallet: str, cid: str, shell: Shell, + endpoint: str, oid: Optional[str] = None, bearer: Optional[str] = None, wallet_config: Optional[str] = None, @@ -197,7 +217,13 @@ def can_search_object( with allure.step("Try search object in container"): try: oids = search_object( - wallet, cid, bearer=bearer, wallet_config=wallet_config, xhdr=xhdr, shell=shell + wallet, + cid, + bearer=bearer, + wallet_config=wallet_config, + xhdr=xhdr, + shell=shell, + endpoint=endpoint, ) except OPERATION_ERROR_TYPE as err: assert error_matches_status( diff --git a/robot/resources/lib/python_keywords/payment_neogo.py b/robot/resources/lib/python_keywords/payment_neogo.py index ee3e71e..06f1fd0 100644 --- a/robot/resources/lib/python_keywords/payment_neogo.py +++ b/robot/resources/lib/python_keywords/payment_neogo.py @@ -6,19 +6,9 @@ import time from typing import Optional import allure -from common import ( - GAS_HASH, - MAINNET_BLOCK_TIME, - MAINNET_SINGLE_ADDR, - MAINNET_WALLET_PASS, - MAINNET_WALLET_PATH, - MORPH_ENDPOINT, - NEO_MAINNET_ENDPOINT, - NEOFS_CONTRACT, - NEOGO_EXECUTABLE, -) +from cluster import MainChain, MorphChain +from common import GAS_HASH, MAINNET_BLOCK_TIME, NEOFS_CONTRACT, NEOGO_EXECUTABLE from neo3 import wallet as neo3_wallet -from neofs_testlib.blockchain import RPCClient from neofs_testlib.cli import NeoGo from neofs_testlib.shell import Shell from 
neofs_testlib.utils.converters import contract_hash_to_address @@ -32,30 +22,26 @@ TX_PERSIST_TIMEOUT = 15 # seconds ASSET_POWER_MAINCHAIN = 10**8 ASSET_POWER_SIDECHAIN = 10**12 -morph_rpc_client = RPCClient(MORPH_ENDPOINT) -mainnet_rpc_client = RPCClient(NEO_MAINNET_ENDPOINT) + +def get_nns_contract_hash(morph_chain: MorphChain) -> str: + return morph_chain.rpc_client.get_contract_state(1)["hash"] -def get_nns_contract_hash() -> str: - rpc_client = RPCClient(MORPH_ENDPOINT) - return rpc_client.get_contract_state(1)["hash"] - - -def get_contract_hash(resolve_name: str, shell: Shell) -> str: - nns_contract_hash = get_nns_contract_hash() +def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) -> str: + nns_contract_hash = get_nns_contract_hash(morph_chain) neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE) out = neogo.contract.testinvokefunction( scripthash=nns_contract_hash, method="resolve", arguments=f"string:{resolve_name} int:16", - rpc_endpoint=MORPH_ENDPOINT, + rpc_endpoint=morph_chain.get_endpoint(), ) stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"] return bytes.decode(base64.b64decode(stack_data[0]["value"])) @allure.step("Withdraw Mainnet Gas") -def withdraw_mainnet_gas(shell: Shell, wlt: str, amount: int): +def withdraw_mainnet_gas(shell: Shell, main_chain: MainChain, wlt: str, amount: int): address = get_last_address_from_wallet(wlt, EMPTY_PASSWORD) scripthash = neo3_wallet.Account.address_to_script_hash(address) @@ -63,7 +49,7 @@ def withdraw_mainnet_gas(shell: Shell, wlt: str, amount: int): out = neogo.contract.invokefunction( wallet=wlt, address=address, - rpc_endpoint=NEO_MAINNET_ENDPOINT, + rpc_endpoint=main_chain.get_endpoint(), scripthash=NEOFS_CONTRACT, method="withdraw", arguments=f"{scripthash} int:{amount}", @@ -79,7 +65,7 @@ def withdraw_mainnet_gas(shell: Shell, wlt: str, amount: int): raise AssertionError(f"TX {tx} hasn't been processed") -def transaction_accepted(tx_id: str): 
+def transaction_accepted(main_chain: MainChain, tx_id: str): """ This function returns True in case of accepted TX. Args: @@ -91,7 +77,7 @@ def transaction_accepted(tx_id: str): try: for _ in range(0, TX_PERSIST_TIMEOUT): time.sleep(1) - resp = mainnet_rpc_client.get_transaction_height(tx_id) + resp = main_chain.rpc_client.get_transaction_height(tx_id) if resp is not None: logger.info(f"TX is accepted in block: {resp}") return True @@ -102,7 +88,7 @@ def transaction_accepted(tx_id: str): @allure.step("Get NeoFS Balance") -def get_balance(shell: Shell, wallet_path: str, wallet_password: str = ""): +def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""): """ This function returns NeoFS balance for given wallet. """ @@ -111,8 +97,8 @@ def get_balance(shell: Shell, wallet_path: str, wallet_password: str = ""): acc = wallet.accounts[-1] payload = [{"type": "Hash160", "value": str(acc.script_hash)}] try: - resp = morph_rpc_client.invoke_function( - get_contract_hash("balance.neofs", shell=shell), "balanceOf", payload + resp = morph_chain.rpc_client.invoke_function( + get_contract_hash(morph_chain, "balance.neofs", shell=shell), "balanceOf", payload ) logger.info(f"Got response \n{resp}") value = int(resp["stack"][0]["value"]) @@ -126,9 +112,10 @@ def get_balance(shell: Shell, wallet_path: str, wallet_password: str = ""): def transfer_gas( shell: Shell, amount: int, - wallet_from_path: str = MAINNET_WALLET_PATH, - wallet_from_password: str = MAINNET_WALLET_PASS, - address_from: str = MAINNET_SINGLE_ADDR, + main_chain: MainChain, + wallet_from_path: Optional[str] = None, + wallet_from_password: Optional[str] = None, + address_from: Optional[str] = None, address_to: Optional[str] = None, wallet_to_path: Optional[str] = None, wallet_to_password: Optional[str] = None, @@ -148,11 +135,20 @@ def transfer_gas( address_to: The address of the wallet to transfer assets to. amount: Amount of gas to transfer. 
""" + wallet_from_path = wallet_from_path or main_chain.get_wallet_path() + wallet_from_password = ( + wallet_from_password + if wallet_from_password is not None + else main_chain.get_wallet_password() + ) + address_from = address_from or get_last_address_from_wallet( + wallet_from_path, wallet_from_password + ) address_to = address_to or get_last_address_from_wallet(wallet_to_path, wallet_to_password) neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) out = neogo.nep17.transfer( - rpc_endpoint=NEO_MAINNET_ENDPOINT, + rpc_endpoint=main_chain.get_endpoint(), wallet=wallet_from_path, wallet_password=wallet_from_password, amount=amount, @@ -164,13 +160,19 @@ def transfer_gas( txid = out.stdout.strip().split("\n")[-1] if len(txid) != 64: raise Exception("Got no TXID after run the command") - if not transaction_accepted(txid): + if not transaction_accepted(main_chain, txid): raise AssertionError(f"TX {txid} hasn't been processed") time.sleep(parse_time(MAINNET_BLOCK_TIME)) @allure.step("NeoFS Deposit") -def deposit_gas(shell: Shell, amount: int, wallet_from_path: str, wallet_from_password: str): +def deposit_gas( + shell: Shell, + main_chain: MainChain, + amount: int, + wallet_from_path: str, + wallet_from_password: str, +): """ Transferring GAS from given wallet to NeoFS contract address. 
""" @@ -182,6 +184,7 @@ def deposit_gas(shell: Shell, amount: int, wallet_from_path: str, wallet_from_pa ) transfer_gas( shell=shell, + main_chain=main_chain, amount=amount, wallet_from_path=wallet_from_path, wallet_from_password=wallet_from_password, @@ -191,8 +194,8 @@ def deposit_gas(shell: Shell, amount: int, wallet_from_path: str, wallet_from_pa @allure.step("Get Mainnet Balance") -def get_mainnet_balance(address: str): - resp = mainnet_rpc_client.get_nep17_balances(address=address) +def get_mainnet_balance(main_chain: MainChain, address: str): + resp = main_chain.rpc_client.get_nep17_balances(address=address) logger.info(f"Got getnep17balances response: {resp}") for balance in resp["balance"]: if balance["assethash"] == GAS_HASH: @@ -201,8 +204,8 @@ def get_mainnet_balance(address: str): @allure.step("Get Sidechain Balance") -def get_sidechain_balance(address: str): - resp = morph_rpc_client.get_nep17_balances(address=address) +def get_sidechain_balance(morph_chain: MorphChain, address: str): + resp = morph_chain.rpc_client.get_nep17_balances(address=address) logger.info(f"Got getnep17balances response: {resp}") for balance in resp["balance"]: if balance["assethash"] == GAS_HASH: diff --git a/robot/resources/lib/python_keywords/storage_group.py b/robot/resources/lib/python_keywords/storage_group.py index ecb284b..8a00bb7 100644 --- a/robot/resources/lib/python_keywords/storage_group.py +++ b/robot/resources/lib/python_keywords/storage_group.py @@ -6,7 +6,8 @@ import logging from typing import Optional import allure -from common import COMPLEX_OBJ_SIZE, NEOFS_CLI_EXEC, NEOFS_ENDPOINT, SIMPLE_OBJ_SIZE, WALLET_CONFIG +from cluster import Cluster +from common import COMPLEX_OBJ_SIZE, NEOFS_CLI_EXEC, SIMPLE_OBJ_SIZE, WALLET_CONFIG from complex_object_actions import get_link_object from neofs_testlib.cli import NeofsCli from neofs_testlib.shell import Shell @@ -18,6 +19,7 @@ logger = logging.getLogger("NeoLogger") @allure.step("Put Storagegroup") def 
put_storagegroup( shell: Shell, + endpoint: str, wallet: str, cid: str, objects: list, @@ -47,7 +49,7 @@ def put_storagegroup( lifetime=lifetime, members=objects, bearer=bearer, - rpc_endpoint=NEOFS_ENDPOINT, + rpc_endpoint=endpoint, ) gid = result.stdout.split("\n")[1].split(": ")[1] return gid @@ -56,6 +58,7 @@ def put_storagegroup( @allure.step("List Storagegroup") def list_storagegroup( shell: Shell, + endpoint: str, wallet: str, cid: str, bearer: Optional[str] = None, @@ -78,7 +81,7 @@ def list_storagegroup( wallet=wallet, cid=cid, bearer=bearer, - rpc_endpoint=NEOFS_ENDPOINT, + rpc_endpoint=endpoint, ) # throwing off the first string of output found_objects = result.stdout.split("\n")[1:] @@ -88,6 +91,7 @@ def list_storagegroup( @allure.step("Get Storagegroup") def get_storagegroup( shell: Shell, + endpoint: str, wallet: str, cid: str, gid: str, @@ -112,7 +116,7 @@ def get_storagegroup( cid=cid, bearer=bearer, id=gid, - rpc_endpoint=NEOFS_ENDPOINT, + rpc_endpoint=endpoint, ) # TODO: temporary solution for parsing output. 
Needs to be replaced with @@ -136,6 +140,7 @@ def get_storagegroup( @allure.step("Delete Storagegroup") def delete_storagegroup( shell: Shell, + endpoint: str, wallet: str, cid: str, gid: str, @@ -160,7 +165,7 @@ def delete_storagegroup( cid=cid, bearer=bearer, id=gid, - rpc_endpoint=NEOFS_ENDPOINT, + rpc_endpoint=endpoint, ) tombstone_id = result.stdout.strip().split("\n")[1].split(": ")[1] return tombstone_id @@ -169,6 +174,7 @@ def delete_storagegroup( @allure.step("Verify list operation over Storagegroup") def verify_list_storage_group( shell: Shell, + endpoint: str, wallet: str, cid: str, gid: str, @@ -176,7 +182,12 @@ def verify_list_storage_group( wallet_config: str = WALLET_CONFIG, ): storage_groups = list_storagegroup( - shell=shell, wallet=wallet, cid=cid, bearer=bearer, wallet_config=wallet_config + shell=shell, + endpoint=endpoint, + wallet=wallet, + cid=cid, + bearer=bearer, + wallet_config=wallet_config, ) assert gid in storage_groups @@ -184,6 +195,7 @@ def verify_list_storage_group( @allure.step("Verify get operation over Storagegroup") def verify_get_storage_group( shell: Shell, + cluster: Cluster, wallet: str, cid: str, gid: str, @@ -193,16 +205,24 @@ def verify_get_storage_group( wallet_config: str = WALLET_CONFIG, ): obj_parts = [] + endpoint = cluster.default_rpc_endpoint if object_size == COMPLEX_OBJ_SIZE: for obj in obj_list: link_oid = get_link_object( - wallet, cid, obj, shell=shell, bearer=bearer, wallet_config=wallet_config + wallet, + cid, + obj, + shell=shell, + nodes=cluster.storage_nodes, + bearer=bearer, + wallet_config=wallet_config, ) obj_head = head_object( wallet=wallet, cid=cid, oid=link_oid, shell=shell, + endpoint=endpoint, is_raw=True, bearer=bearer, wallet_config=wallet_config, @@ -212,6 +232,7 @@ def verify_get_storage_group( obj_num = len(obj_list) storagegroup_data = get_storagegroup( shell=shell, + endpoint=endpoint, wallet=wallet, cid=cid, gid=gid, diff --git a/robot/resources/lib/python_keywords/storage_policy.py 
b/robot/resources/lib/python_keywords/storage_policy.py index b48e989..4e0492b 100644 --- a/robot/resources/lib/python_keywords/storage_policy.py +++ b/robot/resources/lib/python_keywords/storage_policy.py @@ -6,12 +6,12 @@ """ import logging -from typing import List, Optional +from typing import List import allure import complex_object_actions import neofs_verbs -from common import NEOFS_NETMAP +from cluster import StorageNode from grpc_responses import OBJECT_NOT_FOUND, error_matches_status from neofs_testlib.shell import Shell @@ -19,7 +19,9 @@ logger = logging.getLogger("NeoLogger") @allure.step("Get Object Copies") -def get_object_copies(complexity: str, wallet: str, cid: str, oid: str, shell: Shell) -> int: +def get_object_copies( + complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> int: """ The function performs requests to all nodes of the container and finds out if they store a copy of the object. The procedure is @@ -37,14 +39,16 @@ def get_object_copies(complexity: str, wallet: str, cid: str, oid: str, shell: S (int): the number of object copies in the container """ return ( - get_simple_object_copies(wallet, cid, oid, shell) + get_simple_object_copies(wallet, cid, oid, shell, nodes) if complexity == "Simple" - else get_complex_object_copies(wallet, cid, oid, shell) + else get_complex_object_copies(wallet, cid, oid, shell, nodes) ) @allure.step("Get Simple Object Copies") -def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell) -> int: +def get_simple_object_copies( + wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> int: """ To figure out the number of a simple object copies, only direct HEAD requests should be made to the every node of the container. 
@@ -55,14 +59,15 @@ def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell) -> i cid (str): ID of the container oid (str): ID of the Object shell: executor for cli command + nodes: nodes to search on Returns: (int): the number of object copies in the container """ copies = 0 - for node in NEOFS_NETMAP: + for node in nodes: try: response = neofs_verbs.head_object( - wallet, cid, oid, shell=shell, endpoint=node, is_direct=True + wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True ) if response: logger.info(f"Found object {oid} on node {node}") @@ -74,7 +79,9 @@ def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell) -> i @allure.step("Get Complex Object Copies") -def get_complex_object_copies(wallet: str, cid: str, oid: str, shell: Shell) -> int: +def get_complex_object_copies( + wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> int: """ To figure out the number of a complex object copies, we firstly need to retrieve its Last object. We consider that the number of @@ -90,37 +97,40 @@ def get_complex_object_copies(wallet: str, cid: str, oid: str, shell: Shell) -> Returns: (int): the number of object copies in the container """ - last_oid = complex_object_actions.get_last_object(wallet, cid, oid, shell) + last_oid = complex_object_actions.get_last_object(wallet, cid, oid, shell, nodes) assert last_oid, f"No Last Object for {cid}/{oid} found among all Storage Nodes" - return get_simple_object_copies(wallet, cid, last_oid, shell) + return get_simple_object_copies(wallet, cid, last_oid, shell, nodes) @allure.step("Get Nodes With Object") def get_nodes_with_object( - wallet: str, cid: str, oid: str, shell: Shell, skip_nodes: Optional[list[str]] = None -) -> list[str]: + cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> list[StorageNode]: """ The function returns list of nodes which store the given object. 
Args: - wallet (str): the path to the wallet on whose behalf - we request the nodes cid (str): ID of the container which store the object oid (str): object ID shell: executor for cli command - skip_nodes (list): list of nodes that should be excluded from check + nodes: nodes to find on Returns: (list): nodes which store the object """ - nodes_to_search = NEOFS_NETMAP - if skip_nodes: - nodes_to_search = [node for node in NEOFS_NETMAP if node not in skip_nodes] nodes_list = [] - for node in nodes_to_search: + for node in nodes: + wallet = node.get_wallet_path() + wallet_config = node.get_wallet_config_path() try: res = neofs_verbs.head_object( - wallet, cid, oid, shell=shell, endpoint=node, is_direct=True + wallet, + cid, + oid, + shell=shell, + endpoint=node.get_rpc_endpoint(), + is_direct=True, + wallet_config=wallet_config, ) if res is not None: logger.info(f"Found object {oid} on node {node}") @@ -132,7 +142,9 @@ def get_nodes_with_object( @allure.step("Get Nodes Without Object") -def get_nodes_without_object(wallet: str, cid: str, oid: str, shell: Shell) -> List[str]: +def get_nodes_without_object( + wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> list[StorageNode]: """ The function returns list of nodes which do not store the given object. 
@@ -146,10 +158,10 @@ def get_nodes_without_object(wallet: str, cid: str, oid: str, shell: Shell) -> L (list): nodes which do not store the object """ nodes_list = [] - for node in NEOFS_NETMAP: + for node in nodes: try: res = neofs_verbs.head_object( - wallet, cid, oid, shell=shell, endpoint=node, is_direct=True + wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True ) if res is None: nodes_list.append(node) diff --git a/robot/resources/lib/python_keywords/tombstone.py b/robot/resources/lib/python_keywords/tombstone.py index 00b87c3..22617e9 100644 --- a/robot/resources/lib/python_keywords/tombstone.py +++ b/robot/resources/lib/python_keywords/tombstone.py @@ -10,8 +10,10 @@ logger = logging.getLogger("NeoLogger") @allure.step("Verify Head Tombstone") -def verify_head_tombstone(wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell): - header = head_object(wallet_path, cid, oid_ts, shell=shell)["header"] +def verify_head_tombstone( + wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str +): + header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"] logger.info(f"Header Session OIDs is {s_oid}") diff --git a/robot/variables/common.py b/robot/variables/common.py index 8c9295b..a4a8b41 100644 --- a/robot/variables/common.py +++ b/robot/variables/common.py @@ -17,13 +17,6 @@ NEOFS_CONTRACT_CACHE_TIMEOUT = os.getenv("NEOFS_CONTRACT_CACHE_TIMEOUT", "30s") # of 1min plus 15 seconds for GC pass itself) STORAGE_GC_TIME = os.getenv("STORAGE_GC_TIME", "75s") -# TODO: we should use hosting instead of these endpoints -NEOFS_ENDPOINT = os.getenv("NEOFS_ENDPOINT", "s01.neofs.devenv:8080") -NEO_MAINNET_ENDPOINT = os.getenv("NEO_MAINNET_ENDPOINT", "http://main-chain.neofs.devenv:30333") -MORPH_ENDPOINT = os.getenv("MORPH_ENDPOINT", "http://morph-chain.neofs.devenv:30333") -HTTP_GATE = os.getenv("HTTP_GATE", 
"http://http.neofs.devenv") -S3_GATE = os.getenv("S3_GATE", "https://s3.neofs.devenv:8080") - GAS_HASH = os.getenv("GAS_HASH", "0xd2a4cff31913016155e38e474a2c06d08be276cf") NEOFS_CONTRACT = os.getenv("NEOFS_IR_CONTRACTS_NEOFS") @@ -43,81 +36,12 @@ BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 10) BACKGROUND_OBJ_SIZE = os.getenv("BACKGROUND_OBJ_SIZE", 1024) BACKGROUND_LOAD_MAX_TIME = os.getenv("BACKGROUND_LOAD_MAX_TIME", 600) -# Configuration of storage nodes -# TODO: we should use hosting instead of all these variables -STORAGE_RPC_ENDPOINT_1 = os.getenv("STORAGE_RPC_ENDPOINT_1", "s01.neofs.devenv:8080") -STORAGE_RPC_ENDPOINT_2 = os.getenv("STORAGE_RPC_ENDPOINT_2", "s02.neofs.devenv:8080") -STORAGE_RPC_ENDPOINT_3 = os.getenv("STORAGE_RPC_ENDPOINT_3", "s03.neofs.devenv:8080") -STORAGE_RPC_ENDPOINT_4 = os.getenv("STORAGE_RPC_ENDPOINT_4", "s04.neofs.devenv:8080") - -STORAGE_CONTROL_ENDPOINT_1 = os.getenv("STORAGE_CONTROL_ENDPOINT_1", "s01.neofs.devenv:8081") -STORAGE_CONTROL_ENDPOINT_2 = os.getenv("STORAGE_CONTROL_ENDPOINT_2", "s02.neofs.devenv:8081") -STORAGE_CONTROL_ENDPOINT_3 = os.getenv("STORAGE_CONTROL_ENDPOINT_3", "s03.neofs.devenv:8081") -STORAGE_CONTROL_ENDPOINT_4 = os.getenv("STORAGE_CONTROL_ENDPOINT_4", "s04.neofs.devenv:8081") - -STORAGE_WALLET_PATH_1 = os.getenv( - "STORAGE_WALLET_PATH_1", os.path.join(DEVENV_PATH, "services", "storage", "wallet01.json") -) -STORAGE_WALLET_PATH_2 = os.getenv( - "STORAGE_WALLET_PATH_2", os.path.join(DEVENV_PATH, "services", "storage", "wallet02.json") -) -STORAGE_WALLET_PATH_3 = os.getenv( - "STORAGE_WALLET_PATH_3", os.path.join(DEVENV_PATH, "services", "storage", "wallet03.json") -) -STORAGE_WALLET_PATH_4 = os.getenv( - "STORAGE_WALLET_PATH_4", os.path.join(DEVENV_PATH, "services", "storage", "wallet04.json") -) -STORAGE_WALLET_PATH = STORAGE_WALLET_PATH_1 -STORAGE_WALLET_PASS = os.getenv("STORAGE_WALLET_PASS", "") - -NEOFS_NETMAP_DICT = { - "s01": { - "rpc": STORAGE_RPC_ENDPOINT_1, - "control": 
STORAGE_CONTROL_ENDPOINT_1, - "wallet_path": STORAGE_WALLET_PATH_1, - "UN-LOCODE": "RU MOW", - }, - "s02": { - "rpc": STORAGE_RPC_ENDPOINT_2, - "control": STORAGE_CONTROL_ENDPOINT_2, - "wallet_path": STORAGE_WALLET_PATH_2, - "UN-LOCODE": "RU LED", - }, - "s03": { - "rpc": STORAGE_RPC_ENDPOINT_3, - "control": STORAGE_CONTROL_ENDPOINT_3, - "wallet_path": STORAGE_WALLET_PATH_3, - "UN-LOCODE": "SE STO", - }, - "s04": { - "rpc": STORAGE_RPC_ENDPOINT_4, - "control": STORAGE_CONTROL_ENDPOINT_4, - "wallet_path": STORAGE_WALLET_PATH_4, - "UN-LOCODE": "FI HEL", - }, -} -NEOFS_NETMAP = [node["rpc"] for node in NEOFS_NETMAP_DICT.values()] - # Paths to CLI executables on machine that runs tests NEOGO_EXECUTABLE = os.getenv("NEOGO_EXECUTABLE", "neo-go") NEOFS_CLI_EXEC = os.getenv("NEOFS_CLI_EXEC", "neofs-cli") NEOFS_AUTHMATE_EXEC = os.getenv("NEOFS_AUTHMATE_EXEC", "neofs-authmate") NEOFS_ADM_EXEC = os.getenv("NEOFS_ADM_EXEC", "neofs-adm") -MAINNET_WALLET_PATH = os.getenv( - "MAINNET_WALLET_PATH", os.path.join(DEVENV_PATH, "services", "chain", "node-wallet.json") -) -MAINNET_SINGLE_ADDR = os.getenv("MAINNET_SINGLE_ADDR", "NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP") -MAINNET_WALLET_PASS = os.getenv("MAINNET_WALLET_PASS", "one") - -IR_WALLET_PATH = os.getenv("IR_WALLET_PATH", os.path.join(DEVENV_PATH, "services", "ir", "az.json")) -IR_WALLET_PASS = os.getenv("IR_WALLET_PASS", "one") - -S3_GATE_WALLET_PATH = os.getenv( - "S3_GATE_WALLET_PATH", os.path.join(DEVENV_PATH, "services", "s3_gate", "wallet.json") -) -S3_GATE_WALLET_PASS = os.getenv("S3_GATE_WALLET_PASS", "s3") - # Config for neofs-adm utility. 
Optional if tests are running against devenv NEOFS_ADM_CONFIG_PATH = os.getenv("NEOFS_ADM_CONFIG_PATH") @@ -134,15 +58,3 @@ S3_GATE_SERVICE_NAME_REGEX = r"s3-gate\d\d" WALLET_CONFIG = os.path.join(os.getcwd(), "wallet_config.yml") with open(WALLET_CONFIG, "w") as file: yaml.dump({"password": WALLET_PASS}, file) - -STORAGE_WALLET_CONFIG = os.path.join(os.getcwd(), "storage_wallet_config.yml") -with open(STORAGE_WALLET_CONFIG, "w") as file: - yaml.dump({"password": STORAGE_WALLET_PASS}, file) - -MAINNET_WALLET_CONFIG = os.path.join(os.getcwd(), "mainnet_wallet_config.yml") -with open(MAINNET_WALLET_CONFIG, "w") as file: - yaml.dump({"password": MAINNET_WALLET_PASS}, file) - -IR_WALLET_CONFIG = os.path.join(os.getcwd(), "ir_wallet_config.yml") -with open(IR_WALLET_CONFIG, "w") as file: - yaml.dump({"password": IR_WALLET_PASS}, file)