Refactor for cluster usage

Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
This commit is contained in:
Andrey Berezin 2022-12-06 01:31:45 +03:00 committed by abereziny
parent d9e881001e
commit bd05aae585
46 changed files with 3859 additions and 2703 deletions

View file

@ -6,7 +6,7 @@ from typing import Optional
import allure import allure
from cli_helpers import _cmd_run from cli_helpers import _cmd_run
from common import ASSETS_DIR, S3_GATE from common import ASSETS_DIR
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
REGULAR_TIMEOUT = 90 REGULAR_TIMEOUT = 90
@ -17,6 +17,10 @@ class AwsCliClient:
# Flags that we use for all S3 commands: disable SSL verification (as we use self-signed # Flags that we use for all S3 commands: disable SSL verification (as we use self-signed
# certificate in devenv) and disable automatic pagination in CLI output # certificate in devenv) and disable automatic pagination in CLI output
common_flags = "--no-verify-ssl --no-paginate" common_flags = "--no-verify-ssl --no-paginate"
s3gate_endpoint: str
def __init__(self, s3gate_endpoint) -> None:
self.s3gate_endpoint = s3gate_endpoint
def create_bucket( def create_bucket(
self, self,
@ -36,7 +40,7 @@ class AwsCliClient:
object_lock = " --no-object-lock-enabled-for-bucket" object_lock = " --no-object-lock-enabled-for-bucket"
cmd = ( cmd = (
f"aws {self.common_flags} s3api create-bucket --bucket {Bucket} " f"aws {self.common_flags} s3api create-bucket --bucket {Bucket} "
f"{object_lock} --endpoint {S3_GATE}" f"{object_lock} --endpoint {self.s3gate_endpoint}"
) )
if ACL: if ACL:
cmd += f" --acl {ACL}" cmd += f" --acl {ACL}"
@ -51,14 +55,14 @@ class AwsCliClient:
_cmd_run(cmd, REGULAR_TIMEOUT) _cmd_run(cmd, REGULAR_TIMEOUT)
def list_buckets(self) -> dict: def list_buckets(self) -> dict:
cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {S3_GATE}" cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint}"
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
def get_bucket_acl(self, Bucket: str) -> dict: def get_bucket_acl(self, Bucket: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api get-bucket-acl --bucket {Bucket} " f"aws {self.common_flags} s3api get-bucket-acl --bucket {Bucket} "
f"--endpoint {S3_GATE}" f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd, REGULAR_TIMEOUT) output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output) return self._to_json(output)
@ -66,7 +70,7 @@ class AwsCliClient:
def get_bucket_versioning(self, Bucket: str) -> dict: def get_bucket_versioning(self, Bucket: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api get-bucket-versioning --bucket {Bucket} " f"aws {self.common_flags} s3api get-bucket-versioning --bucket {Bucket} "
f"--endpoint {S3_GATE}" f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd, REGULAR_TIMEOUT) output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output) return self._to_json(output)
@ -74,7 +78,7 @@ class AwsCliClient:
def get_bucket_location(self, Bucket: str) -> dict: def get_bucket_location(self, Bucket: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api get-bucket-location --bucket {Bucket} " f"aws {self.common_flags} s3api get-bucket-location --bucket {Bucket} "
f"--endpoint {S3_GATE}" f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd, REGULAR_TIMEOUT) output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output) return self._to_json(output)
@ -83,14 +87,15 @@ class AwsCliClient:
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-bucket-versioning --bucket {Bucket} " f"aws {self.common_flags} s3api put-bucket-versioning --bucket {Bucket} "
f'--versioning-configuration Status={VersioningConfiguration.get("Status")} ' f'--versioning-configuration Status={VersioningConfiguration.get("Status")} '
f"--endpoint {S3_GATE}" f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
def list_objects(self, Bucket: str) -> dict: def list_objects(self, Bucket: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api list-objects --bucket {Bucket} " f"--endpoint {S3_GATE}" f"aws {self.common_flags} s3api list-objects --bucket {Bucket} "
f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -98,7 +103,7 @@ class AwsCliClient:
def list_objects_v2(self, Bucket: str) -> dict: def list_objects_v2(self, Bucket: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api list-objects-v2 --bucket {Bucket} " f"aws {self.common_flags} s3api list-objects-v2 --bucket {Bucket} "
f"--endpoint {S3_GATE}" f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -106,7 +111,7 @@ class AwsCliClient:
def list_object_versions(self, Bucket: str) -> dict: def list_object_versions(self, Bucket: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api list-object-versions --bucket {Bucket} " f"aws {self.common_flags} s3api list-object-versions --bucket {Bucket} "
f"--endpoint {S3_GATE}" f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -124,7 +129,7 @@ class AwsCliClient:
) -> dict: ) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api copy-object --copy-source {CopySource} " f"aws {self.common_flags} s3api copy-object --copy-source {CopySource} "
f"--bucket {Bucket} --key {Key} --endpoint {S3_GATE}" f"--bucket {Bucket} --key {Key} --endpoint {self.s3gate_endpoint}"
) )
if ACL: if ACL:
cmd += f" --acl {ACL}" cmd += f" --acl {ACL}"
@ -142,7 +147,7 @@ class AwsCliClient:
return self._to_json(output) return self._to_json(output)
def head_bucket(self, Bucket: str) -> dict: def head_bucket(self, Bucket: str) -> dict:
cmd = f"aws {self.common_flags} s3api head-bucket --bucket {Bucket} --endpoint {S3_GATE}" cmd = f"aws {self.common_flags} s3api head-bucket --bucket {Bucket} --endpoint {self.s3gate_endpoint}"
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -162,7 +167,7 @@ class AwsCliClient:
) -> dict: ) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-object --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api put-object --bucket {Bucket} --key {Key} "
f"--body {Body} --endpoint {S3_GATE}" f"--body {Body} --endpoint {self.s3gate_endpoint}"
) )
if Metadata: if Metadata:
cmd += f" --metadata" cmd += f" --metadata"
@ -189,7 +194,7 @@ class AwsCliClient:
version = f" --version-id {VersionId}" if VersionId else "" version = f" --version-id {VersionId}" if VersionId else ""
cmd = ( cmd = (
f"aws {self.common_flags} s3api head-object --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api head-object --bucket {Bucket} --key {Key} "
f"{version} --endpoint {S3_GATE}" f"{version} --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -205,7 +210,7 @@ class AwsCliClient:
version = f" --version-id {VersionId}" if VersionId else "" version = f" --version-id {VersionId}" if VersionId else ""
cmd = ( cmd = (
f"aws {self.common_flags} s3api get-object --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api get-object --bucket {Bucket} --key {Key} "
f"{version} {file_path} --endpoint {S3_GATE}" f"{version} {file_path} --endpoint {self.s3gate_endpoint}"
) )
if Range: if Range:
cmd += f" --range {Range}" cmd += f" --range {Range}"
@ -216,7 +221,7 @@ class AwsCliClient:
version = f" --version-id {VersionId}" if VersionId else "" version = f" --version-id {VersionId}" if VersionId else ""
cmd = ( cmd = (
f"aws {self.common_flags} s3api get-object-acl --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api get-object-acl --bucket {Bucket} --key {Key} "
f"{version} --endpoint {S3_GATE}" f"{version} --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd, REGULAR_TIMEOUT) output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output) return self._to_json(output)
@ -231,7 +236,7 @@ class AwsCliClient:
) -> dict: ) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-object-acl --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api put-object-acl --bucket {Bucket} --key {Key} "
f" --endpoint {S3_GATE}" f" --endpoint {self.s3gate_endpoint}"
) )
if ACL: if ACL:
cmd += f" --acl {ACL}" cmd += f" --acl {ACL}"
@ -251,7 +256,7 @@ class AwsCliClient:
) -> dict: ) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-bucket-acl --bucket {Bucket} " f"aws {self.common_flags} s3api put-bucket-acl --bucket {Bucket} "
f" --endpoint {S3_GATE}" f" --endpoint {self.s3gate_endpoint}"
) )
if ACL: if ACL:
cmd += f" --acl {ACL}" cmd += f" --acl {ACL}"
@ -270,7 +275,7 @@ class AwsCliClient:
cmd = ( cmd = (
f"aws {self.common_flags} s3api delete-objects --bucket {Bucket} " f"aws {self.common_flags} s3api delete-objects --bucket {Bucket} "
f"--delete file://{file_path} --endpoint {S3_GATE}" f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd, LONG_TIMEOUT) output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output) return self._to_json(output)
@ -279,7 +284,7 @@ class AwsCliClient:
version = f" --version-id {VersionId}" if VersionId else "" version = f" --version-id {VersionId}" if VersionId else ""
cmd = ( cmd = (
f"aws {self.common_flags} s3api delete-object --bucket {Bucket} " f"aws {self.common_flags} s3api delete-object --bucket {Bucket} "
f"--key {Key} {version} --endpoint {S3_GATE}" f"--key {Key} {version} --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd, LONG_TIMEOUT) output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output) return self._to_json(output)
@ -300,20 +305,20 @@ class AwsCliClient:
cmd = ( cmd = (
f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} " f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} "
f"--key {key} {version} {parts} {part_number} --object-attributes {attrs} " f"--key {key} {version} {parts} {part_number} --object-attributes {attrs} "
f"--endpoint {S3_GATE}" f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
def delete_bucket(self, Bucket: str) -> dict: def delete_bucket(self, Bucket: str) -> dict:
cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {Bucket} --endpoint {S3_GATE}" cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {Bucket} --endpoint {self.s3gate_endpoint}"
output = _cmd_run(cmd, LONG_TIMEOUT) output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output) return self._to_json(output)
def get_bucket_tagging(self, Bucket: str) -> dict: def get_bucket_tagging(self, Bucket: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api get-bucket-tagging --bucket {Bucket} " f"aws {self.common_flags} s3api get-bucket-tagging --bucket {Bucket} "
f"--endpoint {S3_GATE}" f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -321,7 +326,7 @@ class AwsCliClient:
def get_bucket_policy(self, Bucket: str) -> dict: def get_bucket_policy(self, Bucket: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api get-bucket-policy --bucket {Bucket} " f"aws {self.common_flags} s3api get-bucket-policy --bucket {Bucket} "
f"--endpoint {S3_GATE}" f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -329,7 +334,7 @@ class AwsCliClient:
def put_bucket_policy(self, Bucket: str, Policy: dict) -> dict: def put_bucket_policy(self, Bucket: str, Policy: dict) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-bucket-policy --bucket {Bucket} " f"aws {self.common_flags} s3api put-bucket-policy --bucket {Bucket} "
f"--policy {json.dumps(Policy)} --endpoint {S3_GATE}" f"--policy {json.dumps(Policy)} --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -337,7 +342,7 @@ class AwsCliClient:
def get_bucket_cors(self, Bucket: str) -> dict: def get_bucket_cors(self, Bucket: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api get-bucket-cors --bucket {Bucket} " f"aws {self.common_flags} s3api get-bucket-cors --bucket {Bucket} "
f"--endpoint {S3_GATE}" f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -345,7 +350,7 @@ class AwsCliClient:
def put_bucket_cors(self, Bucket: str, CORSConfiguration: dict) -> dict: def put_bucket_cors(self, Bucket: str, CORSConfiguration: dict) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-bucket-cors --bucket {Bucket} " f"aws {self.common_flags} s3api put-bucket-cors --bucket {Bucket} "
f"--cors-configuration '{json.dumps(CORSConfiguration)}' --endpoint {S3_GATE}" f"--cors-configuration '{json.dumps(CORSConfiguration)}' --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -353,7 +358,7 @@ class AwsCliClient:
def delete_bucket_cors(self, Bucket: str) -> dict: def delete_bucket_cors(self, Bucket: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api delete-bucket-cors --bucket {Bucket} " f"aws {self.common_flags} s3api delete-bucket-cors --bucket {Bucket} "
f"--endpoint {S3_GATE}" f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -361,7 +366,7 @@ class AwsCliClient:
def put_bucket_tagging(self, Bucket: str, Tagging: dict) -> dict: def put_bucket_tagging(self, Bucket: str, Tagging: dict) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-bucket-tagging --bucket {Bucket} " f"aws {self.common_flags} s3api put-bucket-tagging --bucket {Bucket} "
f"--tagging '{json.dumps(Tagging)}' --endpoint {S3_GATE}" f"--tagging '{json.dumps(Tagging)}' --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -369,7 +374,7 @@ class AwsCliClient:
def delete_bucket_tagging(self, Bucket: str) -> dict: def delete_bucket_tagging(self, Bucket: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {Bucket} " f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {Bucket} "
f"--endpoint {S3_GATE}" f"--endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -380,7 +385,7 @@ class AwsCliClient:
version = f" --version-id {VersionId}" if VersionId else "" version = f" --version-id {VersionId}" if VersionId else ""
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-object-retention --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api put-object-retention --bucket {Bucket} --key {Key} "
f"{version} --retention '{json.dumps(Retention, indent=4, sort_keys=True, default=str)}' --endpoint {S3_GATE}" f"{version} --retention '{json.dumps(Retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -391,7 +396,7 @@ class AwsCliClient:
version = f" --version-id {VersionId}" if VersionId else "" version = f" --version-id {VersionId}" if VersionId else ""
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-object-legal-hold --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api put-object-legal-hold --bucket {Bucket} --key {Key} "
f"{version} --legal-hold '{json.dumps(LegalHold)}' --endpoint {S3_GATE}" f"{version} --legal-hold '{json.dumps(LegalHold)}' --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -407,7 +412,7 @@ class AwsCliClient:
version = f" --version-id {VersionId}" if VersionId else "" version = f" --version-id {VersionId}" if VersionId else ""
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-object-retention --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api put-object-retention --bucket {Bucket} --key {Key} "
f"{version} --retention '{json.dumps(Retention, indent=4, sort_keys=True, default=str)}' --endpoint {S3_GATE}" f"{version} --retention '{json.dumps(Retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint}"
) )
if not BypassGovernanceRetention is None: if not BypassGovernanceRetention is None:
cmd += " --bypass-governance-retention" cmd += " --bypass-governance-retention"
@ -420,7 +425,7 @@ class AwsCliClient:
version = f" --version-id {VersionId}" if VersionId else "" version = f" --version-id {VersionId}" if VersionId else ""
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-object-legal-hold --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api put-object-legal-hold --bucket {Bucket} --key {Key} "
f"{version} --legal-hold '{json.dumps(LegalHold)}' --endpoint {S3_GATE}" f"{version} --legal-hold '{json.dumps(LegalHold)}' --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -428,7 +433,7 @@ class AwsCliClient:
def put_object_tagging(self, Bucket: str, Key: str, Tagging: dict) -> dict: def put_object_tagging(self, Bucket: str, Key: str, Tagging: dict) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-object-tagging --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api put-object-tagging --bucket {Bucket} --key {Key} "
f"--tagging '{json.dumps(Tagging)}' --endpoint {S3_GATE}" f"--tagging '{json.dumps(Tagging)}' --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -437,7 +442,7 @@ class AwsCliClient:
version = f" --version-id {VersionId}" if VersionId else "" version = f" --version-id {VersionId}" if VersionId else ""
cmd = ( cmd = (
f"aws {self.common_flags} s3api get-object-tagging --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api get-object-tagging --bucket {Bucket} --key {Key} "
f"{version} --endpoint {S3_GATE}" f"{version} --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd, REGULAR_TIMEOUT) output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output) return self._to_json(output)
@ -445,7 +450,7 @@ class AwsCliClient:
def delete_object_tagging(self, Bucket: str, Key: str) -> dict: def delete_object_tagging(self, Bucket: str, Key: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api delete-object-tagging --bucket {Bucket} " f"aws {self.common_flags} s3api delete-object-tagging --bucket {Bucket} "
f"--key {Key} --endpoint {S3_GATE}" f"--key {Key} --endpoint {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -460,7 +465,7 @@ class AwsCliClient:
) -> dict: ) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket_name} " f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket_name} "
f"--endpoint-url {S3_GATE}" f"--endpoint-url {self.s3gate_endpoint}"
) )
if Metadata: if Metadata:
cmd += f" --metadata" cmd += f" --metadata"
@ -481,7 +486,7 @@ class AwsCliClient:
) -> dict: ) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket_name} " f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket_name} "
f"--endpoint-url {S3_GATE} --recursive" f"--endpoint-url {self.s3gate_endpoint} --recursive"
) )
if Metadata: if Metadata:
cmd += f" --metadata" cmd += f" --metadata"
@ -495,7 +500,7 @@ class AwsCliClient:
def create_multipart_upload(self, Bucket: str, Key: str) -> dict: def create_multipart_upload(self, Bucket: str, Key: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api create-multipart-upload --bucket {Bucket} " f"aws {self.common_flags} s3api create-multipart-upload --bucket {Bucket} "
f"--key {Key} --endpoint-url {S3_GATE}" f"--key {Key} --endpoint-url {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -503,7 +508,7 @@ class AwsCliClient:
def list_multipart_uploads(self, Bucket: str) -> dict: def list_multipart_uploads(self, Bucket: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api list-multipart-uploads --bucket {Bucket} " f"aws {self.common_flags} s3api list-multipart-uploads --bucket {Bucket} "
f"--endpoint-url {S3_GATE}" f"--endpoint-url {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -511,7 +516,7 @@ class AwsCliClient:
def abort_multipart_upload(self, Bucket: str, Key: str, UploadId: str) -> dict: def abort_multipart_upload(self, Bucket: str, Key: str, UploadId: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api abort-multipart-upload --bucket {Bucket} " f"aws {self.common_flags} s3api abort-multipart-upload --bucket {Bucket} "
f"--key {Key} --upload-id {UploadId} --endpoint-url {S3_GATE}" f"--key {Key} --upload-id {UploadId} --endpoint-url {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -520,7 +525,7 @@ class AwsCliClient:
cmd = ( cmd = (
f"aws {self.common_flags} s3api upload-part --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api upload-part --bucket {Bucket} --key {Key} "
f"--upload-id {UploadId} --part-number {PartNumber} --body {Body} " f"--upload-id {UploadId} --part-number {PartNumber} --body {Body} "
f"--endpoint-url {S3_GATE}" f"--endpoint-url {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd, LONG_TIMEOUT) output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output) return self._to_json(output)
@ -531,7 +536,7 @@ class AwsCliClient:
cmd = ( cmd = (
f"aws {self.common_flags} s3api upload-part-copy --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api upload-part-copy --bucket {Bucket} --key {Key} "
f"--upload-id {UploadId} --part-number {PartNumber} --copy-source {CopySource} " f"--upload-id {UploadId} --part-number {PartNumber} --copy-source {CopySource} "
f"--endpoint-url {S3_GATE}" f"--endpoint-url {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd, LONG_TIMEOUT) output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output) return self._to_json(output)
@ -539,7 +544,7 @@ class AwsCliClient:
def list_parts(self, UploadId: str, Bucket: str, Key: str) -> dict: def list_parts(self, UploadId: str, Bucket: str, Key: str) -> dict:
cmd = ( cmd = (
f"aws {self.common_flags} s3api list-parts --bucket {Bucket} --key {Key} " f"aws {self.common_flags} s3api list-parts --bucket {Bucket} --key {Key} "
f"--upload-id {UploadId} --endpoint-url {S3_GATE}" f"--upload-id {UploadId} --endpoint-url {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -555,7 +560,7 @@ class AwsCliClient:
cmd = ( cmd = (
f"aws {self.common_flags} s3api complete-multipart-upload --bucket {Bucket} " f"aws {self.common_flags} s3api complete-multipart-upload --bucket {Bucket} "
f"--key {Key} --upload-id {UploadId} --multipart-upload file://{file_path} " f"--key {Key} --upload-id {UploadId} --multipart-upload file://{file_path} "
f"--endpoint-url {S3_GATE}" f"--endpoint-url {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -563,7 +568,7 @@ class AwsCliClient:
def put_object_lock_configuration(self, Bucket, ObjectLockConfiguration): def put_object_lock_configuration(self, Bucket, ObjectLockConfiguration):
cmd = ( cmd = (
f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {Bucket} " f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {Bucket} "
f"--object-lock-configuration '{json.dumps(ObjectLockConfiguration)}' --endpoint-url {S3_GATE}" f"--object-lock-configuration '{json.dumps(ObjectLockConfiguration)}' --endpoint-url {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)
@ -571,7 +576,7 @@ class AwsCliClient:
def get_object_lock_configuration(self, Bucket): def get_object_lock_configuration(self, Bucket):
cmd = ( cmd = (
f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {Bucket} " f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {Bucket} "
f"--endpoint-url {S3_GATE}" f"--endpoint-url {self.s3gate_endpoint}"
) )
output = _cmd_run(cmd) output = _cmd_run(cmd)
return self._to_json(output) return self._to_json(output)

View file

@ -0,0 +1,324 @@
import random
import re
from dataclasses import dataclass
from typing import Any
import data_formatters
from neofs_testlib.blockchain import RPCClient
from neofs_testlib.hosting import Host, Hosting
from neofs_testlib.hosting.config import ServiceConfig
from test_control import wait_for_success
@dataclass
class NodeBase:
    """Represents a node of some underlying service.

    A node is a single service instance (storage, gate, chain, ...) bound to a
    Host. It may run in a container or directly on the physical machine; for
    testing purposes the distinction does not matter.
    """

    id: str
    name: str
    host: Host

    def __init__(self, id, name, host) -> None:
        self.id = id
        self.name = name
        self.host = host
        # Give subclasses a post-init hook (e.g. to build RPC clients).
        self.construct()

    def construct(self):
        """Subclass hook invoked at the end of __init__; default is a no-op."""
        pass

    def __eq__(self, other):
        # Nodes are identified by service name.
        return self.name == other.name

    def __hash__(self):
        # Bug fix: was id(self.name), which hashes the *object identity* of the
        # string, so two nodes with equal names could land in different hash
        # buckets despite comparing equal — breaking set/dict membership.
        return hash(self.name)

    def __str__(self):
        return self.label

    def __repr__(self) -> str:
        return self.label

    @property
    def label(self) -> str:
        """Human-readable identifier; subclasses typically add an endpoint."""
        return self.name

    @wait_for_success(60, 1)
    def start_service(self):
        """Start the service on its host, retrying for up to 60s."""
        self.host.start_service(self.name)

    @wait_for_success(60, 1)
    def stop_service(self):
        """Stop the service on its host, retrying for up to 60s."""
        self.host.stop_service(self.name)

    def get_wallet_password(self) -> str:
        return self._get_attribute(_ConfigAttributes.WALLET_PASSWORD)

    def get_wallet_path(self) -> str:
        # Prefer the local path when configured; fall back to the remote one.
        return self._get_attribute(
            _ConfigAttributes.LOCAL_WALLET_PATH,
            _ConfigAttributes.WALLET_PATH,
        )

    def get_wallet_config_path(self):
        return self._get_attribute(
            _ConfigAttributes.LOCAL_WALLET_CONFIG,
            _ConfigAttributes.WALLET_CONFIG,
        )

    def get_wallet_public_key(self):
        """Derive the node wallet's public key from its wallet file."""
        storage_wallet_path = self.get_wallet_path()
        storage_wallet_pass = self.get_wallet_password()
        return data_formatters.get_wallet_public_key(storage_wallet_path, storage_wallet_pass)

    def _get_attribute(self, attribute_name: str, default_attribute_name: str = None) -> str:
        """Read one attribute from the service config.

        Fix: return annotation was list[str], but a single attribute value
        (a scalar, presumably a string) is returned.
        """
        config = self.host.get_service_config(self.name)
        if default_attribute_name:
            return config.attributes.get(
                attribute_name, config.attributes.get(default_attribute_name)
            )
        return config.attributes.get(attribute_name)

    def _get_service_config(self) -> ServiceConfig:
        return self.host.get_service_config(self.name)
class InnerRingNode(NodeBase):
    """Inner ring node of the cluster.

    An inner ring node need not map one-to-one onto a physical host: it may be
    a containerized service or run directly on the machine. Either way the
    neofs network treats it as a "node", which is all the tests care about.
    """

    pass
class S3Gate(NodeBase):
    """S3 gateway service within the cluster."""

    def get_endpoint(self) -> str:
        """Return the gate's configured endpoint address."""
        return self._get_attribute(_ConfigAttributes.ENDPOINT)

    @property
    def label(self) -> str:
        endpoint = self.get_endpoint()
        return f"{self.name}: {endpoint}"
class HTTPGate(NodeBase):
    """HTTP gateway service within the cluster."""

    def get_endpoint(self) -> str:
        """Return the gate's configured endpoint address."""
        return self._get_attribute(_ConfigAttributes.ENDPOINT)

    @property
    def label(self) -> str:
        endpoint = self.get_endpoint()
        return f"{self.name}: {endpoint}"
class MorphChain(NodeBase):
    """Side-chain (aka morph-chain) consensus node in the cluster.

    A consensus node is not necessarily a physical host: it may run inside a
    container or directly on the machine. The neofs network still treats it as
    a "node", so the tests don't care how it is hosted.
    """

    rpc_client: RPCClient = None

    def construct(self):
        # Build the RPC client eagerly so it is usable right after __init__.
        self.rpc_client = RPCClient(self.get_endpoint())

    def get_endpoint(self) -> str:
        """Return the consensus node's configured endpoint address."""
        return self._get_attribute(_ConfigAttributes.ENDPOINT)

    @property
    def label(self) -> str:
        endpoint = self.get_endpoint()
        return f"{self.name}: {endpoint}"
class MainChain(NodeBase):
    """Main-chain consensus node in the cluster.

    A consensus node is not necessarily a physical host: it may run inside a
    container or directly on the machine. The neofs network still treats it as
    a "node", so the tests don't care how it is hosted.
    """

    rpc_client: RPCClient = None

    def construct(self):
        # Build the RPC client eagerly so it is usable right after __init__.
        self.rpc_client = RPCClient(self.get_endpoint())

    def get_endpoint(self) -> str:
        """Return the consensus node's configured endpoint address."""
        return self._get_attribute(_ConfigAttributes.ENDPOINT)

    @property
    def label(self) -> str:
        endpoint = self.get_endpoint()
        return f"{self.name}: {endpoint}"
class StorageNode(NodeBase):
    """Storage node in the storage cluster.

    A storage node is not necessarily a physical host: it may run inside a
    container or directly on the machine. The neofs network still treats it as
    a "node", so the tests don't care how it is hosted.
    """

    def get_rpc_endpoint(self) -> str:
        """Return the node's RPC endpoint address."""
        return self._get_attribute(_ConfigAttributes.RPC_ENDPOINT)

    def get_control_endpoint(self) -> str:
        """Return the node's control-service endpoint address."""
        return self._get_attribute(_ConfigAttributes.CONTROL_ENDPOINT)

    def get_un_locode(self):
        """Return the node's UN/LOCODE location attribute."""
        return self._get_attribute(_ConfigAttributes.UN_LOCODE)

    @property
    def label(self) -> str:
        endpoint = self.get_rpc_endpoint()
        return f"{self.name}: {endpoint}"
class Cluster:
"""
This class represents a Cluster object for the whole storage based on provided hosting
"""
default_rpc_endpoint: str
default_s3_gate_endpoint: str
def __init__(self, hosting: Hosting) -> None:
self._hosting = hosting
self.default_rpc_endpoint = self.storage_nodes[0].get_rpc_endpoint()
self.default_s3_gate_endpoint = self.s3gates[0].get_endpoint()
self.default_http_gate_endpoint = self.http_gates[0].get_endpoint()
@property
def hosts(self) -> list[Host]:
"""
Returns list of Hosts
"""
return self._hosting.hosts
@property
def hosting(self) -> Hosting:
return self._hosting
@property
def storage_nodes(self) -> list[StorageNode]:
"""
Returns list of Storage Nodes (not physical nodes)
"""
return self._get_nodes(_ServicesNames.STORAGE)
@property
def s3gates(self) -> list[S3Gate]:
"""
Returns list of S3 gates
"""
return self._get_nodes(_ServicesNames.S3_GATE)
@property
def http_gates(self) -> list[S3Gate]:
"""
Returns list of HTTP gates
"""
return self._get_nodes(_ServicesNames.HTTP_GATE)
@property
def morph_chain_nodes(self) -> list[MorphChain]:
"""
Returns list of morph-chain consensus nodes (not physical nodes)
"""
return self._get_nodes(_ServicesNames.MORPH_CHAIN)
@property
def main_chain_nodes(self) -> list[MainChain]:
"""
Returns list of main-chain consensus nodes (not physical nodes)
"""
return self._get_nodes(_ServicesNames.MAIN_CHAIN)
@property
def ir_nodes(self) -> list[InnerRingNode]:
"""
Returns list of inner-ring nodes (not physical nodes)
"""
return self._get_nodes(_ServicesNames.INNER_RING)
def _get_nodes(self, service_name) -> list[StorageNode]:
configs = self.hosting.find_service_configs(f"{service_name}\d*$")
class_mapping: dict[str, Any] = {
_ServicesNames.STORAGE: StorageNode,
_ServicesNames.INNER_RING: InnerRingNode,
_ServicesNames.MORPH_CHAIN: MorphChain,
_ServicesNames.S3_GATE: S3Gate,
_ServicesNames.HTTP_GATE: HTTPGate,
_ServicesNames.MAIN_CHAIN: MainChain,
}
cls = class_mapping.get(service_name)
return [
cls(
self._get_id(config.name),
config.name,
self.hosting.get_host_by_service(config.name),
)
for config in configs
]
def _get_id(self, node_name) -> str:
pattern = "\d*$"
matches = re.search(pattern, node_name)
if matches:
return int(matches.group())
def get_random_storage_rpc_endpoint(self) -> str:
    """Pick the RPC endpoint of an arbitrary storage node."""
    endpoints = self.get_storage_rpc_endpoints()
    return random.choice(endpoints)
def get_storage_rpc_endpoints(self) -> list[str]:
    """RPC endpoints of every storage node in the cluster."""
    return [storage_node.get_rpc_endpoint() for storage_node in self.storage_nodes]
def get_morph_endpoints(self) -> list[str]:
    """Endpoints of every morph-chain consensus node."""
    return [chain_node.get_endpoint() for chain_node in self.morph_chain_nodes]
class _ServicesNames:
STORAGE = "s"
S3_GATE = "s3-gate"
HTTP_GATE = "http-gate"
MORPH_CHAIN = "morph-chain"
INNER_RING = "ir"
MAIN_CHAIN = "main-chain"
class _ConfigAttributes:
WALLET_PASSWORD = "wallet_password"
WALLET_PATH = "wallet_path"
WALLET_CONFIG = "wallet_config"
LOCAL_WALLET_PATH = "local_wallet_path"
LOCAL_WALLET_CONFIG = "local_config_path"
RPC_ENDPOINT = "rpc_endpoint"
ENDPOINT = "endpoint"
CONTROL_ENDPOINT = "control_endpoint"
UN_LOCODE = "un_locode"

View file

@ -2,9 +2,10 @@ from dataclasses import dataclass
from typing import Optional from typing import Optional
import allure import allure
from cluster import Cluster
from file_helper import generate_file, get_file_hash from file_helper import generate_file, get_file_hash
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
from neofs_verbs import put_object from neofs_verbs import put_object_to_random_node
from storage_object import StorageObjectInfo from storage_object import StorageObjectInfo
from wallet import WalletFile from wallet import WalletFile
@ -16,9 +17,15 @@ class StorageContainerInfo:
class StorageContainer: class StorageContainer:
def __init__(self, storage_container_info: StorageContainerInfo, shell: Shell) -> None: def __init__(
self,
storage_container_info: StorageContainerInfo,
shell: Shell,
cluster: Cluster,
) -> None:
self.shell = shell self.shell = shell
self.storage_container_info = storage_container_info self.storage_container_info = storage_container_info
self.cluster = cluster
def get_id(self) -> str: def get_id(self) -> str:
return self.storage_container_info.id return self.storage_container_info.id
@ -36,12 +43,13 @@ class StorageContainer:
wallet_path = self.get_wallet_path() wallet_path = self.get_wallet_path()
with allure.step(f"Put object with size {size} to container {container_id}"): with allure.step(f"Put object with size {size} to container {container_id}"):
object_id = put_object( object_id = put_object_to_random_node(
wallet=wallet_path, wallet=wallet_path,
path=file_path, path=file_path,
cid=container_id, cid=container_id,
expire_at=expire_at, expire_at=expire_at,
shell=self.shell, shell=self.shell,
cluster=self.cluster,
) )
storage_object = StorageObjectInfo( storage_object = StorageObjectInfo(

View file

@ -3,6 +3,7 @@ import uuid
from dataclasses import dataclass from dataclasses import dataclass
from typing import Optional from typing import Optional
from cluster import Cluster
from common import FREE_STORAGE, WALLET_PASS from common import FREE_STORAGE, WALLET_PASS
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
from neofs_testlib.utils.wallet import get_last_address_from_wallet, init_wallet from neofs_testlib.utils.wallet import get_last_address_from_wallet, init_wallet
@ -25,9 +26,10 @@ class WalletFile:
class WalletFactory: class WalletFactory:
def __init__(self, wallets_dir: str, shell: Shell) -> None: def __init__(self, wallets_dir: str, shell: Shell, cluster: Cluster) -> None:
self.shell = shell self.shell = shell
self.wallets_dir = wallets_dir self.wallets_dir = wallets_dir
self.cluster = cluster
def create_wallet(self, password: str = WALLET_PASS) -> WalletFile: def create_wallet(self, password: str = WALLET_PASS) -> WalletFile:
""" """
@ -40,17 +42,21 @@ class WalletFactory:
""" """
wallet_path = os.path.join(self.wallets_dir, f"{str(uuid.uuid4())}.json") wallet_path = os.path.join(self.wallets_dir, f"{str(uuid.uuid4())}.json")
init_wallet(wallet_path, password) init_wallet(wallet_path, password)
if not FREE_STORAGE: if not FREE_STORAGE:
main_chain = self.cluster.main_chain_nodes[0]
deposit = 30 deposit = 30
transfer_gas( transfer_gas(
shell=self.shell, shell=self.shell,
amount=deposit + 1, amount=deposit + 1,
main_chain=main_chain,
wallet_to_path=wallet_path, wallet_to_path=wallet_path,
wallet_to_password=password, wallet_to_password=password,
) )
deposit_gas( deposit_gas(
shell=self.shell, shell=self.shell,
amount=deposit, amount=deposit,
main_chain=main_chain,
wallet_from_path=wallet_path, wallet_from_path=wallet_path,
wallet_from_password=password, wallet_from_password=password,
) )

View file

@ -0,0 +1,25 @@
import epoch
import pytest
from cluster import Cluster
from neofs_testlib.shell import Shell
# To skip adding every mandatory singleton dependency to EACH test function
class ClusterTestBase:
shell: Shell
cluster: Cluster
@pytest.fixture(scope="session", autouse=True)
def fill_mandatory_dependencies(self, cluster: Cluster, client_shell: Shell):
ClusterTestBase.shell = client_shell
ClusterTestBase.cluster = cluster
yield
def tick_epoch(self):
epoch.tick_epoch(self.shell, self.cluster)
def get_epoch(self):
return epoch.get_epoch(self.shell, self.cluster)
def ensure_fresh_epoch(self):
return epoch.ensure_fresh_epoch(self.shell, self.cluster)

View file

@ -3,24 +3,25 @@ import logging
import os import os
import re import re
import uuid import uuid
from typing import Optional from typing import Any, Optional
import allure import allure
import boto3 import boto3
import pytest import pytest
import s3_gate_bucket
import s3_gate_object
import urllib3 import urllib3
from aws_cli_client import AwsCliClient
from botocore.config import Config from botocore.config import Config
from botocore.exceptions import ClientError from botocore.exceptions import ClientError
from cli_helpers import _cmd_run, _configure_aws_cli, _run_with_passwd from cli_helpers import _cmd_run, _configure_aws_cli, _run_with_passwd
from common import NEOFS_AUTHMATE_EXEC, NEOFS_ENDPOINT, S3_GATE, S3_GATE_WALLET_PATH from cluster import Cluster
from data_formatters import get_wallet_public_key from cluster_test_base import ClusterTestBase
from neofs_testlib.hosting import Hosting from common import NEOFS_AUTHMATE_EXEC
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
from pytest import FixtureRequest
from python_keywords.container import list_containers from python_keywords.container import list_containers
from steps import s3_gate_bucket, s3_gate_object
from steps.aws_cli_client import AwsCliClient
# Disable warnings on self-signed certificate which the # Disable warnings on self-signed certificate which the
# boto library produces on requests to S3-gate in dev-env # boto library produces on requests to S3-gate in dev-env
urllib3.disable_warnings() urllib3.disable_warnings()
@ -34,13 +35,15 @@ MAX_REQUEST_ATTEMPTS = 1
RETRY_MODE = "standard" RETRY_MODE = "standard"
class TestS3GateBase: class TestS3GateBase(ClusterTestBase):
s3_client = None s3_client: Any = None
@pytest.fixture(scope="class", autouse=True) @pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Create S3 client") @allure.title("[Class/Autouse]: Create S3 client")
def s3_client(self, prepare_wallet_and_deposit, client_shell: Shell, request, hosting: Hosting): def s3_client(
wallet = prepare_wallet_and_deposit self, default_wallet, client_shell: Shell, request: FixtureRequest, cluster: Cluster
) -> Any:
wallet = default_wallet
s3_bearer_rules_file = f"{os.getcwd()}/robot/resources/files/s3_bearer_rules.json" s3_bearer_rules_file = f"{os.getcwd()}/robot/resources/files/s3_bearer_rules.json"
policy = None if isinstance(request.param, str) else request.param[1] policy = None if isinstance(request.param, str) else request.param[1]
( (
@ -49,14 +52,20 @@ class TestS3GateBase:
access_key_id, access_key_id,
secret_access_key, secret_access_key,
owner_private_key, owner_private_key,
) = init_s3_credentials(wallet, hosting, s3_bearer_rules_file=s3_bearer_rules_file) ) = init_s3_credentials(wallet, cluster, s3_bearer_rules_file=s3_bearer_rules_file)
containers_list = list_containers(wallet, shell=client_shell) containers_list = list_containers(
wallet, shell=client_shell, endpoint=self.cluster.default_rpc_endpoint
)
assert cid in containers_list, f"Expected cid {cid} in {containers_list}" assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
if "aws cli" in request.param: if "aws cli" in request.param:
client = configure_cli_client(access_key_id, secret_access_key) client = configure_cli_client(
access_key_id, secret_access_key, cluster.default_s3_gate_endpoint
)
else: else:
client = configure_boto3_client(access_key_id, secret_access_key) client = configure_boto3_client(
access_key_id, secret_access_key, cluster.default_s3_gate_endpoint
)
TestS3GateBase.s3_client = client TestS3GateBase.s3_client = client
TestS3GateBase.wallet = wallet TestS3GateBase.wallet = wallet
@ -93,27 +102,22 @@ class TestS3GateBase:
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket) s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket)
def get_wallet_password(hosting: Hosting, s3_service_name: str) -> str:
service_config = hosting.get_service_config(s3_service_name)
return service_config.attributes.get("wallet_password")
@allure.step("Init S3 Credentials") @allure.step("Init S3 Credentials")
def init_s3_credentials( def init_s3_credentials(
wallet_path: str, wallet_path: str,
hosting: Hosting, cluster: Cluster,
s3_bearer_rules_file: Optional[str] = None, s3_bearer_rules_file: Optional[str] = None,
policy: Optional[dict] = None, policy: Optional[dict] = None,
s3_service_name: str = "s3-gate01",
): ):
bucket = str(uuid.uuid4()) bucket = str(uuid.uuid4())
s3_bearer_rules = s3_bearer_rules_file or "robot/resources/files/s3_bearer_rules.json" s3_bearer_rules = s3_bearer_rules_file or "robot/resources/files/s3_bearer_rules.json"
s3_password = get_wallet_password(hosting, s3_service_name)
gate_public_key = get_wallet_public_key(S3_GATE_WALLET_PATH, s3_password) s3gate_node = cluster.s3gates[0]
gate_public_key = s3gate_node.get_wallet_public_key()
cmd = ( cmd = (
f"{NEOFS_AUTHMATE_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} " f"{NEOFS_AUTHMATE_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} "
f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} " f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} "
f"--peer {NEOFS_ENDPOINT} --container-friendly-name {bucket} " f"--peer {cluster.default_rpc_endpoint} --container-friendly-name {bucket} "
f"--bearer-rules {s3_bearer_rules}" f"--bearer-rules {s3_bearer_rules}"
) )
if policy: if policy:
@ -148,9 +152,9 @@ def init_s3_credentials(
@allure.step("Configure S3 client (boto3)") @allure.step("Configure S3 client (boto3)")
def configure_boto3_client(access_key_id: str, secret_access_key: str): def configure_boto3_client(access_key_id: str, secret_access_key: str, s3gate_endpoint: str):
try: try:
session = boto3.session.Session() session = boto3.Session()
config = Config( config = Config(
retries={ retries={
"max_attempts": MAX_REQUEST_ATTEMPTS, "max_attempts": MAX_REQUEST_ATTEMPTS,
@ -163,7 +167,7 @@ def configure_boto3_client(access_key_id: str, secret_access_key: str):
aws_access_key_id=access_key_id, aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key, aws_secret_access_key=secret_access_key,
config=config, config=config,
endpoint_url=S3_GATE, endpoint_url=s3gate_endpoint,
verify=False, verify=False,
) )
return s3_client return s3_client
@ -175,9 +179,9 @@ def configure_boto3_client(access_key_id: str, secret_access_key: str):
@allure.step("Configure S3 client (aws cli)") @allure.step("Configure S3 client (aws cli)")
def configure_cli_client(access_key_id: str, secret_access_key: str): def configure_cli_client(access_key_id: str, secret_access_key: str, s3gate_endpoint: str):
try: try:
client = AwsCliClient() client = AwsCliClient(s3gate_endpoint)
_configure_aws_cli("aws configure", access_key_id, secret_access_key) _configure_aws_cli("aws configure", access_key_id, secret_access_key)
_cmd_run(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}") _cmd_run(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}")
_cmd_run(f"aws configure set retry_mode {RETRY_MODE}") _cmd_run(f"aws configure set retry_mode {RETRY_MODE}")

View file

@ -1,18 +1,16 @@
import logging import logging
import os import os
import uuid import uuid
from enum import Enum
from time import sleep from time import sleep
from typing import Optional from typing import Optional
import allure import allure
import pytest import pytest
import urllib3 import urllib3
from aws_cli_client import AwsCliClient
from botocore.exceptions import ClientError from botocore.exceptions import ClientError
from cli_helpers import log_command_execution from cli_helpers import log_command_execution
from s3_gate_bucket import S3_SYNC_WAIT_TIME
from steps.aws_cli_client import AwsCliClient
from steps.s3_gate_bucket import S3_SYNC_WAIT_TIME
########################################################## ##########################################################
# Disabling warnings on self-signed certificate which the # Disabling warnings on self-signed certificate which the

View file

@ -9,7 +9,7 @@ from typing import Any, Optional
import allure import allure
import json_transformers import json_transformers
from common import ASSETS_DIR, NEOFS_CLI_EXEC, NEOFS_ENDPOINT, WALLET_CONFIG from common import ASSETS_DIR, NEOFS_CLI_EXEC, WALLET_CONFIG
from data_formatters import get_wallet_public_key from data_formatters import get_wallet_public_key
from json_transformers import encode_for_json from json_transformers import encode_for_json
from neofs_testlib.cli import NeofsCli from neofs_testlib.cli import NeofsCli
@ -180,7 +180,7 @@ def generate_object_session_token(
) )
@allure.step("Get signed token for object session") @allure.step("Get signed token for container session")
def get_container_signed_token( def get_container_signed_token(
owner_wallet: WalletFile, owner_wallet: WalletFile,
user_wallet: WalletFile, user_wallet: WalletFile,
@ -190,7 +190,7 @@ def get_container_signed_token(
lifetime: Optional[Lifetime] = None, lifetime: Optional[Lifetime] = None,
) -> str: ) -> str:
""" """
Returns signed token file path for static object session Returns signed token file path for static container session
""" """
session_token_file = generate_container_session_token( session_token_file = generate_container_session_token(
owner_wallet=owner_wallet, owner_wallet=owner_wallet,
@ -235,7 +235,7 @@ def create_session_token(
owner: str, owner: str,
wallet_path: str, wallet_path: str,
wallet_password: str, wallet_password: str,
rpc_endpoint: str = NEOFS_ENDPOINT, rpc_endpoint: str,
) -> str: ) -> str:
""" """
Create session token for an object. Create session token for an object.

View file

@ -3,6 +3,7 @@ from time import sleep
import allure import allure
import pytest import pytest
from cluster import Cluster
from epoch import tick_epoch from epoch import tick_epoch
from grpc_responses import OBJECT_ALREADY_REMOVED from grpc_responses import OBJECT_ALREADY_REMOVED
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
@ -16,7 +17,9 @@ CLEANUP_TIMEOUT = 10
@allure.step("Delete Objects") @allure.step("Delete Objects")
def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell) -> None: def delete_objects(
storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster
) -> None:
""" """
Deletes given storage objects. Deletes given storage objects.
@ -28,7 +31,11 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell) -> No
with allure.step("Delete objects"): with allure.step("Delete objects"):
for storage_object in storage_objects: for storage_object in storage_objects:
storage_object.tombstone = delete_object( storage_object.tombstone = delete_object(
storage_object.wallet_file_path, storage_object.cid, storage_object.oid, shell storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell=shell,
endpoint=cluster.default_rpc_endpoint,
) )
verify_head_tombstone( verify_head_tombstone(
wallet_path=storage_object.wallet_file_path, wallet_path=storage_object.wallet_file_path,
@ -36,9 +43,10 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell) -> No
oid_ts=storage_object.tombstone, oid_ts=storage_object.tombstone,
oid=storage_object.oid, oid=storage_object.oid,
shell=shell, shell=shell,
endpoint=cluster.default_rpc_endpoint,
) )
tick_epoch(shell=shell) tick_epoch(shell, cluster)
sleep(CLEANUP_TIMEOUT) sleep(CLEANUP_TIMEOUT)
with allure.step("Get objects and check errors"): with allure.step("Get objects and check errors"):
@ -49,4 +57,5 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell) -> No
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
shell=shell, shell=shell,
endpoint=cluster.default_rpc_endpoint,
) )

View file

@ -1,24 +1,18 @@
import os import os
import uuid import uuid
from dataclasses import dataclass from dataclasses import dataclass
from typing import Dict, List, Optional from typing import Optional
import allure import allure
import pytest import pytest
from common import ( from cluster import Cluster
ASSETS_DIR, from common import WALLET_CONFIG, WALLET_PASS
IR_WALLET_CONFIG,
IR_WALLET_PATH,
STORAGE_WALLET_CONFIG,
STORAGE_WALLET_PATH,
WALLET_CONFIG,
WALLET_PASS,
)
from file_helper import generate_file from file_helper import generate_file
from neofs_testlib.shell import Shell
from neofs_testlib.utils.wallet import init_wallet from neofs_testlib.utils.wallet import init_wallet
from python_keywords.acl import EACLRole from python_keywords.acl import EACLRole
from python_keywords.container import create_container from python_keywords.container import create_container
from python_keywords.neofs_verbs import put_object from python_keywords.neofs_verbs import put_object_to_random_node
from wellknown_acl import PUBLIC_ACL from wellknown_acl import PUBLIC_ACL
OBJECT_COUNT = 5 OBJECT_COUNT = 5
@ -32,35 +26,42 @@ class Wallet:
@dataclass @dataclass
class Wallets: class Wallets:
wallets: Dict[EACLRole, List[Wallet]] wallets: dict[EACLRole, list[Wallet]]
def get_wallet(self, role: EACLRole = EACLRole.USER) -> Wallet: def get_wallet(self, role: EACLRole = EACLRole.USER) -> Wallet:
return self.wallets[role][0] return self.wallets[role][0]
def get_wallets_list(self, role: EACLRole = EACLRole.USER) -> List[Wallet]: def get_wallets_list(self, role: EACLRole = EACLRole.USER) -> list[Wallet]:
return self.wallets[role] return self.wallets[role]
@pytest.fixture(scope="module") @pytest.fixture(scope="module")
def wallets(prepare_wallet_and_deposit): def wallets(default_wallet, temp_directory, cluster: Cluster) -> Wallets:
other_wallets_paths = [ other_wallets_paths = [
os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json") for _ in range(2) os.path.join(temp_directory, f"{str(uuid.uuid4())}.json") for _ in range(2)
] ]
for other_wallet_path in other_wallets_paths: for other_wallet_path in other_wallets_paths:
init_wallet(other_wallet_path, WALLET_PASS) init_wallet(other_wallet_path, WALLET_PASS)
ir_node = cluster.ir_nodes[0]
storage_node = cluster.storage_nodes[0]
ir_wallet_path = ir_node.get_wallet_path()
ir_wallet_config = ir_node.get_wallet_config_path()
storage_wallet_path = storage_node.get_wallet_path()
storage_wallet_config = storage_node.get_wallet_config_path()
yield Wallets( yield Wallets(
wallets={ wallets={
EACLRole.USER: [ EACLRole.USER: [Wallet(wallet_path=default_wallet, config_path=WALLET_CONFIG)],
Wallet(wallet_path=prepare_wallet_and_deposit, config_path=WALLET_CONFIG)
],
EACLRole.OTHERS: [ EACLRole.OTHERS: [
Wallet(wallet_path=other_wallet_path, config_path=WALLET_CONFIG) Wallet(wallet_path=other_wallet_path, config_path=WALLET_CONFIG)
for other_wallet_path in other_wallets_paths for other_wallet_path in other_wallets_paths
], ],
EACLRole.SYSTEM: [ EACLRole.SYSTEM: [
Wallet(wallet_path=IR_WALLET_PATH, config_path=IR_WALLET_CONFIG), Wallet(wallet_path=ir_wallet_path, config_path=ir_wallet_config),
Wallet(wallet_path=STORAGE_WALLET_PATH, config_path=STORAGE_WALLET_CONFIG), Wallet(wallet_path=storage_wallet_path, config_path=storage_wallet_config),
], ],
} }
) )
@ -72,19 +73,27 @@ def file_path():
@pytest.fixture(scope="function") @pytest.fixture(scope="function")
def eacl_container_with_objects(wallets, client_shell, file_path): def eacl_container_with_objects(
wallets: Wallets, client_shell: Shell, cluster: Cluster, file_path: str
):
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
with allure.step("Create eACL public container"): with allure.step("Create eACL public container"):
cid = create_container(user_wallet.wallet_path, basic_acl=PUBLIC_ACL, shell=client_shell) cid = create_container(
user_wallet.wallet_path,
basic_acl=PUBLIC_ACL,
shell=client_shell,
endpoint=cluster.default_rpc_endpoint,
)
with allure.step("Add test objects to container"): with allure.step("Add test objects to container"):
objects_oids = [ objects_oids = [
put_object( put_object_to_random_node(
user_wallet.wallet_path, user_wallet.wallet_path,
file_path, file_path,
cid, cid,
attributes={"key1": "val1", "key": val, "key2": "abc"}, attributes={"key1": "val1", "key": val, "key2": "abc"},
shell=client_shell, shell=client_shell,
cluster=cluster,
) )
for val in range(OBJECT_COUNT) for val in range(OBJECT_COUNT)
] ]

View file

@ -5,20 +5,10 @@ from typing import Optional
import allure import allure
import pytest import pytest
from common import ( from cluster_test_base import ClusterTestBase
ASSETS_DIR, from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, FREE_STORAGE, SIMPLE_OBJ_SIZE, WALLET_PASS
COMPLEX_OBJ_SIZE,
FREE_STORAGE,
IR_WALLET_CONFIG,
IR_WALLET_PASS,
IR_WALLET_PATH,
SIMPLE_OBJ_SIZE,
WALLET_PASS,
)
from epoch import tick_epoch
from file_helper import generate_file from file_helper import generate_file
from grpc_responses import OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND from grpc_responses import OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND
from neofs_testlib.shell import Shell
from neofs_testlib.utils.wallet import init_wallet from neofs_testlib.utils.wallet import init_wallet
from python_keywords.acl import ( from python_keywords.acl import (
EACLAccess, EACLAccess,
@ -30,7 +20,7 @@ from python_keywords.acl import (
set_eacl, set_eacl,
) )
from python_keywords.container import create_container from python_keywords.container import create_container
from python_keywords.neofs_verbs import put_object from python_keywords.neofs_verbs import put_object_to_random_node
from python_keywords.payment_neogo import deposit_gas, transfer_gas from python_keywords.payment_neogo import deposit_gas, transfer_gas
from python_keywords.storage_group import ( from python_keywords.storage_group import (
delete_storagegroup, delete_storagegroup,
@ -53,53 +43,59 @@ deposit = 30
@pytest.mark.sanity @pytest.mark.sanity
@pytest.mark.acl @pytest.mark.acl
@pytest.mark.storage_group @pytest.mark.storage_group
class TestStorageGroup: class TestStorageGroup(ClusterTestBase):
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
def prepare_two_wallets(self, prepare_wallet_and_deposit, client_shell): def prepare_two_wallets(self, default_wallet):
self.main_wallet = prepare_wallet_and_deposit self.main_wallet = default_wallet
self.other_wallet = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json") self.other_wallet = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json")
init_wallet(self.other_wallet, WALLET_PASS) init_wallet(self.other_wallet, WALLET_PASS)
if not FREE_STORAGE: if not FREE_STORAGE:
main_chain = self.cluster.main_chain_nodes[0]
deposit = 30 deposit = 30
transfer_gas( transfer_gas(
shell=client_shell, shell=self.shell,
amount=deposit + 1, amount=deposit + 1,
main_chain=main_chain,
wallet_to_path=self.other_wallet, wallet_to_path=self.other_wallet,
wallet_to_password=WALLET_PASS, wallet_to_password=WALLET_PASS,
) )
deposit_gas( deposit_gas(
shell=client_shell, shell=self.shell,
amount=deposit, amount=deposit,
main_chain=main_chain,
wallet_from_path=self.other_wallet, wallet_from_path=self.other_wallet,
wallet_from_password=WALLET_PASS, wallet_from_password=WALLET_PASS,
) )
@allure.title("Test Storage Group in Private Container") @allure.title("Test Storage Group in Private Container")
def test_storagegroup_basic_private_container(self, client_shell, object_size): def test_storagegroup_basic_private_container(self, object_size):
cid = create_container(self.main_wallet, shell=client_shell) cid = create_container(
self.main_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
file_path = generate_file(object_size) file_path = generate_file(object_size)
oid = put_object(self.main_wallet, file_path, cid, shell=client_shell) oid = put_object_to_random_node(self.main_wallet, file_path, cid, self.shell, self.cluster)
objects = [oid] objects = [oid]
storage_group = put_storagegroup( storage_group = put_storagegroup(
shell=client_shell, wallet=self.main_wallet, cid=cid, objects=objects shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wallet=self.main_wallet,
cid=cid,
objects=objects,
) )
self.expect_success_for_storagegroup_operations( self.expect_success_for_storagegroup_operations(
shell=client_shell,
wallet=self.main_wallet, wallet=self.main_wallet,
cid=cid, cid=cid,
obj_list=objects, obj_list=objects,
object_size=object_size, object_size=object_size,
) )
self.expect_failure_for_storagegroup_operations( self.expect_failure_for_storagegroup_operations(
shell=client_shell,
wallet=self.other_wallet, wallet=self.other_wallet,
cid=cid, cid=cid,
obj_list=objects, obj_list=objects,
gid=storage_group, gid=storage_group,
) )
self.storagegroup_operations_by_system_ro_container( self.storagegroup_operations_by_system_ro_container(
shell=client_shell,
wallet=self.main_wallet, wallet=self.main_wallet,
cid=cid, cid=cid,
obj_list=objects, obj_list=objects,
@ -107,27 +103,31 @@ class TestStorageGroup:
) )
@allure.title("Test Storage Group in Public Container") @allure.title("Test Storage Group in Public Container")
def test_storagegroup_basic_public_container(self, client_shell, object_size): def test_storagegroup_basic_public_container(self, object_size):
cid = create_container(self.main_wallet, basic_acl="public-read-write", shell=client_shell) cid = create_container(
self.main_wallet,
basic_acl="public-read-write",
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
file_path = generate_file(object_size) file_path = generate_file(object_size)
oid = put_object(self.main_wallet, file_path, cid, shell=client_shell) oid = put_object_to_random_node(
self.main_wallet, file_path, cid, shell=self.shell, cluster=self.cluster
)
objects = [oid] objects = [oid]
self.expect_success_for_storagegroup_operations( self.expect_success_for_storagegroup_operations(
shell=client_shell,
wallet=self.main_wallet, wallet=self.main_wallet,
cid=cid, cid=cid,
obj_list=objects, obj_list=objects,
object_size=object_size, object_size=object_size,
) )
self.expect_success_for_storagegroup_operations( self.expect_success_for_storagegroup_operations(
shell=client_shell,
wallet=self.other_wallet, wallet=self.other_wallet,
cid=cid, cid=cid,
obj_list=objects, obj_list=objects,
object_size=object_size, object_size=object_size,
) )
self.storagegroup_operations_by_system_ro_container( self.storagegroup_operations_by_system_ro_container(
shell=client_shell,
wallet=self.main_wallet, wallet=self.main_wallet,
cid=cid, cid=cid,
obj_list=objects, obj_list=objects,
@ -135,20 +135,25 @@ class TestStorageGroup:
) )
@allure.title("Test Storage Group in Read-Only Container") @allure.title("Test Storage Group in Read-Only Container")
def test_storagegroup_basic_ro_container(self, client_shell, object_size): def test_storagegroup_basic_ro_container(self, object_size):
cid = create_container(self.main_wallet, basic_acl="public-read", shell=client_shell) cid = create_container(
self.main_wallet,
basic_acl="public-read",
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
file_path = generate_file(object_size) file_path = generate_file(object_size)
oid = put_object(self.main_wallet, file_path, cid, shell=client_shell) oid = put_object_to_random_node(
self.main_wallet, file_path, cid, shell=self.shell, cluster=self.cluster
)
objects = [oid] objects = [oid]
self.expect_success_for_storagegroup_operations( self.expect_success_for_storagegroup_operations(
shell=client_shell,
wallet=self.main_wallet, wallet=self.main_wallet,
cid=cid, cid=cid,
obj_list=objects, obj_list=objects,
object_size=object_size, object_size=object_size,
) )
self.storagegroup_operations_by_other_ro_container( self.storagegroup_operations_by_other_ro_container(
shell=client_shell,
owner_wallet=self.main_wallet, owner_wallet=self.main_wallet,
other_wallet=self.other_wallet, other_wallet=self.other_wallet,
cid=cid, cid=cid,
@ -156,7 +161,6 @@ class TestStorageGroup:
object_size=object_size, object_size=object_size,
) )
self.storagegroup_operations_by_system_ro_container( self.storagegroup_operations_by_system_ro_container(
shell=client_shell,
wallet=self.main_wallet, wallet=self.main_wallet,
cid=cid, cid=cid,
obj_list=objects, obj_list=objects,
@ -164,21 +168,27 @@ class TestStorageGroup:
) )
@allure.title("Test Storage Group with Bearer Allow") @allure.title("Test Storage Group with Bearer Allow")
def test_storagegroup_bearer_allow(self, client_shell, object_size): def test_storagegroup_bearer_allow(self, object_size):
cid = create_container( cid = create_container(
self.main_wallet, basic_acl="eacl-public-read-write", shell=client_shell self.main_wallet,
basic_acl="eacl-public-read-write",
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
file_path = generate_file(object_size) file_path = generate_file(object_size)
oid = put_object(self.main_wallet, file_path, cid, shell=client_shell) oid = put_object_to_random_node(
self.main_wallet, file_path, cid, shell=self.shell, cluster=self.cluster
)
objects = [oid] objects = [oid]
self.expect_success_for_storagegroup_operations( self.expect_success_for_storagegroup_operations(
shell=client_shell,
wallet=self.main_wallet, wallet=self.main_wallet,
cid=cid, cid=cid,
obj_list=objects, obj_list=objects,
object_size=object_size, object_size=object_size,
) )
storage_group = put_storagegroup(client_shell, self.main_wallet, cid, objects) storage_group = put_storagegroup(
self.shell, self.cluster.default_rpc_endpoint, self.main_wallet, cid, objects
)
eacl_deny = [ eacl_deny = [
EACLRule(access=EACLAccess.DENY, role=role, operation=op) EACLRule(access=EACLAccess.DENY, role=role, operation=op)
for op in EACLOperation for op in EACLOperation
@ -187,11 +197,12 @@ class TestStorageGroup:
set_eacl( set_eacl(
self.main_wallet, self.main_wallet,
cid, cid,
create_eacl(cid, eacl_deny, shell=client_shell), create_eacl(cid, eacl_deny, shell=self.shell),
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
self.expect_failure_for_storagegroup_operations( self.expect_failure_for_storagegroup_operations(
client_shell, self.main_wallet, cid, objects, storage_group self.main_wallet, cid, objects, storage_group
) )
bearer_file = form_bearertoken_file( bearer_file = form_bearertoken_file(
self.main_wallet, self.main_wallet,
@ -200,10 +211,10 @@ class TestStorageGroup:
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.USER) EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.USER)
for op in EACLOperation for op in EACLOperation
], ],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
self.expect_success_for_storagegroup_operations( self.expect_success_for_storagegroup_operations(
shell=client_shell,
wallet=self.main_wallet, wallet=self.main_wallet,
cid=cid, cid=cid,
obj_list=objects, obj_list=objects,
@ -212,24 +223,38 @@ class TestStorageGroup:
) )
@allure.title("Test to check Storage Group lifetime") @allure.title("Test to check Storage Group lifetime")
def test_storagegroup_lifetime(self, client_shell, object_size): def test_storagegroup_lifetime(self, object_size):
cid = create_container(self.main_wallet, shell=client_shell) cid = create_container(
self.main_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
file_path = generate_file(object_size) file_path = generate_file(object_size)
oid = put_object(self.main_wallet, file_path, cid, shell=client_shell) oid = put_object_to_random_node(
self.main_wallet, file_path, cid, shell=self.shell, cluster=self.cluster
)
objects = [oid] objects = [oid]
storage_group = put_storagegroup(client_shell, self.main_wallet, cid, objects, lifetime=1) storage_group = put_storagegroup(
self.shell,
self.cluster.default_rpc_endpoint,
self.main_wallet,
cid,
objects,
lifetime=1,
)
with allure.step("Tick two epochs"): with allure.step("Tick two epochs"):
for _ in range(2): for _ in range(2):
tick_epoch(shell=client_shell) self.tick_epoch()
with pytest.raises(Exception, match=OBJECT_NOT_FOUND): with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_storagegroup( get_storagegroup(
shell=client_shell, wallet=self.main_wallet, cid=cid, gid=storage_group shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wallet=self.main_wallet,
cid=cid,
gid=storage_group,
) )
@staticmethod
@allure.step("Run Storage Group Operations And Expect Success") @allure.step("Run Storage Group Operations And Expect Success")
def expect_success_for_storagegroup_operations( def expect_success_for_storagegroup_operations(
shell: Shell, self,
wallet: str, wallet: str,
cid: str, cid: str,
obj_list: list, obj_list: list,
@ -241,12 +266,20 @@ class TestStorageGroup:
Put, List, Get and Delete the Storage Group which contains Put, List, Get and Delete the Storage Group which contains
the Object. the Object.
""" """
storage_group = put_storagegroup(shell, wallet, cid, obj_list, bearer) storage_group = put_storagegroup(
self.shell, self.cluster.default_rpc_endpoint, wallet, cid, obj_list, bearer
)
verify_list_storage_group( verify_list_storage_group(
shell=shell, wallet=wallet, cid=cid, gid=storage_group, bearer=bearer shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wallet=wallet,
cid=cid,
gid=storage_group,
bearer=bearer,
) )
verify_get_storage_group( verify_get_storage_group(
shell=shell, shell=self.shell,
cluster=self.cluster,
wallet=wallet, wallet=wallet,
cid=cid, cid=cid,
gid=storage_group, gid=storage_group,
@ -254,12 +287,18 @@ class TestStorageGroup:
object_size=object_size, object_size=object_size,
bearer=bearer, bearer=bearer,
) )
delete_storagegroup(shell=shell, wallet=wallet, cid=cid, gid=storage_group, bearer=bearer) delete_storagegroup(
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wallet=wallet,
cid=cid,
gid=storage_group,
bearer=bearer,
)
@staticmethod
@allure.step("Run Storage Group Operations And Expect Failure") @allure.step("Run Storage Group Operations And Expect Failure")
def expect_failure_for_storagegroup_operations( def expect_failure_for_storagegroup_operations(
shell: Shell, wallet: str, cid: str, obj_list: list, gid: str self, wallet: str, cid: str, obj_list: list, gid: str
): ):
""" """
This func verifies if the Object's owner isn't allowed to This func verifies if the Object's owner isn't allowed to
@ -267,30 +306,64 @@ class TestStorageGroup:
the Object. the Object.
""" """
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
put_storagegroup(shell=shell, wallet=wallet, cid=cid, objects=obj_list) put_storagegroup(
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wallet=wallet,
cid=cid,
objects=obj_list,
)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
list_storagegroup(shell=shell, wallet=wallet, cid=cid) list_storagegroup(
shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, wallet=wallet, cid=cid
)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
get_storagegroup(shell=shell, wallet=wallet, cid=cid, gid=gid) get_storagegroup(
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wallet=wallet,
cid=cid,
gid=gid,
)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
delete_storagegroup(shell=shell, wallet=wallet, cid=cid, gid=gid) delete_storagegroup(
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wallet=wallet,
cid=cid,
gid=gid,
)
@staticmethod
@allure.step("Run Storage Group Operations On Other's Behalf In RO Container") @allure.step("Run Storage Group Operations On Other's Behalf In RO Container")
def storagegroup_operations_by_other_ro_container( def storagegroup_operations_by_other_ro_container(
shell: Shell, self,
owner_wallet: str, owner_wallet: str,
other_wallet: str, other_wallet: str,
cid: str, cid: str,
obj_list: list, obj_list: list,
object_size: int, object_size: int,
): ):
storage_group = put_storagegroup(shell, owner_wallet, cid, obj_list) storage_group = put_storagegroup(
self.shell, self.cluster.default_rpc_endpoint, owner_wallet, cid, obj_list
)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
put_storagegroup(shell=shell, wallet=other_wallet, cid=cid, objects=obj_list) put_storagegroup(
verify_list_storage_group(shell=shell, wallet=other_wallet, cid=cid, gid=storage_group) shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wallet=other_wallet,
cid=cid,
objects=obj_list,
)
verify_list_storage_group(
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wallet=other_wallet,
cid=cid,
gid=storage_group,
)
verify_get_storage_group( verify_get_storage_group(
shell=shell, shell=self.shell,
cluster=self.cluster,
wallet=other_wallet, wallet=other_wallet,
cid=cid, cid=cid,
gid=storage_group, gid=storage_group,
@ -298,56 +371,81 @@ class TestStorageGroup:
object_size=object_size, object_size=object_size,
) )
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
delete_storagegroup(shell=shell, wallet=other_wallet, cid=cid, gid=storage_group) delete_storagegroup(
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wallet=other_wallet,
cid=cid,
gid=storage_group,
)
@staticmethod
@allure.step("Run Storage Group Operations On Systems's Behalf In RO Container") @allure.step("Run Storage Group Operations On Systems's Behalf In RO Container")
def storagegroup_operations_by_system_ro_container( def storagegroup_operations_by_system_ro_container(
shell: Shell, wallet: str, cid: str, obj_list: list, object_size: int self, wallet: str, cid: str, obj_list: list, object_size: int
): ):
""" """
In this func we create a Storage Group on Inner Ring's key behalf In this func we create a Storage Group on Inner Ring's key behalf
and include an Object created on behalf of some user. We expect and include an Object created on behalf of some user. We expect
that System key is granted to make all operations except PUT and DELETE. that System key is granted to make all operations except PUT and DELETE.
""" """
ir_node = self.cluster.ir_nodes[0]
ir_wallet_path = ir_node.get_wallet_path()
ir_wallet_password = ir_node.get_wallet_password()
ir_wallet_config = ir_node.get_wallet_config_path()
if not FREE_STORAGE: if not FREE_STORAGE:
main_chain = self.cluster.main_chain_nodes[0]
deposit = 30 deposit = 30
transfer_gas( transfer_gas(
shell=shell, shell=self.shell,
amount=deposit + 1, amount=deposit + 1,
wallet_to_path=IR_WALLET_PATH, main_chain=main_chain,
wallet_to_password=IR_WALLET_PASS, wallet_to_path=ir_wallet_path,
wallet_to_password=ir_wallet_password,
) )
deposit_gas( deposit_gas(
shell=shell, shell=self.shell,
amount=deposit, amount=deposit,
wallet_from_path=IR_WALLET_PATH, main_chain=main_chain,
wallet_from_password=IR_WALLET_PASS, wallet_from_path=ir_wallet_path,
wallet_from_password=ir_wallet_password,
) )
storage_group = put_storagegroup(shell, wallet, cid, obj_list) storage_group = put_storagegroup(
self.shell, self.cluster.default_rpc_endpoint, wallet, cid, obj_list
)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
put_storagegroup(shell, IR_WALLET_PATH, cid, obj_list, wallet_config=IR_WALLET_CONFIG) put_storagegroup(
self.shell,
self.cluster.default_rpc_endpoint,
ir_wallet_path,
cid,
obj_list,
wallet_config=ir_wallet_config,
)
verify_list_storage_group( verify_list_storage_group(
shell=shell, shell=self.shell,
wallet=IR_WALLET_PATH, endpoint=self.cluster.default_rpc_endpoint,
wallet=ir_wallet_path,
cid=cid, cid=cid,
gid=storage_group, gid=storage_group,
wallet_config=IR_WALLET_CONFIG, wallet_config=ir_wallet_config,
) )
verify_get_storage_group( verify_get_storage_group(
shell=shell, shell=self.shell,
wallet=IR_WALLET_PATH, cluster=self.cluster,
wallet=ir_wallet_path,
cid=cid, cid=cid,
gid=storage_group, gid=storage_group,
obj_list=obj_list, obj_list=obj_list,
object_size=object_size, object_size=object_size,
wallet_config=IR_WALLET_CONFIG, wallet_config=ir_wallet_config,
) )
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED): with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
delete_storagegroup( delete_storagegroup(
shell=shell, shell=self.shell,
wallet=IR_WALLET_PATH, endpoint=self.cluster.default_rpc_endpoint,
wallet=ir_wallet_path,
cid=cid, cid=cid,
gid=storage_group, gid=storage_group,
wallet_config=IR_WALLET_CONFIG, wallet_config=ir_wallet_config,
) )

View file

@ -1,5 +1,6 @@
import allure import allure
import pytest import pytest
from cluster_test_base import ClusterTestBase
from python_keywords.acl import EACLRole from python_keywords.acl import EACLRole
from python_keywords.container import create_container from python_keywords.container import create_container
from python_keywords.container_access import ( from python_keywords.container_access import (
@ -7,7 +8,7 @@ from python_keywords.container_access import (
check_no_access_to_container, check_no_access_to_container,
check_read_only_container, check_read_only_container,
) )
from python_keywords.neofs_verbs import put_object from python_keywords.neofs_verbs import put_object_to_random_node
from wellknown_acl import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F from wellknown_acl import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F
@ -15,13 +16,16 @@ from wellknown_acl import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F
@pytest.mark.smoke @pytest.mark.smoke
@pytest.mark.acl @pytest.mark.acl
@pytest.mark.acl_basic @pytest.mark.acl_basic
class TestACLBasic: class TestACLBasic(ClusterTestBase):
@pytest.fixture(scope="function") @pytest.fixture(scope="function")
def public_container(self, client_shell, wallets): def public_container(self, wallets):
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
with allure.step("Create public container"): with allure.step("Create public container"):
cid_public = create_container( cid_public = create_container(
user_wallet.wallet_path, basic_acl=PUBLIC_ACL_F, shell=client_shell user_wallet.wallet_path,
basic_acl=PUBLIC_ACL_F,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
yield cid_public yield cid_public
@ -30,11 +34,14 @@ class TestACLBasic:
# delete_container(user_wallet.wallet_path, cid_public) # delete_container(user_wallet.wallet_path, cid_public)
@pytest.fixture(scope="function") @pytest.fixture(scope="function")
def private_container(self, client_shell, wallets): def private_container(self, wallets):
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
with allure.step("Create private container"): with allure.step("Create private container"):
cid_private = create_container( cid_private = create_container(
user_wallet.wallet_path, basic_acl=PRIVATE_ACL_F, shell=client_shell user_wallet.wallet_path,
basic_acl=PRIVATE_ACL_F,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
yield cid_private yield cid_private
@ -43,11 +50,14 @@ class TestACLBasic:
# delete_container(user_wallet.wallet_path, cid_private) # delete_container(user_wallet.wallet_path, cid_private)
@pytest.fixture(scope="function") @pytest.fixture(scope="function")
def read_only_container(self, client_shell, wallets): def read_only_container(self, wallets):
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
with allure.step("Create public readonly container"): with allure.step("Create public readonly container"):
cid_read_only = create_container( cid_read_only = create_container(
user_wallet.wallet_path, basic_acl=READONLY_ACL_F, shell=client_shell user_wallet.wallet_path,
basic_acl=READONLY_ACL_F,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
yield cid_read_only yield cid_read_only
@ -56,7 +66,7 @@ class TestACLBasic:
# delete_container(user_wallet.wallet_path, cid_read_only) # delete_container(user_wallet.wallet_path, cid_read_only)
@allure.title("Test basic ACL on public container") @allure.title("Test basic ACL on public container")
def test_basic_acl_public(self, wallets, client_shell, public_container, file_path): def test_basic_acl_public(self, wallets, public_container, file_path):
""" """
Test basic ACL set during public container creation. Test basic ACL set during public container creation.
""" """
@ -67,30 +77,42 @@ class TestACLBasic:
with allure.step("Add test objects to container"): with allure.step("Add test objects to container"):
# We create new objects for each wallet because check_full_access_to_container # We create new objects for each wallet because check_full_access_to_container
# deletes the object # deletes the object
owner_object_oid = put_object( owner_object_oid = put_object_to_random_node(
user_wallet.wallet_path, user_wallet.wallet_path,
file_path, file_path,
cid, cid,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
attributes={"created": "owner"}, attributes={"created": "owner"},
) )
other_object_oid = put_object( other_object_oid = put_object_to_random_node(
other_wallet.wallet_path, other_wallet.wallet_path,
file_path, file_path,
cid, cid,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
attributes={"created": "other"}, attributes={"created": "other"},
) )
with allure.step(f"Check {desc} has full access to public container"): with allure.step(f"Check {desc} has full access to public container"):
check_full_access_to_container( check_full_access_to_container(
wallet.wallet_path, cid, owner_object_oid, file_path, shell=client_shell wallet.wallet_path,
cid,
owner_object_oid,
file_path,
shell=self.shell,
cluster=self.cluster,
) )
check_full_access_to_container( check_full_access_to_container(
wallet.wallet_path, cid, other_object_oid, file_path, shell=client_shell wallet.wallet_path,
cid,
other_object_oid,
file_path,
shell=self.shell,
cluster=self.cluster,
) )
@allure.title("Test basic ACL on private container") @allure.title("Test basic ACL on private container")
def test_basic_acl_private(self, wallets, client_shell, private_container, file_path): def test_basic_acl_private(self, wallets, private_container, file_path):
""" """
Test basic ACL set during private container creation. Test basic ACL set during private container creation.
""" """
@ -98,19 +120,29 @@ class TestACLBasic:
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS) other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
cid = private_container cid = private_container
with allure.step("Add test objects to container"): with allure.step("Add test objects to container"):
owner_object_oid = put_object( owner_object_oid = put_object_to_random_node(
user_wallet.wallet_path, file_path, cid, shell=client_shell user_wallet.wallet_path, file_path, cid, shell=self.shell, cluster=self.cluster
) )
with allure.step("Check only owner has full access to private container"): with allure.step("Check only owner has full access to private container"):
with allure.step("Check no one except owner has access to operations with container"): with allure.step("Check no one except owner has access to operations with container"):
check_no_access_to_container( check_no_access_to_container(
other_wallet.wallet_path, cid, owner_object_oid, file_path, shell=client_shell other_wallet.wallet_path,
cid,
owner_object_oid,
file_path,
shell=self.shell,
cluster=self.cluster,
) )
with allure.step("Check owner has full access to private container"): with allure.step("Check owner has full access to private container"):
check_full_access_to_container( check_full_access_to_container(
user_wallet.wallet_path, cid, owner_object_oid, file_path, shell=client_shell user_wallet.wallet_path,
cid,
owner_object_oid,
file_path,
shell=self.shell,
cluster=self.cluster,
) )
@allure.title("Test basic ACL on readonly container") @allure.title("Test basic ACL on readonly container")
@ -123,14 +155,26 @@ class TestACLBasic:
cid = read_only_container cid = read_only_container
with allure.step("Add test objects to container"): with allure.step("Add test objects to container"):
object_oid = put_object(user_wallet.wallet_path, file_path, cid, shell=client_shell) object_oid = put_object_to_random_node(
user_wallet.wallet_path, file_path, cid, shell=client_shell, cluster=self.cluster
)
with allure.step("Check other has read-only access to operations with container"): with allure.step("Check other has read-only access to operations with container"):
check_read_only_container( check_read_only_container(
other_wallet.wallet_path, cid, object_oid, file_path, shell=client_shell other_wallet.wallet_path,
cid,
object_oid,
file_path,
shell=client_shell,
cluster=self.cluster,
) )
with allure.step("Check owner has full access to public container"): with allure.step("Check owner has full access to public container"):
check_full_access_to_container( check_full_access_to_container(
user_wallet.wallet_path, cid, object_oid, file_path, shell=client_shell user_wallet.wallet_path,
cid,
object_oid,
file_path,
shell=client_shell,
cluster=self.cluster,
) )

View file

@ -1,5 +1,6 @@
import allure import allure
import pytest import pytest
from cluster_test_base import ClusterTestBase
from python_keywords.acl import ( from python_keywords.acl import (
EACLAccess, EACLAccess,
EACLOperation, EACLOperation,
@ -20,15 +21,14 @@ from python_keywords.container_access import (
@pytest.mark.sanity @pytest.mark.sanity
@pytest.mark.acl @pytest.mark.acl
@pytest.mark.acl_bearer @pytest.mark.acl_bearer
class TestACLBearer: class TestACLBearer(ClusterTestBase):
@pytest.mark.parametrize("role", [EACLRole.USER, EACLRole.OTHERS]) @pytest.mark.parametrize("role", [EACLRole.USER, EACLRole.OTHERS])
def test_bearer_token_operations( def test_bearer_token_operations(self, wallets, eacl_container_with_objects, role):
self, wallets, client_shell, eacl_container_with_objects, role
):
allure.dynamic.title(f"Testcase to validate NeoFS operations with {role.value} BearerToken") allure.dynamic.title(f"Testcase to validate NeoFS operations with {role.value} BearerToken")
cid, objects_oids, file_path = eacl_container_with_objects cid, objects_oids, file_path = eacl_container_with_objects
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
deny_wallet = wallets.get_wallet(role) deny_wallet = wallets.get_wallet(role)
endpoint = self.cluster.default_rpc_endpoint
with allure.step(f"Check {role.value} has full access to container without bearer token"): with allure.step(f"Check {role.value} has full access to container without bearer token"):
check_full_access_to_container( check_full_access_to_container(
@ -37,15 +37,16 @@ class TestACLBearer:
objects_oids.pop(), objects_oids.pop(),
file_path, file_path,
wallet_config=deny_wallet.config_path, wallet_config=deny_wallet.config_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
with allure.step(f"Set deny all operations for {role.value} via eACL"): with allure.step(f"Set deny all operations for {role.value} via eACL"):
eacl = [ eacl = [
EACLRule(access=EACLAccess.DENY, role=role, operation=op) for op in EACLOperation EACLRule(access=EACLAccess.DENY, role=role, operation=op) for op in EACLOperation
] ]
eacl_file = create_eacl(cid, eacl, shell=client_shell) eacl_file = create_eacl(cid, eacl, shell=self.shell)
set_eacl(user_wallet.wallet_path, cid, eacl_file, shell=client_shell) set_eacl(user_wallet.wallet_path, cid, eacl_file, shell=self.shell, endpoint=endpoint)
wait_for_cache_expired() wait_for_cache_expired()
with allure.step(f"Create bearer token for {role.value} with all operations allowed"): with allure.step(f"Create bearer token for {role.value} with all operations allowed"):
@ -56,7 +57,8 @@ class TestACLBearer:
EACLRule(operation=op, access=EACLAccess.ALLOW, role=role) EACLRule(operation=op, access=EACLAccess.ALLOW, role=role)
for op in EACLOperation for op in EACLOperation
], ],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
with allure.step( with allure.step(
@ -68,7 +70,8 @@ class TestACLBearer:
objects_oids.pop(), objects_oids.pop(),
file_path, file_path,
wallet_config=deny_wallet.config_path, wallet_config=deny_wallet.config_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
with allure.step( with allure.step(
@ -81,15 +84,16 @@ class TestACLBearer:
file_path, file_path,
bearer=bearer, bearer=bearer,
wallet_config=deny_wallet.config_path, wallet_config=deny_wallet.config_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
with allure.step(f"Set allow all operations for {role.value} via eACL"): with allure.step(f"Set allow all operations for {role.value} via eACL"):
eacl = [ eacl = [
EACLRule(access=EACLAccess.ALLOW, role=role, operation=op) for op in EACLOperation EACLRule(access=EACLAccess.ALLOW, role=role, operation=op) for op in EACLOperation
] ]
eacl_file = create_eacl(cid, eacl, shell=client_shell) eacl_file = create_eacl(cid, eacl, shell=self.shell)
set_eacl(user_wallet.wallet_path, cid, eacl_file, shell=client_shell) set_eacl(user_wallet.wallet_path, cid, eacl_file, shell=self.shell, endpoint=endpoint)
wait_for_cache_expired() wait_for_cache_expired()
with allure.step( with allure.step(
@ -101,13 +105,13 @@ class TestACLBearer:
objects_oids.pop(), objects_oids.pop(),
file_path, file_path,
wallet_config=deny_wallet.config_path, wallet_config=deny_wallet.config_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
@allure.title("BearerToken Operations for compound Operations") @allure.title("BearerToken Operations for compound Operations")
def test_bearer_token_compound_operations( def test_bearer_token_compound_operations(self, wallets, eacl_container_with_objects):
self, wallets, client_shell, eacl_container_with_objects endpoint = self.cluster.default_rpc_endpoint
):
cid, objects_oids, file_path = eacl_container_with_objects cid, objects_oids, file_path = eacl_container_with_objects
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS) other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
@ -153,8 +157,9 @@ class TestACLBearer:
set_eacl( set_eacl(
user_wallet.wallet_path, user_wallet.wallet_path,
cid, cid,
eacl_table_path=create_eacl(cid, eacl_deny, shell=client_shell), eacl_table_path=create_eacl(cid, eacl_deny, shell=self.shell),
shell=client_shell, shell=self.shell,
endpoint=endpoint,
) )
wait_for_cache_expired() wait_for_cache_expired()
@ -166,7 +171,8 @@ class TestACLBearer:
file_path, file_path,
deny_operations=deny_map[EACLRole.USER], deny_operations=deny_map[EACLRole.USER],
wallet_config=user_wallet.config_path, wallet_config=user_wallet.config_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
check_custom_access_to_container( check_custom_access_to_container(
other_wallet.wallet_path, other_wallet.wallet_path,
@ -175,7 +181,8 @@ class TestACLBearer:
file_path, file_path,
deny_operations=deny_map[EACLRole.OTHERS], deny_operations=deny_map[EACLRole.OTHERS],
wallet_config=other_wallet.config_path, wallet_config=other_wallet.config_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
with allure.step("Check rule consistency using bearer token"): with allure.step("Check rule consistency using bearer token"):
@ -186,7 +193,8 @@ class TestACLBearer:
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.USER) EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.USER)
for op in bearer_map[EACLRole.USER] for op in bearer_map[EACLRole.USER]
], ],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
bearer_other = form_bearertoken_file( bearer_other = form_bearertoken_file(
@ -196,7 +204,8 @@ class TestACLBearer:
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS) EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
for op in bearer_map[EACLRole.OTHERS] for op in bearer_map[EACLRole.OTHERS]
], ],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
check_custom_access_to_container( check_custom_access_to_container(
@ -207,7 +216,8 @@ class TestACLBearer:
deny_operations=deny_map_with_bearer[EACLRole.USER], deny_operations=deny_map_with_bearer[EACLRole.USER],
bearer=bearer_user, bearer=bearer_user,
wallet_config=user_wallet.config_path, wallet_config=user_wallet.config_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
check_custom_access_to_container( check_custom_access_to_container(
other_wallet.wallet_path, other_wallet.wallet_path,
@ -217,5 +227,6 @@ class TestACLBearer:
deny_operations=deny_map_with_bearer[EACLRole.OTHERS], deny_operations=deny_map_with_bearer[EACLRole.OTHERS],
bearer=bearer_other, bearer=bearer_other,
wallet_config=other_wallet.config_path, wallet_config=other_wallet.config_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )

View file

@ -1,8 +1,7 @@
import allure import allure
import pytest import pytest
from common import NEOFS_NETMAP_DICT from cluster_test_base import ClusterTestBase
from failover_utils import wait_object_replication_on_nodes from failover_utils import wait_object_replication
from neofs_testlib.hosting import Hosting
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
from python_keywords.acl import ( from python_keywords.acl import (
EACLAccess, EACLAccess,
@ -18,7 +17,7 @@ from python_keywords.container_access import (
check_full_access_to_container, check_full_access_to_container,
check_no_access_to_container, check_no_access_to_container,
) )
from python_keywords.neofs_verbs import put_object from python_keywords.neofs_verbs import put_object_to_random_node
from python_keywords.node_management import drop_object from python_keywords.node_management import drop_object
from python_keywords.object_access import ( from python_keywords.object_access import (
can_delete_object, can_delete_object,
@ -35,36 +34,39 @@ from wellknown_acl import PUBLIC_ACL
@pytest.mark.sanity @pytest.mark.sanity
@pytest.mark.acl @pytest.mark.acl
@pytest.mark.acl_extended @pytest.mark.acl_extended
class TestEACLContainer: class TestEACLContainer(ClusterTestBase):
NODE_COUNT = len(NEOFS_NETMAP_DICT.keys())
@pytest.fixture(scope="function") @pytest.fixture(scope="function")
def eacl_full_placement_container_with_object( def eacl_full_placement_container_with_object(self, wallets, file_path) -> str:
self, wallets, file_path, client_shell: Shell
) -> str:
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
storage_nodes = self.cluster.storage_nodes
node_count = len(storage_nodes)
with allure.step("Create eACL public container with full placement rule"): with allure.step("Create eACL public container with full placement rule"):
full_placement_rule = ( full_placement_rule = f"REP {node_count} IN X CBF 1 SELECT {node_count} FROM * AS X"
f"REP {self.NODE_COUNT} IN X CBF 1 SELECT {self.NODE_COUNT} FROM * AS X"
)
cid = create_container( cid = create_container(
wallet=user_wallet.wallet_path, wallet=user_wallet.wallet_path,
rule=full_placement_rule, rule=full_placement_rule,
basic_acl=PUBLIC_ACL, basic_acl=PUBLIC_ACL,
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
with allure.step("Add test object to container"): with allure.step("Add test object to container"):
oid = put_object(user_wallet.wallet_path, file_path, cid, shell=client_shell) oid = put_object_to_random_node(
wait_object_replication_on_nodes( user_wallet.wallet_path, file_path, cid, shell=self.shell, cluster=self.cluster
user_wallet.wallet_path, cid, oid, self.NODE_COUNT, shell=client_shell )
wait_object_replication(
cid,
oid,
node_count,
shell=self.shell,
nodes=storage_nodes,
) )
yield cid, oid, file_path yield cid, oid, file_path
@pytest.mark.parametrize("deny_role", [EACLRole.USER, EACLRole.OTHERS]) @pytest.mark.parametrize("deny_role", [EACLRole.USER, EACLRole.OTHERS])
def test_extended_acl_deny_all_operations( def test_extended_acl_deny_all_operations(
self, wallets, client_shell, eacl_container_with_objects, deny_role self, wallets, eacl_container_with_objects, deny_role
): ):
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS) other_wallet = wallets.get_wallet(EACLRole.OTHERS)
@ -83,8 +85,9 @@ class TestEACLContainer:
set_eacl( set_eacl(
user_wallet.wallet_path, user_wallet.wallet_path,
cid, cid,
create_eacl(cid, eacl_deny, shell=client_shell), create_eacl(cid, eacl_deny, shell=self.shell),
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
wait_for_cache_expired() wait_for_cache_expired()
@ -97,7 +100,8 @@ class TestEACLContainer:
cid, cid,
object_oids[0], object_oids[0],
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
with allure.step( with allure.step(
@ -108,7 +112,8 @@ class TestEACLContainer:
cid, cid,
object_oids.pop(), object_oids.pop(),
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
with allure.step(f"Allow all operations for {deny_role_str} via eACL"): with allure.step(f"Allow all operations for {deny_role_str} via eACL"):
@ -119,30 +124,33 @@ class TestEACLContainer:
set_eacl( set_eacl(
user_wallet.wallet_path, user_wallet.wallet_path,
cid, cid,
create_eacl(cid, eacl_deny, shell=client_shell), create_eacl(cid, eacl_deny, shell=self.shell),
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
wait_for_cache_expired() wait_for_cache_expired()
with allure.step(f"Check all have full access to eACL public container"): with allure.step("Check all have full access to eACL public container"):
check_full_access_to_container( check_full_access_to_container(
user_wallet.wallet_path, user_wallet.wallet_path,
cid, cid,
object_oids.pop(), object_oids.pop(),
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
check_full_access_to_container( check_full_access_to_container(
other_wallet.wallet_path, other_wallet.wallet_path,
cid, cid,
object_oids.pop(), object_oids.pop(),
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
@allure.title("Testcase to allow NeoFS operations for only one other pubkey.") @allure.title("Testcase to allow NeoFS operations for only one other pubkey.")
def test_extended_acl_deny_all_operations_exclude_pubkey( def test_extended_acl_deny_all_operations_exclude_pubkey(
self, wallets, client_shell, eacl_container_with_objects self, wallets, eacl_container_with_objects
): ):
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
other_wallet, other_wallet_allow = wallets.get_wallets_list(EACLRole.OTHERS)[0:2] other_wallet, other_wallet_allow = wallets.get_wallets_list(EACLRole.OTHERS)[0:2]
@ -164,8 +172,9 @@ class TestEACLContainer:
set_eacl( set_eacl(
user_wallet.wallet_path, user_wallet.wallet_path,
cid, cid,
create_eacl(cid, eacl, shell=client_shell), create_eacl(cid, eacl, shell=self.shell),
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
wait_for_cache_expired() wait_for_cache_expired()
@ -176,7 +185,8 @@ class TestEACLContainer:
cid, cid,
object_oids[0], object_oids[0],
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
with allure.step("Check owner has full access to public container"): with allure.step("Check owner has full access to public container"):
@ -185,7 +195,8 @@ class TestEACLContainer:
cid, cid,
object_oids.pop(), object_oids.pop(),
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
with allure.step("Check allowed other has full access to public container"): with allure.step("Check allowed other has full access to public container"):
@ -194,20 +205,20 @@ class TestEACLContainer:
cid, cid,
object_oids.pop(), object_oids.pop(),
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
@allure.title("Testcase to validate NeoFS replication with eACL deny rules.") @allure.title("Testcase to validate NeoFS replication with eACL deny rules.")
def test_extended_acl_deny_replication( def test_extended_acl_deny_replication(
self, self,
wallets, wallets,
client_shell,
hosting: Hosting,
eacl_full_placement_container_with_object, eacl_full_placement_container_with_object,
file_path,
): ):
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
cid, oid, file_path = eacl_full_placement_container_with_object cid, oid, file_path = eacl_full_placement_container_with_object
storage_nodes = self.cluster.storage_nodes
storage_node = self.cluster.storage_nodes[0]
with allure.step("Deny all operations for user via eACL"): with allure.step("Deny all operations for user via eACL"):
eacl_deny = [ eacl_deny = [
@ -221,40 +232,48 @@ class TestEACLContainer:
set_eacl( set_eacl(
user_wallet.wallet_path, user_wallet.wallet_path,
cid, cid,
create_eacl(cid, eacl_deny, shell=client_shell), create_eacl(cid, eacl_deny, shell=self.shell),
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
wait_for_cache_expired() wait_for_cache_expired()
with allure.step("Drop object to check replication"): with allure.step("Drop object to check replication"):
drop_object(hosting, node_name=[*NEOFS_NETMAP_DICT][0], cid=cid, oid=oid) drop_object(storage_node, cid=cid, oid=oid)
storage_wallet_path = NEOFS_NETMAP_DICT[[*NEOFS_NETMAP_DICT][0]]["wallet_path"] storage_wallet_path = storage_node.get_wallet_path()
with allure.step("Wait for dropped object replicated"): with allure.step("Wait for dropped object replicated"):
wait_object_replication_on_nodes( wait_object_replication(
storage_wallet_path, cid, oid, self.NODE_COUNT, shell=client_shell cid,
oid,
len(storage_nodes),
self.shell,
storage_nodes,
) )
@allure.title("Testcase to validate NeoFS system operations with extended ACL") @allure.title("Testcase to validate NeoFS system operations with extended ACL")
def test_extended_actions_system(self, wallets, client_shell, eacl_container_with_objects): def test_extended_actions_system(self, wallets, eacl_container_with_objects):
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
ir_wallet, storage_wallet = wallets.get_wallets_list(role=EACLRole.SYSTEM)[:2] ir_wallet, storage_wallet = wallets.get_wallets_list(role=EACLRole.SYSTEM)[:2]
cid, object_oids, file_path = eacl_container_with_objects cid, object_oids, file_path = eacl_container_with_objects
endpoint = self.cluster.default_rpc_endpoint
with allure.step("Check IR and STORAGE rules compliance"): with allure.step("Check IR and STORAGE rules compliance"):
assert not can_put_object( assert not can_put_object(
ir_wallet.wallet_path, ir_wallet.wallet_path,
cid, cid,
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
assert can_put_object( assert can_put_object(
storage_wallet.wallet_path, storage_wallet.wallet_path,
cid, cid,
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -263,7 +282,8 @@ class TestEACLContainer:
cid, cid,
object_oids[0], object_oids[0],
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
assert can_get_object( assert can_get_object(
@ -271,7 +291,8 @@ class TestEACLContainer:
cid, cid,
object_oids[0], object_oids[0],
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -279,28 +300,32 @@ class TestEACLContainer:
ir_wallet.wallet_path, ir_wallet.wallet_path,
cid, cid,
object_oids[0], object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
assert can_get_head_object( assert can_get_head_object(
storage_wallet.wallet_path, storage_wallet.wallet_path,
cid, cid,
object_oids[0], object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
assert can_search_object( assert can_search_object(
ir_wallet.wallet_path, ir_wallet.wallet_path,
cid, cid,
shell=client_shell, shell=self.shell,
endpoint=endpoint,
oid=object_oids[0], oid=object_oids[0],
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
assert can_search_object( assert can_search_object(
storage_wallet.wallet_path, storage_wallet.wallet_path,
cid, cid,
shell=client_shell, shell=self.shell,
endpoint=endpoint,
oid=object_oids[0], oid=object_oids[0],
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -310,7 +335,8 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
@ -318,7 +344,8 @@ class TestEACLContainer:
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -326,7 +353,8 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
@ -334,7 +362,8 @@ class TestEACLContainer:
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -343,7 +372,8 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
@ -351,7 +381,8 @@ class TestEACLContainer:
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -365,9 +396,10 @@ class TestEACLContainer:
EACLRule(access=EACLAccess.DENY, role=EACLRole.SYSTEM, operation=op) EACLRule(access=EACLAccess.DENY, role=EACLRole.SYSTEM, operation=op)
for op in EACLOperation for op in EACLOperation
], ],
shell=client_shell, shell=self.shell,
), ),
shell=client_shell, shell=self.shell,
endpoint=endpoint,
) )
wait_for_cache_expired() wait_for_cache_expired()
@ -376,14 +408,16 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
file_name=file_path, file_name=file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
assert not can_put_object( assert not can_put_object(
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
file_name=file_path, file_name=file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -393,7 +427,8 @@ class TestEACLContainer:
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
file_name=file_path, file_name=file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
@ -402,7 +437,8 @@ class TestEACLContainer:
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
file_name=file_path, file_name=file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -411,7 +447,8 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
@ -419,7 +456,8 @@ class TestEACLContainer:
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -427,7 +465,8 @@ class TestEACLContainer:
assert can_search_object( assert can_search_object(
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
shell=client_shell, shell=self.shell,
endpoint=endpoint,
oid=object_oids[0], oid=object_oids[0],
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
@ -435,7 +474,8 @@ class TestEACLContainer:
assert can_search_object( assert can_search_object(
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
shell=client_shell, shell=self.shell,
endpoint=endpoint,
oid=object_oids[0], oid=object_oids[0],
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -445,7 +485,8 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
@ -453,7 +494,8 @@ class TestEACLContainer:
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -462,7 +504,8 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
@ -470,7 +513,8 @@ class TestEACLContainer:
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -479,7 +523,8 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
@ -487,7 +532,8 @@ class TestEACLContainer:
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -501,9 +547,10 @@ class TestEACLContainer:
EACLRule(access=EACLAccess.ALLOW, role=EACLRole.SYSTEM, operation=op) EACLRule(access=EACLAccess.ALLOW, role=EACLRole.SYSTEM, operation=op)
for op in EACLOperation for op in EACLOperation
], ],
shell=client_shell, shell=self.shell,
), ),
shell=client_shell, shell=self.shell,
endpoint=endpoint,
) )
wait_for_cache_expired() wait_for_cache_expired()
@ -512,14 +559,16 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
file_name=file_path, file_name=file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
assert can_put_object( assert can_put_object(
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
file_name=file_path, file_name=file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -528,7 +577,8 @@ class TestEACLContainer:
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
file_name=file_path, file_name=file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
assert can_get_object( assert can_get_object(
@ -536,7 +586,8 @@ class TestEACLContainer:
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
file_name=file_path, file_name=file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -544,29 +595,33 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
assert can_get_head_object( assert can_get_head_object(
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
assert can_search_object( assert can_search_object(
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
shell=client_shell, shell=self.shell,
oid=object_oids[0], oid=object_oids[0],
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
assert can_search_object( assert can_search_object(
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
shell=client_shell, shell=self.shell,
oid=object_oids[0], oid=object_oids[0],
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -575,7 +630,8 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
@ -583,7 +639,8 @@ class TestEACLContainer:
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -591,7 +648,8 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
@ -599,7 +657,8 @@ class TestEACLContainer:
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )
@ -608,7 +667,8 @@ class TestEACLContainer:
wallet=ir_wallet.wallet_path, wallet=ir_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path, wallet_config=ir_wallet.config_path,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
@ -616,6 +676,7 @@ class TestEACLContainer:
wallet=storage_wallet.wallet_path, wallet=storage_wallet.wallet_path,
cid=cid, cid=cid,
oid=object_oids[0], oid=object_oids[0],
shell=client_shell, shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path, wallet_config=storage_wallet.config_path,
) )

View file

@ -1,5 +1,6 @@
import allure import allure
import pytest import pytest
from cluster_test_base import ClusterTestBase
from python_keywords.acl import ( from python_keywords.acl import (
EACLAccess, EACLAccess,
EACLFilter, EACLFilter,
@ -19,7 +20,7 @@ from python_keywords.container_access import (
check_full_access_to_container, check_full_access_to_container,
check_no_access_to_container, check_no_access_to_container,
) )
from python_keywords.neofs_verbs import put_object from python_keywords.neofs_verbs import put_object_to_random_node
from python_keywords.object_access import can_get_head_object, can_get_object, can_put_object from python_keywords.object_access import can_get_head_object, can_get_object, can_put_object
from wellknown_acl import PUBLIC_ACL from wellknown_acl import PUBLIC_ACL
@ -27,7 +28,7 @@ from wellknown_acl import PUBLIC_ACL
@pytest.mark.sanity @pytest.mark.sanity
@pytest.mark.acl @pytest.mark.acl
@pytest.mark.acl_filters @pytest.mark.acl_filters
class TestEACLFilters: class TestEACLFilters(ClusterTestBase):
# SPEC: https://github.com/nspcc-dev/neofs-spec/blob/master/01-arch/07-acl.md # SPEC: https://github.com/nspcc-dev/neofs-spec/blob/master/01-arch/07-acl.md
ATTRIBUTE = {"check_key": "check_value"} ATTRIBUTE = {"check_key": "check_value"}
OTHER_ATTRIBUTE = {"check_key": "other_value"} OTHER_ATTRIBUTE = {"check_key": "other_value"}
@ -67,52 +68,66 @@ class TestEACLFilters:
] ]
@pytest.fixture(scope="function") @pytest.fixture(scope="function")
def eacl_container_with_objects(self, wallets, client_shell, file_path): def eacl_container_with_objects(self, wallets, file_path):
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
with allure.step("Create eACL public container"): with allure.step("Create eACL public container"):
cid = create_container( cid = create_container(
user_wallet.wallet_path, basic_acl=PUBLIC_ACL, shell=client_shell user_wallet.wallet_path,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
with allure.step("Add test objects to container"): with allure.step("Add test objects to container"):
objects_with_header = [ objects_with_header = [
put_object( put_object_to_random_node(
user_wallet.wallet_path, user_wallet.wallet_path,
file_path, file_path,
cid, cid,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
attributes={**self.SET_HEADERS, "key": val}, attributes={**self.SET_HEADERS, "key": val},
) )
for val in range(self.OBJECT_COUNT) for val in range(self.OBJECT_COUNT)
] ]
objects_with_other_header = [ objects_with_other_header = [
put_object( put_object_to_random_node(
user_wallet.wallet_path, user_wallet.wallet_path,
file_path, file_path,
cid, cid,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
attributes={**self.OTHER_HEADERS, "key": val}, attributes={**self.OTHER_HEADERS, "key": val},
) )
for val in range(self.OBJECT_COUNT) for val in range(self.OBJECT_COUNT)
] ]
objects_without_header = [ objects_without_header = [
put_object(user_wallet.wallet_path, file_path, cid, shell=client_shell) put_object_to_random_node(
user_wallet.wallet_path,
file_path,
cid,
shell=self.shell,
cluster=self.cluster,
)
for _ in range(self.OBJECT_COUNT) for _ in range(self.OBJECT_COUNT)
] ]
yield cid, objects_with_header, objects_with_other_header, objects_without_header, file_path yield cid, objects_with_header, objects_with_other_header, objects_without_header, file_path
with allure.step("Delete eACL public container"): with allure.step("Delete eACL public container"):
delete_container(user_wallet.wallet_path, cid, shell=client_shell) delete_container(
user_wallet.wallet_path,
cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
@pytest.mark.parametrize( @pytest.mark.parametrize(
"match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL] "match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
) )
def test_extended_acl_filters_request( def test_extended_acl_filters_request(self, wallets, eacl_container_with_objects, match_type):
self, wallets, client_shell, eacl_container_with_objects, match_type
):
allure.dynamic.title(f"Validate NeoFS operations with request filter: {match_type.name}") allure.dynamic.title(f"Validate NeoFS operations with request filter: {match_type.name}")
user_wallet = wallets.get_wallet() user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS) other_wallet = wallets.get_wallet(EACLRole.OTHERS)
@ -139,8 +154,9 @@ class TestEACLFilters:
set_eacl( set_eacl(
user_wallet.wallet_path, user_wallet.wallet_path,
cid, cid,
create_eacl(cid, eacl_deny, shell=client_shell), create_eacl(cid, eacl_deny, shell=self.shell),
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
wait_for_cache_expired() wait_for_cache_expired()
@ -163,7 +179,12 @@ class TestEACLFilters:
): ):
with allure.step("Check other has full access when sending request without headers"): with allure.step("Check other has full access when sending request without headers"):
check_full_access_to_container( check_full_access_to_container(
other_wallet.wallet_path, cid, oid.pop(), file_path, shell=client_shell other_wallet.wallet_path,
cid,
oid.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
) )
with allure.step( with allure.step(
@ -174,7 +195,8 @@ class TestEACLFilters:
cid, cid,
oid.pop(), oid.pop(),
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
xhdr=allow_headers, xhdr=allow_headers,
) )
@ -184,7 +206,8 @@ class TestEACLFilters:
cid, cid,
oid.pop(), oid.pop(),
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
xhdr=deny_headers, xhdr=deny_headers,
) )
@ -199,14 +222,16 @@ class TestEACLFilters:
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS) EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
for op in EACLOperation for op in EACLOperation
], ],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
check_full_access_to_container( check_full_access_to_container(
other_wallet.wallet_path, other_wallet.wallet_path,
cid, cid,
oid.pop(), oid.pop(),
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
xhdr=deny_headers, xhdr=deny_headers,
bearer=bearer_other, bearer=bearer_other,
) )
@ -215,7 +240,7 @@ class TestEACLFilters:
"match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL] "match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
) )
def test_extended_acl_deny_filters_object( def test_extended_acl_deny_filters_object(
self, wallets, client_shell, eacl_container_with_objects, match_type self, wallets, eacl_container_with_objects, match_type
): ):
allure.dynamic.title( allure.dynamic.title(
f"Validate NeoFS operations with deny user headers filter: {match_type.name}" f"Validate NeoFS operations with deny user headers filter: {match_type.name}"
@ -245,8 +270,9 @@ class TestEACLFilters:
set_eacl( set_eacl(
user_wallet.wallet_path, user_wallet.wallet_path,
cid, cid,
create_eacl(cid, eacl_deny, shell=client_shell), create_eacl(cid, eacl_deny, shell=self.shell),
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
wait_for_cache_expired() wait_for_cache_expired()
@ -271,7 +297,8 @@ class TestEACLFilters:
cid, cid,
objs_without_header.pop(), objs_without_header.pop(),
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
xhdr=xhdr, xhdr=xhdr,
) )
@ -281,7 +308,8 @@ class TestEACLFilters:
cid, cid,
allow_objects.pop(), allow_objects.pop(),
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
xhdr=xhdr, xhdr=xhdr,
) )
@ -291,7 +319,8 @@ class TestEACLFilters:
other_wallet.wallet_path, other_wallet.wallet_path,
cid, cid,
deny_objects[0], deny_objects[0],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
xhdr=xhdr, xhdr=xhdr,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
@ -300,7 +329,8 @@ class TestEACLFilters:
cid, cid,
deny_objects[0], deny_objects[0],
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
xhdr=xhdr, xhdr=xhdr,
) )
@ -318,14 +348,16 @@ class TestEACLFilters:
) )
for op in EACLOperation for op in EACLOperation
], ],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
check_full_access_to_container( check_full_access_to_container(
other_wallet.wallet_path, other_wallet.wallet_path,
cid, cid,
deny_objects.pop(), deny_objects.pop(),
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
xhdr=xhdr, xhdr=xhdr,
bearer=bearer_other, bearer=bearer_other,
) )
@ -338,10 +370,13 @@ class TestEACLFilters:
other_wallet.wallet_path, other_wallet.wallet_path,
cid, cid,
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
attributes=allow_attribute, attributes=allow_attribute,
) )
assert can_put_object(other_wallet.wallet_path, cid, file_path, shell=client_shell) assert can_put_object(
other_wallet.wallet_path, cid, file_path, shell=self.shell, cluster=self.cluster
)
deny_attribute = ( deny_attribute = (
self.ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.OTHER_ATTRIBUTE self.ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.OTHER_ATTRIBUTE
@ -352,7 +387,8 @@ class TestEACLFilters:
other_wallet.wallet_path, other_wallet.wallet_path,
cid, cid,
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
attributes=deny_attribute, attributes=deny_attribute,
) )
@ -369,13 +405,15 @@ class TestEACLFilters:
role=EACLRole.OTHERS, role=EACLRole.OTHERS,
) )
], ],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
assert can_put_object( assert can_put_object(
other_wallet.wallet_path, other_wallet.wallet_path,
cid, cid,
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
attributes=deny_attribute, attributes=deny_attribute,
bearer=bearer_other_for_put, bearer=bearer_other_for_put,
) )
@ -384,7 +422,7 @@ class TestEACLFilters:
"match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL] "match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
) )
def test_extended_acl_allow_filters_object( def test_extended_acl_allow_filters_object(
self, wallets, client_shell, eacl_container_with_objects, match_type self, wallets, eacl_container_with_objects, match_type
): ):
allure.dynamic.title( allure.dynamic.title(
"Testcase to validate NeoFS operation with allow eACL user headers filters:" "Testcase to validate NeoFS operation with allow eACL user headers filters:"
@ -420,8 +458,9 @@ class TestEACLFilters:
set_eacl( set_eacl(
user_wallet.wallet_path, user_wallet.wallet_path,
cid, cid,
create_eacl(cid, eacl, shell=client_shell), create_eacl(cid, eacl, shell=self.shell),
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
wait_for_cache_expired() wait_for_cache_expired()
@ -439,13 +478,26 @@ class TestEACLFilters:
with allure.step(f"Check other cannot get and put objects without attributes"): with allure.step(f"Check other cannot get and put objects without attributes"):
oid = objects_without_header.pop() oid = objects_without_header.pop()
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
assert can_get_head_object(other_wallet.wallet_path, cid, oid, shell=client_shell) assert can_get_head_object(
with pytest.raises(AssertionError): other_wallet.wallet_path,
assert can_get_object( cid,
other_wallet.wallet_path, cid, oid, file_path, shell=client_shell oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
assert can_put_object(other_wallet.wallet_path, cid, file_path, shell=client_shell) assert can_get_object(
other_wallet.wallet_path,
cid,
oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
with pytest.raises(AssertionError):
assert can_put_object(
other_wallet.wallet_path, cid, file_path, shell=self.shell, cluster=self.cluster
)
with allure.step( with allure.step(
"Check other can get and put objects without attributes and using bearer token" "Check other can get and put objects without attributes and using bearer token"
@ -461,13 +513,15 @@ class TestEACLFilters:
) )
for op in EACLOperation for op in EACLOperation
], ],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
assert can_get_head_object( assert can_get_head_object(
other_wallet.wallet_path, other_wallet.wallet_path,
cid, cid,
objects_without_header[0], objects_without_header[0],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
bearer=bearer_other, bearer=bearer_other,
) )
assert can_get_object( assert can_get_object(
@ -475,37 +529,62 @@ class TestEACLFilters:
cid, cid,
objects_without_header[0], objects_without_header[0],
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
bearer=bearer_other, bearer=bearer_other,
) )
assert can_put_object( assert can_put_object(
other_wallet.wallet_path, other_wallet.wallet_path,
cid, cid,
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
bearer=bearer_other, bearer=bearer_other,
) )
with allure.step(f"Check other can get objects with attributes matching the filter"): with allure.step(f"Check other can get objects with attributes matching the filter"):
oid = allow_objects.pop() oid = allow_objects.pop()
assert can_get_head_object(other_wallet.wallet_path, cid, oid, shell=client_shell) assert can_get_head_object(
assert can_get_object(other_wallet.wallet_path, cid, oid, file_path, shell=client_shell) other_wallet.wallet_path,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
assert can_get_object(
other_wallet.wallet_path,
cid,
oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
assert can_put_object( assert can_put_object(
other_wallet.wallet_path, other_wallet.wallet_path,
cid, cid,
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
attributes=allow_attribute, attributes=allow_attribute,
) )
with allure.step("Check other cannot get objects without attributes matching the filter"): with allure.step("Check other cannot get objects without attributes matching the filter"):
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
assert can_get_head_object( assert can_get_head_object(
other_wallet.wallet_path, cid, deny_objects[0], shell=client_shell other_wallet.wallet_path,
cid,
deny_objects[0],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
assert can_get_object( assert can_get_object(
other_wallet.wallet_path, cid, deny_objects[0], file_path, shell=client_shell other_wallet.wallet_path,
cid,
deny_objects[0],
file_path,
shell=self.shell,
cluster=self.cluster,
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
assert can_put_object( assert can_put_object(
@ -513,7 +592,8 @@ class TestEACLFilters:
cid, cid,
file_path, file_path,
attributes=deny_attribute, attributes=deny_attribute,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
) )
with allure.step( with allure.step(
@ -522,21 +602,28 @@ class TestEACLFilters:
): ):
oid = deny_objects.pop() oid = deny_objects.pop()
assert can_get_head_object( assert can_get_head_object(
other_wallet.wallet_path, cid, oid, shell=client_shell, bearer=bearer_other other_wallet.wallet_path,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
bearer=bearer_other,
) )
assert can_get_object( assert can_get_object(
other_wallet.wallet_path, other_wallet.wallet_path,
cid, cid,
oid, oid,
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
bearer=bearer_other, bearer=bearer_other,
) )
assert can_put_object( assert can_put_object(
other_wallet.wallet_path, other_wallet.wallet_path,
cid, cid,
file_path, file_path,
shell=client_shell, shell=self.shell,
cluster=self.cluster,
attributes=deny_attribute, attributes=deny_attribute,
bearer=bearer_other, bearer=bearer_other,
) )

View file

@ -9,6 +9,7 @@ import allure
import pytest import pytest
import yaml import yaml
from binary_version_helper import get_local_binaries_versions, get_remote_binaries_versions from binary_version_helper import get_local_binaries_versions, get_remote_binaries_versions
from cluster import Cluster
from common import ( from common import (
ASSETS_DIR, ASSETS_DIR,
BACKGROUND_LOAD_MAX_TIME, BACKGROUND_LOAD_MAX_TIME,
@ -20,7 +21,6 @@ from common import (
LOAD_NODE_SSH_PRIVATE_KEY_PATH, LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_USER, LOAD_NODE_SSH_USER,
LOAD_NODES, LOAD_NODES,
NEOFS_NETMAP_DICT,
STORAGE_NODE_SERVICE_NAME_REGEX, STORAGE_NODE_SERVICE_NAME_REGEX,
WALLET_PASS, WALLET_PASS,
) )
@ -33,8 +33,9 @@ from neofs_testlib.shell import LocalShell, Shell
from neofs_testlib.utils.wallet import init_wallet from neofs_testlib.utils.wallet import init_wallet
from payment_neogo import deposit_gas, transfer_gas from payment_neogo import deposit_gas, transfer_gas
from pytest import FixtureRequest from pytest import FixtureRequest
from python_keywords.node_management import node_healthcheck from python_keywords.node_management import storage_node_healthcheck
from wallet import WalletFactory
from helpers.wallet import WalletFactory
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -66,6 +67,7 @@ def hosting(configure_testlib) -> Hosting:
hosting_instance = Hosting() hosting_instance = Hosting()
hosting_instance.configure(hosting_config) hosting_instance.configure(hosting_config)
yield hosting_instance yield hosting_instance
@ -81,8 +83,13 @@ def require_multiple_hosts(hosting: Hosting):
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def wallet_factory(prepare_tmp_dir: str, client_shell: Shell) -> WalletFactory: def wallet_factory(temp_directory: str, client_shell: Shell, cluster: Cluster) -> WalletFactory:
return WalletFactory(prepare_tmp_dir, client_shell) return WalletFactory(temp_directory, client_shell, cluster)
@pytest.fixture(scope="session")
def cluster(hosting: Hosting) -> Cluster:
yield Cluster(hosting)
@pytest.fixture(scope="session", autouse=True) @pytest.fixture(scope="session", autouse=True)
@ -97,7 +104,7 @@ def check_binary_versions(request, hosting: Hosting, client_shell: Shell):
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
@allure.title("Prepare tmp directory") @allure.title("Prepare tmp directory")
def prepare_tmp_dir(): def temp_directory():
with allure.step("Prepare tmp directory"): with allure.step("Prepare tmp directory"):
full_path = os.path.join(os.getcwd(), ASSETS_DIR) full_path = os.path.join(os.getcwd(), ASSETS_DIR)
shutil.rmtree(full_path, ignore_errors=True) shutil.rmtree(full_path, ignore_errors=True)
@ -111,7 +118,7 @@ def prepare_tmp_dir():
@pytest.fixture(scope="function", autouse=True) @pytest.fixture(scope="function", autouse=True)
@allure.title("Analyze logs") @allure.title("Analyze logs")
def analyze_logs(prepare_tmp_dir: str, hosting: Hosting, request: FixtureRequest): def analyze_logs(temp_directory: str, hosting: Hosting, request: FixtureRequest):
start_time = datetime.utcnow() start_time = datetime.utcnow()
yield yield
end_time = datetime.utcnow() end_time = datetime.utcnow()
@ -123,39 +130,39 @@ def analyze_logs(prepare_tmp_dir: str, hosting: Hosting, request: FixtureRequest
# Test name may exceed os NAME_MAX (255 bytes), so we use test start datetime instead # Test name may exceed os NAME_MAX (255 bytes), so we use test start datetime instead
start_time_str = start_time.strftime("%Y_%m_%d_%H_%M_%S_%f") start_time_str = start_time.strftime("%Y_%m_%d_%H_%M_%S_%f")
logs_dir = os.path.join(prepare_tmp_dir, f"logs_{start_time_str}") logs_dir = os.path.join(temp_directory, f"logs_{start_time_str}")
dump_logs(hosting, logs_dir, start_time, end_time) dump_logs(hosting, logs_dir, start_time, end_time)
check_logs(logs_dir) check_logs(logs_dir)
@pytest.fixture(scope="session", autouse=True) @pytest.fixture(scope="session", autouse=True)
@allure.title("Collect logs") @allure.title("Collect logs")
def collect_logs(prepare_tmp_dir, hosting: Hosting): def collect_logs(temp_directory, hosting: Hosting):
start_time = datetime.utcnow() start_time = datetime.utcnow()
yield yield
end_time = datetime.utcnow() end_time = datetime.utcnow()
# Dump logs to temp directory (because they might be too large to keep in RAM) # Dump logs to temp directory (because they might be too large to keep in RAM)
logs_dir = os.path.join(prepare_tmp_dir, "logs") logs_dir = os.path.join(temp_directory, "logs")
dump_logs(hosting, logs_dir, start_time, end_time) dump_logs(hosting, logs_dir, start_time, end_time)
attach_logs(logs_dir) attach_logs(logs_dir)
@pytest.fixture(scope="session", autouse=True) @pytest.fixture(scope="session", autouse=True)
@allure.title("Run health check for all storage nodes") @allure.title("Run health check for all storage nodes")
def run_health_check(collect_logs, hosting: Hosting): def run_health_check(collect_logs, cluster: Cluster):
failed_nodes = [] failed_nodes = []
for node_name in NEOFS_NETMAP_DICT.keys(): for node in cluster.storage_nodes:
health_check = node_healthcheck(hosting, node_name) health_check = storage_node_healthcheck(node)
if health_check.health_status != "READY" or health_check.network_status != "ONLINE": if health_check.health_status != "READY" or health_check.network_status != "ONLINE":
failed_nodes.append(node_name) failed_nodes.append(node)
if failed_nodes: if failed_nodes:
raise AssertionError(f"Nodes {failed_nodes} are not healthy") raise AssertionError(f"Nodes {failed_nodes} are not healthy")
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def background_grpc_load(client_shell, prepare_wallet_and_deposit): def background_grpc_load(client_shell, default_wallet):
registry_file = os.path.join("/tmp/", f"{str(uuid.uuid4())}.bolt") registry_file = os.path.join("/tmp/", f"{str(uuid.uuid4())}.bolt")
prepare_file = os.path.join("/tmp/", f"{str(uuid.uuid4())}.json") prepare_file = os.path.join("/tmp/", f"{str(uuid.uuid4())}.json")
allure.dynamic.title( allure.dynamic.title(
@ -221,21 +228,24 @@ def background_grpc_load(client_shell, prepare_wallet_and_deposit):
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
@allure.title("Prepare wallet and deposit") @allure.title("Prepare wallet and deposit")
def prepare_wallet_and_deposit(client_shell, prepare_tmp_dir): def default_wallet(client_shell: Shell, temp_directory: str, cluster: Cluster):
wallet_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json") wallet_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json")
init_wallet(wallet_path, WALLET_PASS) init_wallet(wallet_path, WALLET_PASS)
allure.attach.file(wallet_path, os.path.basename(wallet_path), allure.attachment_type.JSON) allure.attach.file(wallet_path, os.path.basename(wallet_path), allure.attachment_type.JSON)
if not FREE_STORAGE: if not FREE_STORAGE:
main_chain = cluster.main_chain_nodes[0]
deposit = 30 deposit = 30
transfer_gas( transfer_gas(
shell=client_shell, shell=client_shell,
amount=deposit + 1, amount=deposit + 1,
main_chain=main_chain,
wallet_to_path=wallet_path, wallet_to_path=wallet_path,
wallet_to_password=WALLET_PASS, wallet_to_password=WALLET_PASS,
) )
deposit_gas( deposit_gas(
shell=client_shell, shell=client_shell,
main_chain=main_chain,
amount=deposit, amount=deposit,
wallet_from_path=wallet_path, wallet_from_path=wallet_path,
wallet_from_password=WALLET_PASS, wallet_from_password=WALLET_PASS,

View file

@ -14,84 +14,114 @@ from python_keywords.container import (
from utility import placement_policy_from_container from utility import placement_policy_from_container
from wellknown_acl import PRIVATE_ACL_F from wellknown_acl import PRIVATE_ACL_F
from steps.cluster_test_base import ClusterTestBase
@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
@pytest.mark.sanity
@pytest.mark.smoke
@pytest.mark.container @pytest.mark.container
def test_container_creation(client_shell, prepare_wallet_and_deposit, name): @pytest.mark.sanity
scenario_title = f"with name {name}" if name else "without name" @pytest.mark.container
allure.dynamic.title(f"User can create container {scenario_title}") class TestContainer(ClusterTestBase):
@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
@pytest.mark.smoke
def test_container_creation(self, default_wallet, name):
scenario_title = f"with name {name}" if name else "without name"
allure.dynamic.title(f"User can create container {scenario_title}")
wallet = prepare_wallet_and_deposit wallet = default_wallet
with open(wallet) as file: with open(wallet) as file:
json_wallet = json.load(file) json_wallet = json.load(file)
placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X" placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
cid = create_container(wallet, rule=placement_rule, name=name, shell=client_shell) cid = create_container(
wallet,
rule=placement_rule,
name=name,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
containers = list_containers(wallet, shell=client_shell) containers = list_containers(
assert cid in containers, f"Expected container {cid} in containers: {containers}" wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
assert cid in containers, f"Expected container {cid} in containers: {containers}"
container_info: str = get_container(wallet, cid, json_mode=False, shell=client_shell) container_info: str = get_container(
container_info = container_info.casefold() # To ignore case when comparing with expected values wallet,
cid,
json_mode=False,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
container_info = (
container_info.casefold()
) # To ignore case when comparing with expected values
info_to_check = { info_to_check = {
f"basic ACL: {PRIVATE_ACL_F} (private)", f"basic ACL: {PRIVATE_ACL_F} (private)",
f"owner ID: {json_wallet.get('accounts')[0].get('address')}", f"owner ID: {json_wallet.get('accounts')[0].get('address')}",
f"container ID: {cid}", f"container ID: {cid}",
} }
if name: if name:
info_to_check.add(f"Name={name}") info_to_check.add(f"Name={name}")
with allure.step("Check container has correct information"): with allure.step("Check container has correct information"):
expected_policy = placement_rule.casefold() expected_policy = placement_rule.casefold()
actual_policy = placement_policy_from_container(container_info) actual_policy = placement_policy_from_container(container_info)
assert (
actual_policy == expected_policy
), f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
for info in info_to_check:
expected_info = info.casefold()
assert ( assert (
expected_info in container_info actual_policy == expected_policy
), f"Expected {expected_info} in container info:\n{container_info}" ), f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
with allure.step("Delete container and check it was deleted"): for info in info_to_check:
delete_container(wallet, cid, shell=client_shell) expected_info = info.casefold()
tick_epoch(shell=client_shell) assert (
wait_for_container_deletion(wallet, cid, shell=client_shell) expected_info in container_info
), f"Expected {expected_info} in container info:\n{container_info}"
with allure.step("Delete container and check it was deleted"):
delete_container(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
tick_epoch(self.shell, self.cluster)
wait_for_container_deletion(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
@allure.title("Parallel container creation and deletion") @allure.title("Parallel container creation and deletion")
@pytest.mark.sanity def test_container_creation_deletion_parallel(self, default_wallet):
@pytest.mark.container containers_count = 3
def test_container_creation_deletion_parallel(client_shell, prepare_wallet_and_deposit): wallet = default_wallet
containers_count = 3 placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
wallet = prepare_wallet_and_deposit
placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
cids: list[str] = [] cids: list[str] = []
with allure.step(f"Create {containers_count} containers"): with allure.step(f"Create {containers_count} containers"):
for _ in range(containers_count): for _ in range(containers_count):
cids.append( cids.append(
create_container( create_container(
wallet, wallet,
rule=placement_rule, rule=placement_rule,
await_mode=False, await_mode=False,
shell=client_shell, shell=self.shell,
wait_for_creation=False, endpoint=self.cluster.default_rpc_endpoint,
wait_for_creation=False,
)
) )
)
with allure.step(f"Wait for containers occur in container list"): with allure.step(f"Wait for containers occur in container list"):
for cid in cids: for cid in cids:
wait_for_container_creation( wait_for_container_creation(
wallet, cid, sleep_interval=containers_count, shell=client_shell wallet,
) cid,
sleep_interval=containers_count,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with allure.step("Delete containers and check they were deleted"): with allure.step("Delete containers and check they were deleted"):
for cid in cids: for cid in cids:
delete_container(wallet, cid, shell=client_shell) delete_container(
tick_epoch(shell=client_shell) wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
wait_for_container_deletion(wallet, cid, shell=client_shell) )
tick_epoch(self.shell, self.cluster)
wait_for_container_deletion(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)

View file

@ -4,98 +4,107 @@ from time import sleep
import allure import allure
import pytest import pytest
from failover_utils import wait_all_storage_node_returned, wait_object_replication_on_nodes from cluster import StorageNode
from failover_utils import wait_all_storage_nodes_returned, wait_object_replication
from file_helper import generate_file, get_file_hash from file_helper import generate_file, get_file_hash
from iptables_helper import IpTablesHelper from iptables_helper import IpTablesHelper
from neofs_testlib.hosting import Hosting
from python_keywords.container import create_container from python_keywords.container import create_container
from python_keywords.neofs_verbs import get_object, put_object from python_keywords.neofs_verbs import get_object, put_object_to_random_node
from wellknown_acl import PUBLIC_ACL from wellknown_acl import PUBLIC_ACL
from steps.cluster_test_base import ClusterTestBase
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
STORAGE_NODE_COMMUNICATION_PORT = "8080" STORAGE_NODE_COMMUNICATION_PORT = "8080"
STORAGE_NODE_COMMUNICATION_PORT_TLS = "8082" STORAGE_NODE_COMMUNICATION_PORT_TLS = "8082"
PORTS_TO_BLOCK = [STORAGE_NODE_COMMUNICATION_PORT, STORAGE_NODE_COMMUNICATION_PORT_TLS] PORTS_TO_BLOCK = [STORAGE_NODE_COMMUNICATION_PORT, STORAGE_NODE_COMMUNICATION_PORT_TLS]
blocked_hosts = [] blocked_nodes: list[StorageNode] = []
@pytest.fixture(autouse=True)
@allure.step("Restore network")
def restore_network(hosting: Hosting):
yield
not_empty = len(blocked_hosts) != 0
for host_address in list(blocked_hosts):
with allure.step(f"Restore network at host {host_address}"):
host = hosting.get_host_by_address(host_address)
IpTablesHelper.restore_input_traffic_to_port(host.get_shell(), PORTS_TO_BLOCK)
blocked_hosts.remove(host)
if not_empty:
wait_all_storage_node_returned(hosting)
@allure.title("Block Storage node traffic")
@pytest.mark.failover @pytest.mark.failover
@pytest.mark.failover_network @pytest.mark.failover_network
def test_block_storage_node_traffic( class TestFailoverNetwork(ClusterTestBase):
prepare_wallet_and_deposit, client_shell, require_multiple_hosts, hosting: Hosting @pytest.fixture(autouse=True)
): @allure.step("Restore network")
""" def restore_network(self):
Block storage nodes traffic using iptables and wait for replication for objects. yield
"""
wallet = prepare_wallet_and_deposit
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
wakeup_node_timeout = 10 # timeout to let nodes detect that traffic has blocked
nodes_to_block_count = 2
source_file_path = generate_file() not_empty = len(blocked_nodes) != 0
cid = create_container(wallet, shell=client_shell, rule=placement_rule, basic_acl=PUBLIC_ACL) for node in list(blocked_nodes):
oid = put_object(wallet, source_file_path, cid, shell=client_shell) with allure.step(f"Restore network at host for {node.label}"):
IpTablesHelper.restore_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK)
blocked_nodes.remove(node)
if not_empty:
wait_all_storage_nodes_returned(self.cluster)
# TODO: we need to refactor wait_object_replication_on_nodes so that it returns @allure.title("Block Storage node traffic")
# storage node names rather than endpoints def test_block_storage_node_traffic(self, default_wallet, require_multiple_hosts):
node_endpoints = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell) """
Block storage nodes traffic using iptables and wait for replication for objects.
"""
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
wakeup_node_timeout = 10 # timeout to let nodes detect that traffic has blocked
nodes_to_block_count = 2
logger.info(f"Nodes are {node_endpoints}") source_file_path = generate_file()
node_endpoints_to_block = node_endpoints cid = create_container(
if nodes_to_block_count > len(node_endpoints): wallet,
# TODO: the intent of this logic is not clear, need to revisit shell=self.shell,
node_endpoints_to_block = choices(node_endpoints, k=2) endpoint=self.cluster.default_rpc_endpoint,
rule=placement_rule,
basic_acl=PUBLIC_ACL,
)
oid = put_object_to_random_node(
wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster
)
excluded_nodes = [] nodes = wait_object_replication(
for node_endpoint in node_endpoints_to_block: cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
host_address = node_endpoint.split(":")[0] )
host = hosting.get_host_by_address(host_address)
with allure.step(f"Block incoming traffic at host {host_address} on port {PORTS_TO_BLOCK}"): logger.info(f"Nodes are {nodes}")
blocked_hosts.append(host_address) nodes_to_block = nodes
excluded_nodes.append(node_endpoint) if nodes_to_block_count > len(nodes):
IpTablesHelper.drop_input_traffic_to_port(host.get_shell(), PORTS_TO_BLOCK) # TODO: the intent of this logic is not clear, need to revisit
sleep(wakeup_node_timeout) nodes_to_block = choices(nodes, k=2)
with allure.step(f"Check object is not stored on node {node_endpoint}"): excluded_nodes = []
new_nodes = wait_object_replication_on_nodes( for node in nodes_to_block:
wallet, cid, oid, 2, shell=client_shell, excluded_nodes=excluded_nodes with allure.step(f"Block incoming traffic at node {node} on port {PORTS_TO_BLOCK}"):
) blocked_nodes.append(node)
assert node_endpoint not in new_nodes excluded_nodes.append(node)
IpTablesHelper.drop_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK)
sleep(wakeup_node_timeout)
with allure.step(f"Check object is not stored on node {node}"):
new_nodes = wait_object_replication(
cid,
oid,
2,
shell=self.shell,
nodes=list(set(self.cluster.storage_nodes) - set(excluded_nodes)),
)
assert node not in new_nodes
with allure.step(f"Check object data is not corrupted"):
got_file_path = get_object(
wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
for node in nodes_to_block:
with allure.step(f"Unblock incoming traffic at host {node} on port {PORTS_TO_BLOCK}"):
IpTablesHelper.restore_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK)
blocked_nodes.remove(node)
sleep(wakeup_node_timeout)
with allure.step(f"Check object data is not corrupted"): with allure.step(f"Check object data is not corrupted"):
got_file_path = get_object(wallet, cid, oid, endpoint=new_nodes[0], shell=client_shell) new_nodes = wait_object_replication(
cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
)
got_file_path = get_object(
wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint()
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path) assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
for node_endpoint in node_endpoints_to_block:
host_address = node_endpoint.split(":")[0]
host = hosting.get_host_by_address(host_address)
with allure.step(
f"Unblock incoming traffic at host {host_address} on port {PORTS_TO_BLOCK}"
):
IpTablesHelper.restore_input_traffic_to_port(host.get_shell(), PORTS_TO_BLOCK)
blocked_hosts.remove(host_address)
sleep(wakeup_node_timeout)
with allure.step(f"Check object data is not corrupted"):
new_nodes = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell)
got_file_path = get_object(wallet, cid, oid, shell=client_shell, endpoint=new_nodes[0])
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)

View file

@ -2,23 +2,26 @@ import logging
import allure import allure
import pytest import pytest
from failover_utils import wait_all_storage_node_returned, wait_object_replication_on_nodes from cluster import Cluster, StorageNode
from failover_utils import wait_all_storage_nodes_returned, wait_object_replication
from file_helper import generate_file, get_file_hash from file_helper import generate_file, get_file_hash
from neofs_testlib.hosting import Host, Hosting from neofs_testlib.hosting import Host
from neofs_testlib.shell import CommandOptions from neofs_testlib.shell import CommandOptions
from python_keywords.container import create_container from python_keywords.container import create_container
from python_keywords.neofs_verbs import get_object, put_object from python_keywords.neofs_verbs import get_object, put_object_to_random_node
from wellknown_acl import PUBLIC_ACL from wellknown_acl import PUBLIC_ACL
from steps.cluster_test_base import ClusterTestBase
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
stopped_hosts = [] stopped_nodes: list[StorageNode] = []
@pytest.fixture(scope="function", autouse=True) @pytest.fixture(scope="function", autouse=True)
@allure.step("Return all stopped hosts") @allure.step("Return all stopped hosts")
def after_run_return_all_stopped_hosts(hosting: Hosting): def after_run_return_all_stopped_hosts(cluster: Cluster):
yield yield
return_stopped_hosts(hosting) return_stopped_hosts(cluster)
def panic_reboot_host(host: Host) -> None: def panic_reboot_host(host: Host) -> None:
@ -29,112 +32,147 @@ def panic_reboot_host(host: Host) -> None:
shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options) shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options)
def return_stopped_hosts(hosting: Hosting) -> None: def return_stopped_hosts(cluster: Cluster) -> None:
for host_address in list(stopped_hosts): for node in list(stopped_nodes):
with allure.step(f"Start host {host_address}"): with allure.step(f"Start host {node}"):
host = hosting.get_host_by_address(host_address) node.host.start_host()
host.start_host() stopped_nodes.remove(node)
stopped_hosts.remove(host_address)
wait_all_storage_node_returned(hosting) wait_all_storage_nodes_returned(cluster)
@allure.title("Lose and return storage node's host")
@pytest.mark.parametrize("hard_reboot", [True, False])
@pytest.mark.failover @pytest.mark.failover
@pytest.mark.failover_reboot class TestFailoverStorage(ClusterTestBase):
def test_lose_storage_node_host( @allure.title("Lose and return storage node's host")
prepare_wallet_and_deposit, @pytest.mark.parametrize("hard_reboot", [True, False])
client_shell, @pytest.mark.failover_reboot
hosting: Hosting, def test_lose_storage_node_host(
hard_reboot: bool, self,
require_multiple_hosts, default_wallet,
): hard_reboot: bool,
wallet = prepare_wallet_and_deposit require_multiple_hosts,
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" ):
source_file_path = generate_file() wallet = default_wallet
cid = create_container(wallet, shell=client_shell, rule=placement_rule, basic_acl=PUBLIC_ACL) placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
oid = put_object(wallet, source_file_path, cid, shell=client_shell) source_file_path = generate_file()
node_endpoints = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell) cid = create_container(
wallet,
for node_endpoint in node_endpoints: shell=self.shell,
host_address = node_endpoint.split(":")[0] endpoint=self.cluster.default_rpc_endpoint,
host = hosting.get_host_by_address(host_address) rule=placement_rule,
stopped_hosts.append(host.config.address) basic_acl=PUBLIC_ACL,
)
with allure.step(f"Stop host {host_address}"): oid = put_object_to_random_node(
host.stop_host("hard" if hard_reboot else "soft") wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster
)
new_nodes = wait_object_replication_on_nodes( nodes = wait_object_replication(
wallet, cid, oid, 2, shell=client_shell, excluded_nodes=[node_endpoint] cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
) )
assert all(old_node not in new_nodes for old_node in node_endpoints)
with allure.step("Check object data is not corrupted"): for node in nodes:
got_file_path = get_object(wallet, cid, oid, endpoint=new_nodes[0], shell=client_shell) stopped_nodes.append(node)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
with allure.step(f"Return all hosts"): with allure.step(f"Stop host {node}"):
return_stopped_hosts(hosting) node.host.stop_host("hard" if hard_reboot else "soft")
with allure.step("Check object data is not corrupted"): new_nodes = wait_object_replication(
new_nodes = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell) cid,
got_file_path = get_object(wallet, cid, oid, shell=client_shell, endpoint=new_nodes[0]) oid,
assert get_file_hash(source_file_path) == get_file_hash(got_file_path) 2,
shell=self.shell,
nodes=list(set(self.cluster.storage_nodes) - {node}),
)
assert all(old_node not in new_nodes for old_node in nodes)
with allure.step("Check object data is not corrupted"):
got_file_path = get_object(
wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
@allure.title("Panic storage node's host") with allure.step(f"Return all hosts"):
@pytest.mark.parametrize("sequence", [True, False]) return_stopped_hosts(self.cluster)
@pytest.mark.failover
@pytest.mark.failover_panic
def test_panic_storage_node_host(
prepare_wallet_and_deposit,
client_shell,
hosting: Hosting,
require_multiple_hosts,
sequence: bool,
):
wallet = prepare_wallet_and_deposit
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
source_file_path = generate_file()
cid = create_container(wallet, shell=client_shell, rule=placement_rule, basic_acl=PUBLIC_ACL)
oid = put_object(wallet, source_file_path, cid, shell=client_shell)
node_endpoints = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell) with allure.step("Check object data is not corrupted"):
allure.attach( new_nodes = wait_object_replication(
"\n".join(node_endpoints), cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
"Current nodes with object", )
allure.attachment_type.TEXT, got_file_path = get_object(
) wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint()
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
new_nodes: list[str] = [] @allure.title("Panic storage node's host")
for node_endpoint in node_endpoints: @pytest.mark.parametrize("sequence", [True, False])
host_address = node_endpoint.split(":")[0] @pytest.mark.failover_panic
def test_panic_storage_node_host(
self,
default_wallet,
require_multiple_hosts,
sequence: bool,
):
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
source_file_path = generate_file()
cid = create_container(
wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=placement_rule,
basic_acl=PUBLIC_ACL,
)
oid = put_object_to_random_node(
wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster
)
with allure.step(f"Hard reboot host {node_endpoint} via magic SysRq option"): nodes = wait_object_replication(
host = hosting.get_host_by_address(host_address) cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
panic_reboot_host(host) )
if sequence:
try:
new_nodes = wait_object_replication_on_nodes(
wallet, cid, oid, 2, shell=client_shell, excluded_nodes=[node_endpoint]
)
except AssertionError:
new_nodes = wait_object_replication_on_nodes(
wallet, cid, oid, 2, shell=client_shell
)
allure.attach(
"\n".join(new_nodes),
f"Nodes with object after {node_endpoint} fail",
allure.attachment_type.TEXT,
)
if not sequence:
new_nodes = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell)
allure.attach( allure.attach(
"\n".join(new_nodes), "Nodes with object after nodes fail", allure.attachment_type.TEXT "\n".join(nodes),
"Current nodes with object",
allure.attachment_type.TEXT,
) )
got_file_path = get_object(wallet, cid, oid, shell=client_shell, endpoint=new_nodes[0]) new_nodes: list[StorageNode] = []
assert get_file_hash(source_file_path) == get_file_hash(got_file_path) for node in nodes:
with allure.step(f"Hard reboot host {node} via magic SysRq option"):
panic_reboot_host(node.host)
if sequence:
try:
new_nodes = wait_object_replication(
cid,
oid,
2,
shell=self.shell,
nodes=list(set(self.cluster.storage_nodes) - {node}),
)
except AssertionError:
new_nodes = wait_object_replication(
cid,
oid,
2,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
allure.attach(
"\n".join(new_nodes),
f"Nodes with object after {node} fail",
allure.attachment_type.TEXT,
)
if not sequence:
new_nodes = wait_object_replication(
cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
)
allure.attach(
"\n".join(new_nodes),
"Nodes with object after nodes fail",
allure.attachment_type.TEXT,
)
got_file_path = get_object(
wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint()
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)

View file

@ -1,541 +1,531 @@
import logging import logging
from random import choice import random
from time import sleep from time import sleep
from typing import Optional from typing import Optional, Tuple
import allure import allure
import pytest import pytest
from common import ( from cluster import StorageNode
COMPLEX_OBJ_SIZE, from cluster_test_base import ClusterTestBase
MORPH_BLOCK_TIME, from common import COMPLEX_OBJ_SIZE, MORPH_BLOCK_TIME, NEOFS_CONTRACT_CACHE_TIMEOUT
NEOFS_CONTRACT_CACHE_TIMEOUT,
NEOFS_NETMAP_DICT,
STORAGE_RPC_ENDPOINT_1,
STORAGE_WALLET_PASS,
)
from data_formatters import get_wallet_public_key
from epoch import tick_epoch from epoch import tick_epoch
from file_helper import generate_file from file_helper import generate_file
from grpc_responses import OBJECT_NOT_FOUND, error_matches_status from grpc_responses import OBJECT_NOT_FOUND, error_matches_status
from neofs_testlib.hosting import Hosting
from neofs_testlib.shell import Shell
from python_keywords.container import create_container, get_container from python_keywords.container import create_container, get_container
from python_keywords.failover_utils import wait_object_replication_on_nodes from python_keywords.failover_utils import wait_object_replication
from python_keywords.neofs_verbs import delete_object, get_object, head_object, put_object from python_keywords.neofs_verbs import (
delete_object,
get_object,
get_object_from_random_node,
head_object,
put_object,
put_object_to_random_node,
)
from python_keywords.node_management import ( from python_keywords.node_management import (
check_node_in_map, check_node_in_map,
delete_node_data, delete_node_data,
drop_object, drop_object,
exclude_node_from_network_map, exclude_node_from_network_map,
get_locode, get_locode_from_random_node,
get_netmap_snapshot, get_netmap_snapshot,
include_node_to_network_map, include_node_to_network_map,
node_healthcheck,
node_set_status,
node_shard_list, node_shard_list,
node_shard_set_mode, node_shard_set_mode,
start_nodes, start_storage_nodes,
stop_nodes, storage_node_healthcheck,
storage_node_set_status,
) )
from storage_policy import get_nodes_with_object, get_simple_object_copies from storage_policy import get_nodes_with_object, get_simple_object_copies
from utility import parse_time, placement_policy_from_container, wait_for_gc_pass_on_storage_nodes from utility import parse_time, placement_policy_from_container, wait_for_gc_pass_on_storage_nodes
from wellknown_acl import PUBLIC_ACL from wellknown_acl import PUBLIC_ACL
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
check_nodes = [] check_nodes: list[StorageNode] = []
@pytest.fixture
@allure.title("Create container and pick the node with data")
def create_container_and_pick_node(prepare_wallet_and_deposit, client_shell, hosting: Hosting):
wallet = prepare_wallet_and_deposit
file_path = generate_file()
placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
cid = create_container(wallet, shell=client_shell, rule=placement_rule, basic_acl=PUBLIC_ACL)
oid = put_object(wallet, file_path, cid, shell=client_shell)
nodes = get_nodes_with_object(wallet, cid, oid, shell=client_shell)
assert len(nodes) == 1
node = nodes[0]
node_name = choice(
[node_name for node_name, params in NEOFS_NETMAP_DICT.items() if params.get("rpc") == node]
)
yield cid, node_name
shards = node_shard_list(hosting, node_name)
assert shards
for shard in shards:
node_shard_set_mode(hosting, node_name, shard, "read-write")
node_shard_list(hosting, node_name)
@pytest.fixture
def after_run_start_all_nodes(hosting: Hosting):
yield
try:
start_nodes(hosting, list(NEOFS_NETMAP_DICT.keys()))
except Exception as err:
logger.error(f"Node start fails with error:\n{err}")
@pytest.fixture
def return_nodes_after_test_run(client_shell: Shell, hosting: Hosting):
yield
return_nodes(client_shell, hosting)
@allure.step("Tick epoch with retries")
def tick_epoch_with_retries(shell: Shell, attempts: int = 3, timeout: int = 3):
for __attempt in range(attempts):
try:
tick_epoch(shell=shell)
except RuntimeError:
sleep(timeout)
continue
return
raise
@allure.step("Return node to cluster")
def return_nodes(shell: Shell, hosting: Hosting, alive_node: Optional[str] = None) -> None:
for node in list(check_nodes):
with allure.step(f"Start node {node}"):
host = hosting.get_host_by_service(node)
host.start_service(node)
with allure.step(f"Waiting status ready for node {node}"):
wait_for_node_to_be_ready(hosting, node)
# We need to wait for node to establish notifications from morph-chain
# Otherwise it will hang up when we will try to set status
sleep(parse_time(MORPH_BLOCK_TIME))
with allure.step(f"Move node {node} to online state"):
node_set_status(hosting, node, status="online", retries=2)
check_nodes.remove(node)
sleep(parse_time(MORPH_BLOCK_TIME))
tick_epoch_with_retries(attempts=3)
check_node_in_map(node, shell=shell, alive_node=alive_node)
@allure.title("Add one node to cluster") @allure.title("Add one node to cluster")
@pytest.mark.add_nodes @pytest.mark.add_nodes
@pytest.mark.node_mgmt @pytest.mark.node_mgmt
def test_add_nodes( class TestNodeManagement(ClusterTestBase):
prepare_tmp_dir, @pytest.fixture
client_shell, @allure.title("Create container and pick the node with data")
prepare_wallet_and_deposit, def create_container_and_pick_node(self, default_wallet: str) -> Tuple[str, StorageNode]:
return_nodes_after_test_run, default_wallet
hosting: Hosting, file_path = generate_file()
): placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
wallet = prepare_wallet_and_deposit endpoint = self.cluster.default_rpc_endpoint
placement_rule_3 = "REP 3 IN X CBF 1 SELECT 3 FROM * AS X"
placement_rule_4 = "REP 4 IN X CBF 1 SELECT 4 FROM * AS X"
source_file_path = generate_file()
additional_node = choice(
[
node
for node, node_config in NEOFS_NETMAP_DICT.items()
if node_config.get("rpc") != STORAGE_RPC_ENDPOINT_1
]
)
alive_node = choice([node for node in NEOFS_NETMAP_DICT if node != additional_node])
check_node_in_map(additional_node, shell=client_shell, alive_node=alive_node)
# Add node to recovery list before messing with it
check_nodes.append(additional_node)
exclude_node_from_network_map(hosting, additional_node, alive_node, shell=client_shell)
delete_node_data(hosting, additional_node)
cid = create_container(wallet, rule=placement_rule_3, basic_acl=PUBLIC_ACL, shell=client_shell)
oid = put_object(
wallet,
source_file_path,
cid,
endpoint=NEOFS_NETMAP_DICT[alive_node].get("rpc"),
shell=client_shell,
)
wait_object_replication_on_nodes(wallet, cid, oid, 3, shell=client_shell)
return_nodes(shell=client_shell, hosting=hosting, alive_node=alive_node)
with allure.step("Check data could be replicated to new node"):
random_node = choice(
[node for node in NEOFS_NETMAP_DICT if node not in (additional_node, alive_node)]
)
exclude_node_from_network_map(hosting, random_node, alive_node, shell=client_shell)
wait_object_replication_on_nodes(
wallet, cid, oid, 3, excluded_nodes=[random_node], shell=client_shell
)
include_node_to_network_map(hosting, random_node, alive_node, shell=client_shell)
wait_object_replication_on_nodes(wallet, cid, oid, 3, shell=client_shell)
with allure.step("Check container could be created with new node"):
cid = create_container( cid = create_container(
wallet, rule=placement_rule_4, basic_acl=PUBLIC_ACL, shell=client_shell default_wallet,
shell=self.shell,
endpoint=endpoint,
rule=placement_rule,
basic_acl=PUBLIC_ACL,
)
oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, self.cluster)
nodes = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(nodes) == 1
node = nodes[0]
yield cid, node
shards = node_shard_list(node)
assert shards
for shard in shards:
node_shard_set_mode(node, shard, "read-write")
node_shard_list(node)
@allure.step("Tick epoch with retries")
def tick_epoch_with_retries(self, attempts: int = 3, timeout: int = 3):
for attempt in range(attempts):
try:
self.tick_epoch()
except RuntimeError:
sleep(timeout)
if attempt >= attempts - 1:
raise
continue
return
@pytest.fixture
def after_run_start_all_nodes(self):
yield
self.return_nodes()
@pytest.fixture
def return_nodes_after_test_run(self):
yield
self.return_nodes()
@allure.step("Return node to cluster")
def return_nodes(self, alive_node: Optional[StorageNode] = None) -> None:
for node in list(check_nodes):
with allure.step(f"Start node {node}"):
node.start_service()
with allure.step(f"Waiting status ready for node {node}"):
self.wait_for_node_to_be_ready(node)
# We need to wait for node to establish notifications from morph-chain
# Otherwise it will hang up when we will try to set status
sleep(parse_time(MORPH_BLOCK_TIME))
with allure.step(f"Move node {node} to online state"):
storage_node_set_status(node, status="online", retries=2)
check_nodes.remove(node)
sleep(parse_time(MORPH_BLOCK_TIME))
self.tick_epoch_with_retries(3)
check_node_in_map(node, shell=self.shell, alive_node=alive_node)
@allure.title("Add one node to cluster")
@pytest.mark.add_nodes
def test_add_nodes(
self,
default_wallet,
return_nodes_after_test_run,
):
wallet = default_wallet
placement_rule_3 = "REP 3 IN X CBF 1 SELECT 3 FROM * AS X"
placement_rule_4 = "REP 4 IN X CBF 1 SELECT 4 FROM * AS X"
source_file_path = generate_file()
storage_nodes = self.cluster.storage_nodes
random_node = random.choice(storage_nodes[1:])
alive_node = random.choice(
[storage_node for storage_node in storage_nodes if storage_node.id != random_node.id]
)
check_node_in_map(random_node, shell=self.shell, alive_node=alive_node)
# Add node to recovery list before messing with it
check_nodes.append(random_node)
exclude_node_from_network_map(
random_node, alive_node, shell=self.shell, cluster=self.cluster
)
delete_node_data(random_node)
cid = create_container(
wallet,
rule=placement_rule_3,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=alive_node.get_rpc_endpoint(),
) )
oid = put_object( oid = put_object(
wallet, wallet,
source_file_path, source_file_path,
cid, cid,
endpoint=NEOFS_NETMAP_DICT[alive_node].get("rpc"), shell=self.shell,
shell=client_shell, endpoint=alive_node.get_rpc_endpoint(),
) )
wait_object_replication_on_nodes(wallet, cid, oid, 4, shell=client_shell) wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)
self.return_nodes(alive_node)
@allure.title("Control Operations with storage nodes") with allure.step("Check data could be replicated to new node"):
@pytest.mark.node_mgmt random_node = random.choice(list(set(storage_nodes) - {random_node, alive_node}))
def test_nodes_management(prepare_tmp_dir, client_shell, hosting: Hosting): # Add node to recovery list before messing with it
""" check_nodes.append(random_node)
This test checks base control operations with storage nodes (healthcheck, netmap-snapshot, set-status). exclude_node_from_network_map(
""" random_node, alive_node, shell=self.shell, cluster=self.cluster
random_node = choice(list(NEOFS_NETMAP_DICT)) )
alive_node = choice([node for node in NEOFS_NETMAP_DICT if node != random_node])
# Calculate public key that identifies node in netmap wait_object_replication(
random_node_wallet_path = NEOFS_NETMAP_DICT[random_node]["wallet_path"] cid,
random_node_netmap_key = get_wallet_public_key(random_node_wallet_path, STORAGE_WALLET_PASS) oid,
3,
shell=self.shell,
nodes=list(set(storage_nodes) - {random_node}),
)
include_node_to_network_map(
random_node, alive_node, shell=self.shell, cluster=self.cluster
)
wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)
with allure.step("Check node {random_node} is in netmap"): with allure.step("Check container could be created with new node"):
snapshot = get_netmap_snapshot(node_name=alive_node, shell=client_shell) cid = create_container(
assert random_node_netmap_key in snapshot, f"Expected node {random_node} in netmap" wallet,
rule=placement_rule_4,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=alive_node.get_rpc_endpoint(),
)
oid = put_object(
wallet,
source_file_path,
cid,
shell=self.shell,
endpoint=alive_node.get_rpc_endpoint(),
)
wait_object_replication(cid, oid, 4, shell=self.shell, nodes=storage_nodes)
with allure.step("Run health check for all storage nodes"): @allure.title("Control Operations with storage nodes")
for node_name in NEOFS_NETMAP_DICT.keys(): @pytest.mark.node_mgmt
health_check = node_healthcheck(hosting, node_name) def test_nodes_management(self, temp_directory):
"""
This test checks base control operations with storage nodes (healthcheck, netmap-snapshot, set-status).
"""
storage_nodes = self.cluster.storage_nodes
random_node = random.choice(storage_nodes)
alive_node = random.choice(list(set(storage_nodes) - {random_node}))
# Calculate public key that identifies node in netmap
random_node_netmap_key = random_node.get_wallet_public_key()
with allure.step(f"Check node ({random_node}) is in netmap"):
snapshot = get_netmap_snapshot(node=alive_node, shell=self.shell)
assert (
random_node_netmap_key in snapshot
), f"Expected node {random_node} to be in netmap"
with allure.step("Run health check for all storage nodes"):
for node in self.cluster.storage_nodes:
health_check = storage_node_healthcheck(node)
assert (
health_check.health_status == "READY"
and health_check.network_status == "ONLINE"
)
with allure.step(f"Move node ({random_node}) to offline state"):
storage_node_set_status(random_node, status="offline")
sleep(parse_time(MORPH_BLOCK_TIME))
tick_epoch(self.shell, self.cluster)
with allure.step(f"Check node {random_node} went to offline"):
health_check = storage_node_healthcheck(random_node)
assert (
health_check.health_status == "READY" and health_check.network_status == "OFFLINE"
)
snapshot = get_netmap_snapshot(node=alive_node, shell=self.shell)
assert (
random_node_netmap_key not in snapshot
), f"Expected node {random_node} not in netmap"
with allure.step(f"Check node {random_node} went to online"):
storage_node_set_status(random_node, status="online")
sleep(parse_time(MORPH_BLOCK_TIME))
tick_epoch(self.shell, self.cluster)
with allure.step(f"Check node {random_node} went to online"):
health_check = storage_node_healthcheck(random_node)
assert health_check.health_status == "READY" and health_check.network_status == "ONLINE" assert health_check.health_status == "READY" and health_check.network_status == "ONLINE"
snapshot = get_netmap_snapshot(node=alive_node, shell=self.shell)
assert random_node_netmap_key in snapshot, f"Expected node {random_node} in netmap"
with allure.step(f"Move node {random_node} to offline state"): @pytest.mark.parametrize(
node_set_status(hosting, random_node, status="offline") "placement_rule,expected_copies",
[
sleep(parse_time(MORPH_BLOCK_TIME)) ("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", 2),
tick_epoch(shell=client_shell) ("REP 2 IN X CBF 1 SELECT 2 FROM * AS X", 2),
("REP 3 IN X CBF 1 SELECT 3 FROM * AS X", 3),
with allure.step(f"Check node {random_node} went to offline"): ("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", 1),
health_check = node_healthcheck(hosting, random_node) ("REP 1 IN X CBF 2 SELECT 1 FROM * AS X", 1),
assert health_check.health_status == "READY" and health_check.network_status == "OFFLINE" ("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4),
snapshot = get_netmap_snapshot(node_name=alive_node, shell=client_shell) ("REP 2 IN X CBF 1 SELECT 4 FROM * AS X", 2),
assert random_node_netmap_key not in snapshot, f"Expected node {random_node} not in netmap" ],
with allure.step(f"Check node {random_node} went to online"):
node_set_status(hosting, random_node, status="online")
sleep(parse_time(MORPH_BLOCK_TIME))
tick_epoch(shell=client_shell)
with allure.step(f"Check node {random_node} went to online"):
health_check = node_healthcheck(hosting, random_node)
assert health_check.health_status == "READY" and health_check.network_status == "ONLINE"
snapshot = get_netmap_snapshot(node_name=alive_node, shell=client_shell)
assert random_node_netmap_key in snapshot, f"Expected node {random_node} in netmap"
@pytest.mark.parametrize(
"placement_rule,expected_copies",
[
("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", 2),
("REP 2 IN X CBF 1 SELECT 2 FROM * AS X", 2),
("REP 3 IN X CBF 1 SELECT 3 FROM * AS X", 3),
("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", 1),
("REP 1 IN X CBF 2 SELECT 1 FROM * AS X", 1),
("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4),
("REP 2 IN X CBF 1 SELECT 4 FROM * AS X", 2),
],
)
@pytest.mark.node_mgmt
@allure.title("Test object copies based on placement policy")
def test_placement_policy(
prepare_wallet_and_deposit, placement_rule, expected_copies, client_shell: Shell
):
"""
This test checks object's copies based on container's placement policy.
"""
wallet = prepare_wallet_and_deposit
file_path = generate_file()
validate_object_copies(wallet, placement_rule, file_path, expected_copies, shell=client_shell)
@pytest.mark.parametrize(
"placement_rule,expected_copies,nodes",
[
("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4, ["s01", "s02", "s03", "s04"]),
(
"REP 1 IN LOC_PLACE CBF 1 SELECT 1 FROM LOC_SW AS LOC_PLACE FILTER Country EQ Sweden AS LOC_SW",
1,
["s03"],
),
("REP 1 CBF 1 SELECT 1 FROM LOC_SPB FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB", 1, ["s02"]),
(
"REP 1 IN LOC_SPB_PLACE REP 1 IN LOC_MSK_PLACE CBF 1 SELECT 1 FROM LOC_SPB AS LOC_SPB_PLACE "
"SELECT 1 FROM LOC_MSK AS LOC_MSK_PLACE "
"FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB FILTER 'UN-LOCODE' EQ 'RU MOW' AS LOC_MSK",
2,
["s01", "s02"],
),
(
"REP 4 CBF 1 SELECT 4 FROM LOC_EU FILTER Continent EQ Europe AS LOC_EU",
4,
["s01", "s02", "s03", "s04"],
),
(
"REP 1 CBF 1 SELECT 1 FROM LOC_SPB "
"FILTER 'UN-LOCODE' NE 'RU MOW' AND 'UN-LOCODE' NE 'SE STO' AND 'UN-LOCODE' NE 'FI HEL' AS LOC_SPB",
1,
["s02"],
),
(
"REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER SubDivCode NE 'AB' AND SubDivCode NE '18' AS LOC_RU",
2,
["s01", "s02"],
),
(
"REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER Country EQ 'Russia' AS LOC_RU",
2,
["s01", "s02"],
),
(
"REP 2 CBF 1 SELECT 2 FROM LOC_EU FILTER Country NE 'Russia' AS LOC_EU",
2,
["s03", "s04"],
),
],
)
@pytest.mark.node_mgmt
@allure.title("Test object copies and storage nodes based on placement policy")
def test_placement_policy_with_nodes(
prepare_wallet_and_deposit, placement_rule, expected_copies, nodes, client_shell: Shell
):
"""
Based on container's placement policy check that storage nodes are piked correctly and object has
correct copies amount.
"""
wallet = prepare_wallet_and_deposit
file_path = generate_file()
cid, oid, found_nodes = validate_object_copies(
wallet, placement_rule, file_path, expected_copies, shell=client_shell
) )
expected_nodes = [NEOFS_NETMAP_DICT[node_name].get("rpc") for node_name in nodes] @pytest.mark.node_mgmt
assert set(found_nodes) == set( @allure.title("Test object copies based on placement policy")
expected_nodes def test_placement_policy(self, default_wallet, placement_rule, expected_copies):
), f"Expected nodes {expected_nodes}, got {found_nodes}" """
This test checks object's copies based on container's placement policy.
"""
wallet = default_wallet
file_path = generate_file()
self.validate_object_copies(wallet, placement_rule, file_path, expected_copies)
@pytest.mark.parametrize(
@pytest.mark.parametrize( "placement_rule,expected_copies,expected_nodes_id",
"placement_rule,expected_copies", [
[ ("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4, {1, 2, 3, 4}),
("REP 2 IN X CBF 2 SELECT 6 FROM * AS X", 2), (
], "REP 1 IN LOC_PLACE CBF 1 SELECT 1 FROM LOC_SW AS LOC_PLACE FILTER Country EQ Sweden AS LOC_SW",
) 1,
@pytest.mark.node_mgmt {3},
@allure.title("Negative cases for placement policy") ),
def test_placement_policy_negative( (
prepare_wallet_and_deposit, placement_rule, expected_copies, client_shell: Shell "REP 1 CBF 1 SELECT 1 FROM LOC_SPB FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB",
): 1,
""" {2},
Negative test for placement policy. ),
""" (
wallet = prepare_wallet_and_deposit "REP 1 IN LOC_SPB_PLACE REP 1 IN LOC_MSK_PLACE CBF 1 SELECT 1 FROM LOC_SPB AS LOC_SPB_PLACE "
file_path = generate_file() "SELECT 1 FROM LOC_MSK AS LOC_MSK_PLACE "
with pytest.raises(RuntimeError, match=".*not enough nodes to SELECT from.*"): "FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB FILTER 'UN-LOCODE' EQ 'RU MOW' AS LOC_MSK",
validate_object_copies( 2,
wallet, placement_rule, file_path, expected_copies, shell=client_shell {1, 2},
),
(
"REP 4 CBF 1 SELECT 4 FROM LOC_EU FILTER Continent EQ Europe AS LOC_EU",
4,
{1, 2, 3, 4},
),
(
"REP 1 CBF 1 SELECT 1 FROM LOC_SPB "
"FILTER 'UN-LOCODE' NE 'RU MOW' AND 'UN-LOCODE' NE 'SE STO' AND 'UN-LOCODE' NE 'FI HEL' AS LOC_SPB",
1,
{2},
),
(
"REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER SubDivCode NE 'AB' AND SubDivCode NE '18' AS LOC_RU",
2,
{1, 2},
),
(
"REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER Country EQ 'Russia' AS LOC_RU",
2,
{1, 2},
),
(
"REP 2 CBF 1 SELECT 2 FROM LOC_EU FILTER Country NE 'Russia' AS LOC_EU",
2,
{3, 4},
),
],
)
@pytest.mark.node_mgmt
@allure.title("Test object copies and storage nodes based on placement policy")
def test_placement_policy_with_nodes(
self, default_wallet, placement_rule, expected_copies, expected_nodes_id: set[int]
):
"""
Based on container's placement policy check that storage nodes are piked correctly and object has
correct copies amount.
"""
wallet = default_wallet
file_path = generate_file()
cid, oid, found_nodes = self.validate_object_copies(
wallet, placement_rule, file_path, expected_copies
) )
assert (
found_nodes == expected_nodes_id
), f"Expected nodes {expected_nodes_id}, got {found_nodes}"
@pytest.mark.skip(reason="We cover this scenario in failover tests") @pytest.mark.parametrize(
@pytest.mark.sanity "placement_rule,expected_copies",
@pytest.mark.node_mgmt [
@allure.title("NeoFS object replication on node failover") ("REP 2 IN X CBF 2 SELECT 6 FROM * AS X", 2),
def test_replication( ],
prepare_wallet_and_deposit, client_shell: Shell, after_run_start_all_nodes, hosting: Hosting
):
"""
Test checks object replication on storage not failover and come back.
"""
wallet = prepare_wallet_and_deposit
file_path = generate_file()
expected_nodes_count = 2
cid = create_container(wallet, basic_acl=PUBLIC_ACL)
oid = put_object(wallet, file_path, cid)
nodes = get_nodes_with_object(wallet, cid, oid)
assert (
len(nodes) == expected_nodes_count
), f"Expected {expected_nodes_count} copies, got {len(nodes)}"
node_names = [name for name, config in NEOFS_NETMAP_DICT.items() if config.get("rpc") in nodes]
stopped_nodes = stop_nodes(hosting, 1, node_names)
wait_for_expected_object_copies(client_shell, wallet, cid, oid)
start_nodes(hosting, stopped_nodes)
tick_epoch(shell=client_shell)
for node_name in node_names:
wait_for_node_go_online(hosting, node_name)
wait_for_expected_object_copies(client_shell, wallet, cid, oid)
@pytest.mark.node_mgmt
@allure.title("NeoFS object could be dropped using control command")
def test_drop_object(prepare_wallet_and_deposit, client_shell: Shell, hosting: Hosting):
"""
Test checks object could be dropped using `neofs-cli control drop-objects` command.
"""
wallet = prepare_wallet_and_deposit
file_path_simple, file_path_complex = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
locode = get_locode()
rule = f"REP 1 CBF 1 SELECT 1 FROM * FILTER 'UN-LOCODE' EQ '{locode}' AS LOC"
cid = create_container(wallet, rule=rule, shell=client_shell)
oid_simple = put_object(wallet, file_path_simple, cid, shell=client_shell)
oid_complex = put_object(wallet, file_path_complex, cid, shell=client_shell)
for oid in (oid_simple, oid_complex):
get_object(wallet, cid, oid, client_shell)
head_object(wallet, cid, oid, client_shell)
nodes = get_nodes_with_object(wallet, cid, oid_simple, shell=client_shell)
node_name = choice(
[name for name, config in NEOFS_NETMAP_DICT.items() if config.get("rpc") in nodes]
) )
@pytest.mark.node_mgmt
@allure.title("Negative cases for placement policy")
def test_placement_policy_negative(self, default_wallet, placement_rule, expected_copies):
"""
Negative test for placement policy.
"""
wallet = default_wallet
file_path = generate_file()
with pytest.raises(RuntimeError, match=".*not enough nodes to SELECT from.*"):
self.validate_object_copies(wallet, placement_rule, file_path, expected_copies)
for oid in (oid_simple, oid_complex): @pytest.mark.node_mgmt
with allure.step(f"Drop object {oid}"): @allure.title("NeoFS object could be dropped using control command")
get_object(wallet, cid, oid, shell=client_shell) def test_drop_object(self, default_wallet):
head_object(wallet, cid, oid, shell=client_shell) """
drop_object(hosting, node_name, cid, oid) Test checks object could be dropped using `neofs-cli control drop-objects` command.
wait_for_obj_dropped(wallet, cid, oid, client_shell, get_object) """
wait_for_obj_dropped(wallet, cid, oid, client_shell, head_object) wallet = default_wallet
endpoint = self.cluster.default_rpc_endpoint
file_path_simple, file_path_complex = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
locode = get_locode_from_random_node(self.cluster)
rule = f"REP 1 CBF 1 SELECT 1 FROM * FILTER 'UN-LOCODE' EQ '{locode}' AS LOC"
cid = create_container(wallet, rule=rule, shell=self.shell, endpoint=endpoint)
oid_simple = put_object_to_random_node(
wallet, file_path_simple, cid, shell=self.shell, cluster=self.cluster
)
oid_complex = put_object_to_random_node(
wallet, file_path_complex, cid, shell=self.shell, cluster=self.cluster
)
@pytest.mark.node_mgmt for oid in (oid_simple, oid_complex):
@pytest.mark.skip(reason="Need to clarify scenario") get_object_from_random_node(wallet, cid, oid, shell=self.shell, cluster=self.cluster)
@allure.title("Control Operations with storage nodes") head_object(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
def test_shards(
prepare_wallet_and_deposit,
create_container_and_pick_node,
client_shell: Shell,
hosting: Hosting,
):
wallet = prepare_wallet_and_deposit
file_path = generate_file()
cid, node_name = create_container_and_pick_node nodes_with_object = get_nodes_with_object(
original_oid = put_object(wallet, file_path, cid, shell=client_shell) cid, oid_simple, shell=self.shell, nodes=self.cluster.storage_nodes
)
random_node = random.choice(nodes_with_object)
# for mode in ('read-only', 'degraded'): for oid in (oid_simple, oid_complex):
for mode in ("degraded",): with allure.step(f"Drop object {oid}"):
shards = node_shard_list(hosting, node_name) get_object_from_random_node(
assert shards wallet, cid, oid, shell=self.shell, cluster=self.cluster
)
head_object(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
drop_object(random_node, cid, oid)
self.wait_for_obj_dropped(wallet, cid, oid, endpoint, get_object)
self.wait_for_obj_dropped(wallet, cid, oid, endpoint, head_object)
for shard in shards: @pytest.mark.node_mgmt
node_shard_set_mode(hosting, node_name, shard, mode) @pytest.mark.skip(reason="Need to clarify scenario")
@allure.title("Control Operations with storage nodes")
def test_shards(
self,
default_wallet,
create_container_and_pick_node,
):
wallet = default_wallet
file_path = generate_file()
shards = node_shard_list(hosting, node_name) cid, node = create_container_and_pick_node
assert shards original_oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
with pytest.raises(RuntimeError): # for mode in ('read-only', 'degraded'):
put_object(wallet, file_path, cid, shell=client_shell) for mode in ("degraded",):
shards = node_shard_list(node)
assert shards
with pytest.raises(RuntimeError): for shard in shards:
delete_object(wallet, cid, original_oid, shell=client_shell) node_shard_set_mode(node, shard, mode)
get_object(wallet, cid, original_oid, shell=client_shell) shards = node_shard_list(node)
assert shards
for shard in shards: with pytest.raises(RuntimeError):
node_shard_set_mode(hosting, node_name, shard, "read-write") put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
shards = node_shard_list(hosting, node_name) with pytest.raises(RuntimeError):
assert shards delete_object(
wallet, cid, original_oid, self.shell, self.cluster.default_rpc_endpoint
)
oid = put_object(wallet, file_path, cid, shell=client_shell) get_object_from_random_node(wallet, cid, original_oid, self.shell, self.cluster)
delete_object(wallet, cid, oid, shell=client_shell)
for shard in shards:
node_shard_set_mode(node, shard, "read-write")
@allure.step("Validate object has {expected_copies} copies") shards = node_shard_list(node)
def validate_object_copies( assert shards
wallet: str, placement_rule: str, file_path: str, expected_copies: int, shell: Shell
):
cid = create_container(wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=shell)
got_policy = placement_policy_from_container(
get_container(wallet, cid, json_mode=False, shell=shell)
)
assert got_policy == placement_rule.replace(
"'", ""
), f"Expected \n{placement_rule} and got policy \n{got_policy} are the same"
oid = put_object(wallet, file_path, cid, shell=shell)
nodes = get_nodes_with_object(wallet, cid, oid, shell=shell)
assert len(nodes) == expected_copies, f"Expected {expected_copies} copies, got {len(nodes)}"
return cid, oid, nodes
oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
delete_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
@allure.step("Wait for node {node_name} goes online") @allure.step("Validate object has {expected_copies} copies")
def wait_for_node_go_online(hosting: Hosting, node_name: str) -> None: def validate_object_copies(
timeout, attempts = 5, 20 self, wallet: str, placement_rule: str, file_path: str, expected_copies: int
for _ in range(attempts): ) -> set[int]:
try: endpoint = self.cluster.default_rpc_endpoint
health_check = node_healthcheck(hosting, node_name) cid = create_container(
assert health_check.health_status == "READY" and health_check.network_status == "ONLINE" wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
return )
except Exception as err: got_policy = placement_policy_from_container(
logger.warning(f"Node {node_name} is not online:\n{err}") get_container(wallet, cid, json_mode=False, shell=self.shell, endpoint=endpoint)
sleep(timeout) )
raise AssertionError( assert got_policy == placement_rule.replace(
f"Node {node_name} hasn't gone to the READY and ONLINE state after {timeout * attempts} second" "'", ""
) ), f"Expected \n{placement_rule} and got policy \n{got_policy} are the same"
oid = put_object_to_random_node(
wallet, file_path, cid, shell=self.shell, cluster=self.cluster
)
nodes = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
nodes_id = {node.id for node in nodes}
assert len(nodes) == expected_copies, f"Expected {expected_copies} copies, got {len(nodes)}"
return cid, oid, nodes_id
@allure.step("Wait for node {node} goes online")
@allure.step("Wait for node {node_name} is ready") def wait_for_node_go_online(self, node: StorageNode) -> None:
def wait_for_node_to_be_ready(hosting: Hosting, node_name: str) -> None: timeout, attempts = 5, 20
timeout, attempts = 30, 6 for _ in range(attempts):
for _ in range(attempts): try:
try: health_check = storage_node_healthcheck(node)
health_check = node_healthcheck(hosting, node_name) assert (
if health_check.health_status == "READY": health_check.health_status == "READY"
and health_check.network_status == "ONLINE"
)
return return
except Exception as err: except Exception as err:
logger.warning(f"Node {node_name} is not ready:\n{err}") logger.warning(f"Node {node} is not online:\n{err}")
sleep(timeout) sleep(timeout)
raise AssertionError( raise AssertionError(
f"Node {node_name} hasn't gone to the READY state after {timeout * attempts} seconds" f"Node {node} hasn't gone to the READY and ONLINE state after {timeout * attempts} second"
) )
@allure.step("Wait for node {node} is ready")
def wait_for_node_to_be_ready(self, node: StorageNode) -> None:
timeout, attempts = 30, 6
for _ in range(attempts):
try:
health_check = storage_node_healthcheck(node)
if health_check.health_status == "READY":
return
except Exception as err:
logger.warning(f"Node {node} is not ready:\n{err}")
sleep(timeout)
raise AssertionError(
f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds"
)
@allure.step("Wait for {expected_copies} object copies in the wallet") @allure.step("Wait for {expected_copies} object copies in the wallet")
def wait_for_expected_object_copies( def wait_for_expected_object_copies(
shell: Shell, wallet: str, cid: str, oid: str, expected_copies: int = 2 self, wallet: str, cid: str, oid: str, expected_copies: int = 2
) -> None: ) -> None:
for i in range(2): nodes = self.cluster.storage_nodes
copies = get_simple_object_copies(wallet, cid, oid) for _ in range(2):
if copies == expected_copies: copies = get_simple_object_copies(wallet, cid, oid, self.shell, nodes)
break if copies == expected_copies:
tick_epoch(shell=shell) break
sleep(parse_time(NEOFS_CONTRACT_CACHE_TIMEOUT)) tick_epoch(self.shell, self.cluster)
else: sleep(parse_time(NEOFS_CONTRACT_CACHE_TIMEOUT))
raise AssertionError(f"There are no {expected_copies} copies during time") else:
raise AssertionError(f"There are no {expected_copies} copies during time")
@allure.step("Wait for object to be dropped")
def wait_for_obj_dropped(self, wallet: str, cid: str, oid: str, endpoint: str, checker) -> None:
for _ in range(3):
try:
checker(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
wait_for_gc_pass_on_storage_nodes()
except Exception as err:
if error_matches_status(err, OBJECT_NOT_FOUND):
return
raise AssertionError(f'Expected "{OBJECT_NOT_FOUND}" error, got\n{err}')
@allure.step("Wait for object to be dropped") raise AssertionError(f"Object {oid} was not dropped from node")
def wait_for_obj_dropped(wallet: str, cid: str, oid: str, shell: Shell, checker) -> None:
for _ in range(3):
try:
checker(wallet, cid, oid, shell=shell)
wait_for_gc_pass_on_storage_nodes()
except Exception as err:
if error_matches_status(err, OBJECT_NOT_FOUND):
return
raise AssertionError(f'Expected "{OBJECT_NOT_FOUND}" error, got\n{err}')
raise AssertionError(f"Object {oid} was not dropped from node")

View file

@ -4,6 +4,7 @@ import sys
import allure import allure
import pytest import pytest
from cluster import Cluster
from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE
from container import create_container from container import create_container
from file_helper import generate_file, get_file_content, get_file_hash from file_helper import generate_file, get_file_content, get_file_hash
@ -12,16 +13,17 @@ from neofs_testlib.shell import Shell
from pytest import FixtureRequest from pytest import FixtureRequest
from python_keywords.neofs_verbs import ( from python_keywords.neofs_verbs import (
get_netmap_netinfo, get_netmap_netinfo,
get_object, get_object_from_random_node,
get_range, get_range,
get_range_hash, get_range_hash,
head_object, head_object,
put_object, put_object_to_random_node,
search_object, search_object,
) )
from python_keywords.storage_policy import get_complex_object_copies, get_simple_object_copies from python_keywords.storage_policy import get_complex_object_copies, get_simple_object_copies
from helpers.storage_object_info import StorageObjectInfo from helpers.storage_object_info import StorageObjectInfo
from steps.cluster_test_base import ClusterTestBase
from steps.storage_object import delete_objects from steps.storage_object import delete_objects
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -87,11 +89,11 @@ def generate_ranges(file_size: int, max_object_size: int) -> list[(int, int)]:
scope="module", scope="module",
) )
def storage_objects( def storage_objects(
prepare_wallet_and_deposit: str, client_shell: Shell, request: FixtureRequest default_wallet: str, client_shell: Shell, cluster: Cluster, request: FixtureRequest
) -> list[StorageObjectInfo]: ) -> list[StorageObjectInfo]:
wallet = prepare_wallet_and_deposit wallet = default_wallet
# Separate containers for complex/simple objects to avoid side-effects # Separate containers for complex/simple objects to avoid side-effects
cid = create_container(wallet, shell=client_shell) cid = create_container(wallet, shell=client_shell, endpoint=cluster.default_rpc_endpoint)
file_path = generate_file(request.param) file_path = generate_file(request.param)
file_hash = get_file_hash(file_path) file_hash = get_file_hash(file_path)
@ -101,11 +103,12 @@ def storage_objects(
with allure.step("Put objects"): with allure.step("Put objects"):
# We need to upload objects multiple times with different attributes # We need to upload objects multiple times with different attributes
for attributes in OBJECT_ATTRIBUTES: for attributes in OBJECT_ATTRIBUTES:
storage_object_id = put_object( storage_object_id = put_object_to_random_node(
wallet=wallet, wallet=wallet,
path=file_path, path=file_path,
cid=cid, cid=cid,
shell=client_shell, shell=client_shell,
cluster=cluster,
attributes=attributes, attributes=attributes,
) )
@ -121,357 +124,390 @@ def storage_objects(
yield storage_objects yield storage_objects
# Teardown after all tests done with current param # Teardown after all tests done with current param
delete_objects(storage_objects, client_shell) delete_objects(storage_objects, client_shell, cluster)
@allure.title("Validate object storage policy by native API")
@pytest.mark.sanity @pytest.mark.sanity
@pytest.mark.grpc_api @pytest.mark.grpc_api
def test_object_storage_policies( class TestObjectApi(ClusterTestBase):
client_shell: Shell, request: FixtureRequest, storage_objects: list[StorageObjectInfo] @allure.title("Validate object storage policy by native API")
): def test_object_storage_policies(
""" self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
Validate object storage policy ):
""" """
allure.dynamic.title( Validate object storage policy
f"Validate object storage policy by native API for {request.node.callspec.id}" """
) allure.dynamic.title(
f"Validate object storage policy by native API for {request.node.callspec.id}"
)
with allure.step("Validate storage policy for objects"): with allure.step("Validate storage policy for objects"):
for storage_object in storage_objects: for storage_object in storage_objects:
if storage_object.size == SIMPLE_OBJ_SIZE: if storage_object.size == SIMPLE_OBJ_SIZE:
copies = get_simple_object_copies( copies = get_simple_object_copies(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
else:
copies = get_complex_object_copies(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
assert copies == 2, "Expected 2 copies"
@allure.title("Validate get object native API")
def test_get_object_api(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
):
"""
Validate get object native API
"""
allure.dynamic.title(f"Validate get object native API for {request.node.callspec.id}")
with allure.step("Get objects and compare hashes"):
for storage_object in storage_objects:
file_path = get_object_from_random_node(
storage_object.wallet_file_path, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
shell=client_shell, self.shell,
cluster=self.cluster,
) )
else: file_hash = get_file_hash(file_path)
copies = get_complex_object_copies( assert storage_object.file_hash == file_hash
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell=client_shell,
)
assert copies == 2, "Expected 2 copies"
@allure.title("Validate head object native API")
def test_head_object_api(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
):
"""
Validate head object native API
"""
allure.dynamic.title(f"Validate head object by native API for {request.node.callspec.id}")
@allure.title("Validate get object native API") storage_object_1 = storage_objects[0]
@pytest.mark.sanity storage_object_2 = storage_objects[1]
@pytest.mark.grpc_api
def test_get_object_api(
client_shell: Shell, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
):
"""
Validate get object native API
"""
allure.dynamic.title(f"Validate get object native API for {request.node.callspec.id}")
with allure.step("Get objects and compare hashes"): with allure.step("Head object and validate"):
for storage_object in storage_objects: head_object(
file_path = get_object( storage_object_1.wallet_file_path,
storage_object.wallet_file_path, storage_object_1.cid,
storage_object.cid, storage_object_1.oid,
storage_object.oid, shell=self.shell,
client_shell, endpoint=self.cluster.default_rpc_endpoint,
) )
file_hash = get_file_hash(file_path) head_info = head_object(
assert storage_object.file_hash == file_hash storage_object_2.wallet_file_path,
storage_object_2.cid,
storage_object_2.oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
self.check_header_is_presented(head_info, storage_object_2.attributes)
@allure.title("Validate object search by native API")
def test_search_object_api(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
):
"""
Validate object search by native API
"""
allure.dynamic.title(f"Validate object search by native API for {request.node.callspec.id}")
@allure.title("Validate head object native API") oids = [storage_object.oid for storage_object in storage_objects]
@pytest.mark.sanity wallet = storage_objects[0].wallet_file_path
@pytest.mark.grpc_api cid = storage_objects[0].cid
def test_head_object_api(
client_shell: Shell, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
):
"""
Validate head object native API
"""
allure.dynamic.title(f"Validate head object by native API for {request.node.callspec.id}")
storage_object_1 = storage_objects[0] test_table = [
storage_object_2 = storage_objects[1] (OBJECT_ATTRIBUTES[1], oids[1:2]),
(OBJECT_ATTRIBUTES[2], oids[2:3]),
(COMMON_ATTRIBUTE, oids[1:3]),
]
with allure.step("Head object and validate"): with allure.step("Search objects"):
head_object( # Search with no attributes
storage_object_1.wallet_file_path,
storage_object_1.cid,
storage_object_1.oid,
shell=client_shell,
)
head_info = head_object(
storage_object_2.wallet_file_path,
storage_object_2.cid,
storage_object_2.oid,
shell=client_shell,
)
check_header_is_presented(head_info, storage_object_2.attributes)
@allure.title("Validate object search by native API")
@pytest.mark.sanity
@pytest.mark.grpc_api
def test_search_object_api(
client_shell: Shell, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
):
"""
Validate object search by native API
"""
allure.dynamic.title(f"Validate object search by native API for {request.node.callspec.id}")
oids = [storage_object.oid for storage_object in storage_objects]
wallet = storage_objects[0].wallet_file_path
cid = storage_objects[0].cid
test_table = [
(OBJECT_ATTRIBUTES[1], oids[1:2]),
(OBJECT_ATTRIBUTES[2], oids[2:3]),
(COMMON_ATTRIBUTE, oids[1:3]),
]
with allure.step("Search objects"):
# Search with no attributes
result = search_object(
wallet, cid, shell=client_shell, expected_objects_list=oids, root=True
)
assert sorted(oids) == sorted(result)
# search by test table
for filter, expected_oids in test_table:
result = search_object( result = search_object(
wallet, wallet,
cid, cid,
shell=client_shell, shell=self.shell,
filters=filter, endpoint=self.cluster.default_rpc_endpoint,
expected_objects_list=expected_oids, expected_objects_list=oids,
root=True, root=True,
) )
assert sorted(expected_oids) == sorted(result) assert sorted(oids) == sorted(result)
# search by test table
for filter, expected_oids in test_table:
result = search_object(
wallet,
cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
filters=filter,
expected_objects_list=expected_oids,
root=True,
)
assert sorted(expected_oids) == sorted(result)
@allure.title("Validate object search with removed items") @allure.title("Validate object search with removed items")
@pytest.mark.sanity @pytest.mark.parametrize(
@pytest.mark.grpc_api "object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"]
@pytest.mark.parametrize(
"object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"]
)
def test_object_search_should_return_tombstone_items(
prepare_wallet_and_deposit: str, client_shell: Shell, request: FixtureRequest, object_size: int
):
"""
Validate object search with removed items
"""
allure.dynamic.title(
f"Validate object search with removed items for {request.node.callspec.id}"
) )
def test_object_search_should_return_tombstone_items(
wallet = prepare_wallet_and_deposit self, default_wallet: str, request: FixtureRequest, object_size: int
cid = create_container(wallet, shell=client_shell) ):
"""
with allure.step("Upload file"): Validate object search with removed items
file_path = generate_file(object_size) """
file_hash = get_file_hash(file_path) allure.dynamic.title(
f"Validate object search with removed items for {request.node.callspec.id}"
storage_object = StorageObjectInfo(
cid=cid,
oid=put_object(wallet, file_path, cid, shell=client_shell),
size=object_size,
wallet_file_path=wallet,
file_path=file_path,
file_hash=file_hash,
) )
with allure.step("Search object"): wallet = default_wallet
# Root Search object should return root object oid cid = create_container(wallet, self.shell, self.cluster.default_rpc_endpoint)
result = search_object(wallet, cid, shell=client_shell, root=True)
assert result == [storage_object.oid]
with allure.step("Delete file"): with allure.step("Upload file"):
delete_objects([storage_object], client_shell) file_path = generate_file(object_size)
file_hash = get_file_hash(file_path)
with allure.step("Search deleted object with --root"): storage_object = StorageObjectInfo(
# Root Search object should return nothing cid=cid,
result = search_object(wallet, cid, shell=client_shell, root=True) oid=put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster),
assert len(result) == 0 size=object_size,
wallet_file_path=wallet,
file_path=file_path,
file_hash=file_hash,
)
with allure.step("Search deleted object with --phy should return only tombstones"): with allure.step("Search object"):
# Physical Search object should return only tombstones # Root Search object should return root object oid
result = search_object(wallet, cid, shell=client_shell, phy=True) result = search_object(
assert ( wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True
storage_object.tombstone in result )
), f"Search result should contain tombstone of removed object" assert result == [storage_object.oid]
assert (
storage_object.oid not in result with allure.step("Delete file"):
), f"Search result should not contain ObjectId of removed object" delete_objects([storage_object], self.shell, self.cluster)
for tombstone_oid in result:
header = head_object(wallet, cid, tombstone_oid, shell=client_shell)["header"] with allure.step("Search deleted object with --root"):
object_type = header["objectType"] # Root Search object should return nothing
result = search_object(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True
)
assert len(result) == 0
with allure.step("Search deleted object with --phy should return only tombstones"):
# Physical Search object should return only tombstones
result = search_object(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, phy=True
)
assert ( assert (
object_type == "TOMBSTONE" storage_object.tombstone in result
), f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}" ), "Search result should contain tombstone of removed object"
assert (
storage_object.oid not in result
@allure.title("Validate native object API get_range_hash") ), "Search result should not contain ObjectId of removed object"
@pytest.mark.sanity for tombstone_oid in result:
@pytest.mark.grpc_api header = head_object(
def test_object_get_range_hash( wallet,
client_shell: Shell, request: FixtureRequest, storage_objects: list[StorageObjectInfo] cid,
): tombstone_oid,
""" shell=self.shell,
Validate get_range_hash for object by common gRPC API endpoint=self.cluster.default_rpc_endpoint,
""" )["header"]
allure.dynamic.title( object_type = header["objectType"]
f"Validate native get_range_hash object API for {request.node.callspec.id}"
)
wallet = storage_objects[0].wallet_file_path
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_path = storage_objects[0].file_path
net_info = get_netmap_netinfo(wallet, client_shell)
max_object_size = net_info["maximum_object_size"]
file_ranges_to_test = generate_ranges(storage_objects[0].size, max_object_size)
logging.info(f"Ranges used in test {file_ranges_to_test}")
for range_start, range_end in file_ranges_to_test:
range_len = range_end - range_start
range_cut = f"{range_start}:{range_len}"
with allure.step(f"Get range hash ({range_cut})"):
for oid in oids:
range_hash = get_range_hash(
wallet, cid, oid, shell=client_shell, range_cut=range_cut
)
assert ( assert (
get_file_hash(file_path, range_len, range_start) == range_hash object_type == "TOMBSTONE"
), f"Expected range hash to match {range_cut} slice of file payload" ), f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}"
@allure.title("Validate native object API get_range_hash")
@pytest.mark.sanity
@pytest.mark.grpc_api
def test_object_get_range_hash(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
):
"""
Validate get_range_hash for object by common gRPC API
"""
allure.dynamic.title(
f"Validate native get_range_hash object API for {request.node.callspec.id}"
)
@allure.title("Validate native object API get_range") wallet = storage_objects[0].wallet_file_path
@pytest.mark.sanity cid = storage_objects[0].cid
@pytest.mark.grpc_api oids = [storage_object.oid for storage_object in storage_objects[:2]]
def test_object_get_range( file_path = storage_objects[0].file_path
client_shell: Shell, request: FixtureRequest, storage_objects: list[StorageObjectInfo] net_info = get_netmap_netinfo(
): wallet, self.shell, endpoint=self.cluster.default_rpc_endpoint
""" )
Validate get_range for object by common gRPC API max_object_size = net_info["maximum_object_size"]
"""
allure.dynamic.title(f"Validate native get_range object API for {request.node.callspec.id}")
wallet = storage_objects[0].wallet_file_path file_ranges_to_test = generate_ranges(storage_objects[0].size, max_object_size)
cid = storage_objects[0].cid logging.info(f"Ranges used in test {file_ranges_to_test}")
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_path = storage_objects[0].file_path
net_info = get_netmap_netinfo(wallet, client_shell)
max_object_size = net_info["maximum_object_size"]
file_ranges_to_test = generate_ranges(storage_objects[0].size, max_object_size) for range_start, range_end in file_ranges_to_test:
logging.info(f"Ranges used in test {file_ranges_to_test}") range_len = range_end - range_start
range_cut = f"{range_start}:{range_len}"
for range_start, range_end in file_ranges_to_test: with allure.step(f"Get range hash ({range_cut})"):
range_len = range_end - range_start for oid in oids:
range_cut = f"{range_start}:{range_len}" range_hash = get_range_hash(
with allure.step(f"Get range ({range_cut})"): wallet,
for oid in oids: cid,
_, range_content = get_range( oid,
wallet, cid, oid, shell=client_shell, range_cut=range_cut shell=self.shell,
) endpoint=self.cluster.default_rpc_endpoint,
assert ( range_cut=range_cut,
get_file_content(
file_path, content_len=range_len, mode="rb", offset=range_start
) )
== range_content assert (
), f"Expected range content to match {range_cut} slice of file payload" get_file_hash(file_path, range_len, range_start) == range_hash
), f"Expected range hash to match {range_cut} slice of file payload"
@allure.title("Validate native object API get_range")
@pytest.mark.sanity
@pytest.mark.grpc_api
def test_object_get_range(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
):
"""
Validate get_range for object by common gRPC API
"""
allure.dynamic.title(f"Validate native get_range object API for {request.node.callspec.id}")
@allure.title("Validate native object API get_range negative cases") wallet = storage_objects[0].wallet_file_path
@pytest.mark.sanity cid = storage_objects[0].cid
@pytest.mark.grpc_api oids = [storage_object.oid for storage_object in storage_objects[:2]]
def test_object_get_range_negatives( file_path = storage_objects[0].file_path
client_shell: Shell, net_info = get_netmap_netinfo(
request: FixtureRequest, wallet, self.shell, endpoint=self.cluster.default_rpc_endpoint
storage_objects: list[StorageObjectInfo], )
): max_object_size = net_info["maximum_object_size"]
"""
Validate get_range negative for object by common gRPC API
"""
allure.dynamic.title(
f"Validate native get_range negative object API for {request.node.callspec.id}"
)
wallet = storage_objects[0].wallet_file_path file_ranges_to_test = generate_ranges(storage_objects[0].size, max_object_size)
cid = storage_objects[0].cid logging.info(f"Ranges used in test {file_ranges_to_test}")
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_size = storage_objects[0].size
assert ( for range_start, range_end in file_ranges_to_test:
RANGE_MIN_LEN < file_size range_len = range_end - range_start
), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})" range_cut = f"{range_start}:{range_len}"
with allure.step(f"Get range ({range_cut})"):
for oid in oids:
_, range_content = get_range(
wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
range_cut=range_cut,
)
assert (
get_file_content(
file_path, content_len=range_len, mode="rb", offset=range_start
)
== range_content
), f"Expected range content to match {range_cut} slice of file payload"
file_ranges_to_test = [ @allure.title("Validate native object API get_range negative cases")
# Offset is bigger than the file size, the length is small. @pytest.mark.sanity
(file_size + 1, RANGE_MIN_LEN), @pytest.mark.grpc_api
# Offset is ok, but offset+length is too big. def test_object_get_range_negatives(
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2), self,
# Offset is ok, and length is very-very big (e.g. MaxUint64) so that offset+length is wrapped and still "valid". request: FixtureRequest,
(RANGE_MIN_LEN, sys.maxsize * 2 + 1), storage_objects: list[StorageObjectInfo],
] ):
"""
Validate get_range negative for object by common gRPC API
"""
allure.dynamic.title(
f"Validate native get_range negative object API for {request.node.callspec.id}"
)
for range_start, range_len in file_ranges_to_test: wallet = storage_objects[0].wallet_file_path
range_cut = f"{range_start}:{range_len}" cid = storage_objects[0].cid
with allure.step(f"Get range ({range_cut})"): oids = [storage_object.oid for storage_object in storage_objects[:2]]
for oid in oids: file_size = storage_objects[0].size
with pytest.raises(Exception, match=OUT_OF_RANGE):
get_range(wallet, cid, oid, shell=client_shell, range_cut=range_cut)
@allure.title("Validate native object API get_range_hash negative cases")
@pytest.mark.sanity
@pytest.mark.grpc_api
def test_object_get_range_hash_negatives(
client_shell: Shell,
request: FixtureRequest,
storage_objects: list[StorageObjectInfo],
):
"""
Validate get_range_hash negative for object by common gRPC API
"""
allure.dynamic.title(
f"Validate native get_range_hash negative object API for {request.node.callspec.id}"
)
wallet = storage_objects[0].wallet_file_path
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_size = storage_objects[0].size
assert (
RANGE_MIN_LEN < file_size
), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
file_ranges_to_test = [
# Offset is bigger than the file size, the length is small.
(file_size + 1, RANGE_MIN_LEN),
# Offset is ok, but offset+length is too big.
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2),
# Offset is ok, and length is very-very big (e.g. MaxUint64) so that offset+length is wrapped and still "valid".
(RANGE_MIN_LEN, sys.maxsize * 2 + 1),
]
for range_start, range_len in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
with allure.step(f"Get range ({range_cut})"):
for oid in oids:
with pytest.raises(Exception, match=OUT_OF_RANGE):
get_range_hash(wallet, cid, oid, shell=client_shell, range_cut=range_cut)
def check_header_is_presented(head_info: dict, object_header: dict) -> None:
for key_to_check, val_to_check in object_header.items():
assert ( assert (
key_to_check in head_info["header"]["attributes"] RANGE_MIN_LEN < file_size
), f"Key {key_to_check} is found in {head_object}" ), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
assert head_info["header"]["attributes"].get(key_to_check) == str(
val_to_check file_ranges_to_test = [
), f"Value {val_to_check} is equal" # Offset is bigger than the file size, the length is small.
(file_size + 1, RANGE_MIN_LEN),
# Offset is ok, but offset+length is too big.
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2),
# Offset is ok, and length is very-very big (e.g. MaxUint64) so that offset+length is wrapped and still "valid".
(RANGE_MIN_LEN, sys.maxsize * 2 + 1),
]
for range_start, range_len in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
with allure.step(f"Get range ({range_cut})"):
for oid in oids:
with pytest.raises(Exception, match=OUT_OF_RANGE):
get_range(
wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
range_cut=range_cut,
)
@allure.title("Validate native object API get_range_hash negative cases")
def test_object_get_range_hash_negatives(
self,
request: FixtureRequest,
storage_objects: list[StorageObjectInfo],
):
"""
Validate get_range_hash negative for object by common gRPC API
"""
allure.dynamic.title(
f"Validate native get_range_hash negative object API for {request.node.callspec.id}"
)
wallet = storage_objects[0].wallet_file_path
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_size = storage_objects[0].size
assert (
RANGE_MIN_LEN < file_size
), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
file_ranges_to_test = [
# Offset is bigger than the file size, the length is small.
(file_size + 1, RANGE_MIN_LEN),
# Offset is ok, but offset+length is too big.
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2),
# Offset is ok, and length is very-very big (e.g. MaxUint64) so that offset+length is wrapped and still "valid".
(RANGE_MIN_LEN, sys.maxsize * 2 + 1),
]
for range_start, range_len in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
with allure.step(f"Get range ({range_cut})"):
for oid in oids:
with pytest.raises(Exception, match=OUT_OF_RANGE):
get_range_hash(
wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
range_cut=range_cut,
)
def check_header_is_presented(self, head_info: dict, object_header: dict) -> None:
for key_to_check, val_to_check in object_header.items():
assert (
key_to_check in head_info["header"]["attributes"]
), f"Key {key_to_check} is found in {head_object}"
assert head_info["header"]["attributes"].get(key_to_check) == str(
val_to_check
), f"Value {val_to_check} is equal"

View file

@ -7,46 +7,52 @@ from container import create_container
from epoch import get_epoch, tick_epoch from epoch import get_epoch, tick_epoch
from file_helper import generate_file, get_file_hash from file_helper import generate_file, get_file_hash
from grpc_responses import OBJECT_NOT_FOUND from grpc_responses import OBJECT_NOT_FOUND
from neofs_testlib.shell import Shell
from pytest import FixtureRequest from pytest import FixtureRequest
from python_keywords.neofs_verbs import get_object, put_object from python_keywords.neofs_verbs import get_object_from_random_node, put_object_to_random_node
from utility import wait_for_gc_pass_on_storage_nodes from utility import wait_for_gc_pass_on_storage_nodes
from steps.cluster_test_base import ClusterTestBase
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@allure.title("Test object life time")
@pytest.mark.sanity @pytest.mark.sanity
@pytest.mark.grpc_api @pytest.mark.grpc_api
@pytest.mark.parametrize( class ObjectApiLifetimeTest(ClusterTestBase):
"object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"] @allure.title("Test object life time")
) @pytest.mark.parametrize(
def test_object_api_lifetime( "object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"]
prepare_wallet_and_deposit: str, client_shell: Shell, request: FixtureRequest, object_size: int )
): def test_object_api_lifetime(
""" self, default_wallet: str, request: FixtureRequest, object_size: int
Test object deleted after expiration epoch. ):
""" """
wallet = prepare_wallet_and_deposit Test object deleted after expiration epoch.
cid = create_container(wallet, shell=client_shell) """
allure.dynamic.title(f"Test object life time for {request.node.callspec.id}") allure.dynamic.title(f"Test object life time for {request.node.callspec.id}")
file_path = generate_file(object_size) wallet = default_wallet
file_hash = get_file_hash(file_path) endpoint = self.cluster.default_rpc_endpoint
epoch = get_epoch(shell=client_shell) cid = create_container(wallet, self.shell, endpoint)
oid = put_object(wallet, file_path, cid, shell=client_shell, expire_at=epoch + 1) file_path = generate_file(object_size)
got_file = get_object(wallet, cid, oid, shell=client_shell) file_hash = get_file_hash(file_path)
assert get_file_hash(got_file) == file_hash epoch = get_epoch(self.shell, self.cluster)
with allure.step("Tick two epochs"): oid = put_object_to_random_node(
for _ in range(2): wallet, file_path, cid, self.shell, self.cluster, expire_at=epoch + 1
tick_epoch(shell=client_shell) )
got_file = get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)
assert get_file_hash(got_file) == file_hash
# Wait for GC, because object with expiration is counted as alive until GC removes it with allure.step("Tick two epochs"):
wait_for_gc_pass_on_storage_nodes() for _ in range(2):
tick_epoch(self.shell, self.cluster)
with allure.step("Check object deleted because it expires-on epoch"): # Wait for GC, because object with expiration is counted as alive until GC removes it
with pytest.raises(Exception, match=OBJECT_NOT_FOUND): wait_for_gc_pass_on_storage_nodes()
get_object(wallet, cid, oid, shell=client_shell)
with allure.step("Check object deleted because it expires-on epoch"):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)

View file

@ -3,6 +3,8 @@ import re
import allure import allure
import pytest import pytest
from cluster import Cluster
from cluster_test_base import ClusterTestBase
from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE, STORAGE_GC_TIME from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE, STORAGE_GC_TIME
from complex_object_actions import get_link_object from complex_object_actions import get_link_object
from container import create_container from container import create_container
@ -22,9 +24,11 @@ from python_keywords.neofs_verbs import delete_object, head_object, lock_object
from test_control import expect_not_raises, wait_for_success from test_control import expect_not_raises, wait_for_success
from utility import parse_time, wait_for_gc_pass_on_storage_nodes from utility import parse_time, wait_for_gc_pass_on_storage_nodes
import steps
from helpers.container import StorageContainer, StorageContainerInfo from helpers.container import StorageContainer, StorageContainerInfo
from helpers.storage_object_info import LockObjectInfo, StorageObjectInfo from helpers.storage_object_info import LockObjectInfo, StorageObjectInfo
from helpers.wallet import WalletFactory, WalletFile from helpers.wallet import WalletFactory, WalletFile
from steps.cluster_test_base import ClusterTestBase
from steps.storage_object import delete_objects from steps.storage_object import delete_objects
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -33,25 +37,6 @@ FIXTURE_LOCK_LIFETIME = 5
FIXTURE_OBJECT_LIFETIME = 10 FIXTURE_OBJECT_LIFETIME = 10
def get_storage_object_chunks(storage_object: StorageObjectInfo, shell: Shell):
with allure.step(f"Get complex object chunks (f{storage_object.oid})"):
split_object_id = get_link_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell,
is_direct=False,
)
head = head_object(
storage_object.wallet_file_path, storage_object.cid, split_object_id, shell
)
chunks_object_ids = []
if "split" in head["header"] and "children" in head["header"]["split"]:
chunks_object_ids = head["header"]["split"]["children"]
return chunks_object_ids
@pytest.fixture( @pytest.fixture(
scope="module", scope="module",
) )
@ -64,9 +49,11 @@ def user_wallet(wallet_factory: WalletFactory):
@pytest.fixture( @pytest.fixture(
scope="module", scope="module",
) )
def user_container(user_wallet: WalletFile, client_shell: Shell): def user_container(user_wallet: WalletFile, client_shell: Shell, cluster: Cluster):
container_id = create_container(user_wallet.path, shell=client_shell) container_id = create_container(
return StorageContainer(StorageContainerInfo(container_id, user_wallet), client_shell) user_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
)
return StorageContainer(StorageContainerInfo(container_id, user_wallet), client_shell, cluster)
@pytest.fixture( @pytest.fixture(
@ -75,10 +62,11 @@ def user_container(user_wallet: WalletFile, client_shell: Shell):
def locked_storage_object( def locked_storage_object(
user_container: StorageContainer, user_container: StorageContainer,
client_shell: Shell, client_shell: Shell,
cluster: Cluster,
request: FixtureRequest, request: FixtureRequest,
): ):
with allure.step(f"Creating locked object"): with allure.step("Creating locked object"):
current_epoch = ensure_fresh_epoch(client_shell) current_epoch = ensure_fresh_epoch(client_shell, cluster)
expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME
storage_object = user_container.generate_object( storage_object = user_container.generate_object(
@ -89,6 +77,7 @@ def locked_storage_object(
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
client_shell, client_shell,
cluster.default_rpc_endpoint,
lifetime=FIXTURE_LOCK_LIFETIME, lifetime=FIXTURE_LOCK_LIFETIME,
) )
storage_object.locks = [ storage_object.locks = [
@ -99,20 +88,21 @@ def locked_storage_object(
yield storage_object yield storage_object
with allure.step(f"Delete created locked object"): with allure.step("Delete created locked object"):
current_epoch = get_epoch(client_shell) current_epoch = get_epoch(client_shell, cluster)
epoch_diff = expiration_epoch - current_epoch + 1 epoch_diff = expiration_epoch - current_epoch + 1
if epoch_diff > 0: if epoch_diff > 0:
with allure.step(f"Tick {epoch_diff} epochs"): with allure.step(f"Tick {epoch_diff} epochs"):
for _ in range(epoch_diff): for _ in range(epoch_diff):
tick_epoch(client_shell) tick_epoch(client_shell, cluster)
try: try:
delete_object( delete_object(
storage_object.wallet_file_path, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
client_shell, client_shell,
cluster.default_rpc_endpoint,
) )
except Exception as ex: except Exception as ex:
ex_message = str(ex) ex_message = str(ex)
@ -126,7 +116,30 @@ def locked_storage_object(
@pytest.mark.sanity @pytest.mark.sanity
@pytest.mark.grpc_object_lock @pytest.mark.grpc_object_lock
class TestObjectLockWithGrpc: class TestObjectLockWithGrpc(ClusterTestBase):
def get_storage_object_chunks(self, storage_object: StorageObjectInfo):
with allure.step(f"Get complex object chunks (f{storage_object.oid})"):
split_object_id = get_link_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.storage_nodes,
is_direct=False,
)
head = head_object(
storage_object.wallet_file_path,
storage_object.cid,
split_object_id,
self.shell,
self.cluster.default_rpc_endpoint,
)
chunks_object_ids = []
if "split" in head["header"] and "children" in head["header"]["split"]:
chunks_object_ids = head["header"]["split"]["children"]
return chunks_object_ids
@allure.title("Locked object should be protected from deletion") @allure.title("Locked object should be protected from deletion")
@pytest.mark.parametrize( @pytest.mark.parametrize(
"locked_storage_object", "locked_storage_object",
@ -136,7 +149,6 @@ class TestObjectLockWithGrpc:
) )
def test_locked_object_cannot_be_deleted( def test_locked_object_cannot_be_deleted(
self, self,
client_shell: Shell,
request: FixtureRequest, request: FixtureRequest,
locked_storage_object: StorageObjectInfo, locked_storage_object: StorageObjectInfo,
): ):
@ -152,7 +164,8 @@ class TestObjectLockWithGrpc:
locked_storage_object.wallet_file_path, locked_storage_object.wallet_file_path,
locked_storage_object.cid, locked_storage_object.cid,
locked_storage_object.oid, locked_storage_object.oid,
client_shell, self.shell,
self.cluster.default_rpc_endpoint,
) )
@allure.title("Lock object itself should be protected from deletion") @allure.title("Lock object itself should be protected from deletion")
@ -160,7 +173,6 @@ class TestObjectLockWithGrpc:
@pytest.mark.parametrize("locked_storage_object", [SIMPLE_OBJ_SIZE], indirect=True) @pytest.mark.parametrize("locked_storage_object", [SIMPLE_OBJ_SIZE], indirect=True)
def test_lock_object_itself_cannot_be_deleted( def test_lock_object_itself_cannot_be_deleted(
self, self,
client_shell: Shell,
locked_storage_object: StorageObjectInfo, locked_storage_object: StorageObjectInfo,
): ):
""" """
@ -171,14 +183,19 @@ class TestObjectLockWithGrpc:
wallet_path = locked_storage_object.wallet_file_path wallet_path = locked_storage_object.wallet_file_path
with pytest.raises(Exception, match=LOCK_OBJECT_REMOVAL): with pytest.raises(Exception, match=LOCK_OBJECT_REMOVAL):
delete_object(wallet_path, lock_object.cid, lock_object.oid, client_shell) delete_object(
wallet_path,
lock_object.cid,
lock_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Lock object itself cannot be locked") @allure.title("Lock object itself cannot be locked")
# We operate with only lock object here so no complex object needed in this test # We operate with only lock object here so no complex object needed in this test
@pytest.mark.parametrize("locked_storage_object", [SIMPLE_OBJ_SIZE], indirect=True) @pytest.mark.parametrize("locked_storage_object", [SIMPLE_OBJ_SIZE], indirect=True)
def test_lock_object_cannot_be_locked( def test_lock_object_cannot_be_locked(
self, self,
client_shell: Shell,
locked_storage_object: StorageObjectInfo, locked_storage_object: StorageObjectInfo,
): ):
""" """
@ -189,7 +206,14 @@ class TestObjectLockWithGrpc:
wallet_path = locked_storage_object.wallet_file_path wallet_path = locked_storage_object.wallet_file_path
with pytest.raises(Exception, match=LOCK_NON_REGULAR_OBJECT): with pytest.raises(Exception, match=LOCK_NON_REGULAR_OBJECT):
lock_object(wallet_path, lock_object_info.cid, lock_object_info.oid, client_shell, 1) lock_object(
wallet_path,
lock_object_info.cid,
lock_object_info.oid,
self.shell,
self.cluster.default_rpc_endpoint,
1,
)
@allure.title("Cannot lock object without lifetime and expire_at fields") @allure.title("Cannot lock object without lifetime and expire_at fields")
# We operate with only lock object here so no complex object needed in this test # We operate with only lock object here so no complex object needed in this test
@ -207,7 +231,6 @@ class TestObjectLockWithGrpc:
) )
def test_cannot_lock_object_without_lifetime( def test_cannot_lock_object_without_lifetime(
self, self,
client_shell: Shell,
locked_storage_object: StorageObjectInfo, locked_storage_object: StorageObjectInfo,
wrong_lifetime: int, wrong_lifetime: int,
wrong_expire_at: int, wrong_expire_at: int,
@ -228,7 +251,8 @@ class TestObjectLockWithGrpc:
wallet_path, wallet_path,
lock_object_info.cid, lock_object_info.cid,
lock_object_info.oid, lock_object_info.oid,
client_shell, self.shell,
self.cluster.default_rpc_endpoint,
lifetime=wrong_lifetime, lifetime=wrong_lifetime,
expire_at=wrong_expire_at, expire_at=wrong_expire_at,
) )
@ -239,7 +263,6 @@ class TestObjectLockWithGrpc:
) )
def test_expired_object_should_be_deleted_after_locks_are_expired( def test_expired_object_should_be_deleted_after_locks_are_expired(
self, self,
client_shell: Shell,
request: FixtureRequest, request: FixtureRequest,
user_container: StorageContainer, user_container: StorageContainer,
object_size: int, object_size: int,
@ -251,7 +274,7 @@ class TestObjectLockWithGrpc:
f"Expired object should be deleted after locks are expired for {request.node.callspec.id}" f"Expired object should be deleted after locks are expired for {request.node.callspec.id}"
) )
current_epoch = ensure_fresh_epoch(client_shell) current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1) storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1)
with allure.step("Lock object for couple epochs"): with allure.step("Lock object for couple epochs"):
@ -259,20 +282,22 @@ class TestObjectLockWithGrpc:
storage_object.wallet_file_path, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
client_shell, self.shell,
self.cluster.default_rpc_endpoint,
lifetime=3, lifetime=3,
) )
lock_object( lock_object(
storage_object.wallet_file_path, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
client_shell, self.shell,
self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 3, expire_at=current_epoch + 3,
) )
with allure.step("Check object is not deleted at expiration time"): with allure.step("Check object is not deleted at expiration time"):
tick_epoch(client_shell) self.tick_epoch()
tick_epoch(client_shell) self.tick_epoch()
# Must wait to ensure object is not deleted # Must wait to ensure object is not deleted
wait_for_gc_pass_on_storage_nodes() wait_for_gc_pass_on_storage_nodes()
with expect_not_raises(): with expect_not_raises():
@ -280,7 +305,8 @@ class TestObjectLockWithGrpc:
storage_object.wallet_file_path, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
client_shell, self.shell,
self.cluster.default_rpc_endpoint,
) )
@wait_for_success(parse_time(STORAGE_GC_TIME)) @wait_for_success(parse_time(STORAGE_GC_TIME))
@ -290,11 +316,12 @@ class TestObjectLockWithGrpc:
storage_object.wallet_file_path, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
client_shell, self.shell,
self.cluster.default_rpc_endpoint,
) )
with allure.step("Wait for object to be deleted after third epoch"): with allure.step("Wait for object to be deleted after third epoch"):
tick_epoch(client_shell) self.tick_epoch()
check_object_not_found() check_object_not_found()
@allure.title("Should be possible to lock multiple objects at once") @allure.title("Should be possible to lock multiple objects at once")
@ -305,7 +332,6 @@ class TestObjectLockWithGrpc:
) )
def test_should_be_possible_to_lock_multiple_objects_at_once( def test_should_be_possible_to_lock_multiple_objects_at_once(
self, self,
client_shell: Shell,
request: FixtureRequest, request: FixtureRequest,
user_container: StorageContainer, user_container: StorageContainer,
object_size: int, object_size: int,
@ -317,7 +343,7 @@ class TestObjectLockWithGrpc:
f"Should be possible to lock multiple objects at once for {request.node.callspec.id}" f"Should be possible to lock multiple objects at once for {request.node.callspec.id}"
) )
current_epoch = ensure_fresh_epoch(client_shell) current_epoch = ensure_fresh_epoch(self.shell, self.cluster)
storage_objects: list[StorageObjectInfo] = [] storage_objects: list[StorageObjectInfo] = []
with allure.step("Generate three objects"): with allure.step("Generate three objects"):
@ -330,7 +356,8 @@ class TestObjectLockWithGrpc:
storage_objects[0].wallet_file_path, storage_objects[0].wallet_file_path,
storage_objects[0].cid, storage_objects[0].cid,
",".join([storage_object.oid for storage_object in storage_objects]), ",".join([storage_object.oid for storage_object in storage_objects]),
client_shell, self.shell,
self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 1, expire_at=current_epoch + 1,
) )
@ -341,15 +368,16 @@ class TestObjectLockWithGrpc:
storage_object.wallet_file_path, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
client_shell, self.shell,
self.cluster.default_rpc_endpoint,
) )
with allure.step("Tick two epochs"): with allure.step("Tick two epochs"):
tick_epoch(client_shell) self.tick_epoch()
tick_epoch(client_shell) self.tick_epoch()
with expect_not_raises(): with expect_not_raises():
delete_objects(storage_objects, client_shell) delete_objects(storage_objects, self.shell, self.cluster)
@allure.title("Already outdated lock should not be applied") @allure.title("Already outdated lock should not be applied")
@pytest.mark.parametrize( @pytest.mark.parametrize(
@ -359,7 +387,6 @@ class TestObjectLockWithGrpc:
) )
def test_already_outdated_lock_should_not_be_applied( def test_already_outdated_lock_should_not_be_applied(
self, self,
client_shell: Shell,
request: FixtureRequest, request: FixtureRequest,
user_container: StorageContainer, user_container: StorageContainer,
object_size: int, object_size: int,
@ -371,7 +398,7 @@ class TestObjectLockWithGrpc:
f"Already outdated lock should not be applied for {request.node.callspec.id}" f"Already outdated lock should not be applied for {request.node.callspec.id}"
) )
current_epoch = ensure_fresh_epoch(client_shell) current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1) storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1)
@ -386,7 +413,8 @@ class TestObjectLockWithGrpc:
storage_object.wallet_file_path, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
client_shell, self.shell,
self.cluster.default_rpc_endpoint,
expire_at=expiration_epoch, expire_at=expiration_epoch,
) )
@ -399,7 +427,6 @@ class TestObjectLockWithGrpc:
@expect_not_raises() @expect_not_raises()
def test_after_lock_expiration_with_lifetime_user_should_be_able_to_delete_object( def test_after_lock_expiration_with_lifetime_user_should_be_able_to_delete_object(
self, self,
client_shell: Shell,
request: FixtureRequest, request: FixtureRequest,
user_container: StorageContainer, user_container: StorageContainer,
object_size: int, object_size: int,
@ -411,22 +438,27 @@ class TestObjectLockWithGrpc:
f"After lock expiration with lifetime user should be able to delete object for {request.node.callspec.id}" f"After lock expiration with lifetime user should be able to delete object for {request.node.callspec.id}"
) )
current_epoch = ensure_fresh_epoch(client_shell) current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1) storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1)
lock_object( lock_object(
storage_object.wallet_file_path, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
client_shell, self.shell,
self.cluster.default_rpc_endpoint,
lifetime=1, lifetime=1,
) )
tick_epoch(client_shell) self.tick_epoch()
with expect_not_raises():
delete_object( delete_object(
storage_object.wallet_file_path, storage_object.cid, storage_object.oid, client_shell storage_object.wallet_file_path,
) storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("After lock expiration with expire_at user should be able to delete object") @allure.title("After lock expiration with expire_at user should be able to delete object")
@pytest.mark.parametrize( @pytest.mark.parametrize(
@ -437,7 +469,6 @@ class TestObjectLockWithGrpc:
@expect_not_raises() @expect_not_raises()
def test_after_lock_expiration_with_expire_at_user_should_be_able_to_delete_object( def test_after_lock_expiration_with_expire_at_user_should_be_able_to_delete_object(
self, self,
client_shell: Shell,
request: FixtureRequest, request: FixtureRequest,
user_container: StorageContainer, user_container: StorageContainer,
object_size: int, object_size: int,
@ -449,7 +480,7 @@ class TestObjectLockWithGrpc:
f"After lock expiration with expire_at user should be able to delete object for {request.node.callspec.id}" f"After lock expiration with expire_at user should be able to delete object for {request.node.callspec.id}"
) )
current_epoch = ensure_fresh_epoch(client_shell) current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 5) storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 5)
@ -457,15 +488,21 @@ class TestObjectLockWithGrpc:
storage_object.wallet_file_path, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
client_shell, self.shell,
endpoint=self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 1, expire_at=current_epoch + 1,
) )
tick_epoch(client_shell) self.tick_epoch()
delete_object( with expect_not_raises():
storage_object.wallet_file_path, storage_object.cid, storage_object.oid, client_shell delete_object(
) storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Complex object chunks should also be protected from deletion") @allure.title("Complex object chunks should also be protected from deletion")
@pytest.mark.parametrize( @pytest.mark.parametrize(
@ -476,14 +513,13 @@ class TestObjectLockWithGrpc:
) )
def test_complex_object_chunks_should_also_be_protected_from_deletion( def test_complex_object_chunks_should_also_be_protected_from_deletion(
self, self,
client_shell: Shell,
locked_storage_object: StorageObjectInfo, locked_storage_object: StorageObjectInfo,
): ):
""" """
Complex object chunks should also be protected from deletion Complex object chunks should also be protected from deletion
""" """
chunk_object_ids = get_storage_object_chunks(locked_storage_object, client_shell) chunk_object_ids = self.get_storage_object_chunks(locked_storage_object)
for chunk_object_id in chunk_object_ids: for chunk_object_id in chunk_object_ids:
with allure.step(f"Try to delete chunk object {chunk_object_id}"): with allure.step(f"Try to delete chunk object {chunk_object_id}"):
with pytest.raises(Exception, match=OBJECT_IS_LOCKED): with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
@ -491,7 +527,8 @@ class TestObjectLockWithGrpc:
locked_storage_object.wallet_file_path, locked_storage_object.wallet_file_path,
locked_storage_object.cid, locked_storage_object.cid,
chunk_object_id, chunk_object_id,
client_shell, self.shell,
self.cluster.default_rpc_endpoint,
) )
@allure.title("Link object of complex object should also be protected from deletion") @allure.title("Link object of complex object should also be protected from deletion")
@ -503,7 +540,6 @@ class TestObjectLockWithGrpc:
) )
def test_link_object_of_complex_object_should_also_be_protected_from_deletion( def test_link_object_of_complex_object_should_also_be_protected_from_deletion(
self, self,
client_shell: Shell,
locked_storage_object: StorageObjectInfo, locked_storage_object: StorageObjectInfo,
): ):
""" """
@ -514,7 +550,8 @@ class TestObjectLockWithGrpc:
locked_storage_object.wallet_file_path, locked_storage_object.wallet_file_path,
locked_storage_object.cid, locked_storage_object.cid,
locked_storage_object.oid, locked_storage_object.oid,
client_shell, self.shell,
self.cluster.storage_nodes,
is_direct=False, is_direct=False,
) )
with allure.step(f"Try to delete link object {link_object_id}"): with allure.step(f"Try to delete link object {link_object_id}"):
@ -523,5 +560,6 @@ class TestObjectLockWithGrpc:
locked_storage_object.wallet_file_path, locked_storage_object.wallet_file_path,
locked_storage_object.cid, locked_storage_object.cid,
link_object_id, link_object_id,
client_shell, self.shell,
self.cluster.default_rpc_endpoint,
) )

View file

@ -4,7 +4,8 @@ import os
import allure import allure
import pytest import pytest
import yaml import yaml
from common import FREE_STORAGE, NEOFS_CLI_EXEC, NEOFS_ENDPOINT, WALLET_CONFIG from cluster_test_base import ClusterTestBase
from common import FREE_STORAGE, NEOFS_CLI_EXEC, WALLET_CONFIG
from neofs_testlib.cli import NeofsCli from neofs_testlib.cli import NeofsCli
from neofs_testlib.shell import CommandResult, Shell from neofs_testlib.shell import CommandResult, Shell
from wallet import WalletFactory, WalletFile from wallet import WalletFactory, WalletFile
@ -16,7 +17,7 @@ DEPOSIT_AMOUNT = 30
@pytest.mark.sanity @pytest.mark.sanity
@pytest.mark.payments @pytest.mark.payments
@pytest.mark.skipif(FREE_STORAGE, reason="Test only works on public network with paid storage") @pytest.mark.skipif(FREE_STORAGE, reason="Test only works on public network with paid storage")
class TestBalanceAccounting: class TestBalanceAccounting(ClusterTestBase):
@pytest.fixture(scope="class") @pytest.fixture(scope="class")
def main_wallet(self, wallet_factory: WalletFactory) -> WalletFile: def main_wallet(self, wallet_factory: WalletFactory) -> WalletFile:
return wallet_factory.create_wallet() return wallet_factory.create_wallet()
@ -61,7 +62,7 @@ class TestBalanceAccounting:
def test_balance_wallet_address(self, main_wallet: WalletFile, cli: NeofsCli): def test_balance_wallet_address(self, main_wallet: WalletFile, cli: NeofsCli):
result = cli.accounting.balance( result = cli.accounting.balance(
wallet=main_wallet.path, wallet=main_wallet.path,
rpc_endpoint=NEOFS_ENDPOINT, rpc_endpoint=self.cluster.default_rpc_endpoint,
address=main_wallet.get_address(), address=main_wallet.get_address(),
) )
@ -69,7 +70,9 @@ class TestBalanceAccounting:
@allure.title("Test balance request with wallet only") @allure.title("Test balance request with wallet only")
def test_balance_wallet(self, main_wallet: WalletFile, cli: NeofsCli): def test_balance_wallet(self, main_wallet: WalletFile, cli: NeofsCli):
result = cli.accounting.balance(wallet=main_wallet.path, rpc_endpoint=NEOFS_ENDPOINT) result = cli.accounting.balance(
wallet=main_wallet.path, rpc_endpoint=self.cluster.default_rpc_endpoint
)
self.check_amount(result) self.check_amount(result)
@allure.title("Test balance request with wallet and wrong address") @allure.title("Test balance request with wallet and wrong address")
@ -79,14 +82,16 @@ class TestBalanceAccounting:
with pytest.raises(Exception, match="address option must be specified and valid"): with pytest.raises(Exception, match="address option must be specified and valid"):
cli.accounting.balance( cli.accounting.balance(
wallet=main_wallet.path, wallet=main_wallet.path,
rpc_endpoint=NEOFS_ENDPOINT, rpc_endpoint=self.cluster.default_rpc_endpoint,
address=other_wallet.get_address(), address=other_wallet.get_address(),
) )
@allure.title("Test balance request with config file") @allure.title("Test balance request with config file")
def test_balance_api(self, prepare_tmp_dir: str, main_wallet: WalletFile, client_shell: Shell): def test_balance_api(self, temp_directory: str, main_wallet: WalletFile, client_shell: Shell):
config_file = self.write_api_config( config_file = self.write_api_config(
config_dir=prepare_tmp_dir, endpoint=NEOFS_ENDPOINT, wallet=main_wallet.path config_dir=temp_directory,
endpoint=self.cluster.default_rpc_endpoint,
wallet=main_wallet.path,
) )
logger.info(f"Config with API endpoint: {config_file}") logger.info(f"Config with API endpoint: {config_file}")

View file

@ -4,6 +4,7 @@ from random import choice, choices
import allure import allure
import pytest import pytest
from aws_cli_client import AwsCliClient
from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE
from epoch import tick_epoch from epoch import tick_epoch
from file_helper import ( from file_helper import (
@ -22,7 +23,6 @@ from s3_helper import (
) )
from steps import s3_gate_bucket, s3_gate_object from steps import s3_gate_bucket, s3_gate_object
from steps.aws_cli_client import AwsCliClient
from steps.s3_gate_base import TestS3GateBase from steps.s3_gate_base import TestS3GateBase
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -39,7 +39,7 @@ def pytest_generate_tests(metafunc):
@pytest.mark.s3_gate_base @pytest.mark.s3_gate_base
class TestS3Gate(TestS3GateBase): class TestS3Gate(TestS3GateBase):
@allure.title("Test S3 Bucket API") @allure.title("Test S3 Bucket API")
def test_s3_buckets(self, client_shell): def test_s3_buckets(self):
""" """
Test base S3 Bucket API (Create/List/Head/Delete). Test base S3 Bucket API (Create/List/Head/Delete).
""" """
@ -83,7 +83,7 @@ class TestS3Gate(TestS3GateBase):
with allure.step(f"Delete empty bucket {bucket_2}"): with allure.step(f"Delete empty bucket {bucket_2}"):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_2) s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_2)
tick_epoch(shell=client_shell) tick_epoch(self.shell, self.cluster)
with allure.step(f"Check bucket {bucket_2} deleted"): with allure.step(f"Check bucket {bucket_2} deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"): with pytest.raises(Exception, match=r".*Not Found.*"):
@ -99,7 +99,7 @@ class TestS3Gate(TestS3GateBase):
with allure.step(f"Delete bucket {bucket_1}"): with allure.step(f"Delete bucket {bucket_1}"):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_1) s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_1)
tick_epoch(shell=client_shell) tick_epoch(self.shell, self.cluster)
with allure.step(f"Check bucket {bucket_1} deleted"): with allure.step(f"Check bucket {bucket_1} deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"): with pytest.raises(Exception, match=r".*Not Found.*"):

View file

@ -6,6 +6,7 @@ from random import choices, sample
import allure import allure
import pytest import pytest
from aws_cli_client import AwsCliClient
from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, FREE_STORAGE, SIMPLE_OBJ_SIZE, WALLET_PASS from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, FREE_STORAGE, SIMPLE_OBJ_SIZE, WALLET_PASS
from data_formatters import get_wallet_public_key from data_formatters import get_wallet_public_key
from file_helper import concat_files, generate_file, generate_file_with_content, get_file_hash from file_helper import concat_files, generate_file, generate_file_with_content, get_file_hash
@ -14,7 +15,6 @@ from python_keywords.payment_neogo import deposit_gas, transfer_gas
from s3_helper import assert_object_lock_mode, check_objects_in_bucket, set_bucket_versioning from s3_helper import assert_object_lock_mode, check_objects_in_bucket, set_bucket_versioning
from steps import s3_gate_bucket, s3_gate_object from steps import s3_gate_bucket, s3_gate_object
from steps.aws_cli_client import AwsCliClient
from steps.s3_gate_base import TestS3GateBase from steps.s3_gate_base import TestS3GateBase
@ -653,23 +653,26 @@ class TestS3GateObject(TestS3GateBase):
], "Tags must be the same" ], "Tags must be the same"
@pytest.fixture @pytest.fixture
def prepare_two_wallets(self, prepare_wallet_and_deposit, client_shell): def prepare_two_wallets(self, default_wallet, client_shell):
self.main_wallet = prepare_wallet_and_deposit self.main_wallet = default_wallet
self.main_public_key = get_wallet_public_key(self.main_wallet, WALLET_PASS) self.main_public_key = get_wallet_public_key(self.main_wallet, WALLET_PASS)
self.other_wallet = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json") self.other_wallet = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json")
init_wallet(self.other_wallet, WALLET_PASS) init_wallet(self.other_wallet, WALLET_PASS)
self.other_public_key = get_wallet_public_key(self.other_wallet, WALLET_PASS) self.other_public_key = get_wallet_public_key(self.other_wallet, WALLET_PASS)
if not FREE_STORAGE: if not FREE_STORAGE:
main_chain = self.cluster.main_chain_nodes[0]
deposit = 30 deposit = 30
transfer_gas( transfer_gas(
shell=client_shell, shell=client_shell,
amount=deposit + 1, amount=deposit + 1,
main_chain=main_chain,
wallet_to_path=self.other_wallet, wallet_to_path=self.other_wallet,
wallet_to_password=WALLET_PASS, wallet_to_password=WALLET_PASS,
) )
deposit_gas( deposit_gas(
shell=client_shell, shell=client_shell,
main_chain=main_chain,
amount=deposit, amount=deposit,
wallet_from_path=self.other_wallet, wallet_from_path=self.other_wallet,
wallet_from_password=WALLET_PASS, wallet_from_password=WALLET_PASS,
@ -906,9 +909,9 @@ class TestS3GateObject(TestS3GateBase):
# ], "Permission for all groups is FULL_CONTROL" # ], "Permission for all groups is FULL_CONTROL"
@allure.title("Test S3 Put 10 nested level object") @allure.title("Test S3 Put 10 nested level object")
def test_s3_put_10_folder(self, bucket, prepare_tmp_dir): def test_s3_put_10_folder(self, bucket, temp_directory):
path = "/".join(["".join(choices(string.ascii_letters, k=3)) for _ in range(10)]) path = "/".join(["".join(choices(string.ascii_letters, k=3)) for _ in range(10)])
file_path_1 = os.path.join(prepare_tmp_dir, path, "test_file_1") file_path_1 = os.path.join(temp_directory, path, "test_file_1")
generate_file_with_content(file_path=file_path_1) generate_file_with_content(file_path=file_path_1)
file_name = self.object_key_from_file_path(file_path_1) file_name = self.object_key_from_file_path(file_path_1)
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket) objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)

View file

@ -35,7 +35,7 @@ def pytest_generate_tests(metafunc):
@pytest.mark.s3_gate @pytest.mark.s3_gate
class TestS3GatePolicy(TestS3GateBase): class TestS3GatePolicy(TestS3GateBase):
@allure.title("Test S3: Verify bucket creation with retention policy applied") @allure.title("Test S3: Verify bucket creation with retention policy applied")
def test_s3_bucket_location(self, client_shell): def test_s3_bucket_location(self):
file_path_1 = generate_file() file_path_1 = generate_file()
file_name_1 = object_key_from_file_path(file_path_1) file_name_1 = object_key_from_file_path(file_path_1)
file_path_2 = generate_file() file_path_2 = generate_file()
@ -72,14 +72,26 @@ class TestS3GatePolicy(TestS3GateBase):
assert bucket_loc_2 == "rep-3" assert bucket_loc_2 == "rep-3"
with allure.step("Check object policy"): with allure.step("Check object policy"):
cid_1 = search_container_by_name(self.wallet, bucket_1, shell=client_shell) cid_1 = search_container_by_name(
self.wallet, bucket_1, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
copies_1 = get_simple_object_copies( copies_1 = get_simple_object_copies(
wallet=self.wallet, cid=cid_1, oid=version_id_1, shell=client_shell wallet=self.wallet,
cid=cid_1,
oid=version_id_1,
shell=self.shell,
nodes=self.cluster.storage_nodes,
) )
assert copies_1 == 1 assert copies_1 == 1
cid_2 = search_container_by_name(self.wallet, bucket_2, shell=client_shell) cid_2 = search_container_by_name(
self.wallet, bucket_2, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
copies_2 = get_simple_object_copies( copies_2 = get_simple_object_copies(
wallet=self.wallet, cid=cid_2, oid=version_id_2, shell=client_shell wallet=self.wallet,
cid=cid_2,
oid=version_id_2,
shell=self.shell,
nodes=self.cluster.storage_nodes,
) )
assert copies_2 == 3 assert copies_2 == 3

View file

@ -1,6 +1,6 @@
import logging import logging
import os import os
from random import choice import random
from time import sleep from time import sleep
import allure import allure
@ -9,7 +9,6 @@ from common import COMPLEX_OBJ_SIZE
from container import create_container from container import create_container
from epoch import get_epoch, tick_epoch from epoch import get_epoch, tick_epoch
from file_helper import generate_file, get_file_hash from file_helper import generate_file, get_file_hash
from neofs_testlib.shell import Shell
from python_keywords.http_gate import ( from python_keywords.http_gate import (
get_via_http_curl, get_via_http_curl,
get_via_http_gate, get_via_http_gate,
@ -18,11 +17,13 @@ from python_keywords.http_gate import (
upload_via_http_gate, upload_via_http_gate,
upload_via_http_gate_curl, upload_via_http_gate_curl,
) )
from python_keywords.neofs_verbs import get_object, put_object from python_keywords.neofs_verbs import get_object, put_object_to_random_node
from python_keywords.storage_policy import get_nodes_without_object from python_keywords.storage_policy import get_nodes_without_object
from utility import wait_for_gc_pass_on_storage_nodes from utility import wait_for_gc_pass_on_storage_nodes
from wellknown_acl import PUBLIC_ACL from wellknown_acl import PUBLIC_ACL
from steps.cluster_test_base import ClusterTestBase
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
OBJECT_NOT_FOUND_ERROR = "not found" OBJECT_NOT_FOUND_ERROR = "not found"
@ -39,17 +40,17 @@ OBJECT_UPLOAD_DELAY = 10
@allure.link("https://github.com/nspcc-dev/neofs-http-gw#downloading", name="downloading") @allure.link("https://github.com/nspcc-dev/neofs-http-gw#downloading", name="downloading")
@pytest.mark.sanity @pytest.mark.sanity
@pytest.mark.http_gate @pytest.mark.http_gate
class TestHttpGate: class TestHttpGate(ClusterTestBase):
PLACEMENT_RULE_1 = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" PLACEMENT_RULE_1 = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
PLACEMENT_RULE_2 = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" PLACEMENT_RULE_2 = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
@pytest.fixture(scope="class", autouse=True) @pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Prepare wallet and deposit") @allure.title("[Class/Autouse]: Prepare wallet and deposit")
def prepare_wallet(self, prepare_wallet_and_deposit): def prepare_wallet(self, default_wallet):
TestHttpGate.wallet = prepare_wallet_and_deposit TestHttpGate.wallet = default_wallet
@allure.title("Test Put over gRPC, Get over HTTP") @allure.title("Test Put over gRPC, Get over HTTP")
def test_put_grpc_get_http(self, client_shell): def test_put_grpc_get_http(self):
""" """
Test that object can be put using gRPC interface and get using HTTP. Test that object can be put using gRPC interface and get using HTTP.
@ -65,26 +66,38 @@ class TestHttpGate:
Hashes must be the same. Hashes must be the same.
""" """
cid = create_container( cid = create_container(
self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_1, basic_acl=PUBLIC_ACL self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE_1,
basic_acl=PUBLIC_ACL,
) )
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE) file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
with allure.step("Put objects using gRPC"): with allure.step("Put objects using gRPC"):
oid_simple = put_object( oid_simple = put_object_to_random_node(
wallet=self.wallet, path=file_path_simple, cid=cid, shell=client_shell wallet=self.wallet,
path=file_path_simple,
cid=cid,
shell=self.shell,
cluster=self.cluster,
) )
oid_large = put_object( oid_large = put_object_to_random_node(
wallet=self.wallet, path=file_path_large, cid=cid, shell=client_shell wallet=self.wallet,
path=file_path_large,
cid=cid,
shell=self.shell,
cluster=self.cluster,
) )
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)): for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid, shell=client_shell) self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid)
@allure.link("https://github.com/nspcc-dev/neofs-http-gw#uploading", name="uploading") @allure.link("https://github.com/nspcc-dev/neofs-http-gw#uploading", name="uploading")
@allure.link("https://github.com/nspcc-dev/neofs-http-gw#downloading", name="downloading") @allure.link("https://github.com/nspcc-dev/neofs-http-gw#downloading", name="downloading")
@allure.title("Test Put over HTTP, Get over HTTP") @allure.title("Test Put over HTTP, Get over HTTP")
@pytest.mark.smoke @pytest.mark.smoke
def test_put_http_get_http(self, client_shell): def test_put_http_get_http(self):
""" """
Test that object can be put and get using HTTP interface. Test that object can be put and get using HTTP interface.
@ -98,16 +111,24 @@ class TestHttpGate:
Hashes must be the same. Hashes must be the same.
""" """
cid = create_container( cid = create_container(
self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_2, basic_acl=PUBLIC_ACL self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
) )
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE) file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
with allure.step("Put objects using HTTP"): with allure.step("Put objects using HTTP"):
oid_simple = upload_via_http_gate(cid=cid, path=file_path_simple) oid_simple = upload_via_http_gate(
oid_large = upload_via_http_gate(cid=cid, path=file_path_large) cid=cid, path=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint
)
oid_large = upload_via_http_gate(
cid=cid, path=file_path_large, endpoint=self.cluster.default_http_gate_endpoint
)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)): for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid, shell=client_shell) self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid)
@allure.link( @allure.link(
"https://github.com/nspcc-dev/neofs-http-gw#by-attributes", name="download by attributes" "https://github.com/nspcc-dev/neofs-http-gw#by-attributes", name="download by attributes"
@ -122,7 +143,7 @@ class TestHttpGate:
], ],
ids=["simple", "hyphen", "percent"], ids=["simple", "hyphen", "percent"],
) )
def test_put_http_get_http_with_headers(self, client_shell, attributes: dict): def test_put_http_get_http_with_headers(self, attributes: dict):
""" """
Test that object can be downloaded using different attributes in HTTP header. Test that object can be downloaded using different attributes in HTTP header.
@ -136,43 +157,63 @@ class TestHttpGate:
Hashes must be the same. Hashes must be the same.
""" """
cid = create_container( cid = create_container(
self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_2, basic_acl=PUBLIC_ACL self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
) )
file_path = generate_file() file_path = generate_file()
with allure.step("Put objects using HTTP with attribute"): with allure.step("Put objects using HTTP with attribute"):
headers = self._attr_into_header(attributes) headers = self._attr_into_header(attributes)
oid = upload_via_http_gate(cid=cid, path=file_path, headers=headers) oid = upload_via_http_gate(
cid=cid,
path=file_path,
headers=headers,
endpoint=self.cluster.default_http_gate_endpoint,
)
sleep(OBJECT_UPLOAD_DELAY) sleep(OBJECT_UPLOAD_DELAY)
self.get_object_by_attr_and_verify_hashes(oid, file_path, cid, attributes) self.get_object_by_attr_and_verify_hashes(oid, file_path, cid, attributes)
@allure.title("Test Expiration-Epoch in HTTP header") @allure.title("Test Expiration-Epoch in HTTP header")
def test_expiration_epoch_in_http(self, client_shell): def test_expiration_epoch_in_http(self):
endpoint = self.cluster.default_rpc_endpoint
http_endpoint = self.cluster.default_http_gate_endpoint
cid = create_container( cid = create_container(
self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_2, basic_acl=PUBLIC_ACL self.wallet,
shell=self.shell,
endpoint=endpoint,
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
) )
file_path = generate_file() file_path = generate_file()
oids = [] oids = []
curr_epoch = get_epoch(client_shell) curr_epoch = get_epoch(self.shell, self.cluster)
epochs = (curr_epoch, curr_epoch + 1, curr_epoch + 2, curr_epoch + 100) epochs = (curr_epoch, curr_epoch + 1, curr_epoch + 2, curr_epoch + 100)
for epoch in epochs: for epoch in epochs:
headers = {"X-Attribute-Neofs-Expiration-Epoch": str(epoch)} headers = {"X-Attribute-Neofs-Expiration-Epoch": str(epoch)}
with allure.step("Put objects using HTTP with attribute Expiration-Epoch"): with allure.step("Put objects using HTTP with attribute Expiration-Epoch"):
oids.append(upload_via_http_gate(cid=cid, path=file_path, headers=headers)) oids.append(
upload_via_http_gate(
cid=cid, path=file_path, headers=headers, endpoint=http_endpoint
)
)
assert len(oids) == len(epochs), "Expected all objects have been put successfully" assert len(oids) == len(epochs), "Expected all objects have been put successfully"
with allure.step("All objects can be get"): with allure.step("All objects can be get"):
for oid in oids: for oid in oids:
get_via_http_gate(cid=cid, oid=oid) get_via_http_gate(cid=cid, oid=oid, endpoint=http_endpoint)
for expired_objects, not_expired_objects in [(oids[:1], oids[1:]), (oids[:2], oids[2:])]: for expired_objects, not_expired_objects in [(oids[:1], oids[1:]), (oids[:2], oids[2:])]:
tick_epoch(shell=client_shell) tick_epoch(self.shell, self.cluster)
# Wait for GC, because object with expiration is counted as alive until GC removes it # Wait for GC, because object with expiration is counted as alive until GC removes it
wait_for_gc_pass_on_storage_nodes() wait_for_gc_pass_on_storage_nodes()
@ -184,12 +225,16 @@ class TestHttpGate:
with allure.step("Other objects can be get"): with allure.step("Other objects can be get"):
for oid in not_expired_objects: for oid in not_expired_objects:
get_via_http_gate(cid=cid, oid=oid) get_via_http_gate(cid=cid, oid=oid, endpoint=http_endpoint)
@allure.title("Test Zip in HTTP header") @allure.title("Test Zip in HTTP header")
def test_zip_in_http(self, client_shell): def test_zip_in_http(self):
cid = create_container( cid = create_container(
self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_2, basic_acl=PUBLIC_ACL self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
) )
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE) file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
common_prefix = "my_files" common_prefix = "my_files"
@ -197,12 +242,24 @@ class TestHttpGate:
headers1 = {"X-Attribute-FilePath": f"{common_prefix}/file1"} headers1 = {"X-Attribute-FilePath": f"{common_prefix}/file1"}
headers2 = {"X-Attribute-FilePath": f"{common_prefix}/file2"} headers2 = {"X-Attribute-FilePath": f"{common_prefix}/file2"}
upload_via_http_gate(cid=cid, path=file_path_simple, headers=headers1) upload_via_http_gate(
upload_via_http_gate(cid=cid, path=file_path_large, headers=headers2) cid=cid,
path=file_path_simple,
headers=headers1,
endpoint=self.cluster.default_http_gate_endpoint,
)
upload_via_http_gate(
cid=cid,
path=file_path_large,
headers=headers2,
endpoint=self.cluster.default_http_gate_endpoint,
)
sleep(OBJECT_UPLOAD_DELAY) sleep(OBJECT_UPLOAD_DELAY)
dir_path = get_via_zip_http_gate(cid=cid, prefix=common_prefix) dir_path = get_via_zip_http_gate(
cid=cid, prefix=common_prefix, endpoint=self.cluster.default_http_gate_endpoint
)
with allure.step("Verify hashes"): with allure.step("Verify hashes"):
assert get_file_hash(f"{dir_path}/file1") == get_file_hash(file_path_simple) assert get_file_hash(f"{dir_path}/file1") == get_file_hash(file_path_simple)
@ -210,45 +267,63 @@ class TestHttpGate:
@pytest.mark.long @pytest.mark.long
@allure.title("Test Put over HTTP/Curl, Get over HTTP/Curl for large object") @allure.title("Test Put over HTTP/Curl, Get over HTTP/Curl for large object")
def test_put_http_get_http_large_file(self, client_shell): def test_put_http_get_http_large_file(self):
""" """
This test checks upload and download using curl with 'large' object. This test checks upload and download using curl with 'large' object.
Large is object with size up to 20Mb. Large is object with size up to 20Mb.
""" """
cid = create_container( cid = create_container(
self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_2, basic_acl=PUBLIC_ACL self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
) )
obj_size = int(os.getenv("BIG_OBJ_SIZE", COMPLEX_OBJ_SIZE)) obj_size = int(os.getenv("BIG_OBJ_SIZE", COMPLEX_OBJ_SIZE))
file_path = generate_file(obj_size) file_path = generate_file(obj_size)
with allure.step("Put objects using HTTP"): with allure.step("Put objects using HTTP"):
oid_gate = upload_via_http_gate(cid=cid, path=file_path) oid_gate = upload_via_http_gate(
oid_curl = upload_via_http_gate_curl(cid=cid, filepath=file_path, large_object=True) cid=cid, path=file_path, endpoint=self.cluster.default_http_gate_endpoint
)
oid_curl = upload_via_http_gate_curl(
cid=cid,
filepath=file_path,
large_object=True,
endpoint=self.cluster.default_http_gate_endpoint,
)
self.get_object_and_verify_hashes(oid_gate, file_path, self.wallet, cid, shell=client_shell) self.get_object_and_verify_hashes(oid_gate, file_path, self.wallet, cid)
self.get_object_and_verify_hashes( self.get_object_and_verify_hashes(
oid_curl, oid_curl,
file_path, file_path,
self.wallet, self.wallet,
cid, cid,
shell=client_shell,
object_getter=get_via_http_curl, object_getter=get_via_http_curl,
) )
@allure.title("Test Put/Get over HTTP using Curl utility") @allure.title("Test Put/Get over HTTP using Curl utility")
def test_put_http_get_http_curl(self, client_shell): def test_put_http_get_http_curl(self):
""" """
Test checks upload and download over HTTP using curl utility. Test checks upload and download over HTTP using curl utility.
""" """
cid = create_container( cid = create_container(
self.wallet, shell=client_shell, rule=self.PLACEMENT_RULE_2, basic_acl=PUBLIC_ACL self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
) )
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE) file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
with allure.step("Put objects using curl utility"): with allure.step("Put objects using curl utility"):
oid_simple = upload_via_http_gate_curl(cid=cid, filepath=file_path_simple) oid_simple = upload_via_http_gate_curl(
oid_large = upload_via_http_gate_curl(cid=cid, filepath=file_path_large) cid=cid, filepath=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint
)
oid_large = upload_via_http_gate_curl(
cid=cid, filepath=file_path_large, endpoint=self.cluster.default_http_gate_endpoint
)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)): for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
self.get_object_and_verify_hashes( self.get_object_and_verify_hashes(
@ -256,45 +331,57 @@ class TestHttpGate:
file_path, file_path,
self.wallet, self.wallet,
cid, cid,
shell=client_shell,
object_getter=get_via_http_curl, object_getter=get_via_http_curl,
) )
@staticmethod
@allure.step("Try to get object and expect error") @allure.step("Try to get object and expect error")
def try_to_get_object_and_expect_error(cid: str, oid: str, error_pattern: str) -> None: def try_to_get_object_and_expect_error(self, cid: str, oid: str, error_pattern: str) -> None:
try: try:
get_via_http_gate(cid=cid, oid=oid) get_via_http_gate(cid=cid, oid=oid, endpoint=self.cluster.default_http_gate_endpoint)
raise AssertionError(f"Expected error on getting object with cid: {cid}") raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err: except Exception as err:
match = error_pattern.casefold() in str(err).casefold() match = error_pattern.casefold() in str(err).casefold()
assert match, f"Expected {err} to match {error_pattern}" assert match, f"Expected {err} to match {error_pattern}"
@staticmethod
@allure.step("Verify object can be get using HTTP header attribute") @allure.step("Verify object can be get using HTTP header attribute")
def get_object_by_attr_and_verify_hashes( def get_object_by_attr_and_verify_hashes(
oid: str, file_name: str, cid: str, attrs: dict self, oid: str, file_name: str, cid: str, attrs: dict
) -> None: ) -> None:
got_file_path_http = get_via_http_gate(cid=cid, oid=oid) got_file_path_http = get_via_http_gate(
got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs) cid=cid, oid=oid, endpoint=self.cluster.default_http_gate_endpoint
)
got_file_path_http_attr = get_via_http_gate_by_attribute(
cid=cid, attribute=attrs, endpoint=self.cluster.default_http_gate_endpoint
)
TestHttpGate._assert_hashes_are_equal( TestHttpGate._assert_hashes_are_equal(
file_name, got_file_path_http, got_file_path_http_attr file_name, got_file_path_http, got_file_path_http_attr
) )
@staticmethod
@allure.step("Verify object can be get using HTTP") @allure.step("Verify object can be get using HTTP")
def get_object_and_verify_hashes( def get_object_and_verify_hashes(
oid: str, file_name: str, wallet: str, cid: str, shell: Shell, object_getter=None self, oid: str, file_name: str, wallet: str, cid: str, object_getter=None
) -> None: ) -> None:
nodes = get_nodes_without_object(wallet=wallet, cid=cid, oid=oid, shell=shell) nodes = get_nodes_without_object(
random_node = choice(nodes) wallet=wallet,
cid=cid,
oid=oid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
random_node = random.choice(nodes)
object_getter = object_getter or get_via_http_gate object_getter = object_getter or get_via_http_gate
got_file_path = get_object( got_file_path = get_object(
wallet=wallet, cid=cid, oid=oid, shell=shell, endpoint=random_node wallet=wallet,
cid=cid,
oid=oid,
shell=self.shell,
endpoint=random_node.get_rpc_endpoint(),
)
got_file_path_http = object_getter(
cid=cid, oid=oid, endpoint=self.cluster.default_http_gate_endpoint
) )
got_file_path_http = object_getter(cid=cid, oid=oid)
TestHttpGate._assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) TestHttpGate._assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)

View file

@ -2,131 +2,146 @@ import random
import allure import allure
import pytest import pytest
from common import COMPLEX_OBJ_SIZE, NEOFS_NETMAP_DICT, SIMPLE_OBJ_SIZE, WALLET_PASS from cluster_test_base import ClusterTestBase
from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE, WALLET_PASS
from file_helper import generate_file from file_helper import generate_file
from grpc_responses import SESSION_NOT_FOUND from grpc_responses import SESSION_NOT_FOUND
from neofs_testlib.shell import Shell
from neofs_testlib.utils.wallet import get_last_address_from_wallet from neofs_testlib.utils.wallet import get_last_address_from_wallet
from python_keywords.container import create_container from python_keywords.container import create_container
from python_keywords.neofs_verbs import delete_object, put_object from python_keywords.neofs_verbs import delete_object, put_object, put_object_to_random_node
from steps.session_token import create_session_token from steps.session_token import create_session_token
@allure.title("Test Object Operations with Session Token")
@pytest.mark.sanity @pytest.mark.sanity
@pytest.mark.session_token @pytest.mark.session_token
@pytest.mark.parametrize( class TestDynamicObjectSession(ClusterTestBase):
"object_size", @allure.title("Test Object Operations with Session Token")
[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], @pytest.mark.parametrize(
ids=["simple object", "complex object"], "object_size",
) [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
def test_object_session_token(prepare_wallet_and_deposit, client_shell: Shell, object_size): ids=["simple object", "complex object"],
""" )
Test how operations over objects are executed with a session token def test_object_session_token(self, default_wallet, object_size):
"""
Test how operations over objects are executed with a session token
Steps: Steps:
1. Create a private container 1. Create a private container
2. Obj operation requests to the node which IS NOT in the container but granted 2. Obj operation requests to the node which IS NOT in the container but granted
with a session token with a session token
3. Obj operation requests to the node which IS in the container and NOT granted 3. Obj operation requests to the node which IS in the container and NOT granted
with a session token with a session token
4. Obj operation requests to the node which IS NOT in the container and NOT granted 4. Obj operation requests to the node which IS NOT in the container and NOT granted
with a session token with a session token
""" """
with allure.step("Init wallet"): with allure.step("Init wallet"):
wallet = prepare_wallet_and_deposit wallet = default_wallet
address = get_last_address_from_wallet(wallet, "") address = get_last_address_from_wallet(wallet, "")
with allure.step("Nodes Settlements"): with allure.step("Nodes Settlements"):
( (
session_token_node_name, session_token_node,
container_node_name, container_node,
noncontainer_node_name, non_container_node,
) = random.sample(list(NEOFS_NETMAP_DICT.keys()), 3) ) = random.sample(self.cluster.storage_nodes, 3)
session_token_node = NEOFS_NETMAP_DICT[session_token_node_name]["rpc"]
container_node = NEOFS_NETMAP_DICT[container_node_name]["rpc"]
noncontainer_node = NEOFS_NETMAP_DICT[noncontainer_node_name]["rpc"]
with allure.step("Create Session Token"): with allure.step("Create Session Token"):
session_token = create_session_token( session_token = create_session_token(
shell=client_shell, shell=self.shell,
owner=address, owner=address,
wallet_path=wallet, wallet_path=wallet,
wallet_password=WALLET_PASS, wallet_password=WALLET_PASS,
rpc_endpoint=session_token_node, rpc_endpoint=session_token_node.get_rpc_endpoint(),
) )
with allure.step("Create Private Container"): with allure.step("Create Private Container"):
un_locode = NEOFS_NETMAP_DICT[container_node_name]["UN-LOCODE"] un_locode = container_node.get_un_locode()
locode = "SPB" if un_locode == "RU LED" else un_locode.split()[1] locode = "SPB" if un_locode == "RU LED" else un_locode.split()[1]
placement_policy = ( placement_policy = (
f"REP 1 IN LOC_{locode}_PLACE CBF 1 SELECT 1 FROM LOC_{locode} " f"REP 1 IN LOC_{locode}_PLACE CBF 1 SELECT 1 FROM LOC_{locode} "
f'AS LOC_{locode}_PLACE FILTER "UN-LOCODE" ' f'AS LOC_{locode}_PLACE FILTER "UN-LOCODE" '
f'EQ "{un_locode}" AS LOC_{locode}' f'EQ "{un_locode}" AS LOC_{locode}'
) )
cid = create_container(wallet, shell=client_shell, rule=placement_policy) cid = create_container(
wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=placement_policy,
)
with allure.step("Put Objects"): with allure.step("Put Objects"):
file_path = generate_file(object_size) file_path = generate_file(object_size)
oid = put_object(wallet=wallet, path=file_path, cid=cid, shell=client_shell) oid = put_object_to_random_node(
oid_delete = put_object(wallet=wallet, path=file_path, cid=cid, shell=client_shell) wallet=wallet,
path=file_path,
cid=cid,
shell=self.shell,
cluster=self.cluster,
)
oid_delete = put_object_to_random_node(
wallet=wallet,
path=file_path,
cid=cid,
shell=self.shell,
cluster=self.cluster,
)
with allure.step("Node not in container but granted a session token"): with allure.step("Node not in container but granted a session token"):
put_object(
wallet=wallet,
path=file_path,
cid=cid,
shell=client_shell,
endpoint=session_token_node,
session=session_token,
)
delete_object(
wallet=wallet,
cid=cid,
oid=oid_delete,
shell=client_shell,
endpoint=session_token_node,
session=session_token,
)
with allure.step("Node in container and not granted a session token"):
with pytest.raises(Exception, match=SESSION_NOT_FOUND):
put_object( put_object(
wallet=wallet, wallet=wallet,
path=file_path, path=file_path,
cid=cid, cid=cid,
shell=client_shell, shell=self.shell,
endpoint=container_node, endpoint=session_token_node.get_rpc_endpoint(),
session=session_token, session=session_token,
) )
with pytest.raises(Exception, match=SESSION_NOT_FOUND):
delete_object( delete_object(
wallet=wallet, wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid_delete,
shell=client_shell, shell=self.shell,
endpoint=container_node, endpoint=session_token_node.get_rpc_endpoint(),
session=session_token, session=session_token,
) )
with allure.step("Node not in container and not granted a session token"): with allure.step("Node in container and not granted a session token"):
with pytest.raises(Exception, match=SESSION_NOT_FOUND): with pytest.raises(Exception, match=SESSION_NOT_FOUND):
put_object( put_object(
wallet=wallet, wallet=wallet,
path=file_path, path=file_path,
cid=cid, cid=cid,
shell=client_shell, shell=self.shell,
endpoint=noncontainer_node, endpoint=container_node.get_rpc_endpoint(),
session=session_token, session=session_token,
) )
with pytest.raises(Exception, match=SESSION_NOT_FOUND): with pytest.raises(Exception, match=SESSION_NOT_FOUND):
delete_object( delete_object(
wallet=wallet, wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
shell=client_shell, shell=self.shell,
endpoint=noncontainer_node, endpoint=container_node.get_rpc_endpoint(),
session=session_token, session=session_token,
) )
with allure.step("Node not in container and not granted a session token"):
with pytest.raises(Exception, match=SESSION_NOT_FOUND):
put_object(
wallet=wallet,
path=file_path,
cid=cid,
shell=self.shell,
endpoint=non_container_node.get_rpc_endpoint(),
session=session_token,
)
with pytest.raises(Exception, match=SESSION_NOT_FOUND):
delete_object(
wallet=wallet,
cid=cid,
oid=oid,
shell=self.shell,
endpoint=non_container_node.get_rpc_endpoint(),
session=session_token,
)

View file

@ -21,31 +21,31 @@ from python_keywords.object_access import can_put_object
from wallet import WalletFile from wallet import WalletFile
from wellknown_acl import PUBLIC_ACL from wellknown_acl import PUBLIC_ACL
from steps.cluster_test_base import ClusterTestBase
from steps.session_token import ContainerVerb, get_container_signed_token from steps.session_token import ContainerVerb, get_container_signed_token
class TestSessionTokenContainer: class TestSessionTokenContainer(ClusterTestBase):
@pytest.fixture(scope="module") @pytest.fixture(scope="module")
def static_sessions( def static_sessions(
self, self,
owner_wallet: WalletFile, owner_wallet: WalletFile,
user_wallet: WalletFile, user_wallet: WalletFile,
client_shell: Shell, client_shell: Shell,
prepare_tmp_dir: str, temp_directory: str,
) -> dict[ContainerVerb, str]: ) -> dict[ContainerVerb, str]:
""" """
Returns dict with static session token file paths for all verbs with default lifetime Returns dict with static session token file paths for all verbs with default lifetime
""" """
return { return {
verb: get_container_signed_token( verb: get_container_signed_token(
owner_wallet, user_wallet, verb, client_shell, prepare_tmp_dir owner_wallet, user_wallet, verb, client_shell, temp_directory
) )
for verb in ContainerVerb for verb in ContainerVerb
} }
def test_static_session_token_container_create( def test_static_session_token_container_create(
self, self,
client_shell: Shell,
owner_wallet: WalletFile, owner_wallet: WalletFile,
user_wallet: WalletFile, user_wallet: WalletFile,
static_sessions: dict[ContainerVerb, str], static_sessions: dict[ContainerVerb, str],
@ -57,21 +57,26 @@ class TestSessionTokenContainer:
cid = create_container( cid = create_container(
user_wallet.path, user_wallet.path,
session_token=static_sessions[ContainerVerb.CREATE], session_token=static_sessions[ContainerVerb.CREATE],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wait_for_creation=False, wait_for_creation=False,
) )
container_info: dict[str, str] = get_container(owner_wallet.path, cid, shell=client_shell) container_info: dict[str, str] = get_container(
owner_wallet.path, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
assert container_info["ownerID"] == owner_wallet.get_address() assert container_info["ownerID"] == owner_wallet.get_address()
assert cid not in list_containers(user_wallet.path, shell=client_shell) assert cid not in list_containers(
assert cid in list_containers(owner_wallet.path, shell=client_shell) user_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
assert cid in list_containers(
owner_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
@pytest.mark.skip("Failed with timeout") @pytest.mark.skip("Failed with timeout")
def test_static_session_token_container_create_with_other_verb( def test_static_session_token_container_create_with_other_verb(
self, self,
client_shell: Shell,
owner_wallet: WalletFile,
user_wallet: WalletFile, user_wallet: WalletFile,
static_sessions: dict[ContainerVerb, str], static_sessions: dict[ContainerVerb, str],
): ):
@ -84,15 +89,14 @@ class TestSessionTokenContainer:
create_container( create_container(
user_wallet.path, user_wallet.path,
session_token=static_sessions[verb], session_token=static_sessions[verb],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wait_for_creation=False, wait_for_creation=False,
) )
@pytest.mark.skip("Failed with timeout") @pytest.mark.skip("Failed with timeout")
def test_static_session_token_container_create_with_other_wallet( def test_static_session_token_container_create_with_other_wallet(
self, self,
client_shell: Shell,
owner_wallet: WalletFile,
stranger_wallet: WalletFile, stranger_wallet: WalletFile,
static_sessions: dict[ContainerVerb, str], static_sessions: dict[ContainerVerb, str],
): ):
@ -104,13 +108,13 @@ class TestSessionTokenContainer:
create_container( create_container(
stranger_wallet.path, stranger_wallet.path,
session_token=static_sessions[ContainerVerb.CREATE], session_token=static_sessions[ContainerVerb.CREATE],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wait_for_creation=False, wait_for_creation=False,
) )
def test_static_session_token_container_delete( def test_static_session_token_container_delete(
self, self,
client_shell: Shell,
owner_wallet: WalletFile, owner_wallet: WalletFile,
user_wallet: WalletFile, user_wallet: WalletFile,
static_sessions: dict[ContainerVerb, str], static_sessions: dict[ContainerVerb, str],
@ -119,20 +123,27 @@ class TestSessionTokenContainer:
Validate static session with delete operation Validate static session with delete operation
""" """
with allure.step("Create container"): with allure.step("Create container"):
cid = create_container(owner_wallet.path, shell=client_shell, wait_for_creation=False) cid = create_container(
owner_wallet.path,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wait_for_creation=False,
)
with allure.step("Delete container with static session token"): with allure.step("Delete container with static session token"):
delete_container( delete_container(
wallet=user_wallet.path, wallet=user_wallet.path,
cid=cid, cid=cid,
session_token=static_sessions[ContainerVerb.DELETE], session_token=static_sessions[ContainerVerb.DELETE],
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
) )
assert cid not in list_containers(owner_wallet.path, shell=client_shell) assert cid not in list_containers(
owner_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
def test_static_session_token_container_set_eacl( def test_static_session_token_container_set_eacl(
self, self,
client_shell: Shell,
owner_wallet: WalletFile, owner_wallet: WalletFile,
user_wallet: WalletFile, user_wallet: WalletFile,
stranger_wallet: WalletFile, stranger_wallet: WalletFile,
@ -142,9 +153,14 @@ class TestSessionTokenContainer:
Validate static session with set eacl operation Validate static session with set eacl operation
""" """
with allure.step("Create container"): with allure.step("Create container"):
cid = create_container(owner_wallet.path, basic_acl=PUBLIC_ACL, shell=client_shell) cid = create_container(
owner_wallet.path,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
file_path = generate_file() file_path = generate_file()
assert can_put_object(stranger_wallet.path, cid, file_path, client_shell) assert can_put_object(stranger_wallet.path, cid, file_path, self.shell, self.cluster)
with allure.step(f"Deny all operations for other via eACL"): with allure.step(f"Deny all operations for other via eACL"):
eacl_deny = [ eacl_deny = [
@ -154,10 +170,11 @@ class TestSessionTokenContainer:
set_eacl( set_eacl(
user_wallet.path, user_wallet.path,
cid, cid,
create_eacl(cid, eacl_deny, shell=client_shell), create_eacl(cid, eacl_deny, shell=self.shell),
shell=client_shell, shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
session_token=static_sessions[ContainerVerb.SETEACL], session_token=static_sessions[ContainerVerb.SETEACL],
) )
wait_for_cache_expired() wait_for_cache_expired()
assert not can_put_object(stranger_wallet.path, cid, file_path, client_shell) assert not can_put_object(stranger_wallet.path, cid, file_path, self.shell, self.cluster)

View file

@ -10,7 +10,7 @@ from typing import Any, Dict, List, Optional, Union
import allure import allure
import base58 import base58
from common import ASSETS_DIR, NEOFS_CLI_EXEC, NEOFS_ENDPOINT, WALLET_CONFIG from common import ASSETS_DIR, NEOFS_CLI_EXEC, WALLET_CONFIG
from data_formatters import get_wallet_public_key from data_formatters import get_wallet_public_key
from neofs_testlib.cli import NeofsCli from neofs_testlib.cli import NeofsCli
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
@ -116,10 +116,10 @@ class EACLRule:
@allure.title("Get extended ACL") @allure.title("Get extended ACL")
def get_eacl(wallet_path: str, cid: str, shell: Shell) -> Optional[str]: def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG)
try: try:
result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=NEOFS_ENDPOINT, cid=cid) result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid)
except RuntimeError as exc: except RuntimeError as exc:
logger.info("Extended ACL table is not set for this container") logger.info("Extended ACL table is not set for this container")
logger.info(f"Got exception while getting eacl: {exc}") logger.info(f"Got exception while getting eacl: {exc}")
@ -135,12 +135,13 @@ def set_eacl(
cid: str, cid: str,
eacl_table_path: str, eacl_table_path: str,
shell: Shell, shell: Shell,
endpoint: str,
session_token: Optional[str] = None, session_token: Optional[str] = None,
) -> None: ) -> None:
cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG)
cli.container.set_eacl( cli.container.set_eacl(
wallet=wallet_path, wallet=wallet_path,
rpc_endpoint=NEOFS_ENDPOINT, rpc_endpoint=endpoint,
cid=cid, cid=cid,
table=eacl_table_path, table=eacl_table_path,
await_mode=True, await_mode=True,
@ -166,7 +167,11 @@ def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
def form_bearertoken_file( def form_bearertoken_file(
wif: str, cid: str, eacl_rule_list: List[Union[EACLRule, EACLPubKey]], shell: Shell wif: str,
cid: str,
eacl_rule_list: List[Union[EACLRule, EACLPubKey]],
shell: Shell,
endpoint: str,
) -> str: ) -> str:
""" """
This function fetches eACL for given <cid> on behalf of <wif>, This function fetches eACL for given <cid> on behalf of <wif>,
@ -176,7 +181,7 @@ def form_bearertoken_file(
enc_cid = _encode_cid_for_eacl(cid) enc_cid = _encode_cid_for_eacl(cid)
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
eacl = get_eacl(wif, cid, shell=shell) eacl = get_eacl(wif, cid, shell, endpoint)
json_eacl = dict() json_eacl = dict()
if eacl: if eacl:
eacl = eacl.replace("eACL: ", "").split("Signature")[0] eacl = eacl.replace("eACL: ", "").split("Signature")[0]

View file

@ -15,7 +15,8 @@ from typing import Optional
import allure import allure
import neofs_verbs import neofs_verbs
from common import NEOFS_NETMAP, WALLET_CONFIG from cluster import StorageNode
from common import WALLET_CONFIG
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -27,6 +28,7 @@ def get_link_object(
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
nodes: list[StorageNode],
bearer: str = "", bearer: str = "",
wallet_config: str = WALLET_CONFIG, wallet_config: str = WALLET_CONFIG,
is_direct: bool = True, is_direct: bool = True,
@ -38,6 +40,7 @@ def get_link_object(
cid (str): Container ID which stores the Large Object cid (str): Container ID which stores the Large Object
oid (str): Large Object ID oid (str): Large Object ID
shell: executor for cli command shell: executor for cli command
nodes: list of nodes to do search on
bearer (optional, str): path to Bearer token file bearer (optional, str): path to Bearer token file
wallet_config (optional, str): path to the neofs-cli config file wallet_config (optional, str): path to the neofs-cli config file
is_direct: send request directly to the node or not; this flag is_direct: send request directly to the node or not; this flag
@ -47,14 +50,15 @@ def get_link_object(
When no Link Object ID is found after all Storage Nodes polling, When no Link Object ID is found after all Storage Nodes polling,
the function throws an error. the function throws an error.
""" """
for node in NEOFS_NETMAP: for node in nodes:
endpoint = node.get_rpc_endpoint()
try: try:
resp = neofs_verbs.head_object( resp = neofs_verbs.head_object(
wallet, wallet,
cid, cid,
oid, oid,
shell=shell, shell=shell,
endpoint=node, endpoint=endpoint,
is_raw=True, is_raw=True,
is_direct=is_direct, is_direct=is_direct,
bearer=bearer, bearer=bearer,
@ -63,13 +67,15 @@ def get_link_object(
if resp["link"]: if resp["link"]:
return resp["link"] return resp["link"]
except Exception: except Exception:
logger.info(f"No Link Object found on {node}; continue") logger.info(f"No Link Object found on {endpoint}; continue")
logger.error(f"No Link Object for {cid}/{oid} found among all Storage Nodes") logger.error(f"No Link Object for {cid}/{oid} found among all Storage Nodes")
return None return None
@allure.step("Get Last Object") @allure.step("Get Last Object")
def get_last_object(wallet: str, cid: str, oid: str, shell: Shell) -> Optional[str]: def get_last_object(
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> Optional[str]:
""" """
Args: Args:
wallet (str): path to the wallet on whose behalf the Storage Nodes wallet (str): path to the wallet on whose behalf the Storage Nodes
@ -77,19 +83,21 @@ def get_last_object(wallet: str, cid: str, oid: str, shell: Shell) -> Optional[s
cid (str): Container ID which stores the Large Object cid (str): Container ID which stores the Large Object
oid (str): Large Object ID oid (str): Large Object ID
shell: executor for cli command shell: executor for cli command
nodes: list of nodes to do search on
Returns: Returns:
(str): Last Object ID (str): Last Object ID
When no Last Object ID is found after all Storage Nodes polling, When no Last Object ID is found after all Storage Nodes polling,
the function throws an error. the function throws an error.
""" """
for node in NEOFS_NETMAP: for node in nodes:
endpoint = node.get_rpc_endpoint()
try: try:
resp = neofs_verbs.head_object( resp = neofs_verbs.head_object(
wallet, cid, oid, shell=shell, endpoint=node, is_raw=True, is_direct=True wallet, cid, oid, shell=shell, endpoint=endpoint, is_raw=True, is_direct=True
) )
if resp["lastPart"]: if resp["lastPart"]:
return resp["lastPart"] return resp["lastPart"]
except Exception: except Exception:
logger.info(f"No Last Object found on {node}; continue") logger.info(f"No Last Object found on {endpoint}; continue")
logger.error(f"No Last Object for {cid}/{oid} found among all Storage Nodes") logger.error(f"No Last Object for {cid}/{oid} found among all Storage Nodes")
return None return None

View file

@ -11,7 +11,7 @@ from typing import Optional, Union
import allure import allure
import json_transformers import json_transformers
from common import NEOFS_CLI_EXEC, NEOFS_ENDPOINT, WALLET_CONFIG from common import NEOFS_CLI_EXEC, WALLET_CONFIG
from neofs_testlib.cli import NeofsCli from neofs_testlib.cli import NeofsCli
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
@ -24,6 +24,7 @@ DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
def create_container( def create_container(
wallet: str, wallet: str,
shell: Shell, shell: Shell,
endpoint: str,
rule: str = DEFAULT_PLACEMENT_RULE, rule: str = DEFAULT_PLACEMENT_RULE,
basic_acl: str = "", basic_acl: str = "",
attributes: Optional[dict] = None, attributes: Optional[dict] = None,
@ -49,6 +50,7 @@ def create_container(
the session token; this parameter makes sense the session token; this parameter makes sense
when paired with `session_token` when paired with `session_token`
shell: executor for cli command shell: executor for cli command
endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
options (optional, dict): any other options to pass to the call options (optional, dict): any other options to pass to the call
name (optional, str): container name attribute name (optional, str): container name attribute
await_mode (bool): block execution until container is persisted await_mode (bool): block execution until container is persisted
@ -60,7 +62,7 @@ def create_container(
cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG)
result = cli.container.create( result = cli.container.create(
rpc_endpoint=NEOFS_ENDPOINT, rpc_endpoint=endpoint,
wallet=session_wallet if session_wallet else wallet, wallet=session_wallet if session_wallet else wallet,
policy=rule, policy=rule,
basic_acl=basic_acl, basic_acl=basic_acl,
@ -76,16 +78,16 @@ def create_container(
logger.info("Container created; waiting until it is persisted in the sidechain") logger.info("Container created; waiting until it is persisted in the sidechain")
if wait_for_creation: if wait_for_creation:
wait_for_container_creation(wallet, cid, shell=shell) wait_for_container_creation(wallet, cid, shell, endpoint)
return cid return cid
def wait_for_container_creation( def wait_for_container_creation(
wallet: str, cid: str, shell: Shell, attempts: int = 15, sleep_interval: int = 1 wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1
): ):
for _ in range(attempts): for _ in range(attempts):
containers = list_containers(wallet, shell=shell) containers = list_containers(wallet, shell, endpoint)
if cid in containers: if cid in containers:
return return
logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue")
@ -96,11 +98,11 @@ def wait_for_container_creation(
def wait_for_container_deletion( def wait_for_container_deletion(
wallet: str, cid: str, shell: Shell, attempts: int = 30, sleep_interval: int = 1 wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1
): ):
for _ in range(attempts): for _ in range(attempts):
try: try:
get_container(wallet, cid, shell=shell) get_container(wallet, cid, shell=shell, endpoint=endpoint)
sleep(sleep_interval) sleep(sleep_interval)
continue continue
except Exception as err: except Exception as err:
@ -111,18 +113,19 @@ def wait_for_container_deletion(
@allure.step("List Containers") @allure.step("List Containers")
def list_containers(wallet: str, shell: Shell) -> list[str]: def list_containers(wallet: str, shell: Shell, endpoint: str) -> list[str]:
""" """
A wrapper for `neofs-cli container list` call. It returns all the A wrapper for `neofs-cli container list` call. It returns all the
available containers for the given wallet. available containers for the given wallet.
Args: Args:
wallet (str): a wallet on whose behalf we list the containers wallet (str): a wallet on whose behalf we list the containers
shell: executor for cli command shell: executor for cli command
endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
Returns: Returns:
(list): list of containers (list): list of containers
""" """
cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG)
result = cli.container.list(rpc_endpoint=NEOFS_ENDPOINT, wallet=wallet) result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet)
logger.info(f"Containers: \n{result}") logger.info(f"Containers: \n{result}")
return result.stdout.split() return result.stdout.split()
@ -132,6 +135,7 @@ def get_container(
wallet: str, wallet: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str,
json_mode: bool = True, json_mode: bool = True,
) -> Union[dict, str]: ) -> Union[dict, str]:
""" """
@ -141,14 +145,14 @@ def get_container(
wallet (str): path to a wallet on whose behalf we get the container wallet (str): path to a wallet on whose behalf we get the container
cid (str): ID of the container to get cid (str): ID of the container to get
shell: executor for cli command shell: executor for cli command
endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
json_mode (bool): return container in JSON format json_mode (bool): return container in JSON format
Returns: Returns:
(dict, str): dict of container attributes (dict, str): dict of container attributes
""" """
cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG)
result = cli.container.get( result = cli.container.get(rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode)
rpc_endpoint=NEOFS_ENDPOINT, wallet=wallet, cid=cid, json_mode=json_mode
)
if not json_mode: if not json_mode:
return result.stdout return result.stdout
@ -166,7 +170,12 @@ def get_container(
# TODO: make the error message about a non-found container more user-friendly # TODO: make the error message about a non-found container more user-friendly
# https://github.com/nspcc-dev/neofs-contract/issues/121 # https://github.com/nspcc-dev/neofs-contract/issues/121
def delete_container( def delete_container(
wallet: str, cid: str, shell: Shell, force: bool = False, session_token: Optional[str] = None wallet: str,
cid: str,
shell: Shell,
endpoint: str,
force: bool = False,
session_token: Optional[str] = None,
) -> None: ) -> None:
""" """
A wrapper for `neofs-cli container delete` call. A wrapper for `neofs-cli container delete` call.
@ -174,6 +183,7 @@ def delete_container(
wallet (str): path to a wallet on whose behalf we delete the container wallet (str): path to a wallet on whose behalf we delete the container
cid (str): ID of the container to delete cid (str): ID of the container to delete
shell: executor for cli command shell: executor for cli command
endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
force (bool): do not check whether container contains locks and remove immediately force (bool): do not check whether container contains locks and remove immediately
session_token: a path to session token file session_token: a path to session token file
This function doesn't return anything. This function doesn't return anything.
@ -181,7 +191,7 @@ def delete_container(
cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG)
cli.container.delete( cli.container.delete(
wallet=wallet, cid=cid, rpc_endpoint=NEOFS_ENDPOINT, force=force, session=session_token wallet=wallet, cid=cid, rpc_endpoint=endpoint, force=force, session=session_token
) )
@ -212,10 +222,10 @@ def _parse_cid(output: str) -> str:
@allure.step("Search container by name") @allure.step("Search container by name")
def search_container_by_name(wallet: str, name: str, shell: Shell): def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str):
list_cids = list_containers(wallet, shell) list_cids = list_containers(wallet, shell, endpoint)
for cid in list_cids: for cid in list_cids:
cont_info = get_container(wallet, cid, shell, True) cont_info = get_container(wallet, cid, shell, endpoint, True)
if cont_info.get("attributes").get("Name", None) == name: if cont_info.get("attributes").get("Name", None) == name:
return cid return cid
return None return None

View file

@ -1,6 +1,7 @@
from typing import List, Optional from typing import List, Optional
from acl import EACLOperation from acl import EACLOperation
from cluster import Cluster
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
from python_keywords.object_access import ( from python_keywords.object_access import (
can_delete_object, can_delete_object,
@ -19,17 +20,21 @@ def check_full_access_to_container(
oid: str, oid: str,
file_name: str, file_name: str,
shell: Shell, shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
): ):
assert can_put_object(wallet, cid, file_name, shell, bearer, wallet_config, xhdr) endpoint = cluster.default_rpc_endpoint
assert can_get_head_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) assert can_put_object(wallet, cid, file_name, shell, cluster, bearer, wallet_config, xhdr)
assert can_get_range_of_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) assert can_get_head_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr)
assert can_get_range_hash_of_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) assert can_get_range_of_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr)
assert can_search_object(wallet, cid, shell, oid, bearer, wallet_config, xhdr) assert can_get_range_hash_of_object(
assert can_get_object(wallet, cid, oid, file_name, shell, bearer, wallet_config, xhdr) wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
assert can_delete_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) )
assert can_search_object(wallet, cid, shell, endpoint, oid, bearer, wallet_config, xhdr)
assert can_get_object(wallet, cid, oid, file_name, shell, cluster, bearer, wallet_config, xhdr)
assert can_delete_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr)
def check_no_access_to_container( def check_no_access_to_container(
@ -38,17 +43,25 @@ def check_no_access_to_container(
oid: str, oid: str,
file_name: str, file_name: str,
shell: Shell, shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
): ):
assert not can_put_object(wallet, cid, file_name, shell, bearer, wallet_config, xhdr) endpoint = cluster.default_rpc_endpoint
assert not can_get_head_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) assert not can_put_object(wallet, cid, file_name, shell, cluster, bearer, wallet_config, xhdr)
assert not can_get_range_of_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) assert not can_get_head_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr)
assert not can_get_range_hash_of_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) assert not can_get_range_of_object(
assert not can_search_object(wallet, cid, shell, oid, bearer, wallet_config, xhdr) wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
assert not can_get_object(wallet, cid, oid, file_name, shell, bearer, wallet_config, xhdr) )
assert not can_delete_object(wallet, cid, oid, shell, bearer, wallet_config, xhdr) assert not can_get_range_hash_of_object(
wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
)
assert not can_search_object(wallet, cid, shell, endpoint, oid, bearer, wallet_config, xhdr)
assert not can_get_object(
wallet, cid, oid, file_name, shell, cluster, bearer, wallet_config, xhdr
)
assert not can_delete_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr)
def check_custom_access_to_container( def check_custom_access_to_container(
@ -57,42 +70,44 @@ def check_custom_access_to_container(
oid: str, oid: str,
file_name: str, file_name: str,
shell: Shell, shell: Shell,
cluster: Cluster,
deny_operations: Optional[List[EACLOperation]] = None, deny_operations: Optional[List[EACLOperation]] = None,
ignore_operations: Optional[List[EACLOperation]] = None, ignore_operations: Optional[List[EACLOperation]] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
): ):
endpoint = cluster.default_rpc_endpoint
deny_operations = [op.value for op in deny_operations or []] deny_operations = [op.value for op in deny_operations or []]
ignore_operations = [op.value for op in ignore_operations or []] ignore_operations = [op.value for op in ignore_operations or []]
checks: dict = {} checks: dict = {}
if EACLOperation.PUT.value not in ignore_operations: if EACLOperation.PUT.value not in ignore_operations:
checks[EACLOperation.PUT.value] = can_put_object( checks[EACLOperation.PUT.value] = can_put_object(
wallet, cid, file_name, shell, bearer, wallet_config, xhdr wallet, cid, file_name, shell, cluster, bearer, wallet_config, xhdr
) )
if EACLOperation.HEAD.value not in ignore_operations: if EACLOperation.HEAD.value not in ignore_operations:
checks[EACLOperation.HEAD.value] = can_get_head_object( checks[EACLOperation.HEAD.value] = can_get_head_object(
wallet, cid, oid, shell, bearer, wallet_config, xhdr wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
) )
if EACLOperation.GET_RANGE.value not in ignore_operations: if EACLOperation.GET_RANGE.value not in ignore_operations:
checks[EACLOperation.GET_RANGE.value] = can_get_range_of_object( checks[EACLOperation.GET_RANGE.value] = can_get_range_of_object(
wallet, cid, oid, shell, bearer, wallet_config, xhdr wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
) )
if EACLOperation.GET_RANGE_HASH.value not in ignore_operations: if EACLOperation.GET_RANGE_HASH.value not in ignore_operations:
checks[EACLOperation.GET_RANGE_HASH.value] = can_get_range_hash_of_object( checks[EACLOperation.GET_RANGE_HASH.value] = can_get_range_hash_of_object(
wallet, cid, oid, shell, bearer, wallet_config, xhdr wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
) )
if EACLOperation.SEARCH.value not in ignore_operations: if EACLOperation.SEARCH.value not in ignore_operations:
checks[EACLOperation.SEARCH.value] = can_search_object( checks[EACLOperation.SEARCH.value] = can_search_object(
wallet, cid, shell, oid, bearer, wallet_config, xhdr wallet, cid, shell, endpoint, oid, bearer, wallet_config, xhdr
) )
if EACLOperation.GET.value not in ignore_operations: if EACLOperation.GET.value not in ignore_operations:
checks[EACLOperation.GET.value] = can_get_object( checks[EACLOperation.GET.value] = can_get_object(
wallet, cid, oid, file_name, shell, bearer, wallet_config, xhdr wallet, cid, oid, file_name, shell, cluster, bearer, wallet_config, xhdr
) )
if EACLOperation.DELETE.value not in ignore_operations: if EACLOperation.DELETE.value not in ignore_operations:
checks[EACLOperation.DELETE.value] = can_delete_object( checks[EACLOperation.DELETE.value] = can_delete_object(
wallet, cid, oid, shell, bearer, wallet_config, xhdr wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
) )
failed_checks = [ failed_checks = [
@ -114,6 +129,7 @@ def check_read_only_container(
oid: str, oid: str,
file_name: str, file_name: str,
shell: Shell, shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
@ -128,4 +144,5 @@ def check_read_only_container(
wallet_config=wallet_config, wallet_config=wallet_config,
xhdr=xhdr, xhdr=xhdr,
shell=shell, shell=shell,
cluster=cluster,
) )

View file

@ -3,15 +3,8 @@ import logging
from time import sleep from time import sleep
import allure import allure
from common import ( from cluster import Cluster
IR_WALLET_PASS, from common import MAINNET_BLOCK_TIME, NEOFS_ADM_CONFIG_PATH, NEOFS_ADM_EXEC, NEOGO_EXECUTABLE
IR_WALLET_PATH,
MAINNET_BLOCK_TIME,
MORPH_ENDPOINT,
NEOFS_ADM_CONFIG_PATH,
NEOFS_ADM_EXEC,
NEOGO_EXECUTABLE,
)
from neofs_testlib.cli import NeofsAdm, NeoGo from neofs_testlib.cli import NeofsAdm, NeoGo
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
from neofs_testlib.utils.wallet import get_last_address_from_wallet from neofs_testlib.utils.wallet import get_last_address_from_wallet
@ -22,28 +15,32 @@ logger = logging.getLogger("NeoLogger")
@allure.step("Ensure fresh epoch") @allure.step("Ensure fresh epoch")
def ensure_fresh_epoch(shell: Shell) -> int: def ensure_fresh_epoch(shell: Shell, cluster: Cluster) -> int:
# ensure new fresh epoch to avoid epoch switch during test session # ensure new fresh epoch to avoid epoch switch during test session
current_epoch = get_epoch(shell) current_epoch = get_epoch(shell, cluster)
tick_epoch(shell) tick_epoch(shell, cluster)
epoch = get_epoch(shell) epoch = get_epoch(shell, cluster)
assert epoch > current_epoch, "Epoch wasn't ticked" assert epoch > current_epoch, "Epoch wasn't ticked"
return epoch return epoch
@allure.step("Get Epoch") @allure.step("Get Epoch")
def get_epoch(shell: Shell): def get_epoch(shell: Shell, cluster: Cluster):
morph_chain = cluster.morph_chain_nodes[0]
morph_endpoint = morph_chain.get_endpoint()
neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE) neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.contract.testinvokefunction( out = neogo.contract.testinvokefunction(
scripthash=get_contract_hash("netmap.neofs", shell=shell), scripthash=get_contract_hash(morph_chain, "netmap.neofs", shell=shell),
method="epoch", method="epoch",
rpc_endpoint=MORPH_ENDPOINT, rpc_endpoint=morph_endpoint,
) )
return int(json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"]) return int(json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"])
@allure.step("Tick Epoch") @allure.step("Tick Epoch")
def tick_epoch(shell: Shell): def tick_epoch(shell: Shell, cluster: Cluster):
if NEOFS_ADM_EXEC and NEOFS_ADM_CONFIG_PATH: if NEOFS_ADM_EXEC and NEOFS_ADM_CONFIG_PATH:
# If neofs-adm is available, then we tick epoch with it (to be consistent with UAT tests) # If neofs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
neofsadm = NeofsAdm( neofsadm = NeofsAdm(
@ -52,21 +49,30 @@ def tick_epoch(shell: Shell):
neofsadm.morph.force_new_epoch() neofsadm.morph.force_new_epoch()
return return
# Otherwise we tick epoch using transaction # Use first node by default
cur_epoch = get_epoch(shell)
ir_address = get_last_address_from_wallet(IR_WALLET_PATH, IR_WALLET_PASS) # Otherwise we tick epoch using transaction
cur_epoch = get_epoch(shell, cluster)
ir_node = cluster.ir_nodes[0]
# In case if no local_wallet_path is provided, we use wallet_path
ir_wallet_path = ir_node.get_wallet_path()
ir_wallet_pass = ir_node.get_wallet_password()
ir_address = get_last_address_from_wallet(ir_wallet_path, ir_wallet_pass)
morph_chain = cluster.morph_chain_nodes[0]
morph_endpoint = morph_chain.get_endpoint()
neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
neogo.contract.invokefunction( neogo.contract.invokefunction(
wallet=IR_WALLET_PATH, wallet=ir_wallet_path,
wallet_password=IR_WALLET_PASS, wallet_password=ir_wallet_pass,
scripthash=get_contract_hash("netmap.neofs", shell=shell), scripthash=get_contract_hash(morph_chain, "netmap.neofs", shell=shell),
method="newEpoch", method="newEpoch",
arguments=f"int:{cur_epoch + 1}", arguments=f"int:{cur_epoch + 1}",
multisig_hash=f"{ir_address}:Global", multisig_hash=f"{ir_address}:Global",
address=ir_address, address=ir_address,
rpc_endpoint=MORPH_ENDPOINT, rpc_endpoint=morph_endpoint,
force=True, force=True,
gas=1, gas=1,
) )

View file

@ -1,55 +1,51 @@
import logging import logging
from time import sleep from time import sleep
from typing import Optional
import allure import allure
from common import NEOFS_NETMAP_DICT from cluster import Cluster, StorageNode
from neofs_testlib.hosting import Hosting
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
from python_keywords.node_management import node_healthcheck from python_keywords.node_management import storage_node_healthcheck
from storage_policy import get_nodes_with_object from storage_policy import get_nodes_with_object
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@allure.step("Wait for object replication") @allure.step("Wait for object replication")
def wait_object_replication_on_nodes( def wait_object_replication(
wallet: str,
cid: str, cid: str,
oid: str, oid: str,
expected_copies: int, expected_copies: int,
shell: Shell, shell: Shell,
excluded_nodes: Optional[list[str]] = None, nodes: list[StorageNode],
) -> list[str]: ) -> list[StorageNode]:
excluded_nodes = excluded_nodes or []
sleep_interval, attempts = 15, 20 sleep_interval, attempts = 15, 20
nodes = [] nodes_with_object = []
for __attempt in range(attempts): for _ in range(attempts):
nodes = get_nodes_with_object(wallet, cid, oid, shell=shell, skip_nodes=excluded_nodes) nodes_with_object = get_nodes_with_object(cid, oid, shell=shell, nodes=nodes)
if len(nodes) >= expected_copies: if len(nodes_with_object) >= expected_copies:
return nodes return nodes_with_object
sleep(sleep_interval) sleep(sleep_interval)
raise AssertionError( raise AssertionError(
f"Expected {expected_copies} copies of object, but found {len(nodes)}. " f"Expected {expected_copies} copies of object, but found {len(nodes_with_object)}. "
f"Waiting time {sleep_interval * attempts}" f"Waiting time {sleep_interval * attempts}"
) )
@allure.step("Wait for storage node returned to cluster") @allure.step("Wait for storage nodes returned to cluster")
def wait_all_storage_node_returned(hosting: Hosting) -> None: def wait_all_storage_nodes_returned(cluster: Cluster) -> None:
sleep_interval, attempts = 15, 20 sleep_interval, attempts = 15, 20
for __attempt in range(attempts): for __attempt in range(attempts):
if is_all_storage_node_returned(hosting): if is_all_storage_nodes_returned(cluster):
return return
sleep(sleep_interval) sleep(sleep_interval)
raise AssertionError("Storage node(s) is broken") raise AssertionError("Storage node(s) is broken")
def is_all_storage_node_returned(hosting: Hosting) -> bool: def is_all_storage_nodes_returned(cluster: Cluster) -> bool:
with allure.step("Run health check for all storage nodes"): with allure.step("Run health check for all storage nodes"):
for node_name in NEOFS_NETMAP_DICT.keys(): for node in cluster.storage_nodes:
try: try:
health_check = node_healthcheck(hosting, node_name) health_check = storage_node_healthcheck(node)
except Exception as err: except Exception as err:
logger.warning(f"Node healthcheck fails with error {err}") logger.warning(f"Node healthcheck fails with error {err}")
return False return False

View file

@ -9,7 +9,6 @@ from urllib.parse import quote_plus
import allure import allure
import requests import requests
from cli_helpers import _cmd_run from cli_helpers import _cmd_run
from common import HTTP_GATE
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -17,13 +16,14 @@ ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
@allure.step("Get via HTTP Gate") @allure.step("Get via HTTP Gate")
def get_via_http_gate(cid: str, oid: str): def get_via_http_gate(cid: str, oid: str, endpoint: str):
""" """
This function gets given object from HTTP gate This function gets given object from HTTP gate
:param cid: CID to get object from cid: container id to get object from
:param oid: object OID oid: object ID
endpoint: http gate endpoint
""" """
request = f"{HTTP_GATE}/get/{cid}/{oid}" request = f"{endpoint}/get/{cid}/{oid}"
resp = requests.get(request, stream=True) resp = requests.get(request, stream=True)
if not resp.ok: if not resp.ok:
@ -44,13 +44,14 @@ def get_via_http_gate(cid: str, oid: str):
@allure.step("Get via Zip HTTP Gate") @allure.step("Get via Zip HTTP Gate")
def get_via_zip_http_gate(cid: str, prefix: str): def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str):
""" """
This function gets given object from HTTP gate This function gets given object from HTTP gate
:param cid: CID to get object from cid: container id to get object from
:param prefix: common prefix prefix: common prefix
endpoint: http gate endpoint
""" """
request = f"{HTTP_GATE}/zip/{cid}/{prefix}" request = f"{endpoint}/zip/{cid}/{prefix}"
resp = requests.get(request, stream=True) resp = requests.get(request, stream=True)
if not resp.ok: if not resp.ok:
@ -75,15 +76,16 @@ def get_via_zip_http_gate(cid: str, prefix: str):
@allure.step("Get via HTTP Gate by attribute") @allure.step("Get via HTTP Gate by attribute")
def get_via_http_gate_by_attribute(cid: str, attribute: dict): def get_via_http_gate_by_attribute(cid: str, attribute: dict, endpoint: str):
""" """
This function gets given object from HTTP gate This function gets given object from HTTP gate
:param cid: CID to get object from cid: CID to get object from
:param attribute: attribute name: attribute value pair attribute: attribute {name: attribute} value pair
endpoint: http gate endpoint
""" """
attr_name = list(attribute.keys())[0] attr_name = list(attribute.keys())[0]
attr_value = quote_plus(str(attribute.get(attr_name))) attr_value = quote_plus(str(attribute.get(attr_name)))
request = f"{HTTP_GATE}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
resp = requests.get(request, stream=True) resp = requests.get(request, stream=True)
if not resp.ok: if not resp.ok:
@ -104,14 +106,15 @@ def get_via_http_gate_by_attribute(cid: str, attribute: dict):
@allure.step("Upload via HTTP Gate") @allure.step("Upload via HTTP Gate")
def upload_via_http_gate(cid: str, path: str, headers: dict = None) -> str: def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: dict = None) -> str:
""" """
This function upload given object through HTTP gate This function upload given object through HTTP gate
:param cid: CID to get object from cid: CID to get object from
:param path: File path to upload path: File path to upload
:param headers: Object header endpoint: http gate endpoint
headers: Object header
""" """
request = f"{HTTP_GATE}/upload/{cid}" request = f"{endpoint}/upload/{cid}"
files = {"upload_file": open(path, "rb")} files = {"upload_file": open(path, "rb")}
body = {"filename": path} body = {"filename": path}
resp = requests.post(request, files=files, data=body, headers=headers) resp = requests.post(request, files=files, data=body, headers=headers)
@ -134,15 +137,16 @@ def upload_via_http_gate(cid: str, path: str, headers: dict = None) -> str:
@allure.step("Upload via HTTP Gate using Curl") @allure.step("Upload via HTTP Gate using Curl")
def upload_via_http_gate_curl( def upload_via_http_gate_curl(
cid: str, filepath: str, large_object=False, headers: dict = None cid: str, filepath: str, endpoint: str, large_object=False, headers: dict = None
) -> str: ) -> str:
""" """
This function upload given object through HTTP gate using curl utility. This function upload given object through HTTP gate using curl utility.
:param cid: CID to get object from cid: CID to get object from
:param filepath: File path to upload filepath: File path to upload
:param headers: Object header headers: Object header
endpoint: http gate endpoint
""" """
request = f"{HTTP_GATE}/upload/{cid}" request = f"{endpoint}/upload/{cid}"
files = f"file=@{filepath};filename={os.path.basename(filepath)}" files = f"file=@{filepath};filename={os.path.basename(filepath)}"
cmd = f"curl -F '{files}' {request}" cmd = f"curl -F '{files}' {request}"
if large_object: if large_object:
@ -156,13 +160,14 @@ def upload_via_http_gate_curl(
@allure.step("Get via HTTP Gate using Curl") @allure.step("Get via HTTP Gate using Curl")
def get_via_http_curl(cid: str, oid: str) -> str: def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str:
""" """
This function gets given object from HTTP gate using curl utility. This function gets given object from HTTP gate using curl utility.
:param cid: CID to get object from cid: CID to get object from
:param oid: object OID oid: object OID
endpoint: http gate endpoint
""" """
request = f"{HTTP_GATE}/get/{cid}/{oid}" request = f"{endpoint}/get/{cid}/{oid}"
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
cmd = f"curl {request} > {file_path}" cmd = f"curl {request} > {file_path}"

View file

@ -1,29 +1,77 @@
import json import json
import logging import logging
import os import os
import random
import re import re
import uuid import uuid
from typing import Any, Optional from typing import Any, Optional
import allure import allure
import json_transformers import json_transformers
from common import ASSETS_DIR, NEOFS_CLI_EXEC, NEOFS_ENDPOINT, NEOFS_NETMAP, WALLET_CONFIG from cluster import Cluster
from common import ASSETS_DIR, NEOFS_CLI_EXEC, WALLET_CONFIG
from neofs_testlib.cli import NeofsCli from neofs_testlib.cli import NeofsCli
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@allure.step("Get object") @allure.step("Get object from random node")
def get_object_from_random_node(
wallet: str,
cid: str,
oid: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
no_progress: bool = True,
session: Optional[str] = None,
) -> str:
"""
GET from NeoFS random storage node
Args:
wallet: wallet on whose behalf GET is done
cid: ID of Container where we get the Object from
oid: Object ID
shell: executor for cli command
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
write_object (optional, str): path to downloaded file, appends to `--file` key
endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config(optional, str): path to the wallet config
no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
Returns:
(str): path to downloaded file
"""
endpoint = cluster.get_random_storage_rpc_endpoint()
return get_object(
wallet,
cid,
oid,
shell,
endpoint,
bearer,
write_object,
xhdr,
wallet_config,
no_progress,
session,
)
@allure.step("Get object from {endpoint}")
def get_object( def get_object(
wallet: str, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
endpoint: str = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
write_object: str = "", write_object: Optional[str] = None,
endpoint: str = "",
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
no_progress: bool = True, no_progress: bool = True,
@ -37,9 +85,9 @@ def get_object(
cid (str): ID of Container where we get the Object from cid (str): ID of Container where we get the Object from
oid (str): Object ID oid (str): Object ID
shell: executor for cli command shell: executor for cli command
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
write_object (optional, str): path to downloaded file, appends to `--file` key write_object: path to downloaded file, appends to `--file` key
endpoint (optional, str): NeoFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config(optional, str): path to the wallet config wallet_config(optional, str): path to the wallet config
no_progress(optional, bool): do not show progress bar no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value xhdr (optional, dict): Request X-Headers in form of Key=Value
@ -52,12 +100,9 @@ def get_object(
write_object = str(uuid.uuid4()) write_object = str(uuid.uuid4())
file_path = os.path.join(ASSETS_DIR, write_object) file_path = os.path.join(ASSETS_DIR, write_object)
if not endpoint:
endpoint = random.sample(NEOFS_NETMAP, 1)[0]
cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
cli.object.get( cli.object.get(
rpc_endpoint=endpoint or NEOFS_ENDPOINT, rpc_endpoint=endpoint,
wallet=wallet, wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
@ -71,15 +116,15 @@ def get_object(
return file_path return file_path
@allure.step("Get Range Hash") @allure.step("Get Range Hash from {endpoint}")
def get_range_hash( def get_range_hash(
wallet: str, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
range_cut: str, range_cut: str,
shell: Shell, shell: Shell,
endpoint: str,
bearer: Optional[str] = None, bearer: Optional[str] = None,
endpoint: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
session: Optional[str] = None, session: Optional[str] = None,
@ -102,10 +147,9 @@ def get_range_hash(
Returns: Returns:
None None
""" """
cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
result = cli.object.hash( result = cli.object.hash(
rpc_endpoint=endpoint or NEOFS_ENDPOINT, rpc_endpoint=endpoint,
wallet=wallet, wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
@ -119,16 +163,69 @@ def get_range_hash(
return result.stdout.split(":")[1].strip() return result.stdout.split(":")[1].strip()
@allure.step("Put object") @allure.step("Put object to random node")
def put_object_to_random_node(
wallet: str,
path: str,
cid: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
):
"""
PUT of given file to a random storage node.
Args:
wallet: wallet on whose behalf PUT is done
path: path to file to be PUT
cid: ID of Container where we get the Object from
shell: executor for cli command
cluster: cluster under test
bearer: path to Bearer Token file, appends to `--bearer` key
attributes: User attributes in form of Key1=Value1,Key2=Value2
cluster: cluster under test
wallet_config: path to the wallet config
no_progress: do not show progress bar
expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
Returns:
ID of uploaded Object
"""
endpoint = cluster.get_random_storage_rpc_endpoint()
return put_object(
wallet,
path,
cid,
shell,
endpoint,
bearer,
attributes,
xhdr,
wallet_config,
expire_at,
no_progress,
session,
)
@allure.step("Put object at {endpoint} in container {cid}")
def put_object( def put_object(
wallet: str, wallet: str,
path: str, path: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str,
bearer: Optional[str] = None, bearer: Optional[str] = None,
attributes: Optional[dict] = None, attributes: Optional[dict] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
endpoint: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
expire_at: Optional[int] = None, expire_at: Optional[int] = None,
no_progress: bool = True, no_progress: bool = True,
@ -138,25 +235,21 @@ def put_object(
PUT of given file. PUT of given file.
Args: Args:
wallet (str): wallet on whose behalf PUT is done wallet: wallet on whose behalf PUT is done
path (str): path to file to be PUT path: path to file to be PUT
cid (str): ID of Container where we get the Object from cid: ID of Container where we get the Object from
shell: executor for cli command shell: executor for cli command
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
attributes (optional, str): User attributes in form of Key1=Value1,Key2=Value2 attributes: User attributes in form of Key1=Value1,Key2=Value2
endpoint(optional, str): NeoFS endpoint to send request to endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config(optional, str): path to the wallet config wallet_config: path to the wallet config
no_progress(optional, bool): do not show progress bar no_progress: do not show progress bar
expire_at (optional, int): Last epoch in the life of the object expire_at: Last epoch in the life of the object
xhdr (optional, dict): Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token session: path to a JSON-encoded container session token
Returns: Returns:
(str): ID of uploaded Object (str): ID of uploaded Object
""" """
if not endpoint:
endpoint = random.sample(NEOFS_NETMAP, 1)[0]
if not endpoint:
logger.info(f"---DEB:\n{NEOFS_NETMAP}")
cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
result = cli.object.put( result = cli.object.put(
@ -178,13 +271,13 @@ def put_object(
return oid.strip() return oid.strip()
@allure.step("Delete object") @allure.step("Delete object {cid}/{oid} from {endpoint}")
def delete_object( def delete_object(
wallet: str, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
endpoint: Optional[str] = None, endpoint: str = None,
bearer: str = "", bearer: str = "",
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
@ -194,21 +287,22 @@ def delete_object(
DELETE an Object. DELETE an Object.
Args: Args:
wallet (str): wallet on whose behalf DELETE is done wallet: wallet on whose behalf DELETE is done
cid (str): ID of Container where we get the Object from cid: ID of Container where we get the Object from
oid (str): ID of Object we are going to delete oid: ID of Object we are going to delete
shell: executor for cli command shell: executor for cli command
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
endpoint (optional, str): NeoFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config(optional, str): path to the wallet config wallet_config: path to the wallet config
xhdr (optional, dict): Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token session: path to a JSON-encoded container session token
Returns: Returns:
(str): Tombstone ID (str): Tombstone ID
""" """
cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
result = cli.object.delete( result = cli.object.delete(
rpc_endpoint=endpoint or NEOFS_ENDPOINT, rpc_endpoint=endpoint,
wallet=wallet, wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
@ -229,7 +323,7 @@ def get_range(
oid: str, oid: str,
range_cut: str, range_cut: str,
shell: Shell, shell: Shell,
endpoint: Optional[str] = None, endpoint: str = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
bearer: str = "", bearer: str = "",
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
@ -239,16 +333,16 @@ def get_range(
GETRANGE an Object. GETRANGE an Object.
Args: Args:
wallet (str): wallet on whose behalf GETRANGE is done wallet: wallet on whose behalf GETRANGE is done
cid (str): ID of Container where we get the Object from cid: ID of Container where we get the Object from
oid (str): ID of Object we are going to request oid: ID of Object we are going to request
range_cut (str): range to take data from in the form offset:length range_cut: range to take data from in the form offset:length
shell: executor for cli command shell: executor for cli command
endpoint (optional, str): NeoFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
wallet_config(optional, str): path to the wallet config wallet_config: path to the wallet config
xhdr (optional, dict): Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token session: path to a JSON-encoded container session token
Returns: Returns:
(str, bytes) - path to the file with range content and content of this file as bytes (str, bytes) - path to the file with range content and content of this file as bytes
""" """
@ -256,7 +350,7 @@ def get_range(
cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
cli.object.range( cli.object.range(
rpc_endpoint=endpoint or NEOFS_ENDPOINT, rpc_endpoint=endpoint,
wallet=wallet, wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
@ -278,9 +372,9 @@ def lock_object(
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
endpoint: str,
lifetime: Optional[int] = None, lifetime: Optional[int] = None,
expire_at: Optional[int] = None, expire_at: Optional[int] = None,
endpoint: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
session: Optional[str] = None, session: Optional[str] = None,
@ -298,7 +392,8 @@ def lock_object(
oid: Object ID. oid: Object ID.
lifetime: Lock lifetime. lifetime: Lock lifetime.
expire_at: Lock expiration epoch. expire_at: Lock expiration epoch.
endpoint: Remote node address. shell: executor for cli command
endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
session: Path to a JSON-encoded container session token. session: Path to a JSON-encoded container session token.
ttl: TTL value in request meta header (default 2). ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key. wallet: WIF (NEP-2) string or path to the wallet or binary key.
@ -310,7 +405,7 @@ def lock_object(
cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
result = cli.object.lock( result = cli.object.lock(
rpc_endpoint=endpoint or NEOFS_ENDPOINT, rpc_endpoint=endpoint,
lifetime=lifetime, lifetime=lifetime,
expire_at=expire_at, expire_at=expire_at,
address=address, address=address,
@ -334,8 +429,8 @@ def search_object(
wallet: str, wallet: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str,
bearer: str = "", bearer: str = "",
endpoint: Optional[str] = None,
filters: Optional[dict] = None, filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None, expected_objects_list: Optional[list] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
@ -348,26 +443,26 @@ def search_object(
SEARCH an Object. SEARCH an Object.
Args: Args:
wallet (str): wallet on whose behalf SEARCH is done wallet: wallet on whose behalf SEARCH is done
cid (str): ID of Container where we get the Object from cid: ID of Container where we get the Object from
shell: executor for cli command shell: executor for cli command
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
endpoint (optional, str): NeoFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
filters (optional, dict): key=value pairs to filter Objects filters: key=value pairs to filter Objects
expected_objects_list (optional, list): a list of ObjectIDs to compare found Objects with expected_objects_list: a list of ObjectIDs to compare found Objects with
wallet_config(optional, str): path to the wallet config wallet_config: path to the wallet config
xhdr (optional, dict): Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token session: path to a JSON-encoded container session token
phy: Search physically stored objects. phy: Search physically stored objects.
root: Search for user objects. root: Search for user objects.
Returns: Returns:
(list): list of found ObjectIDs list of found ObjectIDs
""" """
cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
result = cli.object.search( result = cli.object.search(
rpc_endpoint=endpoint or NEOFS_ENDPOINT, rpc_endpoint=endpoint,
wallet=wallet, wallet=wallet,
cid=cid, cid=cid,
bearer=bearer, bearer=bearer,
@ -401,8 +496,8 @@ def search_object(
def get_netmap_netinfo( def get_netmap_netinfo(
wallet: str, wallet: str,
shell: Shell, shell: Shell,
endpoint: str,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
endpoint: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
ttl: Optional[int] = None, ttl: Optional[int] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
@ -411,7 +506,7 @@ def get_netmap_netinfo(
Get netmap netinfo output from node Get netmap netinfo output from node
Args: Args:
wallet (str): wallet on whose behalf SEARCH is done wallet (str): wallet on whose behalf request is done
shell: executor for cli command shell: executor for cli command
endpoint (optional, str): NeoFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint (optional, str): NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
address: Address of wallet account address: Address of wallet account
@ -426,7 +521,7 @@ def get_netmap_netinfo(
cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
output = cli.netmap.netinfo( output = cli.netmap.netinfo(
wallet=wallet, wallet=wallet,
rpc_endpoint=endpoint or NEOFS_ENDPOINT, rpc_endpoint=endpoint,
address=address, address=address,
ttl=ttl, ttl=ttl,
xhdr=xhdr, xhdr=xhdr,
@ -452,9 +547,9 @@ def head_object(
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
endpoint: str,
bearer: str = "", bearer: str = "",
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
endpoint: Optional[str] = None,
json_output: bool = True, json_output: bool = True,
is_raw: bool = False, is_raw: bool = False,
is_direct: bool = False, is_direct: bool = False,
@ -489,7 +584,7 @@ def head_object(
cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG) cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
result = cli.object.head( result = cli.object.head(
rpc_endpoint=endpoint or NEOFS_ENDPOINT, rpc_endpoint=endpoint,
wallet=wallet, wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,

View file

@ -6,17 +6,10 @@ from dataclasses import dataclass
from typing import Optional from typing import Optional
import allure import allure
from common import ( from cluster import Cluster, StorageNode
MORPH_BLOCK_TIME, from common import MORPH_BLOCK_TIME, NEOFS_CLI_EXEC
NEOFS_CLI_EXEC,
NEOFS_NETMAP_DICT,
STORAGE_WALLET_CONFIG,
STORAGE_WALLET_PASS,
)
from data_formatters import get_wallet_public_key
from epoch import tick_epoch from epoch import tick_epoch
from neofs_testlib.cli import NeofsCli from neofs_testlib.cli import NeofsCli
from neofs_testlib.hosting import Hosting
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
from utility import parse_time from utility import parse_time
@ -39,183 +32,189 @@ class HealthStatus:
return HealthStatus(network, health) return HealthStatus(network, health)
@allure.step("Stop storage nodes") @allure.step("Stop random storage nodes")
def stop_nodes(hosting: Hosting, number: int, nodes: list[str]) -> list[str]: def stop_random_storage_nodes(number: int, nodes: list[StorageNode]) -> list[StorageNode]:
""" """
Shuts down the given number of randomly selected storage nodes. Shuts down the given number of randomly selected storage nodes.
Args: Args:
number (int): the number of nodes to shut down number: the number of storage nodes to stop
nodes (list): the list of nodes for possible shut down nodes: the list of storage nodes to stop
Returns: Returns:
(list): the list of nodes that were shut down the list of nodes that were stopped
""" """
nodes_to_stop = random.sample(nodes, number) nodes_to_stop = random.sample(nodes, number)
for node in nodes_to_stop: for node in nodes_to_stop:
host = hosting.get_host_by_service(node) node.stop_service()
host.stop_service(node)
return nodes_to_stop return nodes_to_stop
@allure.step("Start storage nodes") @allure.step("Start storage node")
def start_nodes(hosting: Hosting, nodes: list[str]) -> None: def start_storage_nodes(nodes: list[StorageNode]) -> None:
""" """
The function starts specified storage nodes. The function starts specified storage nodes.
Args: Args:
nodes (list): the list of nodes to start nodes: the list of nodes to start
""" """
for node in nodes: for node in nodes:
host = hosting.get_host_by_service(node) node.start_service()
host.start_service(node)
@allure.step("Get Locode") @allure.step("Get Locode from random storage node")
def get_locode() -> str: def get_locode_from_random_node(cluster: Cluster) -> str:
endpoint_values = random.choice(list(NEOFS_NETMAP_DICT.values())) node = random.choice(cluster.storage_nodes)
locode = endpoint_values["UN-LOCODE"] locode = node.get_un_locode()
logger.info(f"Random locode chosen: {locode}") logger.info(f"Chosen '{locode}' locode from node {node}")
return locode return locode
@allure.step("Healthcheck for node {node_name}") @allure.step("Healthcheck for storage node {node}")
def node_healthcheck(hosting: Hosting, node_name: str) -> HealthStatus: def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
""" """
The function returns node's health status. The function returns storage node's health status.
Args: Args:
node_name str: node name for which health status should be retrieved. node: storage node for which health status should be retrieved.
Returns: Returns:
health status as HealthStatus object. health status as HealthStatus object.
""" """
command = "control healthcheck" command = "control healthcheck"
output = _run_control_command_with_retries(hosting, node_name, command) output = _run_control_command_with_retries(node, command)
return HealthStatus.from_stdout(output) return HealthStatus.from_stdout(output)
@allure.step("Set status for node {node_name}") @allure.step("Set status for {node}")
def node_set_status(hosting: Hosting, node_name: str, status: str, retries: int = 0) -> None: def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None:
""" """
The function sets particular status for given node. The function sets particular status for given node.
Args: Args:
node_name: node name for which status should be set. node: node for which status should be set.
status: online or offline. status: online or offline.
retries (optional, int): number of retry attempts if it didn't work from the first time retries (optional, int): number of retry attempts if it didn't work from the first time
""" """
command = f"control set-status --status {status}" command = f"control set-status --status {status}"
_run_control_command_with_retries(hosting, node_name, command, retries) _run_control_command_with_retries(node, command, retries)
@allure.step("Get netmap snapshot") @allure.step("Get netmap snapshot")
def get_netmap_snapshot(node_name: str, shell: Shell) -> str: def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
""" """
The function returns string representation of netmap snapshot. The function returns string representation of netmap snapshot.
Args: Args:
node_name str: node name from which netmap snapshot should be requested. node: node from which netmap snapshot should be requested.
Returns: Returns:
string representation of netmap string representation of netmap
""" """
node_info = NEOFS_NETMAP_DICT[node_name]
cli = NeofsCli(shell, NEOFS_CLI_EXEC, config_file=STORAGE_WALLET_CONFIG) storage_wallet_config = node.get_wallet_config_path()
storage_wallet_path = node.get_wallet_path()
cli = NeofsCli(shell, NEOFS_CLI_EXEC, config_file=storage_wallet_config)
return cli.netmap.snapshot( return cli.netmap.snapshot(
rpc_endpoint=node_info["rpc"], rpc_endpoint=node.get_rpc_endpoint(),
wallet=node_info["wallet_path"], wallet=storage_wallet_path,
).stdout ).stdout
@allure.step("Get shard list for node {node_name}") @allure.step("Get shard list for {node}")
def node_shard_list(hosting: Hosting, node_name: str) -> list[str]: def node_shard_list(node: StorageNode) -> list[str]:
""" """
The function returns list of shards for specified node. The function returns list of shards for specified storage node.
Args: Args:
node_name str: node name for which shards should be returned. node: node for which shards should be returned.
Returns: Returns:
list of shards. list of shards.
""" """
command = "control shards list" command = "control shards list"
output = _run_control_command_with_retries(hosting, node_name, command) output = _run_control_command_with_retries(node, command)
return re.findall(r"Shard (.*):", output) return re.findall(r"Shard (.*):", output)
@allure.step("Shard set for node {node_name}") @allure.step("Shard set for {node}")
def node_shard_set_mode(hosting: Hosting, node_name: str, shard: str, mode: str) -> str: def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str:
""" """
The function sets mode for specified shard. The function sets mode for specified shard.
Args: Args:
node_name str: node name on which shard mode should be set. node: node on which shard mode should be set.
""" """
command = f"control shards set-mode --id {shard} --mode {mode}" command = f"control shards set-mode --id {shard} --mode {mode}"
return _run_control_command_with_retries(hosting, node_name, command) return _run_control_command_with_retries(node, command)
@allure.step("Drop object from node {node_name}") @allure.step("Drop object from {node}")
def drop_object(hosting: Hosting, node_name: str, cid: str, oid: str) -> str: def drop_object(node: StorageNode, cid: str, oid: str) -> str:
""" """
The function drops object from specified node. The function drops object from specified node.
Args: Args:
node_name str: node name from which object should be dropped. node_id str: node from which object should be dropped.
""" """
command = f"control drop-objects -o {cid}/{oid}" command = f"control drop-objects -o {cid}/{oid}"
return _run_control_command_with_retries(hosting, node_name, command) return _run_control_command_with_retries(node, command)
@allure.step("Delete data of node {node_name}") @allure.step("Delete data from host for node {node}")
def delete_node_data(hosting: Hosting, node_name: str) -> None: def delete_node_data(node: StorageNode) -> None:
host = hosting.get_host_by_service(node_name) node.stop_service()
host.stop_service(node_name) node.host.delete_storage_node_data(node.name)
host.delete_storage_node_data(node_name)
time.sleep(parse_time(MORPH_BLOCK_TIME)) time.sleep(parse_time(MORPH_BLOCK_TIME))
@allure.step("Exclude node {node_to_exclude} from network map") @allure.step("Exclude node {node_to_exclude} from network map")
def exclude_node_from_network_map( def exclude_node_from_network_map(
hosting: Hosting, node_to_exclude: str, alive_node: str, shell: Shell node_to_exclude: StorageNode,
alive_node: StorageNode,
shell: Shell,
cluster: Cluster,
) -> None: ) -> None:
node_wallet_path = NEOFS_NETMAP_DICT[node_to_exclude]["wallet_path"] node_netmap_key = node_to_exclude.get_wallet_public_key()
node_netmap_key = get_wallet_public_key(node_wallet_path, STORAGE_WALLET_PASS)
node_set_status(hosting, node_to_exclude, status="offline") storage_node_set_status(node_to_exclude, status="offline")
time.sleep(parse_time(MORPH_BLOCK_TIME)) time.sleep(parse_time(MORPH_BLOCK_TIME))
tick_epoch(shell=shell) tick_epoch(shell, cluster)
snapshot = get_netmap_snapshot(node_name=alive_node, shell=shell) snapshot = get_netmap_snapshot(node=alive_node, shell=shell)
assert ( assert (
node_netmap_key not in snapshot node_netmap_key not in snapshot
), f"Expected node with key {node_netmap_key} not in network map" ), f"Expected node with key {node_netmap_key} to be absent in network map"
@allure.step("Include node {node_to_include} into network map") @allure.step("Include node {node_to_include} into network map")
def include_node_to_network_map( def include_node_to_network_map(
hosting: Hosting, node_to_include: str, alive_node: str, shell: Shell node_to_include: StorageNode,
alive_node: StorageNode,
shell: Shell,
cluster: Cluster,
) -> None: ) -> None:
node_set_status(hosting, node_to_include, status="online") storage_node_set_status(node_to_include, status="online")
# Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch. # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch.
# First sleep can be ommited afer https://github.com/nspcc-dev/neofs-node/issues/1790 complete. # First sleep can be omitted after https://github.com/nspcc-dev/neofs-node/issues/1790 complete.
time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) time.sleep(parse_time(MORPH_BLOCK_TIME) * 2)
tick_epoch(shell=shell) tick_epoch(shell, cluster)
time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) time.sleep(parse_time(MORPH_BLOCK_TIME) * 2)
check_node_in_map(node_to_include, shell, alive_node) check_node_in_map(node_to_include, shell, alive_node)
@allure.step("Check node {node_name} in network map") @allure.step("Check node {node} in network map")
def check_node_in_map(node_name: str, shell: Shell, alive_node: Optional[str] = None) -> None: def check_node_in_map(
alive_node = alive_node or node_name node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
node_wallet_path = NEOFS_NETMAP_DICT[node_name]["wallet_path"] ) -> None:
node_netmap_key = get_wallet_public_key(node_wallet_path, STORAGE_WALLET_PASS) alive_node = alive_node or node
logger.info(f"Node {node_name} netmap key: {node_netmap_key}") node_netmap_key = node.get_wallet_public_key()
logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
snapshot = get_netmap_snapshot(node_name=alive_node, shell=shell) snapshot = get_netmap_snapshot(alive_node, shell)
assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} in network map" assert (
node_netmap_key in snapshot
), f"Expected node with key {node_netmap_key} to be in network map"
def _run_control_command_with_retries( def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str:
hosting: Hosting, node_name: str, command: str, retries: int = 0
) -> str:
for attempt in range(1 + retries): # original attempt + specified retries for attempt in range(1 + retries): # original attempt + specified retries
try: try:
return _run_control_command(hosting, node_name, command) return _run_control_command(node, command)
except AssertionError as err: except AssertionError as err:
if attempt < retries: if attempt < retries:
logger.warning(f"Command {command} failed with error {err} and will be retried") logger.warning(f"Command {command} failed with error {err} and will be retried")
@ -223,16 +222,16 @@ def _run_control_command_with_retries(
raise AssertionError(f"Command {command} failed with error {err}") from err raise AssertionError(f"Command {command} failed with error {err}") from err
def _run_control_command(hosting: Hosting, service_name: str, command: str) -> None: def _run_control_command(node: StorageNode, command: str) -> None:
host = hosting.get_host_by_service(service_name) host = node.host
service_config = host.get_service_config(service_name) service_config = host.get_service_config(node.name)
wallet_path = service_config.attributes["wallet_path"] wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"] wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"] control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell() shell = host.get_shell()
wallet_config_path = f"/tmp/{service_name}-config.yaml" wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'password: "{wallet_password}"' wallet_config = f'password: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")

View file

@ -1,16 +1,17 @@
from typing import Optional from typing import Optional
import allure import allure
from cluster import Cluster
from file_helper import get_file_hash from file_helper import get_file_hash
from grpc_responses import OBJECT_ACCESS_DENIED, error_matches_status from grpc_responses import OBJECT_ACCESS_DENIED, error_matches_status
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
from python_keywords.neofs_verbs import ( from python_keywords.neofs_verbs import (
delete_object, delete_object,
get_object, get_object_from_random_node,
get_range, get_range,
get_range_hash, get_range_hash,
head_object, head_object,
put_object, put_object_to_random_node,
search_object, search_object,
) )
@ -23,13 +24,14 @@ def can_get_object(
oid: str, oid: str,
file_name: str, file_name: str,
shell: Shell, shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
) -> bool: ) -> bool:
with allure.step("Try get object from container"): with allure.step("Try get object from container"):
try: try:
got_file_path = get_object( got_file_path = get_object_from_random_node(
wallet, wallet,
cid, cid,
oid, oid,
@ -37,6 +39,7 @@ def can_get_object(
wallet_config=wallet_config, wallet_config=wallet_config,
xhdr=xhdr, xhdr=xhdr,
shell=shell, shell=shell,
cluster=cluster,
) )
except OPERATION_ERROR_TYPE as err: except OPERATION_ERROR_TYPE as err:
assert error_matches_status( assert error_matches_status(
@ -52,6 +55,7 @@ def can_put_object(
cid: str, cid: str,
file_name: str, file_name: str,
shell: Shell, shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
@ -59,7 +63,7 @@ def can_put_object(
) -> bool: ) -> bool:
with allure.step("Try put object to container"): with allure.step("Try put object to container"):
try: try:
put_object( put_object_to_random_node(
wallet, wallet,
file_name, file_name,
cid, cid,
@ -68,6 +72,7 @@ def can_put_object(
xhdr=xhdr, xhdr=xhdr,
attributes=attributes, attributes=attributes,
shell=shell, shell=shell,
cluster=cluster,
) )
except OPERATION_ERROR_TYPE as err: except OPERATION_ERROR_TYPE as err:
assert error_matches_status( assert error_matches_status(
@ -82,6 +87,7 @@ def can_delete_object(
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
endpoint: str,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
@ -89,7 +95,14 @@ def can_delete_object(
with allure.step("Try delete object from container"): with allure.step("Try delete object from container"):
try: try:
delete_object( delete_object(
wallet, cid, oid, bearer=bearer, wallet_config=wallet_config, xhdr=xhdr, shell=shell wallet,
cid,
oid,
bearer=bearer,
wallet_config=wallet_config,
xhdr=xhdr,
shell=shell,
endpoint=endpoint,
) )
except OPERATION_ERROR_TYPE as err: except OPERATION_ERROR_TYPE as err:
assert error_matches_status( assert error_matches_status(
@ -104,6 +117,7 @@ def can_get_head_object(
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
endpoint: str,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
@ -118,6 +132,7 @@ def can_get_head_object(
wallet_config=wallet_config, wallet_config=wallet_config,
xhdr=xhdr, xhdr=xhdr,
shell=shell, shell=shell,
endpoint=endpoint,
) )
except OPERATION_ERROR_TYPE as err: except OPERATION_ERROR_TYPE as err:
assert error_matches_status( assert error_matches_status(
@ -132,6 +147,7 @@ def can_get_range_of_object(
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
endpoint: str,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
@ -147,6 +163,7 @@ def can_get_range_of_object(
wallet_config=wallet_config, wallet_config=wallet_config,
xhdr=xhdr, xhdr=xhdr,
shell=shell, shell=shell,
endpoint=endpoint,
) )
except OPERATION_ERROR_TYPE as err: except OPERATION_ERROR_TYPE as err:
assert error_matches_status( assert error_matches_status(
@ -161,6 +178,7 @@ def can_get_range_hash_of_object(
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
endpoint: str,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
@ -176,6 +194,7 @@ def can_get_range_hash_of_object(
wallet_config=wallet_config, wallet_config=wallet_config,
xhdr=xhdr, xhdr=xhdr,
shell=shell, shell=shell,
endpoint=endpoint,
) )
except OPERATION_ERROR_TYPE as err: except OPERATION_ERROR_TYPE as err:
assert error_matches_status( assert error_matches_status(
@ -189,6 +208,7 @@ def can_search_object(
wallet: str, wallet: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str,
oid: Optional[str] = None, oid: Optional[str] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None, wallet_config: Optional[str] = None,
@ -197,7 +217,13 @@ def can_search_object(
with allure.step("Try search object in container"): with allure.step("Try search object in container"):
try: try:
oids = search_object( oids = search_object(
wallet, cid, bearer=bearer, wallet_config=wallet_config, xhdr=xhdr, shell=shell wallet,
cid,
bearer=bearer,
wallet_config=wallet_config,
xhdr=xhdr,
shell=shell,
endpoint=endpoint,
) )
except OPERATION_ERROR_TYPE as err: except OPERATION_ERROR_TYPE as err:
assert error_matches_status( assert error_matches_status(

View file

@ -6,19 +6,9 @@ import time
from typing import Optional from typing import Optional
import allure import allure
from common import ( from cluster import MainChain, MorphChain
GAS_HASH, from common import GAS_HASH, MAINNET_BLOCK_TIME, NEOFS_CONTRACT, NEOGO_EXECUTABLE
MAINNET_BLOCK_TIME,
MAINNET_SINGLE_ADDR,
MAINNET_WALLET_PASS,
MAINNET_WALLET_PATH,
MORPH_ENDPOINT,
NEO_MAINNET_ENDPOINT,
NEOFS_CONTRACT,
NEOGO_EXECUTABLE,
)
from neo3 import wallet as neo3_wallet from neo3 import wallet as neo3_wallet
from neofs_testlib.blockchain import RPCClient
from neofs_testlib.cli import NeoGo from neofs_testlib.cli import NeoGo
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
from neofs_testlib.utils.converters import contract_hash_to_address from neofs_testlib.utils.converters import contract_hash_to_address
@ -32,30 +22,26 @@ TX_PERSIST_TIMEOUT = 15 # seconds
ASSET_POWER_MAINCHAIN = 10**8 ASSET_POWER_MAINCHAIN = 10**8
ASSET_POWER_SIDECHAIN = 10**12 ASSET_POWER_SIDECHAIN = 10**12
morph_rpc_client = RPCClient(MORPH_ENDPOINT)
mainnet_rpc_client = RPCClient(NEO_MAINNET_ENDPOINT) def get_nns_contract_hash(morph_chain: MorphChain) -> str:
return morph_chain.rpc_client.get_contract_state(1)["hash"]
def get_nns_contract_hash() -> str: def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) -> str:
rpc_client = RPCClient(MORPH_ENDPOINT) nns_contract_hash = get_nns_contract_hash(morph_chain)
return rpc_client.get_contract_state(1)["hash"]
def get_contract_hash(resolve_name: str, shell: Shell) -> str:
nns_contract_hash = get_nns_contract_hash()
neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE) neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.contract.testinvokefunction( out = neogo.contract.testinvokefunction(
scripthash=nns_contract_hash, scripthash=nns_contract_hash,
method="resolve", method="resolve",
arguments=f"string:{resolve_name} int:16", arguments=f"string:{resolve_name} int:16",
rpc_endpoint=MORPH_ENDPOINT, rpc_endpoint=morph_chain.get_endpoint(),
) )
stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"] stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"]
return bytes.decode(base64.b64decode(stack_data[0]["value"])) return bytes.decode(base64.b64decode(stack_data[0]["value"]))
@allure.step("Withdraw Mainnet Gas") @allure.step("Withdraw Mainnet Gas")
def withdraw_mainnet_gas(shell: Shell, wlt: str, amount: int): def withdraw_mainnet_gas(shell: Shell, main_chain: MainChain, wlt: str, amount: int):
address = get_last_address_from_wallet(wlt, EMPTY_PASSWORD) address = get_last_address_from_wallet(wlt, EMPTY_PASSWORD)
scripthash = neo3_wallet.Account.address_to_script_hash(address) scripthash = neo3_wallet.Account.address_to_script_hash(address)
@ -63,7 +49,7 @@ def withdraw_mainnet_gas(shell: Shell, wlt: str, amount: int):
out = neogo.contract.invokefunction( out = neogo.contract.invokefunction(
wallet=wlt, wallet=wlt,
address=address, address=address,
rpc_endpoint=NEO_MAINNET_ENDPOINT, rpc_endpoint=main_chain.get_endpoint(),
scripthash=NEOFS_CONTRACT, scripthash=NEOFS_CONTRACT,
method="withdraw", method="withdraw",
arguments=f"{scripthash} int:{amount}", arguments=f"{scripthash} int:{amount}",
@ -79,7 +65,7 @@ def withdraw_mainnet_gas(shell: Shell, wlt: str, amount: int):
raise AssertionError(f"TX {tx} hasn't been processed") raise AssertionError(f"TX {tx} hasn't been processed")
def transaction_accepted(tx_id: str): def transaction_accepted(main_chain: MainChain, tx_id: str):
""" """
This function returns True in case of accepted TX. This function returns True in case of accepted TX.
Args: Args:
@ -91,7 +77,7 @@ def transaction_accepted(tx_id: str):
try: try:
for _ in range(0, TX_PERSIST_TIMEOUT): for _ in range(0, TX_PERSIST_TIMEOUT):
time.sleep(1) time.sleep(1)
resp = mainnet_rpc_client.get_transaction_height(tx_id) resp = main_chain.rpc_client.get_transaction_height(tx_id)
if resp is not None: if resp is not None:
logger.info(f"TX is accepted in block: {resp}") logger.info(f"TX is accepted in block: {resp}")
return True return True
@ -102,7 +88,7 @@ def transaction_accepted(tx_id: str):
@allure.step("Get NeoFS Balance") @allure.step("Get NeoFS Balance")
def get_balance(shell: Shell, wallet_path: str, wallet_password: str = ""): def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""):
""" """
This function returns NeoFS balance for given wallet. This function returns NeoFS balance for given wallet.
""" """
@ -111,8 +97,8 @@ def get_balance(shell: Shell, wallet_path: str, wallet_password: str = ""):
acc = wallet.accounts[-1] acc = wallet.accounts[-1]
payload = [{"type": "Hash160", "value": str(acc.script_hash)}] payload = [{"type": "Hash160", "value": str(acc.script_hash)}]
try: try:
resp = morph_rpc_client.invoke_function( resp = morph_chain.rpc_client.invoke_function(
get_contract_hash("balance.neofs", shell=shell), "balanceOf", payload get_contract_hash(morph_chain, "balance.neofs", shell=shell), "balanceOf", payload
) )
logger.info(f"Got response \n{resp}") logger.info(f"Got response \n{resp}")
value = int(resp["stack"][0]["value"]) value = int(resp["stack"][0]["value"])
@ -126,9 +112,10 @@ def get_balance(shell: Shell, wallet_path: str, wallet_password: str = ""):
def transfer_gas( def transfer_gas(
shell: Shell, shell: Shell,
amount: int, amount: int,
wallet_from_path: str = MAINNET_WALLET_PATH, main_chain: MainChain,
wallet_from_password: str = MAINNET_WALLET_PASS, wallet_from_path: Optional[str] = None,
address_from: str = MAINNET_SINGLE_ADDR, wallet_from_password: Optional[str] = None,
address_from: Optional[str] = None,
address_to: Optional[str] = None, address_to: Optional[str] = None,
wallet_to_path: Optional[str] = None, wallet_to_path: Optional[str] = None,
wallet_to_password: Optional[str] = None, wallet_to_password: Optional[str] = None,
@ -148,11 +135,20 @@ def transfer_gas(
address_to: The address of the wallet to transfer assets to. address_to: The address of the wallet to transfer assets to.
amount: Amount of gas to transfer. amount: Amount of gas to transfer.
""" """
wallet_from_path = wallet_from_path or main_chain.get_wallet_path()
wallet_from_password = (
wallet_from_password
if wallet_from_password is not None
else main_chain.get_wallet_password()
)
address_from = address_from or get_last_address_from_wallet(
wallet_from_path, wallet_from_password
)
address_to = address_to or get_last_address_from_wallet(wallet_to_path, wallet_to_password) address_to = address_to or get_last_address_from_wallet(wallet_to_path, wallet_to_password)
neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.nep17.transfer( out = neogo.nep17.transfer(
rpc_endpoint=NEO_MAINNET_ENDPOINT, rpc_endpoint=main_chain.get_endpoint(),
wallet=wallet_from_path, wallet=wallet_from_path,
wallet_password=wallet_from_password, wallet_password=wallet_from_password,
amount=amount, amount=amount,
@ -164,13 +160,19 @@ def transfer_gas(
txid = out.stdout.strip().split("\n")[-1] txid = out.stdout.strip().split("\n")[-1]
if len(txid) != 64: if len(txid) != 64:
raise Exception("Got no TXID after run the command") raise Exception("Got no TXID after run the command")
if not transaction_accepted(txid): if not transaction_accepted(main_chain, txid):
raise AssertionError(f"TX {txid} hasn't been processed") raise AssertionError(f"TX {txid} hasn't been processed")
time.sleep(parse_time(MAINNET_BLOCK_TIME)) time.sleep(parse_time(MAINNET_BLOCK_TIME))
@allure.step("NeoFS Deposit") @allure.step("NeoFS Deposit")
def deposit_gas(shell: Shell, amount: int, wallet_from_path: str, wallet_from_password: str): def deposit_gas(
shell: Shell,
main_chain: MainChain,
amount: int,
wallet_from_path: str,
wallet_from_password: str,
):
""" """
Transferring GAS from given wallet to NeoFS contract address. Transferring GAS from given wallet to NeoFS contract address.
""" """
@ -182,6 +184,7 @@ def deposit_gas(shell: Shell, amount: int, wallet_from_path: str, wallet_from_pa
) )
transfer_gas( transfer_gas(
shell=shell, shell=shell,
main_chain=main_chain,
amount=amount, amount=amount,
wallet_from_path=wallet_from_path, wallet_from_path=wallet_from_path,
wallet_from_password=wallet_from_password, wallet_from_password=wallet_from_password,
@ -191,8 +194,8 @@ def deposit_gas(shell: Shell, amount: int, wallet_from_path: str, wallet_from_pa
@allure.step("Get Mainnet Balance") @allure.step("Get Mainnet Balance")
def get_mainnet_balance(address: str): def get_mainnet_balance(main_chain: MainChain, address: str):
resp = mainnet_rpc_client.get_nep17_balances(address=address) resp = main_chain.rpc_client.get_nep17_balances(address=address)
logger.info(f"Got getnep17balances response: {resp}") logger.info(f"Got getnep17balances response: {resp}")
for balance in resp["balance"]: for balance in resp["balance"]:
if balance["assethash"] == GAS_HASH: if balance["assethash"] == GAS_HASH:
@ -201,8 +204,8 @@ def get_mainnet_balance(address: str):
@allure.step("Get Sidechain Balance") @allure.step("Get Sidechain Balance")
def get_sidechain_balance(address: str): def get_sidechain_balance(morph_chain: MorphChain, address: str):
resp = morph_rpc_client.get_nep17_balances(address=address) resp = morph_chain.rpc_client.get_nep17_balances(address=address)
logger.info(f"Got getnep17balances response: {resp}") logger.info(f"Got getnep17balances response: {resp}")
for balance in resp["balance"]: for balance in resp["balance"]:
if balance["assethash"] == GAS_HASH: if balance["assethash"] == GAS_HASH:

View file

@ -6,7 +6,8 @@ import logging
from typing import Optional from typing import Optional
import allure import allure
from common import COMPLEX_OBJ_SIZE, NEOFS_CLI_EXEC, NEOFS_ENDPOINT, SIMPLE_OBJ_SIZE, WALLET_CONFIG from cluster import Cluster
from common import COMPLEX_OBJ_SIZE, NEOFS_CLI_EXEC, SIMPLE_OBJ_SIZE, WALLET_CONFIG
from complex_object_actions import get_link_object from complex_object_actions import get_link_object
from neofs_testlib.cli import NeofsCli from neofs_testlib.cli import NeofsCli
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
@ -18,6 +19,7 @@ logger = logging.getLogger("NeoLogger")
@allure.step("Put Storagegroup") @allure.step("Put Storagegroup")
def put_storagegroup( def put_storagegroup(
shell: Shell, shell: Shell,
endpoint: str,
wallet: str, wallet: str,
cid: str, cid: str,
objects: list, objects: list,
@ -47,7 +49,7 @@ def put_storagegroup(
lifetime=lifetime, lifetime=lifetime,
members=objects, members=objects,
bearer=bearer, bearer=bearer,
rpc_endpoint=NEOFS_ENDPOINT, rpc_endpoint=endpoint,
) )
gid = result.stdout.split("\n")[1].split(": ")[1] gid = result.stdout.split("\n")[1].split(": ")[1]
return gid return gid
@ -56,6 +58,7 @@ def put_storagegroup(
@allure.step("List Storagegroup") @allure.step("List Storagegroup")
def list_storagegroup( def list_storagegroup(
shell: Shell, shell: Shell,
endpoint: str,
wallet: str, wallet: str,
cid: str, cid: str,
bearer: Optional[str] = None, bearer: Optional[str] = None,
@ -78,7 +81,7 @@ def list_storagegroup(
wallet=wallet, wallet=wallet,
cid=cid, cid=cid,
bearer=bearer, bearer=bearer,
rpc_endpoint=NEOFS_ENDPOINT, rpc_endpoint=endpoint,
) )
# throwing off the first string of output # throwing off the first string of output
found_objects = result.stdout.split("\n")[1:] found_objects = result.stdout.split("\n")[1:]
@ -88,6 +91,7 @@ def list_storagegroup(
@allure.step("Get Storagegroup") @allure.step("Get Storagegroup")
def get_storagegroup( def get_storagegroup(
shell: Shell, shell: Shell,
endpoint: str,
wallet: str, wallet: str,
cid: str, cid: str,
gid: str, gid: str,
@ -112,7 +116,7 @@ def get_storagegroup(
cid=cid, cid=cid,
bearer=bearer, bearer=bearer,
id=gid, id=gid,
rpc_endpoint=NEOFS_ENDPOINT, rpc_endpoint=endpoint,
) )
# TODO: temporary solution for parsing output. Needs to be replaced with # TODO: temporary solution for parsing output. Needs to be replaced with
@ -136,6 +140,7 @@ def get_storagegroup(
@allure.step("Delete Storagegroup") @allure.step("Delete Storagegroup")
def delete_storagegroup( def delete_storagegroup(
shell: Shell, shell: Shell,
endpoint: str,
wallet: str, wallet: str,
cid: str, cid: str,
gid: str, gid: str,
@ -160,7 +165,7 @@ def delete_storagegroup(
cid=cid, cid=cid,
bearer=bearer, bearer=bearer,
id=gid, id=gid,
rpc_endpoint=NEOFS_ENDPOINT, rpc_endpoint=endpoint,
) )
tombstone_id = result.stdout.strip().split("\n")[1].split(": ")[1] tombstone_id = result.stdout.strip().split("\n")[1].split(": ")[1]
return tombstone_id return tombstone_id
@ -169,6 +174,7 @@ def delete_storagegroup(
@allure.step("Verify list operation over Storagegroup") @allure.step("Verify list operation over Storagegroup")
def verify_list_storage_group( def verify_list_storage_group(
shell: Shell, shell: Shell,
endpoint: str,
wallet: str, wallet: str,
cid: str, cid: str,
gid: str, gid: str,
@ -176,7 +182,12 @@ def verify_list_storage_group(
wallet_config: str = WALLET_CONFIG, wallet_config: str = WALLET_CONFIG,
): ):
storage_groups = list_storagegroup( storage_groups = list_storagegroup(
shell=shell, wallet=wallet, cid=cid, bearer=bearer, wallet_config=wallet_config shell=shell,
endpoint=endpoint,
wallet=wallet,
cid=cid,
bearer=bearer,
wallet_config=wallet_config,
) )
assert gid in storage_groups assert gid in storage_groups
@ -184,6 +195,7 @@ def verify_list_storage_group(
@allure.step("Verify get operation over Storagegroup") @allure.step("Verify get operation over Storagegroup")
def verify_get_storage_group( def verify_get_storage_group(
shell: Shell, shell: Shell,
cluster: Cluster,
wallet: str, wallet: str,
cid: str, cid: str,
gid: str, gid: str,
@ -193,16 +205,24 @@ def verify_get_storage_group(
wallet_config: str = WALLET_CONFIG, wallet_config: str = WALLET_CONFIG,
): ):
obj_parts = [] obj_parts = []
endpoint = cluster.default_rpc_endpoint
if object_size == COMPLEX_OBJ_SIZE: if object_size == COMPLEX_OBJ_SIZE:
for obj in obj_list: for obj in obj_list:
link_oid = get_link_object( link_oid = get_link_object(
wallet, cid, obj, shell=shell, bearer=bearer, wallet_config=wallet_config wallet,
cid,
obj,
shell=shell,
nodes=cluster.storage_nodes,
bearer=bearer,
wallet_config=wallet_config,
) )
obj_head = head_object( obj_head = head_object(
wallet=wallet, wallet=wallet,
cid=cid, cid=cid,
oid=link_oid, oid=link_oid,
shell=shell, shell=shell,
endpoint=endpoint,
is_raw=True, is_raw=True,
bearer=bearer, bearer=bearer,
wallet_config=wallet_config, wallet_config=wallet_config,
@ -212,6 +232,7 @@ def verify_get_storage_group(
obj_num = len(obj_list) obj_num = len(obj_list)
storagegroup_data = get_storagegroup( storagegroup_data = get_storagegroup(
shell=shell, shell=shell,
endpoint=endpoint,
wallet=wallet, wallet=wallet,
cid=cid, cid=cid,
gid=gid, gid=gid,

View file

@ -6,12 +6,12 @@
""" """
import logging import logging
from typing import List, Optional from typing import List
import allure import allure
import complex_object_actions import complex_object_actions
import neofs_verbs import neofs_verbs
from common import NEOFS_NETMAP from cluster import StorageNode
from grpc_responses import OBJECT_NOT_FOUND, error_matches_status from grpc_responses import OBJECT_NOT_FOUND, error_matches_status
from neofs_testlib.shell import Shell from neofs_testlib.shell import Shell
@ -19,7 +19,9 @@ logger = logging.getLogger("NeoLogger")
@allure.step("Get Object Copies") @allure.step("Get Object Copies")
def get_object_copies(complexity: str, wallet: str, cid: str, oid: str, shell: Shell) -> int: def get_object_copies(
complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
""" """
The function performs requests to all nodes of the container and The function performs requests to all nodes of the container and
finds out if they store a copy of the object. The procedure is finds out if they store a copy of the object. The procedure is
@ -37,14 +39,16 @@ def get_object_copies(complexity: str, wallet: str, cid: str, oid: str, shell: S
(int): the number of object copies in the container (int): the number of object copies in the container
""" """
return ( return (
get_simple_object_copies(wallet, cid, oid, shell) get_simple_object_copies(wallet, cid, oid, shell, nodes)
if complexity == "Simple" if complexity == "Simple"
else get_complex_object_copies(wallet, cid, oid, shell) else get_complex_object_copies(wallet, cid, oid, shell, nodes)
) )
@allure.step("Get Simple Object Copies") @allure.step("Get Simple Object Copies")
def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell) -> int: def get_simple_object_copies(
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
""" """
To figure out the number of a simple object copies, only direct To figure out the number of a simple object copies, only direct
HEAD requests should be made to the every node of the container. HEAD requests should be made to the every node of the container.
@ -55,14 +59,15 @@ def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell) -> i
cid (str): ID of the container cid (str): ID of the container
oid (str): ID of the Object oid (str): ID of the Object
shell: executor for cli command shell: executor for cli command
nodes: nodes to search on
Returns: Returns:
(int): the number of object copies in the container (int): the number of object copies in the container
""" """
copies = 0 copies = 0
for node in NEOFS_NETMAP: for node in nodes:
try: try:
response = neofs_verbs.head_object( response = neofs_verbs.head_object(
wallet, cid, oid, shell=shell, endpoint=node, is_direct=True wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
) )
if response: if response:
logger.info(f"Found object {oid} on node {node}") logger.info(f"Found object {oid} on node {node}")
@ -74,7 +79,9 @@ def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell) -> i
@allure.step("Get Complex Object Copies") @allure.step("Get Complex Object Copies")
def get_complex_object_copies(wallet: str, cid: str, oid: str, shell: Shell) -> int: def get_complex_object_copies(
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
""" """
To figure out the number of a complex object copies, we firstly To figure out the number of a complex object copies, we firstly
need to retrieve its Last object. We consider that the number of need to retrieve its Last object. We consider that the number of
@ -90,37 +97,40 @@ def get_complex_object_copies(wallet: str, cid: str, oid: str, shell: Shell) ->
Returns: Returns:
(int): the number of object copies in the container (int): the number of object copies in the container
""" """
last_oid = complex_object_actions.get_last_object(wallet, cid, oid, shell) last_oid = complex_object_actions.get_last_object(wallet, cid, oid, shell, nodes)
assert last_oid, f"No Last Object for {cid}/{oid} found among all Storage Nodes" assert last_oid, f"No Last Object for {cid}/{oid} found among all Storage Nodes"
return get_simple_object_copies(wallet, cid, last_oid, shell) return get_simple_object_copies(wallet, cid, last_oid, shell, nodes)
@allure.step("Get Nodes With Object") @allure.step("Get Nodes With Object")
def get_nodes_with_object( def get_nodes_with_object(
wallet: str, cid: str, oid: str, shell: Shell, skip_nodes: Optional[list[str]] = None cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> list[str]: ) -> list[StorageNode]:
""" """
The function returns list of nodes which store The function returns list of nodes which store
the given object. the given object.
Args: Args:
wallet (str): the path to the wallet on whose behalf
we request the nodes
cid (str): ID of the container which store the object cid (str): ID of the container which store the object
oid (str): object ID oid (str): object ID
shell: executor for cli command shell: executor for cli command
skip_nodes (list): list of nodes that should be excluded from check nodes: nodes to find on
Returns: Returns:
(list): nodes which store the object (list): nodes which store the object
""" """
nodes_to_search = NEOFS_NETMAP
if skip_nodes:
nodes_to_search = [node for node in NEOFS_NETMAP if node not in skip_nodes]
nodes_list = [] nodes_list = []
for node in nodes_to_search: for node in nodes:
wallet = node.get_wallet_path()
wallet_config = node.get_wallet_config_path()
try: try:
res = neofs_verbs.head_object( res = neofs_verbs.head_object(
wallet, cid, oid, shell=shell, endpoint=node, is_direct=True wallet,
cid,
oid,
shell=shell,
endpoint=node.get_rpc_endpoint(),
is_direct=True,
wallet_config=wallet_config,
) )
if res is not None: if res is not None:
logger.info(f"Found object {oid} on node {node}") logger.info(f"Found object {oid} on node {node}")
@ -132,7 +142,9 @@ def get_nodes_with_object(
@allure.step("Get Nodes Without Object") @allure.step("Get Nodes Without Object")
def get_nodes_without_object(wallet: str, cid: str, oid: str, shell: Shell) -> List[str]: def get_nodes_without_object(
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> list[StorageNode]:
""" """
The function returns list of nodes which do not store The function returns list of nodes which do not store
the given object. the given object.
@ -146,10 +158,10 @@ def get_nodes_without_object(wallet: str, cid: str, oid: str, shell: Shell) -> L
(list): nodes which do not store the object (list): nodes which do not store the object
""" """
nodes_list = [] nodes_list = []
for node in NEOFS_NETMAP: for node in nodes:
try: try:
res = neofs_verbs.head_object( res = neofs_verbs.head_object(
wallet, cid, oid, shell=shell, endpoint=node, is_direct=True wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
) )
if res is None: if res is None:
nodes_list.append(node) nodes_list.append(node)

View file

@ -10,8 +10,10 @@ logger = logging.getLogger("NeoLogger")
@allure.step("Verify Head Tombstone") @allure.step("Verify Head Tombstone")
def verify_head_tombstone(wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell): def verify_head_tombstone(
header = head_object(wallet_path, cid, oid_ts, shell=shell)["header"] wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str
):
header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"] s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"]
logger.info(f"Header Session OIDs is {s_oid}") logger.info(f"Header Session OIDs is {s_oid}")

View file

@ -17,13 +17,6 @@ NEOFS_CONTRACT_CACHE_TIMEOUT = os.getenv("NEOFS_CONTRACT_CACHE_TIMEOUT", "30s")
# of 1min plus 15 seconds for GC pass itself) # of 1min plus 15 seconds for GC pass itself)
STORAGE_GC_TIME = os.getenv("STORAGE_GC_TIME", "75s") STORAGE_GC_TIME = os.getenv("STORAGE_GC_TIME", "75s")
# TODO: we should use hosting instead of these endpoints
NEOFS_ENDPOINT = os.getenv("NEOFS_ENDPOINT", "s01.neofs.devenv:8080")
NEO_MAINNET_ENDPOINT = os.getenv("NEO_MAINNET_ENDPOINT", "http://main-chain.neofs.devenv:30333")
MORPH_ENDPOINT = os.getenv("MORPH_ENDPOINT", "http://morph-chain.neofs.devenv:30333")
HTTP_GATE = os.getenv("HTTP_GATE", "http://http.neofs.devenv")
S3_GATE = os.getenv("S3_GATE", "https://s3.neofs.devenv:8080")
GAS_HASH = os.getenv("GAS_HASH", "0xd2a4cff31913016155e38e474a2c06d08be276cf") GAS_HASH = os.getenv("GAS_HASH", "0xd2a4cff31913016155e38e474a2c06d08be276cf")
NEOFS_CONTRACT = os.getenv("NEOFS_IR_CONTRACTS_NEOFS") NEOFS_CONTRACT = os.getenv("NEOFS_IR_CONTRACTS_NEOFS")
@ -43,81 +36,12 @@ BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 10)
BACKGROUND_OBJ_SIZE = os.getenv("BACKGROUND_OBJ_SIZE", 1024) BACKGROUND_OBJ_SIZE = os.getenv("BACKGROUND_OBJ_SIZE", 1024)
BACKGROUND_LOAD_MAX_TIME = os.getenv("BACKGROUND_LOAD_MAX_TIME", 600) BACKGROUND_LOAD_MAX_TIME = os.getenv("BACKGROUND_LOAD_MAX_TIME", 600)
# Configuration of storage nodes
# TODO: we should use hosting instead of all these variables
STORAGE_RPC_ENDPOINT_1 = os.getenv("STORAGE_RPC_ENDPOINT_1", "s01.neofs.devenv:8080")
STORAGE_RPC_ENDPOINT_2 = os.getenv("STORAGE_RPC_ENDPOINT_2", "s02.neofs.devenv:8080")
STORAGE_RPC_ENDPOINT_3 = os.getenv("STORAGE_RPC_ENDPOINT_3", "s03.neofs.devenv:8080")
STORAGE_RPC_ENDPOINT_4 = os.getenv("STORAGE_RPC_ENDPOINT_4", "s04.neofs.devenv:8080")
STORAGE_CONTROL_ENDPOINT_1 = os.getenv("STORAGE_CONTROL_ENDPOINT_1", "s01.neofs.devenv:8081")
STORAGE_CONTROL_ENDPOINT_2 = os.getenv("STORAGE_CONTROL_ENDPOINT_2", "s02.neofs.devenv:8081")
STORAGE_CONTROL_ENDPOINT_3 = os.getenv("STORAGE_CONTROL_ENDPOINT_3", "s03.neofs.devenv:8081")
STORAGE_CONTROL_ENDPOINT_4 = os.getenv("STORAGE_CONTROL_ENDPOINT_4", "s04.neofs.devenv:8081")
STORAGE_WALLET_PATH_1 = os.getenv(
"STORAGE_WALLET_PATH_1", os.path.join(DEVENV_PATH, "services", "storage", "wallet01.json")
)
STORAGE_WALLET_PATH_2 = os.getenv(
"STORAGE_WALLET_PATH_2", os.path.join(DEVENV_PATH, "services", "storage", "wallet02.json")
)
STORAGE_WALLET_PATH_3 = os.getenv(
"STORAGE_WALLET_PATH_3", os.path.join(DEVENV_PATH, "services", "storage", "wallet03.json")
)
STORAGE_WALLET_PATH_4 = os.getenv(
"STORAGE_WALLET_PATH_4", os.path.join(DEVENV_PATH, "services", "storage", "wallet04.json")
)
STORAGE_WALLET_PATH = STORAGE_WALLET_PATH_1
STORAGE_WALLET_PASS = os.getenv("STORAGE_WALLET_PASS", "")
NEOFS_NETMAP_DICT = {
"s01": {
"rpc": STORAGE_RPC_ENDPOINT_1,
"control": STORAGE_CONTROL_ENDPOINT_1,
"wallet_path": STORAGE_WALLET_PATH_1,
"UN-LOCODE": "RU MOW",
},
"s02": {
"rpc": STORAGE_RPC_ENDPOINT_2,
"control": STORAGE_CONTROL_ENDPOINT_2,
"wallet_path": STORAGE_WALLET_PATH_2,
"UN-LOCODE": "RU LED",
},
"s03": {
"rpc": STORAGE_RPC_ENDPOINT_3,
"control": STORAGE_CONTROL_ENDPOINT_3,
"wallet_path": STORAGE_WALLET_PATH_3,
"UN-LOCODE": "SE STO",
},
"s04": {
"rpc": STORAGE_RPC_ENDPOINT_4,
"control": STORAGE_CONTROL_ENDPOINT_4,
"wallet_path": STORAGE_WALLET_PATH_4,
"UN-LOCODE": "FI HEL",
},
}
NEOFS_NETMAP = [node["rpc"] for node in NEOFS_NETMAP_DICT.values()]
# Paths to CLI executables on machine that runs tests # Paths to CLI executables on machine that runs tests
NEOGO_EXECUTABLE = os.getenv("NEOGO_EXECUTABLE", "neo-go") NEOGO_EXECUTABLE = os.getenv("NEOGO_EXECUTABLE", "neo-go")
NEOFS_CLI_EXEC = os.getenv("NEOFS_CLI_EXEC", "neofs-cli") NEOFS_CLI_EXEC = os.getenv("NEOFS_CLI_EXEC", "neofs-cli")
NEOFS_AUTHMATE_EXEC = os.getenv("NEOFS_AUTHMATE_EXEC", "neofs-authmate") NEOFS_AUTHMATE_EXEC = os.getenv("NEOFS_AUTHMATE_EXEC", "neofs-authmate")
NEOFS_ADM_EXEC = os.getenv("NEOFS_ADM_EXEC", "neofs-adm") NEOFS_ADM_EXEC = os.getenv("NEOFS_ADM_EXEC", "neofs-adm")
MAINNET_WALLET_PATH = os.getenv(
"MAINNET_WALLET_PATH", os.path.join(DEVENV_PATH, "services", "chain", "node-wallet.json")
)
MAINNET_SINGLE_ADDR = os.getenv("MAINNET_SINGLE_ADDR", "NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP")
MAINNET_WALLET_PASS = os.getenv("MAINNET_WALLET_PASS", "one")
IR_WALLET_PATH = os.getenv("IR_WALLET_PATH", os.path.join(DEVENV_PATH, "services", "ir", "az.json"))
IR_WALLET_PASS = os.getenv("IR_WALLET_PASS", "one")
S3_GATE_WALLET_PATH = os.getenv(
"S3_GATE_WALLET_PATH", os.path.join(DEVENV_PATH, "services", "s3_gate", "wallet.json")
)
S3_GATE_WALLET_PASS = os.getenv("S3_GATE_WALLET_PASS", "s3")
# Config for neofs-adm utility. Optional if tests are running against devenv # Config for neofs-adm utility. Optional if tests are running against devenv
NEOFS_ADM_CONFIG_PATH = os.getenv("NEOFS_ADM_CONFIG_PATH") NEOFS_ADM_CONFIG_PATH = os.getenv("NEOFS_ADM_CONFIG_PATH")
@ -134,15 +58,3 @@ S3_GATE_SERVICE_NAME_REGEX = r"s3-gate\d\d"
WALLET_CONFIG = os.path.join(os.getcwd(), "wallet_config.yml") WALLET_CONFIG = os.path.join(os.getcwd(), "wallet_config.yml")
with open(WALLET_CONFIG, "w") as file: with open(WALLET_CONFIG, "w") as file:
yaml.dump({"password": WALLET_PASS}, file) yaml.dump({"password": WALLET_PASS}, file)
STORAGE_WALLET_CONFIG = os.path.join(os.getcwd(), "storage_wallet_config.yml")
with open(STORAGE_WALLET_CONFIG, "w") as file:
yaml.dump({"password": STORAGE_WALLET_PASS}, file)
MAINNET_WALLET_CONFIG = os.path.join(os.getcwd(), "mainnet_wallet_config.yml")
with open(MAINNET_WALLET_CONFIG, "w") as file:
yaml.dump({"password": MAINNET_WALLET_PASS}, file)
IR_WALLET_CONFIG = os.path.join(os.getcwd(), "ir_wallet_config.yml")
with open(IR_WALLET_CONFIG, "w") as file:
yaml.dump({"password": IR_WALLET_PASS}, file)