Compare commits

...

25 commits

Author SHA1 Message Date
48f75759f5 [#298] add morph rule chain
2024-10-21 23:50:15 +03:00
31abf77a19 [#292] get namespace endpoint 2024-09-18 12:30:04 +00:00
967cae1092 [#268] add no rule found object and morph chain
2024-09-11 19:41:50 +03:00
4989356ed0 [#273] Fix get contracts method
2024-07-29 11:14:11 +03:00
ceda40c11d [#262] Add error pattern no rule 2024-07-17 21:08:53 +00:00
a3232e9125 [#260] add tests for preupgrade
2024-07-15 15:07:27 +03:00
c40b637768 [#255] add filter priority to get_filtered_logs method
2024-06-29 12:45:16 +03:00
1880f96277 [#249] add metrics methods
2024-06-21 14:14:59 +03:00
c1e5dd1007 [#246] Use TestFiles which automatically deletes itself
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-06-18 13:38:26 +03:00
f4d71b664d [#244] Update versions check
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-06-07 17:06:28 +03:00
da1a4d0099 [#240] write cache metrics 2024-06-06 14:23:56 +00:00
3e36defb90 [#242] New error patterns
2024-06-06 13:04:19 +03:00
6810765d46 [#237] Update S3 acl verify method
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-06-05 14:49:32 +03:00
2cffff3ffe [#235] grpc metrics
2024-05-31 09:42:15 +03:00
d9f4e88f94 [#232]Change provide methods 2024-05-30 14:54:45 +00:00
deb2f12bec [#228] metrics for object
2024-05-28 11:34:45 +03:00
f236c1b083 Added delete bucket policy method to s3 client
2024-05-22 11:12:20 +03:00
cc13a43bec [#227] Restore invalid_obj check
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-05-16 09:58:04 +00:00
a74d1bff4f [#220] add container metrics 2024-05-16 08:18:23 +00:00
547f6106ec [#222] Added new control command CLI
2024-05-14 16:16:42 +03:00
c2aa41e5dc [#217] Add parameter max_total_size_gb 2024-05-06 08:16:59 +00:00
8e446ccb96 [#219] Add ns attribute for container create
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2024-05-06 08:16:42 +00:00
9c9fb7878a [#215] Removed x10 wait in delete bucket function
2024-04-24 15:05:14 +03:00
3a799afdcf [#211] Return response in complete_multipart_upload function
2024-04-23 23:55:12 +03:00
b610e04a7b [#208] Add await for search func
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2024-04-17 11:06:32 +03:00
29 changed files with 776 additions and 444 deletions

View file

@ -69,9 +69,7 @@ class FrostfsAdmMorph(CliCommand):
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def set_config(
self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None
) -> CommandResult:
def set_config(self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult:
"""Add/update global config value in the FrostFS network.
Args:
@ -110,7 +108,7 @@ class FrostfsAdmMorph(CliCommand):
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def dump_hashes(self, rpc_endpoint: str) -> CommandResult:
def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult:
"""Dump deployed contract hashes.
Args:
@ -124,9 +122,7 @@ class FrostfsAdmMorph(CliCommand):
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def force_new_epoch(
self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None
) -> CommandResult:
def force_new_epoch(self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult:
"""Create new FrostFS epoch event in the side chain.
Args:
@ -344,9 +340,120 @@ class FrostfsAdmMorph(CliCommand):
return self._execute(
f"morph remove-nodes {' '.join(node_netmap_keys)}",
**{
param: param_value
for param, param_value in locals().items()
if param not in ["self", "node_netmap_keys"]
},
**{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]},
)
def add_rule(
self,
chain_id: str,
target_name: str,
target_type: str,
rule: Optional[list[str]] = None,
path: Optional[str] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
path: Path to encoded chain in JSON or binary format
rule: Rule statement
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"morph ape add-rule-chain",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get_rule(
self,
chain_id: str,
target_name: str,
target_type: str,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
chain-id string Chain id
chain-id-hex Flag to parse chain ID as hex
target-name string Resource name in APE resource name format
target-type string Resource type(container/namespace)
timeout duration Timeout for an operation (default 15s)
wallet string Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"morph ape get-rule-chain",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list_rules(
self,
target_type: str,
target_name: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"morph ape list-rule-chains",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def remove_rule(
self,
chain_id: str,
target_name: str,
target_type: str,
all: Optional[bool] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
all: Remove all chains
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"morph ape rm-rule-chain",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
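The four helpers above wrap the frostfs-adm "morph ape ..." subcommands. A minimal usage sketch follows; the frostfs_adm wrapper instance, the chain ID, the container ID, and the rule statement text are assumptions for illustration and are not part of this change.

# Hypothetical sketch: exercising the new APE rule-chain wrappers from a test.
# `frostfs_adm` is an already constructed FrostfsAdmMorph instance.
CHAIN_ID = "allow-all-demo"
CONTAINER_ID = "EXAMPLE_CONTAINER_ID"

frostfs_adm.add_rule(
    chain_id=CHAIN_ID,
    target_name=CONTAINER_ID,
    target_type="container",
    rule=["allow Object.* *"],  # assumed rule statement syntax
)
frostfs_adm.get_rule(chain_id=CHAIN_ID, target_name=CONTAINER_ID, target_type="container")
frostfs_adm.list_rules(target_type="container", target_name=CONTAINER_ID)
frostfs_adm.remove_rule(chain_id=CHAIN_ID, target_name=CONTAINER_ID, target_type="container")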

View file

@ -9,6 +9,8 @@ class FrostfsCliContainer(CliCommand):
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
address: Optional[str] = None,
attributes: Optional[dict] = None,
basic_acl: Optional[str] = None,
@ -45,6 +47,8 @@ class FrostfsCliContainer(CliCommand):
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
nns_zone: Container nns zone attribute.
nns_name: Container nns name attribute.
Returns:
Command's result.

View file

@ -69,7 +69,7 @@ class FrostfsCliControl(CliCommand):
wallet: Path to the wallet or binary key
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
objects: List of object addresses to be removed in string format
objects: List of object addresses to be removed in string format
timeout: Timeout for an operation (default 15s)
Returns:
@ -78,4 +78,155 @@ class FrostfsCliControl(CliCommand):
return self._execute(
"control drop-objects",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
)
def add_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
rule: Optional[list[str]] = None,
path: Optional[str] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
path: Path to encoded chain in JSON or binary format
rule: Rule statement
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control add-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address string Address of wallet account
chain-id string Chain id
chain-id-hex Flag to parse chain ID as hex
endpoint string Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name string Resource name in APE resource name format
target-type string Resource type(container/namespace)
timeout duration Timeout for an operation (default 15s)
wallet string Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control get-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list_rules(
self,
endpoint: str,
target_name: str,
target_type: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control list-rules",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list_targets(
self,
endpoint: str,
chain_name: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
chain-name: Chain name(ingress|s3)
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control list-targets",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def remove_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
all: Optional[bool] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
all: Remove all chains
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control remove-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
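The methods above are the node-local counterparts that go through "frostfs-cli control ..." and additionally require the control endpoint. A hedged call sketch, with the endpoint, chain ID, container ID, and rule text as illustrative placeholders:

# Hypothetical sketch: the same rule-chain flow against a single node's control service.
# `cli_control` is an already constructed FrostfsCliControl instance.
ENDPOINT = "s01.storage.example:8091"
cli_control.add_rule(
    endpoint=ENDPOINT,
    chain_id="deny-get-demo",
    target_name="EXAMPLE_CONTAINER_ID",
    target_type="container",
    rule=["deny Object.Get *"],  # assumed rule statement syntax
)
cli_control.list_rules(endpoint=ENDPOINT, target_name="EXAMPLE_CONTAINER_ID", target_type="container")
cli_control.remove_rule(
    endpoint=ENDPOINT,
    chain_id="deny-get-demo",
    target_name="EXAMPLE_CONTAINER_ID",
    target_type="container",
)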

View file

@ -27,3 +27,27 @@ class FrostfsCliTree(CliCommand):
"tree healthcheck",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list(
self,
cid: str,
rpc_endpoint: Optional[str] = None,
wallet: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Get Tree List
Args:
cid: Container ID.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
wallet: WIF (NEP-2) string or path to the wallet or binary key.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"tree list",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
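For completeness, a call sketch for the new "tree list" wrapper; the cli_tree instance, container ID, and endpoint are assumptions for illustration.

# Hypothetical sketch: `cli_tree` is an already constructed FrostfsCliTree instance.
# `result` is the CommandResult returned by the wrapper.
result = cli_tree.list(cid="EXAMPLE_CONTAINER_ID", rpc_endpoint="s01.storage.example:8802")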

View file

@ -26,7 +26,7 @@ class S3CredentialsProvider(ABC):
self.cluster = cluster
@abstractmethod
def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials:
def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None, **kwargs) -> S3Credentials:
raise NotImplementedError("Directly called abstract class?")
@ -35,7 +35,7 @@ class GrpcCredentialsProvider(ABC):
self.cluster = cluster
@abstractmethod
def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo:
def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo:
raise NotImplementedError("Directly called abstract class?")

View file

@ -47,6 +47,14 @@ class BasicHealthcheck(Healthcheck):
self._perform(cluster_node, checks)
@wait_for_success(900, 30, title="Wait for tree healthcheck on {cluster_node}")
def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None:
checks = {
self._tree_healthcheck: {},
}
self._perform(cluster_node, checks)
@wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}")
def services_healthcheck(self, cluster_node: ClusterNode):
svcs_to_check = cluster_node.services

View file

@ -19,3 +19,7 @@ class Healthcheck(ABC):
@abstractmethod
def services_healthcheck(self, cluster_node: ClusterNode):
"""Perform service status check on target cluster node"""
@abstractmethod
def tree_healthcheck(self, cluster_node: ClusterNode):
"""Perform tree healthcheck on target cluster node"""

View file

@ -240,6 +240,7 @@ class DockerHost(Host):
until: Optional[datetime] = None,
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None
) -> str:
client = self._get_docker_client()
filtered_logs = ""

View file

@ -297,6 +297,7 @@ class Host(ABC):
until: Optional[datetime] = None,
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None
) -> str:
"""Get logs from host filtered by regex.
@ -305,6 +306,8 @@ class Host(ABC):
since: If set, limits the time from which logs should be collected. Must be in UTC.
until: If set, limits the time until which logs should be collected. Must be in UTC.
unit: required unit.
priority: Log level threshold: 0 is emergency, 7 is debug. Messages at this level and more severe are returned;
for example, priority 2 makes journalctl show messages with levels 2, 1 and 0.
Returns:
Found entries as str if any found.
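A hedged call sketch for the new priority filter; only the parameters shown in this hunk come from the change, while the positional filter regex, the host object, the unit name, and the time window are assumptions for illustration.

from datetime import datetime, timedelta, timezone

# Hypothetical sketch: collect only warning-or-worse entries for one unit.
# journalctl -p 4 returns levels 4 (warning), 3, 2, 1 and 0.
logs = host.get_filtered_logs(
    "panic|error",                                         # assumed positional filter regex
    since=datetime.now(timezone.utc) - timedelta(minutes=10),
    unit="frostfs-storage",                                # illustrative unit name
    priority="4",
)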

View file

@ -86,7 +86,7 @@ class SummarizedStats:
target.latencies.by_node[node_key] = operation.latency
target.throughput += operation.throughput
target.errors.threshold = load_params.error_threshold
target.total_bytes = operation.total_bytes
target.total_bytes += operation.total_bytes
if operation.failed_iterations:
target.errors.by_node[node_key] = operation.failed_iterations

View file

@ -119,6 +119,8 @@ class NodesSelectionStrategy(Enum):
ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST"
# Select ONE random node except under test (useful for failover).
RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST"
# Select node under test
NODE_UNDER_TEST = "NODE_UNDER_TEST"
class EndpointSelectionStrategy(Enum):
@ -233,6 +235,8 @@ class LoadParams:
)
# Percentage of filling of all data disks on all nodes
fill_percent: Optional[float] = None
# if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved.
max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB")
# if set, the payload is generated on the fly and is not read into memory fully.
streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False)
# Output format

View file

@ -57,6 +57,8 @@ class LoadVerifier:
invalid_objects = verify_metrics.read.failed_iterations
total_left_objects = load_metrics.write.success_iterations - delete_success
if invalid_objects > 0:
issues.append(f"There were {invalid_objects} verification fails (hash mismatch).")
# Due to interruptions, the total of verified objects may be lower than the number written by up to the writers count
if abs(total_left_objects - verified_objects) > writers:
issues.append(

View file

@ -23,6 +23,10 @@ INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow"
INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier"
INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier"
S3_MALFORMED_XML_REQUEST = (
"The XML you provided was not well-formed or did not validate against our published schema."
)
S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs"
S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema."
RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied"
RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound"
NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"
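The new patterns are format strings with an {operation} placeholder, so a test can build the exact expected message; the operation name and the sample output line below are illustrative.

expected = NO_RULE_FOUND_CONTAINER.format(operation="PutContainer")
sample_output = "access to container operation PutContainer is denied by access policy engine: NoRuleFound"
assert expected in sample_output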

View file

@ -0,0 +1,9 @@
ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
ALL_USERS_GROUP_WRITE_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "WRITE"}
ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "READ"}
CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"}
# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl
PRIVATE_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT]
PUBLIC_READ_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT, ALL_USERS_GROUP_READ_GRANT]
PUBLIC_READ_WRITE_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT, ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT]
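A hedged sketch of how these grant sets might be checked against a bucket ACL. Real S3 responses usually carry extra Grantee fields (ID, DisplayName), so the comparison matches by subset; the actual_grants value is whatever get_bucket_acl returns (a list of grant dicts in the wrappers in this change).

def has_grant(actual_grants: list[dict], expected: dict) -> bool:
    # A grant matches when the expected permission and every expected Grantee
    # key/value pair are present in one of the actual grants.
    return any(
        grant.get("Permission") == expected["Permission"]
        and expected["Grantee"].items() <= grant.get("Grantee", {}).items()
        for grant in actual_grants
    )


def assert_public_read(actual_grants: list[dict]) -> None:
    for expected in PUBLIC_READ_GRANTS:
        assert has_grant(actual_grants, expected), f"Missing grant: {expected}"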

View file

@ -14,6 +14,7 @@ from frostfs_testlib.shell.local_shell import LocalShell
# TODO: Refactor this code to use shell instead of _cmd_run
from frostfs_testlib.utils.cli_utils import _configure_aws_cli
from frostfs_testlib.utils.file_utils import TestFile
logger = logging.getLogger("NeoLogger")
command_options = CommandOptions(timeout=480)
@ -153,8 +154,7 @@ class AwsCliClient(S3ClientWrapper):
@reporter.step("Get bucket acl")
def get_bucket_acl(self, bucket: str) -> list:
cmd = (
f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
@ -172,10 +172,7 @@ class AwsCliClient(S3ClientWrapper):
@reporter.step("List objects S3")
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
cmd = (
f"aws {self.common_flags} s3api list-objects --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
@ -319,18 +316,18 @@ class AwsCliClient(S3ClientWrapper):
version_id: Optional[str] = None,
object_range: Optional[tuple[int, int]] = None,
full_output: bool = False,
) -> Union[dict, str]:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
) -> dict | TestFile:
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())))
version = f" --version-id {version_id}" if version_id else ""
cmd = (
f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} "
f"{version} {file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
f"{version} {test_file} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
if object_range:
cmd += f" --range bytes={object_range[0]}-{object_range[1]}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response if full_output else file_path
return response if full_output else test_file
@reporter.step("Get object ACL")
def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
@ -489,6 +486,16 @@ class AwsCliClient(S3ClientWrapper):
response = self._to_json(output)
return response.get("Policy")
@reporter.step("Delete bucket policy")
def delete_bucket_policy(self, bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Put bucket policy")
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
# Left as it was in the test repo: double dumps to escape the resulting string
@ -573,7 +580,7 @@ class AwsCliClient(S3ClientWrapper):
self.local_shell.exec(cmd)
@reporter.step("Put object tagging")
def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None:
def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
version = f" --version-id {version_id}" if version_id else ""
@ -612,8 +619,7 @@ class AwsCliClient(S3ClientWrapper):
metadata: Optional[dict] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} "
f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
)
if metadata:
cmd += " --metadata"
@ -729,7 +735,10 @@ class AwsCliClient(S3ClientWrapper):
f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} "
f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
)
self.local_shell.exec(cmd)
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Put object lock configuration")
def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
@ -766,9 +775,7 @@ class AwsCliClient(S3ClientWrapper):
@reporter.step("Adds the specified user to the specified group")
def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -776,12 +783,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Attaches the specified managed policy to the specified IAM group")
def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
cmd = (
f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -790,12 +794,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Attaches the specified managed policy to the specified user")
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
cmd = (
f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -804,12 +805,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
def iam_create_access_key(self, user_name: Optional[str] = None) -> dict:
cmd = (
f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
if user_name:
@ -824,12 +822,9 @@ class AwsCliClient(S3ClientWrapper):
return access_key_id, secret_access_key
@reporter.step("Creates a new group")
def iam_create_group(self, group_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -840,7 +835,6 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Creates a new managed policy for your AWS account")
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
cmd = (
@ -858,12 +852,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Creates a new IAM user for your AWS account")
def iam_create_user(self, user_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -874,12 +865,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Deletes the access key pair associated with the specified IAM user")
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
@ -888,12 +876,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Deletes the specified IAM group")
def iam_delete_group(self, group_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -901,12 +886,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group")
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -914,12 +896,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Deletes the specified managed policy")
def iam_delete_policy(self, policy_arn: str) -> dict:
cmd = (
f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -927,26 +906,19 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Deletes the specified IAM user")
def iam_delete_user(self, user_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user")
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -954,12 +926,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Removes the specified managed policy from the specified IAM group")
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
cmd = (
f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -968,12 +937,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Removes the specified managed policy from the specified user")
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
cmd = (
f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -982,12 +948,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Returns a list of IAM users that are in the specified IAM group")
def iam_get_group(self, group_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -998,12 +961,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1011,12 +971,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Retrieves information about the specified managed policy")
def iam_get_policy(self, policy_arn: str) -> dict:
cmd = (
f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1027,12 +984,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Retrieves information about the specified version of the specified managed policy")
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
cmd = (
f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1043,12 +997,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Retrieves information about the specified IAM user")
def iam_get_user(self, user_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1059,12 +1010,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user")
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1074,12 +1022,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Returns information about the access key IDs associated with the specified IAM user")
def iam_list_access_keys(self, user_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1087,12 +1032,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Lists all managed policies that are attached to the specified IAM group")
def iam_list_attached_group_policies(self, group_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1102,12 +1044,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Lists all managed policies that are attached to the specified IAM user")
def iam_list_attached_user_policies(self, user_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1117,12 +1056,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
cmd = (
f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1133,12 +1069,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group")
def iam_list_group_policies(self, group_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1148,12 +1081,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Lists the IAM groups")
def iam_list_groups(self) -> dict:
cmd = (
f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1163,12 +1093,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Lists the IAM groups that the specified IAM user belongs to")
def iam_list_groups_for_user(self, user_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1178,27 +1105,21 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Lists all the managed policies that are available in your AWS account")
def iam_list_policies(self) -> dict:
cmd = (
f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert 'Policies' in response.keys(), f"Expected Policies in response:\n{response}"
assert "Policies" in response.keys(), f"Expected Policies in response:\n{response}"
return response
@reporter.step("Lists information about the versions of the specified managed policy")
def iam_list_policy_versions(self, policy_arn: str) -> dict:
cmd = (
f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1208,12 +1129,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Lists the names of the inline policies embedded in the specified IAM user")
def iam_list_user_policies(self, user_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1223,12 +1141,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Lists the IAM users")
def iam_list_users(self) -> dict:
cmd = (
f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
@ -1238,12 +1153,11 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group")
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
cmd = (
f"aws {self.common_flags} iam put-group-policy --endpoint {self.iam_endpoint}"
f" --group-name {group_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'"
f" --group-name {group_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'"
)
if self.profile:
cmd += f" --profile {self.profile}"
@ -1253,12 +1167,11 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
cmd = (
f"aws {self.common_flags} iam put-user-policy --endpoint {self.iam_endpoint}"
f" --user-name {user_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'"
f" --user-name {user_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'"
)
if self.profile:
cmd += f" --profile {self.profile}"
@ -1269,7 +1182,6 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Removes the specified user from the specified group")
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
cmd = (
@ -1283,12 +1195,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Updates the name and/or the path of the specified IAM group")
def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
cmd = (
f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}"
if new_name:
cmd += f" --new-group-name {new_name}"
if new_path:
@ -1301,12 +1210,9 @@ class AwsCliClient(S3ClientWrapper):
return response
@reporter.step("Updates the name and/or the path of the specified IAM user")
def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
cmd = (
f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}"
)
cmd = f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}"
if new_name:
cmd += f" --new-user-name {new_name}"
if new_path:
@ -1318,5 +1224,3 @@ class AwsCliClient(S3ClientWrapper):
response = self._to_json(output)
return response
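Two behaviors introduced in this file are worth a short usage sketch: the new delete_bucket_policy method and get_object now returning a TestFile path by default (a dict only with full_output=True). The s3_client instance, bucket, key, and policy values are placeholders.

# Hypothetical sketch: `s3_client` is an AwsCliClient (or Boto3ClientWrapper) instance.
s3_client.put_bucket_policy(bucket, policy)       # policy is a dict, as before
s3_client.delete_bucket_policy(bucket)            # new method added in this change

local_copy = s3_client.get_object(bucket, key)    # TestFile path to the downloaded object
with open(local_copy, "rb") as downloaded:
    data = downloaded.read()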

View file

@ -16,10 +16,10 @@ from mypy_boto3_s3 import S3Client
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.utils.cli_utils import log_command_execution
# TODO: Refactor this code to use shell instead of _cmd_run
from frostfs_testlib.utils.cli_utils import _configure_aws_cli
from frostfs_testlib.utils.cli_utils import _configure_aws_cli, log_command_execution
from frostfs_testlib.utils.file_utils import TestFile
logger = logging.getLogger("NeoLogger")
@ -80,7 +80,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
verify=False,
)
@reporter.step("Set endpoint IAM to {iam_endpoint}")
def set_iam_endpoint(self, iam_endpoint: str):
self.boto3_iam_client = self.session.client(
@ -88,8 +87,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
endpoint_url=iam_endpoint,
verify=False,)
verify=False,
)
def _to_s3_param(self, param: str):
replacement_map = {
@ -135,7 +134,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
s3_bucket = self.boto3_client.create_bucket(**params)
log_command_execution(f"Created S3 bucket {bucket}", s3_bucket)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME)
return bucket
@reporter.step("List buckets S3")
@ -156,7 +155,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
def delete_bucket(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket(Bucket=bucket)
log_command_execution("S3 Delete bucket result", response)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME)
@reporter.step("Head bucket S3")
@report_error
@ -167,9 +166,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step("Put bucket versioning status")
@report_error
def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
response = self.boto3_client.put_bucket_versioning(
Bucket=bucket, VersioningConfiguration={"Status": status.value}
)
response = self.boto3_client.put_bucket_versioning(Bucket=bucket, VersioningConfiguration={"Status": status.value})
log_command_execution("S3 Set bucket versioning to", response)
@reporter.step("Get bucket versioning status")
@ -217,11 +214,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> None:
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None}
response = self.boto3_client.put_bucket_acl(**params)
log_command_execution("S3 ACL bucket result", response)
@ -246,6 +239,13 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 get_bucket_policy result", response)
return response.get("Policy")
@reporter.step("Delete bucket policy")
@report_error
def delete_bucket_policy(self, bucket: str) -> str:
response = self.boto3_client.delete_bucket_policy(Bucket=bucket)
log_command_execution("S3 delete_bucket_policy result", response)
return response
@reporter.step("Put bucket policy")
@report_error
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
@ -353,11 +353,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step("Head object S3")
@report_error
def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None}
response = self.boto3_client.head_object(**params)
log_command_execution("S3 Head object result", response)
return response
@ -365,14 +361,10 @@ class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step("Delete object S3")
@report_error
def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None}
response = self.boto3_client.delete_object(**params)
log_command_execution("S3 Delete object result", response)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME)
return response
@reporter.step("Delete objects S3")
@ -383,7 +375,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
assert (
"Errors" not in response
), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}'
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME)
return response
@reporter.step("Delete object versions S3")
@ -408,9 +400,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
# Delete objects without creating delete markers
for object_version in object_versions:
response = self.boto3_client.delete_object(
Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"]
)
response = self.boto3_client.delete_object(Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"])
log_command_execution("S3 Delete object result", response)
@reporter.step("Put object ACL")
@ -429,11 +419,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step("Get object ACL")
@report_error
def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None}
response = self.boto3_client.get_object_acl(**params)
log_command_execution("S3 ACL objects result", response)
return response.get("Grants")
@ -476,8 +462,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
version_id: Optional[str] = None,
object_range: Optional[tuple[int, int]] = None,
full_output: bool = False,
) -> Union[dict, str]:
filename = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
) -> dict | TestFile:
range_str = None
if object_range:
range_str = f"bytes={object_range[0]}-{object_range[1]}"
@ -490,12 +475,16 @@ class Boto3ClientWrapper(S3ClientWrapper):
response = self.boto3_client.get_object(**params)
log_command_execution("S3 Get objects result", response)
with open(f"{filename}", "wb") as get_file:
if full_output:
return response
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())))
with open(test_file, "wb") as file:
chunk = response["Body"].read(1024)
while chunk:
get_file.write(chunk)
file.write(chunk)
chunk = response["Body"].read(1024)
return response if full_output else filename
return test_file
@reporter.step("Create multipart upload S3")
@report_error
@ -566,11 +555,11 @@ class Boto3ClientWrapper(S3ClientWrapper):
@report_error
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
response = self.boto3_client.complete_multipart_upload(
Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts}
)
response = self.boto3_client.complete_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts})
log_command_execution("S3 Complete multipart upload", response)
return response
@reporter.step("Put object retention")
@report_error
def put_object_retention(
@ -581,11 +570,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
version_id: Optional[str] = None,
bypass_governance_retention: Optional[bool] = None,
) -> None:
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None}
response = self.boto3_client.put_object_retention(**params)
log_command_execution("S3 Put object retention ", response)
@ -609,7 +594,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step("Put object tagging")
@report_error
def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None:
def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging, VersionId=version_id)
@ -618,11 +603,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step("Get object tagging")
@report_error
def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None}
response = self.boto3_client.get_object_tagging(**params)
log_command_execution("S3 Get object tagging", response)
return response.get("TagSet")
@ -672,7 +653,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
# END OBJECT METHODS #
# IAM METHODS #
# Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.)
@ -681,21 +661,18 @@ class Boto3ClientWrapper(S3ClientWrapper):
response = self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name)
return response
@reporter.step("Attaches the specified managed policy to the specified IAM group")
def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Attaches the specified managed policy to the specified user")
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
def iam_create_access_key(self, user_name: str) -> dict:
response = self.boto3_iam_client.create_access_key(UserName=user_name)
@ -707,7 +684,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return access_key_id, secret_access_key
@reporter.step("Creates a new group")
def iam_create_group(self, group_name: str) -> dict:
response = self.boto3_iam_client.create_group(GroupName=group_name)
@ -716,7 +692,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Creates a new managed policy for your AWS account")
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document))
@ -725,7 +700,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Creates a new IAM user for your AWS account")
def iam_create_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.create_user(UserName=user_name)
@ -734,57 +708,48 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Deletes the access key pair associated with the specified IAM user")
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name)
return response
@reporter.step("Deletes the specified IAM group")
def iam_delete_group(self, group_name: str) -> dict:
response = self.boto3_iam_client.delete_group(GroupName=group_name)
return response
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group")
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name)
return response
@reporter.step("Deletes the specified managed policy")
def iam_delete_policy(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn)
return response
@reporter.step("Deletes the specified IAM user")
def iam_delete_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.delete_user(UserName=user_name)
return response
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user")
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name)
return response
@reporter.step("Removes the specified managed policy from the specified IAM group")
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
response = self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Removes the specified managed policy from the specified user")
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Returns a list of IAM users that are in the specified IAM group")
def iam_get_group(self, group_name: str) -> dict:
response = self.boto3_iam_client.get_group(GroupName=group_name)
@ -792,14 +757,12 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name)
return response
@reporter.step("Retrieves information about the specified managed policy")
def iam_get_policy(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn)
@ -808,7 +771,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Retrieves information about the specified version of the specified managed policy")
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id)
@ -817,7 +779,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Retrieves information about the specified IAM user")
def iam_get_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.get_user(UserName=user_name)
@ -826,7 +787,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user")
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name)
@ -834,14 +794,12 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Returns information about the access key IDs associated with the specified IAM user")
def iam_list_access_keys(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_access_keys(UserName=user_name)
return response
@reporter.step("Lists all managed policies that are attached to the specified IAM group")
def iam_list_attached_group_policies(self, group_name: str) -> dict:
response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name)
@ -849,7 +807,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Lists all managed policies that are attached to the specified IAM user")
def iam_list_attached_user_policies(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name)
@ -857,7 +814,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn)
@ -867,7 +823,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group")
def iam_list_group_policies(self, group_name: str) -> dict:
response = self.boto3_iam_client.list_group_policies(GroupName=group_name)
@ -875,7 +830,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Lists the IAM groups")
def iam_list_groups(self) -> dict:
response = self.boto3_iam_client.list_groups()
@ -883,7 +837,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Lists the IAM groups that the specified IAM user belongs to")
def iam_list_groups_for_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_groups_for_user(UserName=user_name)
@ -891,7 +844,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Lists all the managed policies that are available in your AWS account")
def iam_list_policies(self) -> dict:
response = self.boto3_iam_client.list_policies()
@ -899,7 +851,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Lists information about the versions of the specified managed policy")
def iam_list_policy_versions(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn)
@ -907,7 +858,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Lists the names of the inline policies embedded in the specified IAM user")
def iam_list_user_policies(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_user_policies(UserName=user_name)
@ -915,7 +865,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Lists the IAM users")
def iam_list_users(self) -> dict:
response = self.boto3_iam_client.list_users()
@ -923,35 +872,34 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group")
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
response = self.boto3_iam_client.put_group_policy(GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document))
response = self.boto3_iam_client.put_group_policy(
GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)
)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
response = self.boto3_iam_client.put_user_policy(UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document))
response = self.boto3_iam_client.put_user_policy(
UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)
)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Removes the specified user from the specified group")
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name)
return response
@reporter.step("Updates the name and/or the path of the specified IAM group")
def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict:
response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath='/')
response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath="/")
return response
@reporter.step("Updates the name and/or the path of the specified IAM user")
def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict:
response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath='/')
return response
response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath="/")
return response
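Taken together, these wrappers let a test provision and tear down IAM entities in a few calls. A minimal sketch (not part of this diff), assuming an already-constructed Boto3ClientWrapper instance named s3_client, an illustrative policy document, and the standard boto3 create_policy response shape:
policy_document = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"}],
}
s3_client.iam_create_user("test-user")
access_key_id, secret_access_key = s3_client.iam_create_access_key("test-user")
policy = s3_client.iam_create_policy("allow-get-object", policy_document)
s3_client.iam_attach_user_policy("test-user", policy["Policy"]["Arn"])
# ... exercise the scenario, then clean up in reverse order
s3_client.iam_detach_user_policy("test-user", policy["Policy"]["Arn"])
s3_client.iam_delete_policy(policy["Policy"]["Arn"])
s3_client.iam_delete_access_key(access_key_id, "test-user")
s3_client.iam_delete_user("test-user")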

View file

@ -4,6 +4,7 @@ from typing import Literal, Optional, Union
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum
from frostfs_testlib.utils.file_utils import TestFile
def _make_objs_dict(key_names):
@ -152,6 +153,10 @@ class S3ClientWrapper(HumanReadableABC):
def get_bucket_policy(self, bucket: str) -> str:
"""Returns the policy of a specified bucket."""
@abstractmethod
def delete_bucket_policy(self, bucket: str) -> str:
"""Deletes the policy of a specified bucket."""
@abstractmethod
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
"""Applies S3 bucket policy to an S3 bucket."""
@ -285,7 +290,7 @@ class S3ClientWrapper(HumanReadableABC):
version_id: Optional[str] = None,
object_range: Optional[tuple[int, int]] = None,
full_output: bool = False,
) -> Union[dict, str]:
) -> dict | TestFile:
"""Retrieves objects from S3."""
@abstractmethod
@ -396,153 +401,152 @@ class S3ClientWrapper(HumanReadableABC):
# END OF OBJECT METHODS #
# IAM METHODS #
@abstractmethod
def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
'''Adds the specified user to the specified group'''
"""Adds the specified user to the specified group"""
@abstractmethod
def iam_attach_group_policy(self, group: str, policy_arn: str) -> dict:
'''Attaches the specified managed policy to the specified IAM group'''
"""Attaches the specified managed policy to the specified IAM group"""
@abstractmethod
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
'''Attaches the specified managed policy to the specified user'''
"""Attaches the specified managed policy to the specified user"""
@abstractmethod
def iam_create_access_key(self, user_name: str) -> dict:
'''Creates a new AWS secret access key and access key ID for the specified user'''
"""Creates a new AWS secret access key and access key ID for the specified user"""
@abstractmethod
def iam_create_group(self, group_name: str) -> dict:
'''Creates a new group'''
"""Creates a new group"""
@abstractmethod
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
'''Creates a new managed policy for your AWS account'''
"""Creates a new managed policy for your AWS account"""
@abstractmethod
def iam_create_user(self, user_name: str) -> dict:
'''Creates a new IAM user for your AWS account'''
"""Creates a new IAM user for your AWS account"""
@abstractmethod
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
'''Deletes the access key pair associated with the specified IAM user'''
"""Deletes the access key pair associated with the specified IAM user"""
@abstractmethod
def iam_delete_group(self, group_name: str) -> dict:
'''Deletes the specified IAM group'''
"""Deletes the specified IAM group"""
@abstractmethod
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
'''Deletes the specified inline policy that is embedded in the specified IAM group'''
"""Deletes the specified inline policy that is embedded in the specified IAM group"""
@abstractmethod
def iam_delete_policy(self, policy_arn: str) -> dict:
'''Deletes the specified managed policy'''
"""Deletes the specified managed policy"""
@abstractmethod
def iam_delete_user(self, user_name: str) -> dict:
'''Deletes the specified IAM user'''
"""Deletes the specified IAM user"""
@abstractmethod
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
'''Deletes the specified inline policy that is embedded in the specified IAM user'''
"""Deletes the specified inline policy that is embedded in the specified IAM user"""
@abstractmethod
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
'''Removes the specified managed policy from the specified IAM group'''
"""Removes the specified managed policy from the specified IAM group"""
@abstractmethod
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
'''Removes the specified managed policy from the specified user'''
"""Removes the specified managed policy from the specified user"""
@abstractmethod
def iam_get_group(self, group_name: str) -> dict:
'''Returns a list of IAM users that are in the specified IAM group'''
"""Returns a list of IAM users that are in the specified IAM group"""
@abstractmethod
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
'''Retrieves the specified inline policy document that is embedded in the specified IAM group'''
"""Retrieves the specified inline policy document that is embedded in the specified IAM group"""
@abstractmethod
def iam_get_policy(self, policy_arn: str) -> dict:
'''Retrieves information about the specified managed policy'''
"""Retrieves information about the specified managed policy"""
@abstractmethod
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
'''Retrieves information about the specified version of the specified managed policy'''
"""Retrieves information about the specified version of the specified managed policy"""
@abstractmethod
def iam_get_user(self, user_name: str) -> dict:
'''Retrieves information about the specified IAM user'''
"""Retrieves information about the specified IAM user"""
@abstractmethod
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
'''Retrieves the specified inline policy document that is embedded in the specified IAM user'''
"""Retrieves the specified inline policy document that is embedded in the specified IAM user"""
@abstractmethod
def iam_list_access_keys(self, user_name: str) -> dict:
'''Returns information about the access key IDs associated with the specified IAM user'''
"""Returns information about the access key IDs associated with the specified IAM user"""
@abstractmethod
def iam_list_attached_group_policies(self, group_name: str) -> dict:
'''Lists all managed policies that are attached to the specified IAM group'''
"""Lists all managed policies that are attached to the specified IAM group"""
@abstractmethod
def iam_list_attached_user_policies(self, user_name: str) -> dict:
'''Lists all managed policies that are attached to the specified IAM user'''
"""Lists all managed policies that are attached to the specified IAM user"""
@abstractmethod
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
'''Lists all IAM users, groups, and roles that the specified managed policy is attached to'''
"""Lists all IAM users, groups, and roles that the specified managed policy is attached to"""
@abstractmethod
def iam_list_group_policies(self, group_name: str) -> dict:
'''Lists the names of the inline policies that are embedded in the specified IAM group'''
"""Lists the names of the inline policies that are embedded in the specified IAM group"""
@abstractmethod
def iam_list_groups(self) -> dict:
'''Lists the IAM groups'''
"""Lists the IAM groups"""
@abstractmethod
def iam_list_groups_for_user(self, user_name: str) -> dict:
'''Lists the IAM groups that the specified IAM user belongs to'''
"""Lists the IAM groups that the specified IAM user belongs to"""
@abstractmethod
def iam_list_policies(self) -> dict:
'''Lists all the managed policies that are available in your AWS account'''
"""Lists all the managed policies that are available in your AWS account"""
@abstractmethod
def iam_list_policy_versions(self, policy_arn: str) -> dict:
'''Lists information about the versions of the specified managed policy'''
"""Lists information about the versions of the specified managed policy"""
@abstractmethod
def iam_list_user_policies(self, user_name: str) -> dict:
'''Lists the names of the inline policies embedded in the specified IAM user'''
"""Lists the names of the inline policies embedded in the specified IAM user"""
@abstractmethod
def iam_list_users(self) -> dict:
'''Lists the IAM users'''
"""Lists the IAM users"""
@abstractmethod
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
'''Adds or updates an inline policy document that is embedded in the specified IAM group'''
"""Adds or updates an inline policy document that is embedded in the specified IAM group"""
@abstractmethod
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
'''Adds or updates an inline policy document that is embedded in the specified IAM user'''
"""Adds or updates an inline policy document that is embedded in the specified IAM user"""
@abstractmethod
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
'''Removes the specified user from the specified group'''
"""Removes the specified user from the specified group"""
@abstractmethod
def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
'''Updates the name and/or the path of the specified IAM group'''
"""Updates the name and/or the path of the specified IAM group"""
@abstractmethod
def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
'''Updates the name and/or the path of the specified IAM user'''
"""Updates the name and/or the path of the specified IAM user"""

View file

@ -13,8 +13,10 @@ from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import wait_for_success
from frostfs_testlib.utils import json_utils
from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output
from frostfs_testlib.utils.file_utils import TestFile
logger = logging.getLogger("NeoLogger")
@ -80,7 +82,7 @@ def get_object(
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
) -> TestFile:
"""
GET from FrostFS.
@ -102,14 +104,14 @@ def get_object(
if not write_object:
write_object = str(uuid.uuid4())
file_path = os.path.join(ASSETS_DIR, write_object)
test_file = TestFile(os.path.join(ASSETS_DIR, write_object))
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli.object.get(
rpc_endpoint=endpoint,
cid=cid,
oid=oid,
file=file_path,
file=test_file,
bearer=bearer,
no_progress=no_progress,
xhdr=xhdr,
@ -117,7 +119,7 @@ def get_object(
timeout=timeout,
)
return file_path
return test_file
@reporter.step("Get Range Hash from {endpoint}")
@ -356,7 +358,7 @@ def get_range(
Returns:
(str, bytes) - path to the file with range content and content of this file as bytes
"""
range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4())))
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli.object.range(
@ -364,16 +366,16 @@ def get_range(
cid=cid,
oid=oid,
range=range_cut,
file=range_file_path,
file=test_file,
bearer=bearer,
xhdr=xhdr,
session=session,
timeout=timeout,
)
with open(range_file_path, "rb") as file:
with open(test_file, "rb") as file:
content = file.read()
return range_file_path, content
return test_file, content
@reporter.step("Lock Object")
@ -695,6 +697,7 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
}
@wait_for_success()
@reporter.step("Search object nodes")
def get_object_nodes(
cluster: Cluster,

View file

@ -0,0 +1,35 @@
import logging
from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
logger = logging.getLogger("NeoLogger")
@reporter.step("Get Tree List")
def get_tree_list(
wallet: WalletInfo,
cid: str,
shell: Shell,
endpoint: str,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> None:
"""
A wrapper for `frostfs-cli tree list` call.
Args:
wallet (WalletInfo): wallet on whose behalf the tree list is requested
cid (str): ID of the container whose trees are listed
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation.
This function doesn't return anything.
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli.tree.list(cid=cid, rpc_endpoint=endpoint, timeout=timeout)
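An illustrative call (all argument values here are assumptions, not taken from the diff):
get_tree_list(
    wallet=default_wallet,
    cid=container_id,
    shell=client_shell,
    endpoint=rpc_endpoint,
)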

View file

@ -12,7 +12,7 @@ import requests
from frostfs_testlib import reporter
from frostfs_testlib.cli import GenericCli
from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE
from frostfs_testlib.s3.aws_cli_client import command_options
from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.local_shell import LocalShell
@ -20,11 +20,10 @@ from frostfs_testlib.steps.cli.object import get_object
from frostfs_testlib.steps.storage_policy import get_nodes_without_object
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
from frostfs_testlib.testing.test_control import retry
from frostfs_testlib.utils.file_utils import get_file_hash
from frostfs_testlib.utils.file_utils import TestFile, get_file_hash
logger = logging.getLogger("NeoLogger")
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
local_shell = LocalShell()
@ -50,9 +49,7 @@ def get_via_http_gate(
else:
request = f"{node.http_gate.get_endpoint()}{request_path}"
resp = requests.get(
request, headers={"Host": node.storage_node.get_http_hostname()[0]}, stream=True, timeout=timeout, verify=False
)
resp = requests.get(request, headers={"Host": node.storage_node.get_http_hostname()[0]}, stream=True, timeout=timeout, verify=False)
if not resp.ok:
raise Exception(
@ -66,10 +63,10 @@ def get_via_http_gate(
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")
with open(file_path, "wb") as file:
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}"))
with open(test_file, "wb") as file:
shutil.copyfileobj(resp.raw, file)
return file_path
return test_file
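The pattern above — stream the gateway response straight into a TestFile so the artifact cleans itself up — can be shown in isolation. A self-contained sketch with an assumed URL and target directory (not part of this diff):
import os
import shutil
import requests
from frostfs_testlib.utils.file_utils import TestFile
def download_to_test_file(url: str, target_dir: str) -> TestFile:
    # stream the body to disk instead of buffering it in memory
    resp = requests.get(url, stream=True, timeout=60, verify=False)
    resp.raise_for_status()
    test_file = TestFile(os.path.join(target_dir, "downloaded_object"))
    with open(test_file, "wb") as file:
        shutil.copyfileobj(resp.raw, file)
    return test_file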
@reporter.step("Get via Zip HTTP Gate")
@ -95,11 +92,11 @@ def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Opt
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")
with open(file_path, "wb") as file:
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip"))
with open(test_file, "wb") as file:
shutil.copyfileobj(resp.raw, file)
with zipfile.ZipFile(file_path, "r") as zip_ref:
with zipfile.ZipFile(test_file, "r") as zip_ref:
zip_ref.extractall(ASSETS_DIR)
return os.path.join(os.getcwd(), ASSETS_DIR, prefix)
@ -129,9 +126,7 @@ def get_via_http_gate_by_attribute(
else:
request = f"{node.http_gate.get_endpoint()}{request_path}"
resp = requests.get(
request, stream=True, timeout=timeout, verify=False, headers={"Host": node.storage_node.get_http_hostname()[0]}
)
resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": node.storage_node.get_http_hostname()[0]})
if not resp.ok:
raise Exception(
@ -145,17 +140,15 @@ def get_via_http_gate_by_attribute(
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")
with open(file_path, "wb") as file:
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}"))
with open(test_file, "wb") as file:
shutil.copyfileobj(resp.raw, file)
return file_path
return test_file
# TODO: pass http_hostname as a header
@reporter.step("Upload via HTTP Gate")
def upload_via_http_gate(
cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300
) -> str:
def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300) -> str:
"""
This function uploads the given object through the HTTP gate
cid: CID of the container to upload the object to
@ -248,7 +241,7 @@ def upload_via_http_gate_curl(
@retry(max_attempts=3, sleep_interval=1)
@reporter.step("Get via HTTP Gate using Curl")
def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str:
def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile:
"""
This function gets the given object from the HTTP gate using the curl utility.
cid: CID to get object from
@ -256,12 +249,12 @@ def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str:
node: node for request
"""
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}"))
curl = GenericCli("curl", node.host)
curl(f'-k -H "Host: {node.storage_node.get_http_hostname()[0]}"', f"{request} > {file_path}", shell=local_shell)
curl(f'-k -H "Host: {node.storage_node.get_http_hostname()[0]}"', f"{request} > {test_file}", shell=local_shell)
return file_path
return test_file
def _attach_allure_step(request: str, status_code: int, req_type="GET"):

View file

@ -0,0 +1,45 @@
import re
from frostfs_testlib import reporter
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.storage.cluster import ClusterNode
@reporter.step("Check metrics result")
@wait_for_success(interval=10)
def check_metrics_counter(
cluster_nodes: list[ClusterNode],
operator: str = "==",
counter_exp: int = 0,
parse_from_command: bool = False,
**metrics_greps: str,
):
counter_act = 0
for cluster_node in cluster_nodes:
counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps)
assert eval(
f"{counter_act} {operator} {counter_exp}"
), f"Expected: {counter_exp} {operator} Actual: {counter_act} in node: {cluster_node}"
@reporter.step("Get metrics value from node: {node}")
def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str):
try:
command_result = node.metrics.storage.get_metrics_search_by_greps(**metrics_greps)
if parse_from_command:
metrics_counter = calc_metrics_count_from_stdout(command_result.stdout, **metrics_greps)
else:
metrics_counter = calc_metrics_count_from_stdout(command_result.stdout)
except RuntimeError as e:
metrics_counter = 0
return metrics_counter
@reporter.step("Parse metrics count and calc sum of result")
def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None):
if command:
result = re.findall(rf"{command}\s*([\d.e+-]+)", metric_result_stdout)
else:
result = re.findall(r"}\s*([\d.e+-]+)", metric_result_stdout)
return sum(map(lambda x: int(float(x)), result))
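For illustration only (the sample output below is invented): the regex picks the value after each closing label brace, so samples from several shards or label sets are summed into one counter.
sample_stdout = (
    'frostfs_node_engine_container_objects_total{cid="abc",shard_id="1",type="user"} 12\n'
    'frostfs_node_engine_container_objects_total{cid="abc",shard_id="2",type="user"} 3.0\n'
)
assert calc_metrics_count_from_stdout(sample_stdout) == 15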

View file

@ -120,32 +120,28 @@ def assert_object_lock_mode(
).days == retain_period, f"Expected retention period is {retain_period} days"
def assert_s3_acl(acl_grants: list, permitted_users: str):
if permitted_users == "AllUsers":
grantees = {"AllUsers": 0, "CanonicalUser": 0}
for acl_grant in acl_grants:
if acl_grant.get("Grantee", {}).get("Type") == "Group":
uri = acl_grant.get("Grantee", {}).get("URI")
permission = acl_grant.get("Permission")
assert (uri, permission) == (
"http://acs.amazonaws.com/groups/global/AllUsers",
"FULL_CONTROL",
), "All Groups should have FULL_CONTROL"
grantees["AllUsers"] += 1
if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
permission = acl_grant.get("Permission")
assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL"
grantees["CanonicalUser"] += 1
assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL"
assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL"
def _format_grants_as_strings(grants: list[dict]) -> list:
grantee_format = "{g_type}::{uri}:{permission}"
return set(
[
grantee_format.format(
g_type=grant.get("Grantee", {}).get("Type", ""),
uri=grant.get("Grantee", {}).get("URI", ""),
permission=grant.get("Permission", ""),
)
for grant in grants
]
)
if permitted_users == "CanonicalUser":
for acl_grant in acl_grants:
if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
permission = acl_grant.get("Permission")
assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL"
else:
logger.error("FULL_CONTROL is given to All Users")
@reporter.step("Verify ACL permissions")
def verify_acl_permissions(actual_acl_grants: list[dict], expected_acl_grants: list[dict], strict: bool = True):
actual_grants = _format_grants_as_strings(actual_acl_grants)
expected_grants = _format_grants_as_strings(expected_acl_grants)
assert expected_grants <= actual_grants, "Permissions mismatch"
if strict:
assert expected_grants == actual_grants, "Extra permissions found, must not be there"
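A sketch with invented grant payloads (not part of this diff): expected grants must be a subset of the actual ones, and with strict=True the two normalized sets must match exactly.
actual = [
    {"Grantee": {"Type": "CanonicalUser", "URI": ""}, "Permission": "FULL_CONTROL"},
    {"Grantee": {"Type": "Group", "URI": "http://acs.amazonaws.com/groups/global/AllUsers"}, "Permission": "READ"},
]
expected = [{"Grantee": {"Type": "CanonicalUser", "URI": ""}, "Permission": "FULL_CONTROL"}]
verify_acl_permissions(actual, expected, strict=False)  # passes: expected is a subset of actual
# verify_acl_permissions(actual, expected, strict=True)  # would fail: the AllUsers READ grant is extra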
@reporter.step("Delete bucket with all objects")

View file

@ -14,6 +14,7 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, Inner
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.service_registry import ServiceRegistry
from frostfs_testlib.storage.dataclasses.metrics import Metrics
class ClusterNode:
@ -24,11 +25,13 @@ class ClusterNode:
class_registry: ServiceRegistry
id: int
host: Host
metrics: Metrics
def __init__(self, host: Host, id: int) -> None:
self.host = host
self.id = id
self.class_registry = get_service_registry()
self.metrics = Metrics(host=self.host, metrics_endpoint=self.storage_node.get_metrics_endpoint())
@property
def host_ip(self):

View file

@ -529,3 +529,11 @@ class ClusterStateController:
except Exception as err:
logger.warning(f"Host ping fails with error {err}")
return HostStatus.ONLINE
@reporter.step("Get contract by domain - {domain_name}")
def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str):
frostfs_adm = FrostfsAdm(
shell=cluster_node.host.get_shell(),
frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
)
return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout

View file

@ -0,0 +1,16 @@
from dataclasses import dataclass
@dataclass
class Operations:
GET_CONTAINER = "GetContainer"
PUT_CONTAINER = "PutContainer"
DELETE_CONTAINER = "DeleteContainer"
LIST_CONTAINER = "ListContainers"
GET_OBJECT = "GetObject"
DELETE_OBJECT = "DeleteObject"
HASH_OBJECT = "HashObject"
RANGE_OBJECT = "RangeObject"
SEARCH_OBJECT = "SearchObject"
HEAD_OBJECT = "HeadObject"
PUT_OBJECT = "PutObject"

View file

@ -39,6 +39,9 @@ class S3Gate(NodeBase):
def get_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0)
def get_ns_endpoint(self, ns_name: str) -> str:
return self._get_attribute(f"{ConfigAttributes.ENDPOINT_DATA_0}_namespace").format(namespace=ns_name)
def get_all_endpoints(self) -> list[str]:
return [
self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0),

View file

@ -0,0 +1,36 @@
from frostfs_testlib.hosting import Host
from frostfs_testlib.shell.interfaces import CommandResult
class Metrics:
def __init__(self, host: Host, metrics_endpoint: str) -> None:
self.storage = StorageMetrics(host, metrics_endpoint)
class StorageMetrics:
"""
Class represents storage metrics in a cluster
"""
def __init__(self, host: Host, metrics_endpoint: str) -> None:
self.host = host
self.metrics_endpoint = metrics_endpoint
def get_metrics_search_by_greps(self, **greps) -> CommandResult:
"""
Get metrics, searching by: cid, metric_type, shard_id, etc.
Args:
greps: dict of grep-command-name and value
for example get_metrics_search_by_greps(command='container_objects_total', cid='123456')
Return:
result of metrics
"""
shell = self.host.get_shell()
additional_greps = " |grep ".join([grep_command for grep_command in greps.values()])
result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}")
return result
def get_all_metrics(self) -> CommandResult:
shell = self.host.get_shell()
result = shell.exec(f"curl -s {self.metrics_endpoint}")
return result
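A sketch of how the grep chain is assembled (the endpoint and metric names are assumptions): only the values of the keyword arguments matter, and each one becomes another grep stage in the shell pipeline.
greps = {"command": "container_objects_total", "cid": "abc"}
additional_greps = " |grep ".join(greps.values())
print(f"curl -s http://localhost:9090/metrics | grep {additional_greps}")
# -> curl -s http://localhost:9090/metrics | grep container_objects_total |grep abc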

View file

@ -10,7 +10,39 @@ from frostfs_testlib.resources.common import ASSETS_DIR
logger = logging.getLogger("NeoLogger")
def generate_file(size: int) -> str:
class TestFile(os.PathLike):
def __init__(self, path: str):
self.path = path
def __del__(self):
logger.debug(f"Removing file {self.path}")
if os.path.exists(self.path):
os.remove(self.path)
def __str__(self):
return self.path
def __repr__(self):
return self.path
def __fspath__(self):
return self.path
def ensure_directory(path):
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
def ensure_directory_opener(path, flags):
ensure_directory(path)
return os.open(path, flags)
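A minimal sketch of how TestFile and the opener combine (paths are illustrative, not from the diff): TestFile behaves like a normal path, the opener creates missing parent directories on first write, and __del__ removes the artifact once the object is garbage-collected.
test_file = TestFile(os.path.join(ASSETS_DIR, "example.bin"))
with open(test_file, "wb", opener=ensure_directory_opener) as file:
    file.write(os.urandom(128))
assert os.path.exists(test_file)
del test_file  # under CPython the file is removed from disk at this point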
@reporter.step("Generate file with size {size}")
def generate_file(size: int) -> TestFile:
"""Generates a binary file with the specified size in bytes.
Args:
@ -19,19 +51,20 @@ def generate_file(size: int) -> str:
Returns:
The path to the generated file.
"""
file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
with open(file_path, "wb") as file:
test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4())))
with open(test_file, "wb", opener=ensure_directory_opener) as file:
file.write(os.urandom(size))
logger.info(f"File with size {size} bytes has been generated: {file_path}")
logger.info(f"File with size {size} bytes has been generated: {test_file}")
return file_path
return test_file
@reporter.step("Generate file with content of size {size}")
def generate_file_with_content(
size: int,
file_path: Optional[str] = None,
file_path: Optional[str | TestFile] = None,
content: Optional[str] = None,
) -> str:
) -> TestFile:
"""Creates a new file with specified content.
Args:
@ -48,20 +81,22 @@ def generate_file_with_content(
content = os.urandom(size)
mode = "wb"
test_file = None
if not file_path:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())))
elif isinstance(file_path, TestFile):
test_file = file_path
else:
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
test_file = TestFile(file_path)
with open(file_path, mode) as file:
with open(test_file, mode, opener=ensure_directory_opener) as file:
file.write(content)
return file_path
return test_file
@reporter.step("Get File Hash")
def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str:
def get_file_hash(file_path: str | TestFile, len: Optional[int] = None, offset: Optional[int] = None) -> str:
"""Generates hash for the specified file.
Args:
@ -88,7 +123,7 @@ def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[in
@reporter.step("Concatenation set of files to one file")
def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str:
def concat_files(file_paths: list[str | TestFile], resulting_file_path: Optional[str | TestFile] = None) -> TestFile:
"""Concatenates several files into a single file.
Args:
@ -98,16 +133,24 @@ def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) ->
Returns:
Path to the resulting file.
"""
test_file = None
if not resulting_file_path:
resulting_file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
with open(resulting_file_path, "wb") as f:
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())))
elif isinstance(resulting_file_path, TestFile):
test_file = resulting_file_path
else:
test_file = TestFile(resulting_file_path)
with open(test_file, "wb", opener=ensure_directory_opener) as f:
for file in file_paths:
with open(file, "rb") as part_file:
f.write(part_file.read())
return resulting_file_path
return test_file
def split_file(file_path: str, parts: int) -> list[str]:
@reporter.step("Split file to {parts} parts")
def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]:
Splits the specified file into the specified number of parts.
Each part is saved under name `{original_file}_part_{i}`.
@ -129,7 +172,7 @@ def split_file(file_path: str, parts: int) -> list[str]:
part_file_paths = []
for content_offset in range(0, content_size + 1, chunk_size):
part_file_name = f"{file_path}_part_{part_id}"
part_file_paths.append(part_file_name)
part_file_paths.append(TestFile(part_file_name))
with open(part_file_name, "wb") as out_file:
out_file.write(content[content_offset : content_offset + chunk_size])
part_id += 1
@ -137,9 +180,8 @@ def split_file(file_path: str, parts: int) -> list[str]:
return part_file_paths
def get_file_content(
file_path: str, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None
) -> Any:
@reporter.step("Get file content")
def get_file_content(file_path: str | TestFile, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None) -> Any:
"""Returns content of specified file.
Args:

View file

@ -1,5 +1,6 @@
import logging
import re
from functools import lru_cache
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
@ -36,78 +37,52 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
return versions
@reporter.step("Collect binaries versions from host")
def parallel_binary_verions(host: Host) -> dict[str, str]:
versions_by_host = {}
binary_path_by_name = {} # Maps binary name to executable path
for service_config in host.config.services:
exec_path = service_config.attributes.get("exec_path")
requires_check = service_config.attributes.get("requires_version_check", "true")
if exec_path:
binary_path_by_name[service_config.name] = {
"exec_path": exec_path,
"check": requires_check.lower() == "true",
binary_path_by_name = {
**{
svc.name[:-3]: {
"exec_path": svc.attributes.get("exec_path"),
"param": svc.attributes.get("custom_version_parameter", "--version"),
}
for cli_config in host.config.clis:
requires_check = cli_config.attributes.get("requires_version_check", "true")
binary_path_by_name[cli_config.name] = {
"exec_path": cli_config.exec_path,
"check": requires_check.lower() == "true",
}
for svc in host.config.services
if svc.attributes.get("exec_path") and svc.attributes.get("requires_version_check", "true") == "true"
},
**{
cli.name: {"exec_path": cli.exec_path, "param": cli.attributes.get("custom_version_parameter", "--version")}
for cli in host.config.clis
if cli.attributes.get("requires_version_check", "true") == "true"
},
}
shell = host.get_shell()
versions_at_host = {}
for binary_name, binary in binary_path_by_name.items():
binary_path = binary["exec_path"]
try:
binary_path = binary["exec_path"]
result = shell.exec(f"{binary_path} --version")
versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]}
result = shell.exec(f"{binary_path} {binary['param']}")
version = _parse_version(result.stdout) or _parse_version(result.stderr) or "Unknown"
versions_at_host[binary_name] = version
except Exception as exc:
logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]}
versions_at_host[binary_name] = "Unknown"
versions_by_host[host.config.address] = versions_at_host
return versions_by_host
@reporter.step("Get remote binaries versions")
def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]:
versions_by_host = {}
future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts)
@lru_cache
def get_remote_binaries_versions(hosting: Hosting) -> dict[str, dict[str, str]]:
versions_by_host: dict[str, dict[str, str]] = {}
with reporter.step("Get remote binaries versions"):
future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts)
for future in future_binary_verions:
versions_by_host.update(future.result())
# Consolidate versions across all hosts
cheak_versions = {}
exсeptions = []
exception = set()
previous_host = None
versions = {}
captured_version = None
for host, binary_versions in versions_by_host.items():
for name, binary in binary_versions.items():
version = binary["version"]
if not cheak_versions.get(f"{name[:-2]}", None):
captured_version = cheak_versions.get(f"{name[:-2]}", {}).get(host, {}).get(captured_version)
cheak_versions[f"{name[:-2]}"] = {host: {version: name}}
else:
captured_version = list(cheak_versions.get(f"{name[:-2]}", {}).get(previous_host).keys())[0]
cheak_versions[f"{name[:-2]}"].update({host: {version: name}})
if captured_version and captured_version != version:
exception.add(name[:-2])
versions[name] = {"version": version, "check": binary["check"]}
previous_host = host
logger.info(
"Remote binaries versions:\n" + "\n".join([f"{key} ver: {value['version']}" for key, value in versions.items()])
)
if exception:
for i in exception:
for host in versions_by_host.keys():
for version, name in cheak_versions.get(i).get(host).items():
exсeptions.append(f"Binary {name} has inconsistent version {version} on host {host}")
exсeptions.append("\n")
return versions, exсeptions
return versions_by_host
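Since the refactored helper now simply returns the per-host mapping, callers decide how to compare versions. One possible consolidation (a sketch, not part of this diff; the structure is {host: {binary: version}}):
from collections import defaultdict
def find_version_mismatches(versions_by_host: dict[str, dict[str, str]]) -> dict[str, set[str]]:
    versions_per_binary: dict[str, set[str]] = defaultdict(set)
    for binary_versions in versions_by_host.values():
        for name, version in binary_versions.items():
            versions_per_binary[name].add(version)
    # keep only binaries that report more than one distinct version across hosts
    return {name: found for name, found in versions_per_binary.items() if len(found) > 1}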
def _parse_version(version_output: str) -> str: