forked from TrueCloudLab/frostfs-testlib
Move shared code to testlib
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
parent d97a02d1d3
commit 997e768e92
69 changed files with 9213 additions and 64 deletions
src/frostfs_testlib/utils/cli_utils.py (new file, +135)
@@ -0,0 +1,135 @@
#!/usr/bin/python3.10

# TODO: This file is deprecated and all code which uses these calls should be refactored to use shell classes

"""
Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs.
"""
import json
import logging
import subprocess
import sys
from contextlib import suppress
from datetime import datetime
from textwrap import shorten
from typing import TypedDict, Union

import pexpect

from frostfs_testlib.reporter import get_reporter

reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
COLOR_GREEN = "\033[92m"
COLOR_OFF = "\033[0m"


def _cmd_run(cmd: str, timeout: int = 90) -> str:
    """
    Runs given shell command <cmd>; in case of success returns its stdout,
    in case of failure raises a RuntimeError with the error message.
    """
    compl_proc = None
    start_time = datetime.utcnow()
    try:
        logger.info(f"{COLOR_GREEN}Executing command: {cmd}{COLOR_OFF}")
        compl_proc = subprocess.run(
            cmd,
            check=True,
            universal_newlines=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            timeout=timeout,
            shell=True,
        )
        output = compl_proc.stdout
        return_code = compl_proc.returncode
        end_time = datetime.utcnow()
        logger.info(f"{COLOR_GREEN}Output: {output}{COLOR_OFF}")
        _attach_allure_log(cmd, output, return_code, start_time, end_time)

        return output
    except subprocess.CalledProcessError as exc:
        logger.info(f"Command: {cmd}\nError:\nreturn code: {exc.returncode}\nOutput: {exc.output}")
        end_time = datetime.utcnow()
        return_code, cmd_output = subprocess.getstatusoutput(cmd)
        _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time)

        raise RuntimeError(
            f"Command: {cmd}\nError:\nreturn code: {exc.returncode}\nOutput: {exc.output}"
        ) from exc
    except OSError as exc:
        raise RuntimeError(f"Command: {cmd}\nOutput: {exc.strerror}") from exc
    except Exception as exc:
        return_code, cmd_output = subprocess.getstatusoutput(cmd)
        end_time = datetime.utcnow()
        _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time)
        # A generic exception is not guaranteed to carry an `output` attribute, so read it defensively
        exc_output = getattr(exc, "output", "")
        logger.info(
            f"Command: {cmd}\n"
            f"Error:\nreturn code: {return_code}\n"
            f"Output: {exc_output.decode('utf-8') if isinstance(exc_output, bytes) else exc_output}"
        )
        raise


def _run_with_passwd(cmd: str) -> str:
    child = pexpect.spawn(cmd)
    child.delaybeforesend = 1
    child.expect(".*")
    child.sendline("\r")
    if sys.platform == "darwin":
        # On macOS, read up to EOF instead of waiting on the child process
        child.expect(pexpect.EOF)
        cmd = child.before
    else:
        child.wait()
        cmd = child.read()
    return cmd.decode()


def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str:
    child = pexpect.spawn(cmd)
    child.delaybeforesend = 1

    child.expect("AWS Access Key ID.*")
    child.sendline(key_id)

    child.expect("AWS Secret Access Key.*")
    child.sendline(access_key)

    child.expect("Default region name.*")
    child.sendline("")

    child.expect("Default output format.*")
    child.sendline(out_format)

    child.wait()
    cmd = child.read()
    # child.expect(pexpect.EOF)
    # cmd = child.before
    return cmd.decode()


def _attach_allure_log(
    cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime
) -> None:
    command_attachment = (
        f"COMMAND: '{cmd}'\n"
        f"OUTPUT:\n {output}\n"
        f"RC: {return_code}\n"
        f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}"
    )
    with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
        reporter.attach(command_attachment, "Command execution")


def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None:
    logger.info(f"{cmd}: {output}")
    with suppress(Exception):
        json_output = json.dumps(output, indent=4, sort_keys=True)
        output = json_output
    command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n"
    with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
        reporter.attach(command_attachment, "Command execution")
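Usage note: a minimal sketch of how these deprecated helpers are driven, assuming a `frostfs-cli` binary on PATH (the binary name below is illustrative, not part of the commit):

# Example (illustrative, not part of the diff):
from frostfs_testlib.utils.cli_utils import _cmd_run, log_command_execution

output = _cmd_run("frostfs-cli --version", timeout=30)  # raises RuntimeError on non-zero exit
log_command_execution("frostfs-cli --version", output)  # mirrors the result into the Allure report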
src/frostfs_testlib/utils/env_utils.py (new file, +30)
@@ -0,0 +1,30 @@
import logging
import re

from frostfs_testlib.reporter import get_reporter

reporter = get_reporter()
logger = logging.getLogger("NeoLogger")


@reporter.step_deco("Read environment.properties")
def read_env_properties(file_path: str) -> dict:
    with open(file_path, "r") as file:
        raw_content = file.read()

    env_properties = {}
    for line in raw_content.split("\n"):
        m = re.match("(.*?)=(.*)", line)
        if not m:
            logger.warning(f"Could not parse env property from {line}")
            continue
        key, value = m.group(1), m.group(2)
        env_properties[key] = value
    return env_properties


@reporter.step_deco("Update data in environment.properties")
def save_env_properties(file_path: str, env_data: dict) -> None:
    with open(file_path, "a+") as env_file:
        for env, env_value in env_data.items():
            env_file.write(f"{env}={env_value}\n")
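A quick round-trip of the two helpers, assuming a writable path (the path below is illustrative):

# Example (illustrative, not part of the diff):
from frostfs_testlib.utils.env_utils import read_env_properties, save_env_properties

save_env_properties("/tmp/environment.properties", {"STORAGE_NODE_COUNT": "4"})  # appends key=value lines
props = read_env_properties("/tmp/environment.properties")
assert props["STORAGE_NODE_COUNT"] == "4"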
src/frostfs_testlib/utils/failover_utils.py (new file, +256)
@@ -0,0 +1,256 @@
import logging
from dataclasses import dataclass
from time import sleep
from typing import Optional

from frostfs_testlib.hosting import Host
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME
from frostfs_testlib.shell import CommandOptions, Shell
from frostfs_testlib.steps.cli.object import neo_go_dump_keys
from frostfs_testlib.steps.node_management import storage_node_healthcheck
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode
from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain
from frostfs_testlib.testing.test_control import retry, wait_for_success
from frostfs_testlib.utils.datetime_utils import parse_time

reporter = get_reporter()

logger = logging.getLogger("NeoLogger")


@reporter.step_deco("Ping node")
def ping_host(shell: Shell, host: Host):
    options = CommandOptions(check=False)
    return shell.exec(f"ping {host.config.address} -c 1", options).return_code


@reporter.step_deco("Wait for storage nodes to return to cluster")
def wait_all_storage_nodes_returned(shell: Shell, cluster: Cluster) -> None:
    with reporter.step("Run health check for all storage nodes"):
        for node in cluster.services(StorageNode):
            wait_for_host_online(shell, node)
            wait_for_node_online(node)


@retry(max_attempts=60, sleep_interval=5, expected_result=0)
@reporter.step_deco("Waiting for host of {node} to go online")
def wait_for_host_online(shell: Shell, node: StorageNode):
    try:
        # TODO: Quick solution for now, should be replaced by lib interactions
        return ping_host(shell, node.host)
    except Exception as err:
        logger.warning(f"Host ping fails with error {err}")
        return 1


@retry(max_attempts=60, sleep_interval=5, expected_result=1)
@reporter.step_deco("Waiting for host of {node} to go offline")
def wait_for_host_offline(shell: Shell, node: StorageNode):
    try:
        # TODO: Quick solution for now, should be replaced by lib interactions
        return ping_host(shell, node.host)
    except Exception as err:
        logger.warning(f"Host ping fails with error {err}")
        return 0


@retry(max_attempts=10, sleep_interval=15, expected_result=True)
@reporter.step_deco("Waiting for node {node} to go online")
def wait_for_node_online(node: StorageNode):
    try:
        health_check = storage_node_healthcheck(node)
    except Exception as err:
        logger.warning(f"Node healthcheck fails with error {err}")
        return False

    return health_check.health_status == "READY" and health_check.network_status == "ONLINE"


@reporter.step_deco("Check and return status of given service")
def service_status(service: str, shell: Shell) -> str:
    return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip()


@dataclass
class TopCommand:
    """
    Parses the output of the `top` command via `from_stdout`; returns the result for a single PID only.

    pid: process PID
    output: stdout of the `top` command
    """

    pid: Optional[str] = None
    user: Optional[str] = None
    pr: Optional[str] = None
    ni: Optional[str] = None
    virt: Optional[str] = None
    res: Optional[str] = None
    shr: Optional[str] = None
    status: Optional[str] = None
    cpu_percent: Optional[str] = None
    mem_percent: Optional[str] = None
    time: Optional[str] = None
    cmd: Optional[str] = None
    STATUS_RUNNING = "R"
    STATUS_SLEEP = "S"
    STATUS_ZOMBIE = "Z"
    STATUS_UNSLEEP = "D"
    STATUS_TRACED = "T"

    @staticmethod
    def from_stdout(output: str, requested_pid: int) -> "TopCommand":
        list_var = [None] * 12
        for line in output.split("\n"):
            if str(requested_pid) in line:
                list_var = line.split()
        return TopCommand(
            pid=list_var[0],
            user=list_var[1],
            pr=list_var[2],
            ni=list_var[3],
            virt=list_var[4],
            res=list_var[5],
            shr=list_var[6],
            status=list_var[7],
            cpu_percent=list_var[8],
            mem_percent=list_var[9],
            time=list_var[10],
            cmd=list_var[11],
        )


@reporter.step_deco("Run `top` command with specified PID")
def service_status_top(service: str, shell: Shell) -> TopCommand:
    pid = service_pid(service, shell)
    output = shell.exec(f"sudo top -b -n 1 -p {pid}").stdout
    return TopCommand.from_stdout(output, pid)


@reporter.step_deco("Restart service n times with sleep")
def multiple_restart(
    service_type: type[NodeBase],
    node: ClusterNode,
    count: int = 5,
    sleep_interval: int = 2,
):
    service_systemctl_name = node.service(service_type).get_service_systemctl_name()
    service_name = node.service(service_type).name
    for _ in range(count):
        node.host.restart_service(service_name)
        logger.info(f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue")
        sleep(sleep_interval)


@reporter.step_deco("Get status of list of services and check expected status")
@wait_for_success(60, 5)
def check_services_status(service_list: list[str], expected_status: str, shell: Shell):
    cmd = ""
    for service in service_list:
        cmd += f' sudo systemctl status {service} --lines=0 | grep "Active:";'
    result = shell.exec(cmd).stdout.rstrip()
    statuses = list()
    for line in result.split("\n"):
        status_substring = line.split()
        statuses.append(status_substring[1])
    unique_statuses = list(set(statuses))
    assert (
        len(unique_statuses) == 1 and expected_status in unique_statuses
    ), f"Requested status={expected_status} not found in requested services={service_list}, list of statuses={result}"


@reporter.step_deco("Wait for active status of passed service")
@wait_for_success(60, 5)
def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"):
    real_status = service_status(service=service, shell=shell)
    assert (
        expected_status == real_status
    ), f"Service {service}: expected status = {expected_status}, real status = {real_status}"


@reporter.step_deco("Run healthcheck against passed service")
@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1)
def service_type_healthcheck(
    service_type: type[NodeBase],
    node: ClusterNode,
):
    service = node.service(service_type)
    assert (
        service.service_healthcheck()
    ), f"Healthcheck failed for {service.get_service_systemctl_name()}, IP={node.host_ip}"


@reporter.step_deco("Kill by process name")
def kill_by_service_name(service_type: type[NodeBase], node: ClusterNode):
    service_systemctl_name = node.service(service_type).get_service_systemctl_name()
    pid = service_pid(service_systemctl_name, node.host.get_shell())
    node.host.get_shell().exec(f"sudo kill -9 {pid}")


@reporter.step_deco("Suspend service {service}")
def suspend_service(shell: Shell, service: str):
    shell.exec(f"sudo kill -STOP {service_pid(service, shell)}")


@reporter.step_deco("Resume service {service}")
def resume_service(shell: Shell, service: str):
    shell.exec(f"sudo kill -CONT {service_pid(service, shell)}")


@reporter.step_deco("Retrieve service's pid")
# Retry mechanism: when the task has been started recently, a '0' PID can be returned
@wait_for_success(10, 1)
def service_pid(service: str, shell: Shell) -> int:
    output = shell.exec(f"systemctl show --property MainPID {service}").stdout.rstrip()
    splitted = output.split("=")
    PID = int(splitted[1])
    assert PID > 0, f"Service {service} has invalid PID={PID}"
    return PID


@reporter.step_deco("Wrapper for neo-go dump keys command")
def dump_keys(shell: Shell, node: ClusterNode) -> dict:
    host = node.host
    service_config = host.get_service_config(node.service(MorphChain).name)
    wallet = service_config.attributes["wallet_path"]
    return neo_go_dump_keys(shell=shell, wallet=wallet)


@reporter.step_deco("Wait for object replication")
def wait_object_replication(
    cid: str,
    oid: str,
    expected_copies: int,
    shell: Shell,
    nodes: list[StorageNode],
    sleep_interval: int = 15,
    attempts: int = 20,
) -> list[StorageNode]:
    nodes_with_object = []
    for _ in range(attempts):
        nodes_with_object = get_nodes_with_object(cid, oid, shell=shell, nodes=nodes)
        if len(nodes_with_object) >= expected_copies:
            return nodes_with_object
        sleep(sleep_interval)
    raise AssertionError(
        f"Expected {expected_copies} copies of object, but found {len(nodes_with_object)}. "
        f"Waiting time {sleep_interval * attempts}"
    )


def is_all_storage_nodes_returned(cluster: Cluster) -> bool:
    with reporter.step("Run health check for all storage nodes"):
        for node in cluster.services(StorageNode):
            try:
                health_check = storage_node_healthcheck(node)
            except Exception as err:
                logger.warning(f"Node healthcheck fails with error {err}")
                return False
            if health_check.health_status != "READY" or health_check.network_status != "ONLINE":
                return False
    return True
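A sketch of the intended failover flow, assuming `shell`, `cluster`, `cid` and `oid` come from the test fixtures (all hypothetical names here):

# Example (illustrative, not part of the diff; fixture objects assumed):
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.utils.failover_utils import wait_all_storage_nodes_returned, wait_object_replication

wait_all_storage_nodes_returned(shell, cluster)  # blocks until every node pings and reports READY/ONLINE
nodes_with_object = wait_object_replication(
    cid, oid, expected_copies=2, shell=shell, nodes=cluster.services(StorageNode)
)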
src/frostfs_testlib/utils/file_utils.py (new file, +168)
@@ -0,0 +1,168 @@
import hashlib
import logging
import os
import uuid
from typing import Any, Optional

from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import ASSETS_DIR

reporter = get_reporter()
logger = logging.getLogger("NeoLogger")


def generate_file(size: int) -> str:
    """Generates a binary file with the specified size in bytes.

    Args:
        size: Size in bytes, can be declared as 6e+6 for example.

    Returns:
        The path to the generated file.
    """
    file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
    with open(file_path, "wb") as file:
        file.write(os.urandom(size))
    logger.info(f"File with size {size} bytes has been generated: {file_path}")

    return file_path


def generate_file_with_content(
    size: int,
    file_path: Optional[str] = None,
    content: Optional[str] = None,
) -> str:
    """Creates a new file with specified content.

    Args:
        size: Size of the random binary content in bytes; ignored if content is specified.
        file_path: Path to the file that should be created. If not specified, then random file
            path will be generated.
        content: Content that should be stored in the file. If not specified, then random binary
            content will be generated.

    Returns:
        Path to the generated file.
    """
    mode = "w+"
    if content is None:
        content = os.urandom(size)
        mode = "wb"

    if not file_path:
        file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
    else:
        if not os.path.exists(os.path.dirname(file_path)):
            os.makedirs(os.path.dirname(file_path))

    with open(file_path, mode) as file:
        file.write(content)

    return file_path


@reporter.step_deco("Get File Hash")
def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str:
    """Generates hash for the specified file.

    Args:
        file_path: Path to the file to generate hash for.
        len: How many bytes to read.
        offset: Position to start reading from.

    Returns:
        Hash of the file as hex-encoded string.
    """
    file_hash = hashlib.sha256()
    with open(file_path, "rb") as out:
        if len and not offset:
            file_hash.update(out.read(len))
        elif len and offset:
            out.seek(offset, 0)
            file_hash.update(out.read(len))
        elif offset and not len:
            out.seek(offset, 0)
            file_hash.update(out.read())
        else:
            file_hash.update(out.read())
    return file_hash.hexdigest()


@reporter.step_deco("Concatenate set of files into one file")
def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str:
    """Concatenates several files into a single file.

    Args:
        file_paths: Paths to the files to concatenate.
        resulting_file_path: Path to the file where concatenated content should be stored.

    Returns:
        Path to the resulting file.
    """
    if not resulting_file_path:
        resulting_file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
    with open(resulting_file_path, "wb") as f:
        for file in file_paths:
            with open(file, "rb") as part_file:
                f.write(part_file.read())
    return resulting_file_path


def split_file(file_path: str, parts: int) -> list[str]:
    """Splits specified file into the specified number of parts.

    Each part is saved under name `{original_file}_part_{i}`.

    Args:
        file_path: Path to the file that should be split.
        parts: Number of parts the file should be split into.

    Returns:
        Paths to the part files.
    """
    with open(file_path, "rb") as file:
        content = file.read()

    content_size = len(content)
    chunk_size = int((content_size + parts) / parts)

    part_id = 1
    part_file_paths = []
    for content_offset in range(0, content_size + 1, chunk_size):
        part_file_name = f"{file_path}_part_{part_id}"
        part_file_paths.append(part_file_name)
        with open(part_file_name, "wb") as out_file:
            out_file.write(content[content_offset : content_offset + chunk_size])
        part_id += 1

    return part_file_paths


def get_file_content(
    file_path: str, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None
) -> Any:
    """Returns content of specified file.

    Args:
        file_path: Path to the file.
        content_len: Limit of content length. If None, then entire file content is returned;
            otherwise only the first content_len bytes of the content are returned.
        mode: Mode of opening the file.
        offset: Position to start reading from.

    Returns:
        Content of the specified file.
    """
    with open(file_path, mode) as file:
        if content_len and not offset:
            content = file.read(content_len)
        elif content_len and offset:
            file.seek(offset, 0)
            content = file.read(content_len)
        elif offset and not content_len:
            file.seek(offset, 0)
            content = file.read()
        else:
            content = file.read()

    return content
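A round-trip sanity check of the file helpers, assuming ASSETS_DIR points at an existing directory:

# Example (illustrative, not part of the diff):
from frostfs_testlib.utils.file_utils import concat_files, generate_file, get_file_hash, split_file

source = generate_file(1024)               # 1 KiB of random bytes under ASSETS_DIR
part_paths = split_file(source, parts=4)   # -> [f"{source}_part_1", ..., f"{source}_part_4"]
restored = concat_files(part_paths)
assert get_file_hash(source) == get_file_hash(restored)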
src/frostfs_testlib/utils/version_utils.py (new file, +79)
@@ -0,0 +1,79 @@
import logging
import re

from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.resources.cli import (
    FROSTFS_ADM_EXEC,
    FROSTFS_AUTHMATE_EXEC,
    FROSTFS_CLI_EXEC,
    NEOGO_EXECUTABLE,
)
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell

logger = logging.getLogger("NeoLogger")


def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
    versions = {}

    for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]:
        out = shell.exec(f"{binary} --version").stdout
        versions[binary] = _parse_version(out)

    frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
    versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout)

    try:
        frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC)
        versions[FROSTFS_ADM_EXEC] = _parse_version(frostfs_adm.version.get().stdout)
    except RuntimeError:
        logger.info(f"{FROSTFS_ADM_EXEC} not installed")

    out = shell.exec("aws --version").stdout
    out_lines = out.split("\n")
    versions["AWS"] = out_lines[0] if out_lines else "Unknown"

    return versions


def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]:
    versions_by_host = {}
    for host in hosting.hosts:
        binary_path_by_name = {}  # Maps binary name to executable path
        for service_config in host.config.services:
            exec_path = service_config.attributes.get("exec_path")
            if exec_path:
                binary_path_by_name[service_config.name] = exec_path
        for cli_config in host.config.clis:
            binary_path_by_name[cli_config.name] = cli_config.exec_path

        shell = host.get_shell()
        versions_at_host = {}
        for binary_name, binary_path in binary_path_by_name.items():
            try:
                result = shell.exec(f"{binary_path} --version")
                versions_at_host[binary_name] = _parse_version(result.stdout)
            except Exception as exc:
                logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
                versions_at_host[binary_name] = "Unknown"
        versions_by_host[host.config.address] = versions_at_host

    # Consolidate versions across all hosts
    versions = {}
    for host, binary_versions in versions_by_host.items():
        for name, version in binary_versions.items():
            captured_version = versions.get(name)
            if captured_version:
                assert (
                    captured_version == version
                ), f"Binary {name} has inconsistent version on host {host}"
            else:
                versions[name] = version
    return versions


def _parse_version(version_output: str) -> str:
    version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE)
    return version.group(1).strip() if version else "Unknown"
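The version regex accepts both `version: X` and `Version vX` shapes and falls back to "Unknown"; a quick sketch of its behavior (the sample strings are illustrative):

# Example (illustrative, not part of the diff):
from frostfs_testlib.utils.version_utils import _parse_version

assert _parse_version("frostfs-adm\nVersion: v0.0.1") == "0.0.1"
assert _parse_version("plain output with no match") == "Unknown"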