Compare commits

...

10 commits

SHA1        Message                                                                      Author                                 Date
aa8c83b682  port fix fill_percent                                                        m.malygina <m.malygina@yadro.com>      2024-03-28 14:54:39 +03:00
e765276c3f  [#187] Add total bytes to report to 0.38                                     Andrey Berezin <a.berezin@yadro.com>   2024-02-27 12:03:20 +03:00
b464591153  [TrueCloudLab/xk6-frostfs#125] Add acl option                                Andrey Berezin <a.berezin@yadro.com>   2024-02-05 15:50:38 +00:00
c978f55e90  [#170] Update metrics                                                        Andrey Berezin <a.berezin@yadro.com>   2024-02-05 15:42:37 +00:00
2255ee465f  [#173] Add flag to remove registry file                                      Andrey Berezin <a.berezin@yadro.com>   2024-02-05 12:41:29 +03:00
2ec24f4cd1  [#168] Strip components for new xk6 archive and update unit tests for 0.38   Andrey Berezin <a.berezin@yadro.com>   2024-01-26 13:36:44 +03:00
2da1a4583f  [#165] Add local flag to preset in load                                      Andrey Berezin <a.berezin@yadro.com>   2024-01-22 19:08:30 +03:00
cda3773fa8  [#163] Refactor frostfs-cli functional                                       Dmitriy Zayakin <d.zayakin@yadro.com>  2024-01-22 14:26:25 +03:00
be36a10f1e  [#157] fix for dev-env and unit-tests                                        Andrey Berezin <a.berezin@yadro.com>   2024-01-12 16:42:19 +00:00
df8d99d83c  [#156] load_time in the format of days, hours and minutes; new params        Liza <e.chichindaeva@yadro.com>        2024-01-12 16:45:18 +03:00
19 changed files with 252 additions and 77 deletions

View file

@@ -65,7 +65,6 @@ class FrostfsCliContainer(CliCommand):
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         force: bool = False,
-        timeout: Optional[str] = None,
     ) -> CommandResult:
         """
         Delete an existing container.
@@ -81,7 +80,6 @@ class FrostfsCliContainer(CliCommand):
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
-            timeout: Timeout for the operation (default 15s).

         Returns:
             Command's result.
@@ -298,9 +296,5 @@ class FrostfsCliContainer(CliCommand):
         return self._execute(
             f"container nodes {from_str}",
-            **{
-                param: value
-                for param, value in locals().items()
-                if param not in ["self", "from_file", "from_str"]
-            },
+            **{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]},
         )

View file

@@ -124,9 +124,7 @@ class FrostfsCliObject(CliCommand):
         """
         return self._execute(
             "object hash",
-            **{
-                param: value for param, value in locals().items() if param not in ["self", "params"]
-            },
+            **{param: value for param, value in locals().items() if param not in ["self", "params"]},
         )

     def head(
@@ -355,8 +353,8 @@ class FrostfsCliObject(CliCommand):
     def nodes(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
         generate_key: Optional = None,

View file

@@ -1,5 +1,5 @@
 class Options:
-    DEFAULT_SHELL_TIMEOUT = 90
+    DEFAULT_SHELL_TIMEOUT = 120

     @staticmethod
     def get_default_shell_timeout():

View file

@@ -152,9 +152,7 @@ class DockerHost(Host):
             timeout=service_attributes.start_timeout,
         )

-    def wait_for_service_to_be_in_state(
-        self, systemd_service_name: str, expected_state: str, timeout: int
-    ) -> None:
+    def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None:
         raise NotImplementedError("Not implemented for docker")

     def get_data_directory(self, service_name: str) -> str:
@@ -181,6 +179,12 @@ class DockerHost(Host):
     def delete_pilorama(self, service_name: str) -> None:
         raise NotImplementedError("Not implemented for docker")

+    def delete_file(self, file_path: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
+    def is_file_exist(self, file_path: str) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
     def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
         volume_path = self.get_data_directory(service_name)
@@ -305,9 +309,7 @@ class DockerHost(Host):
                 return container
         return None

-    def _wait_for_container_to_be_in_state(
-        self, container_name: str, expected_state: str, timeout: int
-    ) -> None:
+    def _wait_for_container_to_be_in_state(self, container_name: str, expected_state: str, timeout: int) -> None:
         iterations = 10
         iteration_wait_time = timeout / iterations

View file

@@ -50,6 +50,7 @@ class SummarizedStats:
     throughput: float = field(default_factory=float)
     latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies)
     errors: SummarizedErorrs = field(default_factory=SummarizedErorrs)
+    total_bytes: int = field(default_factory=int)
     passed: bool = True

     def calc_stats(self):
@@ -85,6 +86,7 @@ class SummarizedStats:
             target.latencies.by_node[node_key] = operation.latency
             target.throughput += operation.throughput
             target.errors.threshold = load_params.error_threshold
+            target.total_bytes = operation.total_bytes
             if operation.failed_iterations:
                 target.errors.by_node[node_key] = operation.failed_iterations

View file

@@ -4,6 +4,7 @@ import math
 import os
 from dataclasses import dataclass
 from datetime import datetime
+from threading import Event
 from time import sleep
 from typing import Any
 from urllib.parse import urlparse
@@ -73,14 +74,16 @@ class K6:
         self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user, process_id)

     def _get_fill_percents(self):
-        fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs").stdout.split("\n")
+        fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split(
+            "\n"
+        )
         return [line.split() for line in fill_percents][:-1]

     def check_fill_percent(self):
         fill_percents = self._get_fill_percents()
         percent_mean = 0
         for line in fill_percents:
-            percent_mean += float(line[1].split('%')[0])
+            percent_mean += float(line[1].split("%")[0])
         percent_mean = percent_mean / len(fill_percents)
         logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}")
         return percent_mean >= self.load_params.fill_percent
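The tightened grep chain above narrows the df output to the FrostFS data volumes. For reference, a standalone sketch of the same parsing on made-up df output (device names and mount points are hypothetical):

    # Made-up "df -H --output=source,pcent,target" output; the trailing empty
    # string after the final "\n" is why _get_fill_percents drops the last
    # element with [:-1].
    stdout = "/dev/sdb1  42%  /srv/frostfs/data0\n/dev/sdc1  57%  /srv/frostfs/data1\n"
    fill_percents = [line.split() for line in stdout.split("\n")][:-1]
    percent_mean = sum(float(line[1].split("%")[0]) for line in fill_percents) / len(fill_percents)
    print(percent_mean)  # 49.5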
@@ -145,7 +148,7 @@ class K6:
         with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"):
             self._k6_process.start()

-    def wait_until_finished(self, event, soft_timeout: int = 0) -> None:
+    def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None:
         with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"):
             if self.load_params.scenario == LoadScenario.VERIFY:
                 timeout = self.load_params.verify_time or 0
@@ -188,23 +191,25 @@ class K6:
             wait_interval = min_wait_interval

         if self._k6_process is None:
             assert "No k6 instances were executed"

         while timeout > 0:
             if not self.load_params.fill_percent is None:
                 with reporter.step(f"Check the percentage of filling of all data disks on the node"):
                     if self.check_fill_percent():
-                        logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%")
+                        logger.info(
+                            f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%"
+                        )
                         event.set()
                         self.stop()
                         return

             if event.is_set():
                 self.stop()
                 return

             if not self._k6_process.running():
                 return

             remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else ""
             remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else ""
             logger.info(

View file

@@ -3,11 +3,28 @@ import os
 from dataclasses import dataclass, field, fields, is_dataclass
 from enum import Enum
 from types import MappingProxyType
-from typing import Any, Optional, get_args
+from typing import Any, Callable, Optional, get_args

 from frostfs_testlib.utils.converting_utils import calc_unit


+def convert_time_to_seconds(time: int | str | None) -> int:
+    if time is None:
+        return None
+    if str(time).isdigit():
+        seconds = int(time)
+    else:
+        days, hours, minutes = 0, 0, 0
+        if "d" in time:
+            days, time = time.split("d")
+        if "h" in time:
+            hours, time = time.split("h")
+        if "min" in time:
+            minutes = time.replace("min", "")
+        seconds = int(days) * 86400 + int(hours) * 3600 + int(minutes) * 60
+    return seconds
+
+
 class LoadType(Enum):
     gRPC = "grpc"
     S3 = "s3"
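The parser accepts either plain seconds or a "<days>d<hours>h<minutes>min" string, splitting the units off left to right. A quick sanity check of the arithmetic, using values that reappear in the unit tests added at the end of this compare:

    # Integers (and digit-only strings) pass through unchanged; unit strings
    # decompose as days * 86400 + hours * 3600 + minutes * 60.
    assert convert_time_to_seconds(300) == 300
    assert convert_time_to_seconds("2d3h5min") == 2 * 86400 + 3 * 3600 + 5 * 60  # 183900
    assert convert_time_to_seconds("1d6h") == 1 * 86400 + 6 * 3600               # 108000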
@@ -76,6 +93,7 @@ def metadata_field(
     scenario_variable: Optional[str] = None,
     string_repr: Optional[bool] = True,
     distributed: Optional[bool] = False,
+    formatter: Optional[Callable] = None,
 ):
     return field(
         default=None,
@@ -85,6 +103,7 @@ def metadata_field(
             "env_variable": scenario_variable,
             "string_repr": string_repr,
             "distributed": distributed,
+            "formatter": formatter,
         },
     )
@@ -128,6 +147,8 @@ class Preset:
     pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
     # Workers count for preset
     workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
+    # Acl for container/buckets
+    acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)

     # ------ GRPC ------
     # Amount of containers which should be created
@@ -147,6 +168,9 @@ class Preset:
     # Flag to control preset erorrs
     ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)

+    # Flag to ensure created containers store data on local endpoints
+    local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False)
+

 @dataclass
 class LoadParams:
@@ -200,7 +224,9 @@ class LoadParams:
     # ------- COMMON SCENARIO PARAMS -------
     # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
-    load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False)
+    load_time: Optional[int] = metadata_field(
+        all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds
+    )
     # Object size in KB for load and preset.
     object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False)
     # For read operations, controls from which set get objects to read
@@ -211,6 +237,8 @@ class LoadParams:
     registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False)
     # In case if we want to use custom registry file left from another load run
     custom_registry: Optional[str] = None
+    # In case if we want to use custom registry file left from another load run
+    force_fresh_registry: Optional[bool] = None
     # Specifies the minimum duration of every single execution (i.e. iteration).
     # Any iterations that are shorter than this value will cause that VU to
     # sleep for the remainder of the time until the specified minimum duration is reached.
@@ -384,6 +412,25 @@ class LoadParams:

         return fields_with_data or []

+    def _get_field_formatter(self, field_name: str) -> Callable | None:
+        data_fields = fields(self)
+        formatters = [
+            field.metadata["formatter"]
+            for field in data_fields
+            if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None
+        ]
+        if formatters:
+            return formatters[0]
+
+        return None
+
+    def __setattr__(self, field_name, value):
+        formatter = self._get_field_formatter(field_name)
+        if formatter:
+            value = formatter(value)
+
+        super().__setattr__(field_name, value)
+
     def __str__(self) -> str:
         load_type_str = self.scenario.value if self.scenario else self.load_type.value
         # TODO: migrate load_params defaults to testlib
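With this hook, assigning to any field that declares a formatter normalizes the value immediately; nothing else in the codebase needs to call the converter. A minimal sketch of the effect (the LoadParams constructor arguments here are assumed):

    params = LoadParams(load_type=LoadType.gRPC)  # assumed minimal constructor
    params.load_time = "1d6h"  # __setattr__ routes the value through convert_time_to_seconds
    assert params.load_time == 108000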

View file

@@ -39,6 +39,10 @@ class OperationMetric(ABC):
     def throughput(self) -> float:
         return self._get_metric_rate(self._THROUGHPUT)

+    @property
+    def total_bytes(self) -> float:
+        return self._get_metric(self._THROUGHPUT)
+
     def _get_metric(self, metric: str) -> int:
         metrics_method_map = {
             "counter": self._get_counter_metric,
@@ -107,66 +111,66 @@ class DeleteOperationMetric(OperationMetric):

 class GrpcWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "frostfs_obj_put_total"
+    _SUCCESS = "frostfs_obj_put_success"
     _ERRORS = "frostfs_obj_put_fails"
     _LATENCY = "frostfs_obj_put_duration"

 class GrpcReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "frostfs_obj_get_total"
+    _SUCCESS = "frostfs_obj_get_success"
     _ERRORS = "frostfs_obj_get_fails"
     _LATENCY = "frostfs_obj_get_duration"

 class GrpcDeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "frostfs_obj_delete_total"
+    _SUCCESS = "frostfs_obj_delete_success"
     _ERRORS = "frostfs_obj_delete_fails"
     _LATENCY = "frostfs_obj_delete_duration"

 class S3WriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "aws_obj_put_total"
+    _SUCCESS = "aws_obj_put_success"
     _ERRORS = "aws_obj_put_fails"
     _LATENCY = "aws_obj_put_duration"

 class S3ReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "aws_obj_get_total"
+    _SUCCESS = "aws_obj_get_success"
     _ERRORS = "aws_obj_get_fails"
     _LATENCY = "aws_obj_get_duration"

 class S3DeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "aws_obj_delete_total"
+    _SUCCESS = "aws_obj_delete_success"
     _ERRORS = "aws_obj_delete_fails"
     _LATENCY = "aws_obj_delete_duration"

 class S3LocalWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "s3local_obj_put_total"
+    _SUCCESS = "s3local_obj_put_success"
     _ERRORS = "s3local_obj_put_fails"
     _LATENCY = "s3local_obj_put_duration"

 class S3LocalReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "s3local_obj_get_total"
+    _SUCCESS = "s3local_obj_get_success"
     _ERRORS = "s3local_obj_get_fails"
     _LATENCY = "s3local_obj_get_duration"

 class LocalWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "local_obj_put_total"
+    _SUCCESS = "local_obj_put_success"
     _ERRORS = "local_obj_put_fails"
     _LATENCY = "local_obj_put_duration"

 class LocalReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "local_obj_get_total"
+    _SUCCESS = "local_obj_get_success"
     _ERRORS = "local_obj_get_fails"

 class LocalDeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "local_obj_delete_total"
+    _SUCCESS = "local_obj_delete_success"
    _ERRORS = "local_obj_delete_fails"

View file

@@ -120,6 +120,11 @@ class LoadReport:
         throughput, unit = calc_unit(stats.throughput)
         throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec")

+        bytes_html = ""
+        if stats.total_bytes > 0:
+            total_bytes, total_bytes_unit = calc_unit(stats.total_bytes)
+            bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}")
+
         per_node_errors_html = ""
         for node_key, errors in stats.errors.by_node.items():
             if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT:
@@ -148,6 +153,7 @@ class LoadReport:
             <tr><th colspan="2" bgcolor="gainsboro">Metrics</th></tr>
             {self._row("Total operations", stats.operations)}
             {self._row("OP/sec", f"{stats.rate:.2f}")}
+            {bytes_html}
             {throughput_html}
             {latency_html}
             <tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>

View file

@@ -4,6 +4,7 @@ import math
 import re
 import time
 from dataclasses import fields
+from threading import Event
 from typing import Optional
 from urllib.parse import urlparse
@@ -30,7 +31,6 @@ from frostfs_testlib.testing import parallel, run_optionally
 from frostfs_testlib.testing.test_control import retry
 from frostfs_testlib.utils import datetime_utils
 from frostfs_testlib.utils.file_keeper import FileKeeper
-from threading import Event

 class RunnerBase(ScenarioRunner):
@@ -78,6 +78,10 @@ class DefaultRunner(RunnerBase):
         nodes_under_load: list[ClusterNode],
         k6_dir: str,
     ):
+        if load_params.force_fresh_registry and load_params.custom_registry:
+            with reporter.step("Forcing fresh registry files"):
+                parallel(self._force_fresh_registry, self.loaders, load_params)
+
         if load_params.load_type != LoadType.S3:
             return
@@ -88,6 +92,11 @@ class DefaultRunner(RunnerBase):
         parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir)

+    def _force_fresh_registry(self, loader: Loader, load_params: LoadParams):
+        with reporter.step(f"Forcing fresh registry on {loader.ip}"):
+            shell = loader.get_shell()
+            shell.exec(f"rm -f {load_params.registry_file}")
+
     def _prepare_loader(
         self,
         loader: Loader,
@@ -314,7 +323,7 @@ class LocalRunner(RunnerBase):
         with reporter.step("Download K6"):
             shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
             shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
-            shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}")
+            shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}")
             shell.exec(f"sudo chmod -R 777 {k6_dir}")

         with reporter.step("Create empty_passwd"):

View file

@@ -9,4 +9,4 @@ FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm")

 # Config for frostfs-adm utility. Optional if tests are running against devenv
 FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")

-CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None)
+CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s")

View file

@@ -1,11 +1,12 @@
 import json
 import logging
 import re
-import requests
 from dataclasses import dataclass
 from time import sleep
 from typing import Optional, Union

+import requests
+
 from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
@@ -291,18 +292,17 @@ def delete_container(
     force: bool = False,
     session_token: Optional[str] = None,
     await_mode: bool = False,
-    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
 ) -> None:
     """
     A wrapper for `frostfs-cli container delete` call.

     Args:
+        await_mode: Block execution until container is removed.
         wallet (str): path to a wallet on whose behalf we delete the container
         cid (str): ID of the container to delete
         shell: executor for cli command
         endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
         force (bool): do not check whether container contains locks and remove immediately
         session_token: a path to session token file
-        timeout: Timeout for the operation.

     This function doesn't return anything.
     """
@@ -314,7 +314,6 @@ def delete_container(
         force=force,
         session=session_token,
         await_mode=await_mode,
-        timeout=timeout,
     )

View file

@@ -732,23 +732,24 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:

 @reporter.step("Search object nodes")
 def get_object_nodes(
     cluster: Cluster,
-    wallet: str,
     cid: str,
     oid: str,
-    shell: Shell,
-    endpoint: str,
+    alive_node: ClusterNode,
     bearer: str = "",
     xhdr: Optional[dict] = None,
     is_direct: bool = False,
     verify_presence_all: bool = False,
-    wallet_config: Optional[str] = None,
     timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
 ) -> list[ClusterNode]:
-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
+    shell = alive_node.host.get_shell()
+    endpoint = alive_node.storage_node.get_rpc_endpoint()
+    wallet = alive_node.storage_node.get_remote_wallet_path()
+    wallet_config = alive_node.storage_node.get_remote_wallet_config_path()
+
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config)

     result_object_nodes = cli.object.nodes(
         rpc_endpoint=endpoint,
-        wallet=wallet,
         cid=cid,
         oid=oid,
         bearer=bearer,
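After this refactor the caller hands over a cluster node instead of a shell/endpoint/wallet triple, and the helper derives all three from that node. A sketch of the new call shape (the cluster_nodes accessor is an assumption):

    object_nodes = get_object_nodes(
        cluster=cluster,
        cid=cid,
        oid=oid,
        alive_node=cluster.cluster_nodes[0],  # assumed accessor for picking a node
    )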

View file

@@ -87,7 +87,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
     alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
     remote_shell = alive_node.host.get_shell()

-    if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
+    if "force_transactions" not in alive_node.host.config.attributes:
         # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
         frostfs_adm = FrostfsAdm(
             shell=remote_shell,

View file

@@ -8,7 +8,8 @@ class ConfigAttributes:
     SHARD_CONFIG_PATH = "shard_config_path"
     LOGGER_CONFIG_PATH = "logger_config_path"
     LOCAL_WALLET_PATH = "local_wallet_path"
-    LOCAL_WALLET_CONFIG = "local_config_path"
+    LOCAL_WALLET_CONFIG = "local_wallet_config_path"
+    REMOTE_WALLET_CONFIG = "remote_wallet_config_path"
     ENDPOINT_DATA_0 = "endpoint_data0"
     ENDPOINT_DATA_1 = "endpoint_data1"
     ENDPOINT_INTERNAL = "endpoint_internal0"

View file

@@ -114,6 +114,14 @@ class NodeBase(HumanReadableABC):
             ConfigAttributes.CONFIG_PATH,
         )

+    def get_remote_wallet_config_path(self) -> str:
+        """
+        Returns node config file path located on remote host
+        """
+        return self._get_attribute(
+            ConfigAttributes.REMOTE_WALLET_CONFIG,
+        )
+
     def get_wallet_config_path(self) -> str:
         return self._get_attribute(
             ConfigAttributes.LOCAL_WALLET_CONFIG,
@@ -125,8 +133,11 @@ class NodeBase(HumanReadableABC):
         Returns config path for logger located on remote host
         """
         config_attributes = self.host.get_service_config(self.name)
-        return self._get_attribute(
-            ConfigAttributes.LOGGER_CONFIG_PATH) if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes else None
+        return (
+            self._get_attribute(ConfigAttributes.LOGGER_CONFIG_PATH)
+            if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes
+            else None
+        )

     @property
     def config_dir(self) -> str:

View file

@@ -4,13 +4,7 @@ import pytest

 from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper
 from frostfs_testlib.storage.dataclasses.acl import EACLRole
-from frostfs_testlib.storage.dataclasses.frostfs_services import (
-    HTTPGate,
-    InnerRing,
-    MorphChain,
-    S3Gate,
-    StorageNode,
-)
+from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
@@ -22,10 +16,10 @@ class TestDataclassesStr:
     [
         (Boto3ClientWrapper, "Boto3 client"),
         (AwsCliClient, "AWS CLI"),
-        (ObjectSize("simple", 1), "simple object size"),
-        (ObjectSize("simple", 10), "simple object size"),
-        (ObjectSize("complex", 5000), "complex object size"),
-        (ObjectSize("complex", 5555), "complex object size"),
+        (ObjectSize("simple", 1), "simple"),
+        (ObjectSize("simple", 10), "simple"),
+        (ObjectSize("complex", 5000), "complex"),
+        (ObjectSize("complex", 5555), "complex"),
         (StorageNode, "StorageNode"),
         (MorphChain, "MorphChain"),
         (S3Gate, "S3Gate"),

View file

@@ -15,6 +15,7 @@ class TestHosting(TestCase):
     HOST1 = {
         "address": HOST1_ADDRESS,
         "plugin_name": HOST1_PLUGIN,
+        "healthcheck_plugin_name": "basic",
         "attributes": HOST1_ATTRIBUTES,
         "clis": HOST1_CLIS,
         "services": HOST1_SERVICES,
@@ -32,6 +33,7 @@ class TestHosting(TestCase):
     HOST2 = {
         "address": HOST2_ADDRESS,
         "plugin_name": HOST2_PLUGIN,
+        "healthcheck_plugin_name": "basic",
         "attributes": HOST2_ATTRIBUTES,
         "clis": HOST2_CLIS,
         "services": HOST2_SERVICES,
@@ -52,18 +54,14 @@ class TestHosting(TestCase):
         self.assertEqual(host1.config.plugin_name, self.HOST1_PLUGIN)
         self.assertDictEqual(host1.config.attributes, self.HOST1_ATTRIBUTES)
         self.assertListEqual(host1.config.clis, [CLIConfig(**cli) for cli in self.HOST1_CLIS])
-        self.assertListEqual(
-            host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES]
-        )
+        self.assertListEqual(host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES])

         host2 = hosting.get_host_by_address(self.HOST2_ADDRESS)
         self.assertEqual(host2.config.address, self.HOST2_ADDRESS)
         self.assertEqual(host2.config.plugin_name, self.HOST2_PLUGIN)
         self.assertDictEqual(host2.config.attributes, self.HOST2_ATTRIBUTES)
         self.assertListEqual(host2.config.clis, [CLIConfig(**cli) for cli in self.HOST2_CLIS])
-        self.assertListEqual(
-            host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES]
-        )
+        self.assertListEqual(host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES])

     def test_get_host_by_service(self):
         hosting = Hosting()
@@ -104,9 +102,7 @@ class TestHosting(TestCase):
         services = hosting.find_service_configs(rf"^{self.SERVICE_NAME_PREFIX}")
         self.assertEqual(len(services), 2)
         for service in services:
-            self.assertEqual(
-                service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX
-            )
+            self.assertEqual(service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX)

         service1 = hosting.find_service_configs(self.SERVICE1["name"])
         self.assertEqual(len(service1), 1)

View file

@@ -136,11 +136,15 @@ class TestLoadConfig:
     def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams):
         expected_preset_args = [
             "--size '11'",
+            "--acl 'acl'",
             "--preload_obj '13'",
             "--out 'pregen_json'",
             "--workers '7'",
             "--containers '16'",
             "--policy 'container_placement_policy'",
+            "--ignore-errors",
+            "--sleep '19'",
+            "--local",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -151,6 +155,7 @@ class TestLoadConfig:
             "WRITERS": 7,
             "READERS": 7,
             "DELETERS": 8,
+            "READ_AGE": 8,
             "PREGEN_JSON": "pregen_json",
             "PREPARE_LOCALLY": True,
         }
@@ -167,6 +172,10 @@ class TestLoadConfig:
             "--workers '7'",
             "--containers '16'",
             "--policy 'container_placement_policy'",
+            "--ignore-errors",
+            "--sleep '19'",
+            "--local",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -184,6 +193,7 @@ class TestLoadConfig:
             "TIME_UNIT": "time_unit",
             "WRITE_RATE": 10,
             "READ_RATE": 9,
+            "READ_AGE": 8,
             "DELETE_RATE": 11,
             "PREPARE_LOCALLY": True,
         }
@@ -201,6 +211,9 @@ class TestLoadConfig:
             "--workers '7'",
             "--buckets '13'",
             "--location 's3_location'",
+            "--ignore-errors",
+            "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -211,6 +224,7 @@ class TestLoadConfig:
             "WRITERS": 7,
             "READERS": 7,
             "DELETERS": 8,
+            "READ_AGE": 8,
             "NO_VERIFY_SSL": True,
             "PREGEN_JSON": "pregen_json",
         }
@@ -218,6 +232,45 @@ class TestLoadConfig:
         self._check_preset_params(load_params, expected_preset_args)
         self._check_env_vars(load_params, expected_env_vars)
+    @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True)
+    def test_argument_parsing_for_s3_car_scenario_with_stringed_time(self, load_params: LoadParams):
+        load_params.load_time = "2d3h5min"
+        expected_preset_args = [
+            "--size '11'",
+            "--preload_obj '13'",
+            "--no-verify-ssl",
+            "--out 'pregen_json'",
+            "--workers '7'",
+            "--buckets '13'",
+            "--location 's3_location'",
+            "--ignore-errors",
+            "--sleep '19'",
+            "--acl 'acl'",
+        ]
+        expected_env_vars = {
+            "DURATION": 183900,
+            "WRITE_OBJ_SIZE": 11,
+            "REGISTRY_FILE": "registry_file",
+            "K6_MIN_ITERATION_DURATION": "min_iteration_duration",
+            "K6_SETUP_TIMEOUT": "setup_timeout",
+            "NO_VERIFY_SSL": True,
+            "MAX_WRITERS": 11,
+            "MAX_READERS": 11,
+            "MAX_DELETERS": 12,
+            "PRE_ALLOC_DELETERS": 21,
+            "PRE_ALLOC_READERS": 20,
+            "PRE_ALLOC_WRITERS": 20,
+            "PREGEN_JSON": "pregen_json",
+            "TIME_UNIT": "time_unit",
+            "WRITE_RATE": 10,
+            "READ_RATE": 9,
+            "READ_AGE": 8,
+            "DELETE_RATE": 11,
+        }
+
+        self._check_preset_params(load_params, expected_preset_args)
+        self._check_env_vars(load_params, expected_env_vars)
+
     @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True)
     def test_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams):
         expected_preset_args = [
@@ -228,6 +281,9 @@ class TestLoadConfig:
             "--workers '7'",
             "--buckets '13'",
             "--location 's3_location'",
+            "--ignore-errors",
+            "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -246,6 +302,7 @@ class TestLoadConfig:
             "TIME_UNIT": "time_unit",
             "WRITE_RATE": 10,
             "READ_RATE": 9,
+            "READ_AGE": 8,
             "DELETE_RATE": 11,
         }
@@ -254,6 +311,7 @@ class TestLoadConfig:
     @pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True)
     def test_argument_parsing_for_http_scenario(self, load_params: LoadParams):
+        load_params.preset.local = False
         expected_preset_args = [
             "--no-verify-ssl",
             "--size '11'",
@@ -262,6 +320,9 @@ class TestLoadConfig:
             "--workers '7'",
             "--containers '16'",
             "--policy 'container_placement_policy'",
+            "--ignore-errors",
+            "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -273,6 +334,7 @@ class TestLoadConfig:
             "WRITERS": 7,
             "READERS": 7,
             "DELETERS": 8,
+            "READ_AGE": 8,
             "PREGEN_JSON": "pregen_json",
         }
@@ -281,6 +343,7 @@ class TestLoadConfig:
     @pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True)
     def test_argument_parsing_for_local_scenario(self, load_params: LoadParams):
+        load_params.preset.local = False
         expected_preset_args = [
             "--size '11'",
             "--preload_obj '13'",
@@ -288,6 +351,9 @@ class TestLoadConfig:
             "--workers '7'",
             "--containers '16'",
             "--policy 'container_placement_policy'",
+            "--ignore-errors",
+            "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "CONFIG_FILE": "config_file",
@@ -299,6 +365,7 @@ class TestLoadConfig:
             "WRITERS": 7,
             "READERS": 7,
             "DELETERS": 8,
+            "READ_AGE": 8,
             "PREGEN_JSON": "pregen_json",
         }
@@ -338,6 +405,8 @@ class TestLoadConfig:
             "--workers '0'",
             "--containers '0'",
             "--policy ''",
+            "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -348,6 +417,7 @@ class TestLoadConfig:
             "WRITERS": 0,
             "READERS": 0,
             "DELETERS": 0,
+            "READ_AGE": 0,
             "PREGEN_JSON": "",
             "PREPARE_LOCALLY": False,
         }
@@ -364,6 +434,8 @@ class TestLoadConfig:
             "--workers '0'",
             "--containers '0'",
             "--policy ''",
+            "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -382,6 +454,7 @@ class TestLoadConfig:
             "WRITE_RATE": 0,
             "READ_RATE": 0,
             "DELETE_RATE": 0,
+            "READ_AGE": 0,
             "PREPARE_LOCALLY": False,
         }
@@ -397,6 +470,8 @@ class TestLoadConfig:
             "--workers '0'",
             "--buckets '0'",
             "--location ''",
+            "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -407,6 +482,7 @@ class TestLoadConfig:
             "WRITERS": 0,
             "READERS": 0,
             "DELETERS": 0,
+            "READ_AGE": 0,
             "NO_VERIFY_SSL": False,
             "PREGEN_JSON": "",
         }
@@ -423,6 +499,8 @@ class TestLoadConfig:
             "--workers '0'",
             "--buckets '0'",
             "--location ''",
+            "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -442,6 +520,7 @@ class TestLoadConfig:
             "WRITE_RATE": 0,
             "READ_RATE": 0,
             "DELETE_RATE": 0,
+            "READ_AGE": 0,
         }

         self._check_preset_params(load_params, expected_preset_args)
@@ -456,6 +535,8 @@ class TestLoadConfig:
             "--workers '0'",
             "--containers '0'",
             "--policy ''",
+            "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -467,6 +548,7 @@ class TestLoadConfig:
             "WRITERS": 0,
             "READERS": 0,
             "DELETERS": 0,
+            "READ_AGE": 0,
             "PREGEN_JSON": "",
         }
@@ -482,6 +564,8 @@ class TestLoadConfig:
             "--workers '0'",
             "--containers '0'",
             "--policy ''",
+            "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "CONFIG_FILE": "",
@@ -493,6 +577,7 @@ class TestLoadConfig:
             "WRITERS": 0,
             "READERS": 0,
             "DELETERS": 0,
+            "READ_AGE": 0,
             "PREGEN_JSON": "",
         }
@@ -531,6 +616,27 @@ class TestLoadConfig:
         self._check_env_vars(load_params, expected_env_vars)
+    @pytest.mark.parametrize(
+        "load_params, load_type",
+        [(LoadScenario.gRPC, LoadType.gRPC)],
+        indirect=True,
+    )
+    @pytest.mark.parametrize(
+        "load_time, expected_seconds",
+        [
+            (300, 300),
+            ("2d3h45min", 186300),
+            ("1d6h", 108000),
+            ("1d", 86400),
+            ("1d1min", 86460),
+            ("2h", 7200),
+            ("2h2min", 7320),
+        ],
+    )
+    def test_convert_time_to_seconds(self, load_params: LoadParams, load_time: str | int, expected_seconds: int):
+        load_params.load_time = load_time
+        assert load_params.load_time == expected_seconds
+
     def _check_preset_params(self, load_params: LoadParams, expected_preset_args: list[str]):
         preset_parameters = load_params.get_preset_arguments()
         assert sorted(preset_parameters) == sorted(expected_preset_args)