[#133] Change reporter usage

Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
Andrey Berezin 2023-11-29 15:27:17 +03:00
parent 39a17f3634
commit dc6b0e407f
37 changed files with 478 additions and 678 deletions
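This commit migrates call sites from the old get_reporter() accessor and its step_deco decorator to a module-level reporter import with a single step() helper. A minimal before/after sketch of the pattern, using an illustrative function name that is not taken from the diff:

# Old usage (removed by this commit): a reporter instance was fetched explicitly.
#
#   from frostfs_testlib.reporter import get_reporter
#   reporter = get_reporter()
#
#   @reporter.step_deco("Generate K6 command")
#   def generate_k6_command(): ...
#
# New usage: import the reporter module directly and decorate with step().
from frostfs_testlib import reporter


@reporter.step("Generate K6 command")
def generate_k6_command():  # illustrative name, not from the diff
    ...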

View file

@@ -8,10 +8,10 @@ from time import sleep
from typing import Any
from urllib.parse import urlparse
from frostfs_testlib import reporter
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType
from frostfs_testlib.processes.remote_process import RemoteProcess
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import STORAGE_USER_NAME
from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD
from frostfs_testlib.shell import Shell
@@ -21,7 +21,6 @@ from frostfs_testlib.testing.test_control import wait_for_success
EXIT_RESULT_CODE = 0
logger = logging.getLogger("NeoLogger")
reporter = get_reporter()
@dataclass
@@ -102,7 +101,7 @@ class K6:
self.preset_output = result.stdout.strip("\n")
return self.preset_output
@reporter.step_deco("Generate K6 command")
@reporter.step("Generate K6 command")
def _generate_env_variables(self) -> str:
env_vars = self.load_params.get_env_vars()
@@ -216,7 +215,7 @@ class K6:
return self._k6_process.running()
return False
@reporter.step_deco("Wait until K6 process end")
@reporter.step("Wait until K6 process end")
@wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout")
def _wait_until_process_end(self):
return self._k6_process.running()
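In the hunk above, the new reporter.step decorator stacks on top of wait_for_success. Python applies decorators bottom-up, so the retry wrapper is created first and the reporter step then wraps the entire retry loop; a hand-expanded equivalent, shown only to illustrate the ordering:

# The decorator closest to the function is applied first, so the step wraps the retry.
def _wait_until_process_end(self):
    return self._k6_process.running()


_wait_until_process_end = reporter.step("Wait until K6 process end")(
    wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout")(
        _wait_until_process_end
    )
)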

View file

@@ -1,11 +1,6 @@
import logging
from frostfs_testlib import reporter
from frostfs_testlib.load.load_config import LoadParams, LoadScenario
from frostfs_testlib.load.load_metrics import get_metrics_object
from frostfs_testlib.reporter import get_reporter
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
class LoadVerifier:
@@ -49,19 +44,11 @@ class LoadVerifier:
if deleters and not delete_operations:
issues.append(f"No any delete operation was performed")
if (
write_operations
and writers
and write_errors / write_operations * 100 > self.load_params.error_threshold
):
if write_operations and writers and write_errors / write_operations * 100 > self.load_params.error_threshold:
issues.append(
f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}"
)
if (
read_operations
and readers
and read_errors / read_operations * 100 > self.load_params.error_threshold
):
if read_operations and readers and read_errors / read_operations * 100 > self.load_params.error_threshold:
issues.append(
f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}"
)
@@ -89,9 +76,7 @@ class LoadVerifier:
)
return verify_issues
def _collect_verify_issues_on_process(
self, label, load_summary, verification_summary
) -> list[str]:
def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]:
issues = []
load_metrics = get_metrics_object(self.load_params.scenario, load_summary)
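The reflowed conditions in this file compute an error-rate percentage and compare it against load_params.error_threshold. A stand-alone sketch of that check follows; the helper name and signature are assumptions for illustration, not part of the LoadVerifier API:

# Illustrative stand-alone version of the threshold check shown above.
def check_error_rate(operations: int, errors: int, threshold_pct: float) -> list[str]:
    issues: list[str] = []
    if operations and errors / operations * 100 > threshold_pct:
        issues.append(f"Error rate is greater than threshold: {errors / operations * 100} > {threshold_pct}")
    return issues


assert check_error_rate(operations=200, errors=1, threshold_pct=1.0) == []  # 0.5% is under the 1% threshold
assert check_error_rate(operations=200, errors=5, threshold_pct=1.0) != []  # 2.5% exceeds the 1% threshold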

View file

@@ -9,13 +9,13 @@ from urllib.parse import urlparse
import yaml
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType
from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources import optionals
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.resources.common import STORAGE_USER_NAME
@@ -31,17 +31,15 @@ from frostfs_testlib.testing.test_control import retry
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.file_keeper import FileKeeper
reporter = get_reporter()
class RunnerBase(ScenarioRunner):
k6_instances: list[K6]
@reporter.step_deco("Run preset on loaders")
@reporter.step("Run preset on loaders")
def preset(self):
parallel([k6.preset for k6 in self.k6_instances])
@reporter.step_deco("Wait until load finish")
@reporter.step("Wait until load finish")
def wait_until_finish(self, soft_timeout: int = 0):
parallel([k6.wait_until_finished for k6 in self.k6_instances], soft_timeout=soft_timeout)
@@ -70,7 +68,7 @@ class DefaultRunner(RunnerBase):
self.loaders_wallet = loaders_wallet
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Preparation steps")
@reporter.step("Preparation steps")
def prepare(
self,
load_params: LoadParams,
@@ -127,7 +125,7 @@ class DefaultRunner(RunnerBase):
]
shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
@reporter.step_deco("Init k6 instances")
@reporter.step("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
self.k6_instances = []
cycled_loaders = itertools.cycle(self.loaders)
@@ -271,7 +269,7 @@ class LocalRunner(RunnerBase):
self.nodes_under_load = nodes_under_load
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Preparation steps")
@reporter.step("Preparation steps")
def prepare(
self,
load_params: LoadParams,
@@ -298,7 +296,7 @@ class LocalRunner(RunnerBase):
return True
@reporter.step_deco("Prepare node {cluster_node}")
@reporter.step("Prepare node {cluster_node}")
def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams):
shell = cluster_node.host.get_shell()
@@ -323,7 +321,7 @@ class LocalRunner(RunnerBase):
shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}')
shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}")
@reporter.step_deco("Init k6 instances")
@reporter.step("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
self.k6_instances = []
futures = parallel(
@@ -369,12 +367,12 @@ class LocalRunner(RunnerBase):
with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"):
time.sleep(wait_after_start_time)
@reporter.step_deco("Restore passwd on {cluster_node}")
@reporter.step("Restore passwd on {cluster_node}")
def restore_passwd_on_node(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell()
shell.exec("sudo chattr -i /etc/passwd")
@reporter.step_deco("Lock passwd on {cluster_node}")
@reporter.step("Lock passwd on {cluster_node}")
def lock_passwd_on_node(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell()
shell.exec("sudo chattr +i /etc/passwd")
@@ -400,19 +398,19 @@ class S3LocalRunner(LocalRunner):
endpoints: list[str]
k6_dir: str
@reporter.step_deco("Run preset on loaders")
@reporter.step("Run preset on loaders")
def preset(self):
LocalRunner.preset(self)
with reporter.step(f"Resolve containers in preset"):
parallel(self._resolve_containers_in_preset, self.k6_instances)
@reporter.step_deco("Resolve containers in preset")
@reporter.step("Resolve containers in preset")
def _resolve_containers_in_preset(self, k6_instance: K6):
k6_instance.shell.exec(
f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}"
)
@reporter.step_deco("Init k6 instances")
@reporter.step("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
self.k6_instances = []
futures = parallel(
@@ -448,7 +446,7 @@ class S3LocalRunner(LocalRunner):
)
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Preparation steps")
@reporter.step("Preparation steps")
def prepare(
self,
load_params: LoadParams,
@@ -464,7 +462,7 @@ class S3LocalRunner(LocalRunner):
parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer)
@reporter.step_deco("Prepare node {cluster_node}")
@reporter.step("Prepare node {cluster_node}")
def prepare_node(
self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str
):
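Across these hunks reporter.step is used both as a decorator (@reporter.step("...")) and as a context manager (with reporter.step(f"...")). A minimal sketch of a helper supporting both forms, assuming a plain logging backend; this is not frostfs_testlib's actual implementation:

import logging
from functools import wraps

logger = logging.getLogger("NeoLogger")


class step:
    """Minimal step helper usable as @step("title") and `with step("title"):`."""

    def __init__(self, title: str):
        self.title = title

    def __enter__(self):
        logger.info("step started: %s", self.title)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        logger.info("step finished: %s", self.title)
        return False  # never suppress exceptions

    def __call__(self, func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # The real helper also appears to substitute placeholders such as
            # "{cluster_node}" from call arguments; omitted here for brevity.
            with step(self.title):
                return func(*args, **kwargs)

        return wrapper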