Compare commits


10 commits

180907b154  [#226] Restore invalid_obj check
            Signed-off-by: a.berezin <a.berezin@yadro.com>
            2024-05-16 09:57:13 +00:00

3dc29d59f6  [#212] Return response in complete_multipart_upload function
            2024-04-23 23:57:32 +03:00

aa8c83b682  port fix fill_percent
            Signed-off-by: m.malygina <m.malygina@yadro.com>
            2024-03-28 14:54:39 +03:00

e765276c3f  [#187] Add total bytes to report to 0.38
            Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
            2024-02-27 12:03:20 +03:00

b464591153  [TrueCloudLab/xk6-frostfs#125] Add acl option
            Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
            2024-02-05 15:50:38 +00:00

c978f55e90  [#170] Update metrics
            Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
            2024-02-05 15:42:37 +00:00

2255ee465f  [#173] Add flag to remove registry file
            Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
            2024-02-05 12:41:29 +03:00

2ec24f4cd1  [#168] Strip components for new xk6 archive and update unit tests for 0.38
            Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
            2024-01-26 13:36:44 +03:00

2da1a4583f  [#165] Add local flag to preset in load
            Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
            2024-01-22 19:08:30 +03:00

cda3773fa8  [#163] Refactor frostfs-cli functional
            Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
            2024-01-22 14:26:25 +03:00
18 changed files with 110 additions and 49 deletions


@@ -65,7 +65,6 @@ class FrostfsCliContainer(CliCommand):
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
force: bool = False,
- timeout: Optional[str] = None,
) -> CommandResult:
"""
Delete an existing container.
@@ -81,7 +80,6 @@ class FrostfsCliContainer(CliCommand):
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
- timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
@@ -298,9 +296,5 @@ class FrostfsCliContainer(CliCommand):
return self._execute(
f"container nodes {from_str}",
- **{
-     param: value
-     for param, value in locals().items()
-     if param not in ["self", "from_file", "from_str"]
- },
+ **{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]},
)
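
Both the container and object wrappers lean on the same idiom these hunks reformat: forward every local argument except a short deny-list to the CLI via locals(). A minimal standalone sketch of the pattern; the class and flag rendering below are illustrative, not the library's real code:

from typing import Optional


class CliStub:
    def _execute(self, command: str, **params) -> str:
        # Render keyword arguments as CLI flags, skipping unset values.
        flags = " ".join(f"--{name} {value}" for name, value in params.items() if value is not None)
        return f"frostfs-cli {command} {flags}".strip()

    def delete(self, rpc_endpoint: str, cid: str, force: bool = False, ttl: Optional[int] = None) -> str:
        # The outermost iterable of a comprehension is evaluated in the
        # enclosing scope, so locals() here still sees the method arguments.
        return self._execute(
            "container delete",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )


print(CliStub().delete("localhost:8080", "someCid", force=True))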


@@ -124,9 +124,7 @@ class FrostfsCliObject(CliCommand):
"""
return self._execute(
"object hash",
- **{
-     param: value for param, value in locals().items() if param not in ["self", "params"]
- },
+ **{param: value for param, value in locals().items() if param not in ["self", "params"]},
)
def head(
@@ -355,8 +353,8 @@ class FrostfsCliObject(CliCommand):
def nodes(
self,
rpc_endpoint: str,
- wallet: str,
cid: str,
+ wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional = None,


@@ -1,5 +1,5 @@
class Options:
- DEFAULT_SHELL_TIMEOUT = 90
+ DEFAULT_SHELL_TIMEOUT = 120
@staticmethod
def get_default_shell_timeout():


@@ -50,6 +50,7 @@ class SummarizedStats:
throughput: float = field(default_factory=float)
latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies)
errors: SummarizedErorrs = field(default_factory=SummarizedErorrs)
+ total_bytes: int = field(default_factory=int)
passed: bool = True
def calc_stats(self):
@@ -85,6 +86,7 @@ class SummarizedStats:
target.latencies.by_node[node_key] = operation.latency
target.throughput += operation.throughput
target.errors.threshold = load_params.error_threshold
+ target.total_bytes = operation.total_bytes
if operation.failed_iterations:
target.errors.by_node[node_key] = operation.failed_iterations
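
For orientation, a trimmed sketch of how the new total_bytes field travels from a per-operation metric into the summary; everything except total_bytes is a stand-in for the real classes:

from dataclasses import dataclass, field


@dataclass
class OperationSketch:
    # stand-in for the per-node operation metrics
    throughput: float
    total_bytes: int


@dataclass
class SummarySketch:
    # trimmed stand-in for SummarizedStats
    throughput: float = field(default_factory=float)
    total_bytes: int = field(default_factory=int)

    def absorb(self, operation: OperationSketch) -> None:
        self.throughput += operation.throughput
        # Mirrors the diff: plain assignment, not accumulation per node.
        self.total_bytes = operation.total_bytes


summary = SummarySketch()
summary.absorb(OperationSketch(throughput=12.5, total_bytes=1024))
print(summary)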


@@ -4,6 +4,7 @@ import math
import os
from dataclasses import dataclass
from datetime import datetime
+ from threading import Event
from time import sleep
from typing import Any
from urllib.parse import urlparse
@@ -73,14 +74,16 @@ class K6:
self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user, process_id)
def _get_fill_percents(self):
- fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs").stdout.split("\n")
+ fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split(
+     "\n"
+ )
return [line.split() for line in fill_percents][:-1]
def check_fill_percent(self):
fill_percents = self._get_fill_percents()
percent_mean = 0
for line in fill_percents:
- percent_mean += float(line[1].split('%')[0])
+ percent_mean += float(line[1].split("%")[0])
percent_mean = percent_mean / len(fill_percents)
logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}")
return percent_mean >= self.load_params.fill_percent
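
The narrowed grep now matches only the data mounts. A self-contained sketch of the same parsing, fed canned df output instead of a live shell; the device and mount paths are invented:

# Canned output in the shape of `df -H --output=source,pcent,target`.
sample_df_output = """/dev/sdb1  45%  /srv/frostfs/data
/dev/sdc1  55%  /srv/frostfs/data1
"""

rows = [line.split() for line in sample_df_output.split("\n")][:-1]
percent_mean = sum(float(row[1].split("%")[0]) for row in rows) / len(rows)
print(f"mean fill percent: {percent_mean}")  # 50.0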
@@ -145,7 +148,7 @@ class K6:
with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"):
self._k6_process.start()
- def wait_until_finished(self, event, soft_timeout: int = 0) -> None:
+ def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None:
with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"):
if self.load_params.scenario == LoadScenario.VERIFY:
timeout = self.load_params.verify_time or 0
@@ -193,7 +196,9 @@ class K6:
if not self.load_params.fill_percent is None:
with reporter.step(f"Check the percentage of filling of all data disks on the node"):
if self.check_fill_percent():
- logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%")
+ logger.info(
+     f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%"
+ )
event.set()
self.stop()
return
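
wait_until_finished now takes a typed threading.Event: the worker that hits the fill threshold sets it, and every other worker polling it winds down. A minimal sketch of that cooperation, with illustrative names:

from threading import Event, Thread
from time import sleep


def wait_until_finished(event: Event, wait_interval: float = 0.1) -> None:
    # Poll the shared event the way the load wrappers do.
    while not event.is_set():
        sleep(wait_interval)
    print("stop signal received, stopping load")


stop_event = Event()
worker = Thread(target=wait_until_finished, args=(stop_event,))
worker.start()
stop_event.set()  # e.g. after check_fill_percent() reports full disks
worker.join()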


@@ -147,6 +147,8 @@ class Preset:
pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
# Workers count for preset
workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
+ # Acl for container/buckets
+ acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)
# ------ GRPC ------
# Amount of containers which should be created
@@ -166,6 +168,9 @@ class Preset:
# Flag to control preset erorrs
ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)
+ # Flag to ensure created containers store data on local endpoints
+ local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False)
@dataclass
class LoadParams:
@@ -232,6 +237,8 @@ class LoadParams:
registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False)
# In case if we want to use custom registry file left from another load run
custom_registry: Optional[str] = None
+ # In case if we want to use custom registry file left from another load run
+ force_fresh_registry: Optional[bool] = None
# Specifies the minimum duration of every single execution (i.e. iteration).
# Any iterations that are shorter than this value will cause that VU to
# sleep for the remainder of the time until the specified minimum duration is reached.
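
The new acl and local fields follow this file's metadata_field convention: a dataclass field whose metadata names the preset CLI flag it becomes. A simplified, self-contained sketch of the mechanism; preset_field and the flag rendering are stand-ins, not the library's real helper:

from dataclasses import dataclass, field, fields
from typing import Optional


def preset_field(flag: str):
    # Simplified stand-in for metadata_field: remember the preset flag name.
    return field(default=None, metadata={"preset_argument": flag})


@dataclass
class PresetSketch:
    workers: Optional[int] = preset_field("workers")
    acl: Optional[str] = preset_field("acl")
    local: Optional[bool] = preset_field("local")

    def get_preset_arguments(self) -> list[str]:
        args = []
        for f in fields(self):
            value = getattr(self, f.name)
            if value is None or value is False:
                continue
            flag = f.metadata["preset_argument"]
            # Booleans become bare flags, everything else is quoted.
            args.append(f"--{flag}" if value is True else f"--{flag} '{value}'")
        return args


print(PresetSketch(workers=7, acl="acl", local=True).get_preset_arguments())
# ["--workers '7'", "--acl 'acl'", '--local']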


@@ -39,6 +39,10 @@ class OperationMetric(ABC):
def throughput(self) -> float:
return self._get_metric_rate(self._THROUGHPUT)
+ @property
+ def total_bytes(self) -> float:
+     return self._get_metric(self._THROUGHPUT)
def _get_metric(self, metric: str) -> int:
metrics_method_map = {
"counter": self._get_counter_metric,
@@ -107,66 +111,66 @@ class DeleteOperationMetric(OperationMetric):
class GrpcWriteOperationMetric(WriteOperationMetric):
- _SUCCESS = "frostfs_obj_put_total"
+ _SUCCESS = "frostfs_obj_put_success"
_ERRORS = "frostfs_obj_put_fails"
_LATENCY = "frostfs_obj_put_duration"
class GrpcReadOperationMetric(ReadOperationMetric):
- _SUCCESS = "frostfs_obj_get_total"
+ _SUCCESS = "frostfs_obj_get_success"
_ERRORS = "frostfs_obj_get_fails"
_LATENCY = "frostfs_obj_get_duration"
class GrpcDeleteOperationMetric(DeleteOperationMetric):
- _SUCCESS = "frostfs_obj_delete_total"
+ _SUCCESS = "frostfs_obj_delete_success"
_ERRORS = "frostfs_obj_delete_fails"
_LATENCY = "frostfs_obj_delete_duration"
class S3WriteOperationMetric(WriteOperationMetric):
- _SUCCESS = "aws_obj_put_total"
+ _SUCCESS = "aws_obj_put_success"
_ERRORS = "aws_obj_put_fails"
_LATENCY = "aws_obj_put_duration"
class S3ReadOperationMetric(ReadOperationMetric):
- _SUCCESS = "aws_obj_get_total"
+ _SUCCESS = "aws_obj_get_success"
_ERRORS = "aws_obj_get_fails"
_LATENCY = "aws_obj_get_duration"
class S3DeleteOperationMetric(DeleteOperationMetric):
- _SUCCESS = "aws_obj_delete_total"
+ _SUCCESS = "aws_obj_delete_success"
_ERRORS = "aws_obj_delete_fails"
_LATENCY = "aws_obj_delete_duration"
class S3LocalWriteOperationMetric(WriteOperationMetric):
- _SUCCESS = "s3local_obj_put_total"
+ _SUCCESS = "s3local_obj_put_success"
_ERRORS = "s3local_obj_put_fails"
_LATENCY = "s3local_obj_put_duration"
class S3LocalReadOperationMetric(ReadOperationMetric):
- _SUCCESS = "s3local_obj_get_total"
+ _SUCCESS = "s3local_obj_get_success"
_ERRORS = "s3local_obj_get_fails"
_LATENCY = "s3local_obj_get_duration"
class LocalWriteOperationMetric(WriteOperationMetric):
- _SUCCESS = "local_obj_put_total"
+ _SUCCESS = "local_obj_put_success"
_ERRORS = "local_obj_put_fails"
_LATENCY = "local_obj_put_duration"
class LocalReadOperationMetric(ReadOperationMetric):
- _SUCCESS = "local_obj_get_total"
+ _SUCCESS = "local_obj_get_success"
_ERRORS = "local_obj_get_fails"
class LocalDeleteOperationMetric(DeleteOperationMetric):
- _SUCCESS = "local_obj_delete_total"
+ _SUCCESS = "local_obj_delete_success"
_ERRORS = "local_obj_delete_fails"
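
The rename switches every _SUCCESS counter from *_total to *_success, and the new total_bytes property reads the raw value of the throughput counter while throughput reads its rate. A sketch of that counter/rate split; the metric payload below is invented, and the real classes resolve metric types through _get_metric's dispatch map:

# Invented k6-style summary: a counter exposes a cumulative count and a rate.
metrics = {
    "frostfs_obj_put_success": {"count": 1500, "rate": 25.0},
    "frostfs_obj_put_bytes": {"count": 10 * 1024**3, "rate": 180 * 1024**2},
}


def get_metric(name: str) -> int:
    # Cumulative counter value: for a byte counter this is total bytes moved.
    return metrics[name]["count"]


def get_metric_rate(name: str) -> float:
    # Per-second rate of the same counter: the throughput figure.
    return metrics[name]["rate"]


print(get_metric("frostfs_obj_put_bytes"))       # total bytes
print(get_metric_rate("frostfs_obj_put_bytes"))  # bytes per second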


@@ -120,6 +120,11 @@ class LoadReport:
throughput, unit = calc_unit(stats.throughput)
throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec")
+ bytes_html = ""
+ if stats.total_bytes > 0:
+     total_bytes, total_bytes_unit = calc_unit(stats.total_bytes)
+     bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}")
per_node_errors_html = ""
for node_key, errors in stats.errors.by_node.items():
if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT:
@@ -148,6 +153,7 @@ class LoadReport:
<tr><th colspan="2" bgcolor="gainsboro">Metrics</th></tr>
{self._row("Total operations", stats.operations)}
{self._row("OP/sec", f"{stats.rate:.2f}")}
+ {bytes_html}
{throughput_html}
{latency_html}
<tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
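
calc_unit is assumed here to scale a raw byte count into a value/unit pair, which the new "Total transferred" row reuses. A sketch of that scaling under a divide-by-1024 assumption; the real helper may differ in unit labels and rounding:

def calc_unit(value: float) -> tuple[float, str]:
    # Divide by 1024 until the value fits the next unit label.
    for unit in ["B", "KiB", "MiB", "GiB", "TiB"]:
        if value < 1024:
            return value, unit
        value /= 1024
    return value, "PiB"


total_bytes, unit = calc_unit(3 * 1024**3 + 512 * 1024**2)
print(f"Total transferred: {total_bytes:.2f} {unit}")  # Total transferred: 3.50 GiB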


@@ -57,6 +57,8 @@ class LoadVerifier:
invalid_objects = verify_metrics.read.failed_iterations
total_left_objects = load_metrics.write.success_iterations - delete_success
+ if invalid_objects > 0:
+     issues.append(f"There were {invalid_objects} verification fails (hash mismatch).")
# Due to interruptions we may see total verified objects to be less than written on writers count
if abs(total_left_objects - verified_objects) > writers:
issues.append(


@@ -4,6 +4,7 @@ import math
import re
import time
from dataclasses import fields
+ from threading import Event
from typing import Optional
from urllib.parse import urlparse
@@ -30,7 +31,6 @@ from frostfs_testlib.testing import parallel, run_optionally
from frostfs_testlib.testing.test_control import retry
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.file_keeper import FileKeeper
- from threading import Event
class RunnerBase(ScenarioRunner):
@@ -78,6 +78,10 @@ class DefaultRunner(RunnerBase):
nodes_under_load: list[ClusterNode],
k6_dir: str,
):
+ if load_params.force_fresh_registry and load_params.custom_registry:
+     with reporter.step("Forcing fresh registry files"):
+         parallel(self._force_fresh_registry, self.loaders, load_params)
if load_params.load_type != LoadType.S3:
return
@@ -88,6 +92,11 @@ class DefaultRunner(RunnerBase):
parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir)
+ def _force_fresh_registry(self, loader: Loader, load_params: LoadParams):
+     with reporter.step(f"Forcing fresh registry on {loader.ip}"):
+         shell = loader.get_shell()
+         shell.exec(f"rm -f {load_params.registry_file}")
def _prepare_loader(
self,
loader: Loader,
@@ -314,7 +323,7 @@ class LocalRunner(RunnerBase):
with reporter.step("Download K6"):
shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
- shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}")
+ shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}")
shell.exec(f"sudo chmod -R 777 {k6_dir}")
with reporter.step("Create empty_passwd"):


@@ -9,4 +9,4 @@ FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm")
# Config for frostfs-adm utility. Optional if tests are running against devenv
FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")
- CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None)
+ CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s")


@@ -719,7 +719,10 @@ class AwsCliClient(S3ClientWrapper):
f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} "
f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
)
- self.local_shell.exec(cmd)
+ output = self.local_shell.exec(cmd).stdout
+ response = self._to_json(output)
+ return response
@reporter.step("Put object lock configuration")
def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
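
Previously the AWS CLI output of complete-multipart-upload was discarded; callers can now assert on the parsed response. A minimal stub of the before/after shape; the canned shell output is invented, and the real method shells out to aws s3api:

import json


class AwsCliStub:
    def _exec(self, cmd: str) -> str:
        # Stand-in for local_shell.exec(cmd).stdout with a canned response.
        return '{"Bucket": "test-bucket", "Key": "big-object", "ETag": "\\"abc123\\""}'

    def complete_multipart_upload(self, bucket: str, key: str, upload_id: str) -> dict:
        cmd = (
            f"aws s3api complete-multipart-upload --bucket {bucket} "
            f"--key {key} --upload-id {upload_id}"
        )
        output = self._exec(cmd)
        return json.loads(output)  # returned instead of being thrown away


response = AwsCliStub().complete_multipart_upload("test-bucket", "big-object", "upload-1")
assert response["ETag"]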


@@ -554,6 +554,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
)
log_command_execution("S3 Complete multipart upload", response)
+ return response
@reporter.step("Put object retention")
@report_error
def put_object_retention(


@@ -1,11 +1,12 @@
import json
import logging
import re
- import requests
from dataclasses import dataclass
from time import sleep
from typing import Optional, Union
+ import requests
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
@@ -291,18 +292,17 @@ def delete_container(
force: bool = False,
session_token: Optional[str] = None,
await_mode: bool = False,
- timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> None:
"""
A wrapper for `frostfs-cli container delete` call.
Args:
await_mode: Block execution until container is removed.
wallet (str): path to a wallet on whose behalf we delete the container
cid (str): ID of the container to delete
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
force (bool): do not check whether container contains locks and remove immediately
session_token: a path to session token file
- timeout: Timeout for the operation.
This function doesn't return anything.
"""
@@ -314,7 +314,6 @@ def delete_container(
force=force,
session=session_token,
await_mode=await_mode,
- timeout=timeout,
)


@@ -732,23 +732,24 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
@reporter.step("Search object nodes")
def get_object_nodes(
cluster: Cluster,
- wallet: str,
cid: str,
oid: str,
- shell: Shell,
- endpoint: str,
+ alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
- wallet_config: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[ClusterNode]:
- cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
+ shell = alive_node.host.get_shell()
+ endpoint = alive_node.storage_node.get_rpc_endpoint()
+ wallet = alive_node.storage_node.get_remote_wallet_path()
+ wallet_config = alive_node.storage_node.get_remote_wallet_config_path()
+ cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config)
result_object_nodes = cli.object.nodes(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
bearer=bearer,


@@ -8,7 +8,8 @@ class ConfigAttributes:
SHARD_CONFIG_PATH = "shard_config_path"
LOGGER_CONFIG_PATH = "logger_config_path"
LOCAL_WALLET_PATH = "local_wallet_path"
- LOCAL_WALLET_CONFIG = "local_config_path"
+ LOCAL_WALLET_CONFIG = "local_wallet_config_path"
+ REMOTE_WALLET_CONFIG = "remote_wallet_config_path"
ENDPOINT_DATA_0 = "endpoint_data0"
ENDPOINT_DATA_1 = "endpoint_data1"
ENDPOINT_INTERNAL = "endpoint_internal0"


@@ -114,6 +114,14 @@ class NodeBase(HumanReadableABC):
ConfigAttributes.CONFIG_PATH,
)
+ def get_remote_wallet_config_path(self) -> str:
+     """
+     Returns node config file path located on remote host
+     """
+     return self._get_attribute(
+         ConfigAttributes.REMOTE_WALLET_CONFIG,
+     )
def get_wallet_config_path(self) -> str:
return self._get_attribute(
ConfigAttributes.LOCAL_WALLET_CONFIG,
@@ -125,8 +133,11 @@ class NodeBase(HumanReadableABC):
Returns config path for logger located on remote host
"""
config_attributes = self.host.get_service_config(self.name)
- return self._get_attribute(
-     ConfigAttributes.LOGGER_CONFIG_PATH) if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes else None
+ return (
+     self._get_attribute(ConfigAttributes.LOGGER_CONFIG_PATH)
+     if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes
+     else None
+ )
@property
def config_dir(self) -> str:


@@ -136,6 +136,7 @@ class TestLoadConfig:
def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams):
expected_preset_args = [
"--size '11'",
+ "--acl 'acl'",
"--preload_obj '13'",
"--out 'pregen_json'",
"--workers '7'",
@@ -143,6 +144,7 @@
"--policy 'container_placement_policy'",
"--ignore-errors",
"--sleep '19'",
+ "--local",
]
expected_env_vars = {
"DURATION": 9,
@@ -172,6 +174,8 @@
"--policy 'container_placement_policy'",
"--ignore-errors",
"--sleep '19'",
+ "--local",
+ "--acl 'acl'",
]
expected_env_vars = {
"DURATION": 9,
@@ -209,6 +213,7 @@
"--location 's3_location'",
"--ignore-errors",
"--sleep '19'",
+ "--acl 'acl'",
]
expected_env_vars = {
"DURATION": 9,
@@ -240,6 +245,7 @@
"--location 's3_location'",
"--ignore-errors",
"--sleep '19'",
+ "--acl 'acl'",
]
expected_env_vars = {
"DURATION": 183900,
@@ -277,6 +283,7 @@
"--location 's3_location'",
"--ignore-errors",
"--sleep '19'",
+ "--acl 'acl'",
]
expected_env_vars = {
"DURATION": 9,
@@ -304,6 +311,7 @@
@pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True)
def test_argument_parsing_for_http_scenario(self, load_params: LoadParams):
+ load_params.preset.local = False
expected_preset_args = [
"--no-verify-ssl",
"--size '11'",
@@ -314,6 +322,7 @@
"--policy 'container_placement_policy'",
"--ignore-errors",
"--sleep '19'",
+ "--acl 'acl'",
]
expected_env_vars = {
"DURATION": 9,
@@ -334,6 +343,7 @@
@pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True)
def test_argument_parsing_for_local_scenario(self, load_params: LoadParams):
+ load_params.preset.local = False
expected_preset_args = [
"--size '11'",
"--preload_obj '13'",
@@ -343,6 +353,7 @@
"--policy 'container_placement_policy'",
"--ignore-errors",
"--sleep '19'",
+ "--acl 'acl'",
]
expected_env_vars = {
"CONFIG_FILE": "config_file",
@@ -395,6 +406,7 @@
"--containers '0'",
"--policy ''",
"--sleep '0'",
+ "--acl ''",
]
expected_env_vars = {
"DURATION": 0,
@@ -423,6 +435,7 @@
"--containers '0'",
"--policy ''",
"--sleep '0'",
+ "--acl ''",
]
expected_env_vars = {
"DURATION": 0,
@@ -458,6 +471,7 @@
"--buckets '0'",
"--location ''",
"--sleep '0'",
+ "--acl ''",
]
expected_env_vars = {
"DURATION": 0,
@@ -486,6 +500,7 @@
"--buckets '0'",
"--location ''",
"--sleep '0'",
+ "--acl ''",
]
expected_env_vars = {
"DURATION": 0,
@@ -521,6 +536,7 @@
"--containers '0'",
"--policy ''",
"--sleep '0'",
+ "--acl ''",
]
expected_env_vars = {
"DURATION": 0,
@@ -549,6 +565,7 @@
"--containers '0'",
"--policy ''",
"--sleep '0'",
+ "--acl ''",
]
expected_env_vars = {
"CONFIG_FILE": "",