forked from TrueCloudLab/frostfs-testlib

Compare commits: master...support/v0 (10 commits)

180907b154
3dc29d59f6
aa8c83b682
e765276c3f
b464591153
c978f55e90
2255ee465f
2ec24f4cd1
2da1a4583f
cda3773fa8

18 changed files with 110 additions and 49 deletions
@@ -65,7 +65,6 @@ class FrostfsCliContainer(CliCommand):
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         force: bool = False,
-        timeout: Optional[str] = None,
     ) -> CommandResult:
         """
         Delete an existing container.
@@ -81,7 +80,6 @@ class FrostfsCliContainer(CliCommand):
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
-            timeout: Timeout for the operation (default 15s).

         Returns:
             Command's result.
@@ -298,9 +296,5 @@ class FrostfsCliContainer(CliCommand):

         return self._execute(
             f"container nodes {from_str}",
-            **{
-                param: value
-                for param, value in locals().items()
-                if param not in ["self", "from_file", "from_str"]
-            },
+            **{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]},
         )
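The hunks above collapse the kwargs-forwarding comprehension onto one line. The pattern itself is worth spelling out: because the outermost iterable of a comprehension is evaluated in the enclosing scope, `locals()` there snapshots the wrapper method's own parameters, so every argument reaches `_execute` without being listed twice. A self-contained sketch (the class and the flag rendering are illustrative stand-ins, not the testlib's code):

from typing import Optional


class Cli:
    def _execute(self, command: str, **params) -> str:
        # Stand-in for CliCommand._execute: renders non-None params as flags.
        flags = " ".join(f"--{name} {value}" for name, value in params.items() if value is not None)
        return f"{command} {flags}".strip()

    def delete(self, cid: str, ttl: Optional[int] = None, force: bool = False) -> str:
        # locals() is evaluated in this method's scope (the outermost iterable
        # of a comprehension runs in the enclosing frame), so every parameter
        # is forwarded automatically; "self" must be filtered out, as above.
        return self._execute(
            "container delete",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )


print(Cli().delete("abc", ttl=2, force=True))  # container delete --cid abc --ttl 2 --force True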
@@ -124,9 +124,7 @@ class FrostfsCliObject(CliCommand):
         """
         return self._execute(
             "object hash",
-            **{
-                param: value for param, value in locals().items() if param not in ["self", "params"]
-            },
+            **{param: value for param, value in locals().items() if param not in ["self", "params"]},
         )

     def head(
@@ -355,8 +353,8 @@ class FrostfsCliObject(CliCommand):
     def nodes(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
         generate_key: Optional = None,
@@ -1,5 +1,5 @@
 class Options:
-    DEFAULT_SHELL_TIMEOUT = 90
+    DEFAULT_SHELL_TIMEOUT = 120

     @staticmethod
     def get_default_shell_timeout():
@@ -50,6 +50,7 @@ class SummarizedStats:
     throughput: float = field(default_factory=float)
     latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies)
     errors: SummarizedErorrs = field(default_factory=SummarizedErorrs)
+    total_bytes: int = field(default_factory=int)
     passed: bool = True

     def calc_stats(self):
@@ -85,6 +86,7 @@ class SummarizedStats:
             target.latencies.by_node[node_key] = operation.latency
             target.throughput += operation.throughput
             target.errors.threshold = load_params.error_threshold
+            target.total_bytes = operation.total_bytes
             if operation.failed_iterations:
                 target.errors.by_node[node_key] = operation.failed_iterations
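`field(default_factory=int)` gives each `SummarizedStats` instance its own zero-initialized counter. A minimal sketch of how the new field participates in aggregation (simplified stand-ins for the real dataclasses; note the diff assigns `total_bytes` rather than summing it):

from dataclasses import dataclass, field


@dataclass
class Stats:
    throughput: float = field(default_factory=float)  # 0.0 per instance
    total_bytes: int = field(default_factory=int)     # 0 per instance


@dataclass
class Operation:
    throughput: float
    total_bytes: int


def summarize(operations: list[Operation]) -> Stats:
    target = Stats()
    for op in operations:
        target.throughput += op.throughput
        target.total_bytes = op.total_bytes  # assigned, not accumulated, as in the hunk above
    return target


print(summarize([Operation(10.0, 1024), Operation(5.0, 2048)]))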
@@ -4,6 +4,7 @@ import math
 import os
 from dataclasses import dataclass
 from datetime import datetime
+from threading import Event
 from time import sleep
 from typing import Any
 from urllib.parse import urlparse
@@ -73,14 +74,16 @@ class K6:
         self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user, process_id)

     def _get_fill_percents(self):
-        fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs").stdout.split("\n")
+        fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split(
+            "\n"
+        )
         return [line.split() for line in fill_percents][:-1]

     def check_fill_percent(self):
         fill_percents = self._get_fill_percents()
         percent_mean = 0
         for line in fill_percents:
-            percent_mean += float(line[1].split('%')[0])
+            percent_mean += float(line[1].split("%")[0])
         percent_mean = percent_mean / len(fill_percents)
         logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}")
         return percent_mean >= self.load_params.fill_percent
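The `_get_fill_percents` change narrows the `df` match from any frostfs mount to data disks only (the extra `| grep data`). The parsing itself is easy to check locally against a canned `df -H --output=source,pcent,target` sample (device names and mount points below are made up):

SAMPLE_DF_OUTPUT = """\
/dev/sdb1  42%  /srv/frostfs/data0
/dev/sdc1  57%  /srv/frostfs/data1
"""


def get_fill_percents(df_output: str) -> list[list[str]]:
    # Mirrors the diff: split into lines, split each line into columns,
    # drop the trailing empty element produced by the final newline.
    lines = df_output.split("\n")
    return [line.split() for line in lines][:-1]


def mean_fill_percent(rows: list[list[str]]) -> float:
    return sum(float(row[1].split("%")[0]) for row in rows) / len(rows)


rows = get_fill_percents(SAMPLE_DF_OUTPUT)
print(mean_fill_percent(rows))  # 49.5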
@@ -145,7 +148,7 @@ class K6:
         with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"):
             self._k6_process.start()

-    def wait_until_finished(self, event, soft_timeout: int = 0) -> None:
+    def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None:
         with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"):
             if self.load_params.scenario == LoadScenario.VERIFY:
                 timeout = self.load_params.verify_time or 0
@@ -188,23 +191,25 @@ class K6:
         wait_interval = min_wait_interval
         if self._k6_process is None:
             assert "No k6 instances were executed"

         while timeout > 0:
             if not self.load_params.fill_percent is None:
                 with reporter.step(f"Check the percentage of filling of all data disks on the node"):
                     if self.check_fill_percent():
-                        logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%")
+                        logger.info(
+                            f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%"
+                        )
                         event.set()
                         self.stop()
                         return

             if event.is_set():
                 self.stop()
                 return

             if not self._k6_process.running():
                 return

             remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else ""
             remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else ""
             logger.info(
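Typing the `event` parameter as `threading.Event` matches how the runner and `wait_until_finished` now cooperate: the first loader whose data disks pass the fill threshold sets the shared event, and every other waiter observes it and stops. A minimal standalone sketch of that signaling, with hypothetical worker names:

import threading
import time


def wait_until_finished(event: threading.Event, name: str, timeout: float = 5.0) -> None:
    # Poll until someone signals the shared event or the timeout elapses.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if event.is_set():
            print(f"{name}: stop requested, shutting down")
            return
        time.sleep(0.1)
    print(f"{name}: finished normally")


stop = threading.Event()
workers = [threading.Thread(target=wait_until_finished, args=(stop, f"k6-{i}")) for i in range(3)]
for w in workers:
    w.start()

time.sleep(0.3)
stop.set()  # e.g. the fill_percent threshold was reached on one node
for w in workers:
    w.join()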
@@ -147,6 +147,8 @@ class Preset:
     pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
     # Workers count for preset
     workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
+    # Acl for container/buckets
+    acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)

     # ------ GRPC ------
     # Amount of containers which should be created
@@ -166,6 +168,9 @@ class Preset:
     # Flag to control preset erorrs
     ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)

+    # Flag to ensure created containers store data on local endpoints
+    local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False)
+

 @dataclass
 class LoadParams:
@@ -232,6 +237,8 @@ class LoadParams:
     registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False)
     # In case if we want to use custom registry file left from another load run
     custom_registry: Optional[str] = None
+    # In case if we want to use custom registry file left from another load run
+    force_fresh_registry: Optional[bool] = None
     # Specifies the minimum duration of every single execution (i.e. iteration).
     # Any iterations that are shorter than this value will cause that VU to
     # sleep for the remainder of the time until the specified minimum duration is reached.
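The new `acl` and `local` fields follow the existing `metadata_field` convention, where a dataclass field's metadata names the preset CLI flag it feeds. The real helper lives in the testlib; the sketch below is an illustrative reimplementation of the idea only, not the library's `metadata_field`:

from dataclasses import dataclass, field, fields
from typing import Optional


def preset_field(flag: str):
    # Illustrative stand-in for metadata_field: remembers the preset flag name.
    return field(default=None, metadata={"preset_argument": flag})


@dataclass
class Preset:
    workers: Optional[int] = preset_field("workers")
    acl: Optional[str] = preset_field("acl")
    local: Optional[bool] = preset_field("local")


def preset_args(preset: Preset) -> list[str]:
    args = []
    for f in fields(preset):
        value = getattr(preset, f.name)
        flag = f.metadata["preset_argument"]
        if isinstance(value, bool):
            if value:
                args.append(f"--{flag}")        # booleans become bare flags
        elif value is not None:
            args.append(f"--{flag} '{value}'")  # everything else is quoted
    return args


print(preset_args(Preset(workers=7, acl="acl", local=True)))
# ["--workers '7'", "--acl 'acl'", '--local']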
@@ -39,6 +39,10 @@ class OperationMetric(ABC):
     def throughput(self) -> float:
         return self._get_metric_rate(self._THROUGHPUT)

+    @property
+    def total_bytes(self) -> float:
+        return self._get_metric(self._THROUGHPUT)
+
     def _get_metric(self, metric: str) -> int:
         metrics_method_map = {
             "counter": self._get_counter_metric,
@@ -107,66 +111,66 @@ class DeleteOperationMetric(OperationMetric):


 class GrpcWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "frostfs_obj_put_total"
+    _SUCCESS = "frostfs_obj_put_success"
     _ERRORS = "frostfs_obj_put_fails"
     _LATENCY = "frostfs_obj_put_duration"


 class GrpcReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "frostfs_obj_get_total"
+    _SUCCESS = "frostfs_obj_get_success"
     _ERRORS = "frostfs_obj_get_fails"
     _LATENCY = "frostfs_obj_get_duration"


 class GrpcDeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "frostfs_obj_delete_total"
+    _SUCCESS = "frostfs_obj_delete_success"
     _ERRORS = "frostfs_obj_delete_fails"
     _LATENCY = "frostfs_obj_delete_duration"


 class S3WriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "aws_obj_put_total"
+    _SUCCESS = "aws_obj_put_success"
     _ERRORS = "aws_obj_put_fails"
     _LATENCY = "aws_obj_put_duration"


 class S3ReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "aws_obj_get_total"
+    _SUCCESS = "aws_obj_get_success"
     _ERRORS = "aws_obj_get_fails"
     _LATENCY = "aws_obj_get_duration"


 class S3DeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "aws_obj_delete_total"
+    _SUCCESS = "aws_obj_delete_success"
     _ERRORS = "aws_obj_delete_fails"
     _LATENCY = "aws_obj_delete_duration"


 class S3LocalWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "s3local_obj_put_total"
+    _SUCCESS = "s3local_obj_put_success"
     _ERRORS = "s3local_obj_put_fails"
     _LATENCY = "s3local_obj_put_duration"


 class S3LocalReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "s3local_obj_get_total"
+    _SUCCESS = "s3local_obj_get_success"
     _ERRORS = "s3local_obj_get_fails"
     _LATENCY = "s3local_obj_get_duration"


 class LocalWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "local_obj_put_total"
+    _SUCCESS = "local_obj_put_success"
     _ERRORS = "local_obj_put_fails"
     _LATENCY = "local_obj_put_duration"


 class LocalReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "local_obj_get_total"
+    _SUCCESS = "local_obj_get_success"
     _ERRORS = "local_obj_get_fails"


 class LocalDeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "local_obj_delete_total"
+    _SUCCESS = "local_obj_delete_success"
     _ERRORS = "local_obj_delete_fails"
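The new `total_bytes` property reuses the `_THROUGHPUT` metric name but reads it through `_get_metric` (the raw counter value) instead of `_get_metric_rate`: a k6-style counter carries both a running total and a per-second rate. A sketch of that dual read over a hypothetical summary dict (metric name and shape are assumptions for illustration):

# Hypothetical k6 summary fragment: a counter metric exposes both the
# accumulated count and its rate.
SUMMARY = {
    "frostfs_obj_put_bytes": {"type": "counter", "count": 10_485_760, "rate": 1_747_626.0},
}


class OperationMetric:
    _THROUGHPUT = "frostfs_obj_put_bytes"

    def __init__(self, summary: dict):
        self.summary = summary

    def _get_metric(self, metric: str) -> int:
        # "counter" metrics report an absolute total under "count".
        return self.summary[metric]["count"]

    def _get_metric_rate(self, metric: str) -> float:
        # The same counter also reports a per-second rate.
        return self.summary[metric]["rate"]

    @property
    def throughput(self) -> float:
        return self._get_metric_rate(self._THROUGHPUT)

    @property
    def total_bytes(self) -> float:
        return self._get_metric(self._THROUGHPUT)


m = OperationMetric(SUMMARY)
print(m.throughput, m.total_bytes)  # 1747626.0 10485760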
@@ -120,6 +120,11 @@ class LoadReport:
         throughput, unit = calc_unit(stats.throughput)
         throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec")

+        bytes_html = ""
+        if stats.total_bytes > 0:
+            total_bytes, total_bytes_unit = calc_unit(stats.total_bytes)
+            bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}")
+
         per_node_errors_html = ""
         for node_key, errors in stats.errors.by_node.items():
             if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT:
@@ -148,6 +153,7 @@ class LoadReport:
             <tr><th colspan="2" bgcolor="gainsboro">Metrics</th></tr>
             {self._row("Total operations", stats.operations)}
             {self._row("OP/sec", f"{stats.rate:.2f}")}
+            {bytes_html}
             {throughput_html}
             {latency_html}
             <tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
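`calc_unit` scales a raw byte count into a human-readable figure for the new "Total transferred" row. Its exact implementation isn't shown in this diff; a plausible binary-unit equivalent:

def calc_unit(value: float) -> tuple[float, str]:
    # Divide by 1024 until the value fits under the next binary unit.
    for unit in ["B", "KiB", "MiB", "GiB", "TiB"]:
        if value < 1024:
            return value, unit
        value /= 1024
    return value, "PiB"


total_bytes, unit = calc_unit(3_221_225_472)
print(f"Total transferred: {total_bytes:.2f} {unit}")  # Total transferred: 3.00 GiB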
@@ -57,6 +57,8 @@ class LoadVerifier:
         invalid_objects = verify_metrics.read.failed_iterations
         total_left_objects = load_metrics.write.success_iterations - delete_success

+        if invalid_objects > 0:
+            issues.append(f"There were {invalid_objects} verification fails (hash mismatch).")
         # Due to interruptions we may see total verified objects to be less than written on writers count
         if abs(total_left_objects - verified_objects) > writers:
             issues.append(
@@ -4,6 +4,7 @@ import math
 import re
 import time
 from dataclasses import fields
+from threading import Event
 from typing import Optional
 from urllib.parse import urlparse
@@ -30,7 +31,6 @@ from frostfs_testlib.testing import parallel, run_optionally
 from frostfs_testlib.testing.test_control import retry
 from frostfs_testlib.utils import datetime_utils
 from frostfs_testlib.utils.file_keeper import FileKeeper
-from threading import Event


 class RunnerBase(ScenarioRunner):
@@ -78,6 +78,10 @@ class DefaultRunner(RunnerBase):
         nodes_under_load: list[ClusterNode],
         k6_dir: str,
     ):
+        if load_params.force_fresh_registry and load_params.custom_registry:
+            with reporter.step("Forcing fresh registry files"):
+                parallel(self._force_fresh_registry, self.loaders, load_params)
+
         if load_params.load_type != LoadType.S3:
             return
@@ -88,6 +92,11 @@ class DefaultRunner(RunnerBase):

         parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir)

+    def _force_fresh_registry(self, loader: Loader, load_params: LoadParams):
+        with reporter.step(f"Forcing fresh registry on {loader.ip}"):
+            shell = loader.get_shell()
+            shell.exec(f"rm -f {load_params.registry_file}")
+
     def _prepare_loader(
         self,
         loader: Loader,
@@ -314,7 +323,7 @@ class LocalRunner(RunnerBase):
         with reporter.step("Download K6"):
             shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
             shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
-            shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}")
+            shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}")
             shell.exec(f"sudo chmod -R 777 {k6_dir}")

         with reporter.step("Create empty_passwd"):
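`parallel(self._force_fresh_registry, self.loaders, load_params)` fans one call out across every loader. The testlib ships its own `parallel` helper; a minimal thread-pool equivalent under the same calling convention (the first positional argument is iterated, the rest are shared) might look like this:

from concurrent.futures import ThreadPoolExecutor


def parallel(fn, items, *args, **kwargs):
    # Run fn(item, *args, **kwargs) for every item, propagating exceptions
    # when results are collected.
    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(fn, item, *args, **kwargs) for item in items]
        return [future.result() for future in futures]


def force_fresh_registry(loader_ip: str, registry_file: str) -> str:
    # Stand-in for shelling out with `rm -f {registry_file}` on each loader.
    return f"{loader_ip}: removed {registry_file}"


print(parallel(force_fresh_registry, ["10.0.0.1", "10.0.0.2"], "registry.bolt"))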
@@ -9,4 +9,4 @@ FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm")
 # Config for frostfs-adm utility. Optional if tests are running against devenv
 FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")

-CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None)
+CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s")
@@ -719,7 +719,10 @@ class AwsCliClient(S3ClientWrapper):
             f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} "
             f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
         )
-        self.local_shell.exec(cmd)
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+
+        return response

     @reporter.step("Put object lock configuration")
     def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
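`complete_multipart_upload` now captures stdout and decodes it instead of discarding it. `_to_json` is an existing helper of the wrapper; assuming the AWS CLI prints a JSON document possibly surrounded by warnings, a decoder in that spirit could look like this (a sketch, not the wrapper's actual implementation):

import json
import re


def to_json(output: str) -> dict:
    # Extract the first {...} block from CLI output and parse it.
    match = re.search(r"\{.*\}", output, re.DOTALL)
    return json.loads(match.group()) if match else {}


print(to_json('warning: noise\n{"ETag": "abc123", "Bucket": "test"}'))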
@@ -554,6 +554,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
         )
         log_command_execution("S3 Complete multipart upload", response)

+        return response
+
     @reporter.step("Put object retention")
     @report_error
     def put_object_retention(
@@ -1,11 +1,12 @@
 import json
 import logging
 import re
-import requests
 from dataclasses import dataclass
 from time import sleep
 from typing import Optional, Union

+import requests
+
 from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
@@ -291,18 +292,17 @@ def delete_container(
     force: bool = False,
     session_token: Optional[str] = None,
     await_mode: bool = False,
-    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
 ) -> None:
     """
     A wrapper for `frostfs-cli container delete` call.
     Args:
+        await_mode: Block execution until container is removed.
         wallet (str): path to a wallet on whose behalf we delete the container
         cid (str): ID of the container to delete
         shell: executor for cli command
         endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
         force (bool): do not check whether container contains locks and remove immediately
         session_token: a path to session token file
-        timeout: Timeout for the operation.
     This function doesn't return anything.
     """

@@ -314,7 +314,6 @@ def delete_container(
         force=force,
         session=session_token,
         await_mode=await_mode,
-        timeout=timeout,
     )
@@ -732,23 +732,24 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
 @reporter.step("Search object nodes")
 def get_object_nodes(
     cluster: Cluster,
-    wallet: str,
     cid: str,
     oid: str,
-    shell: Shell,
-    endpoint: str,
+    alive_node: ClusterNode,
     bearer: str = "",
     xhdr: Optional[dict] = None,
     is_direct: bool = False,
     verify_presence_all: bool = False,
-    wallet_config: Optional[str] = None,
     timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
 ) -> list[ClusterNode]:
-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
+    shell = alive_node.host.get_shell()
+    endpoint = alive_node.storage_node.get_rpc_endpoint()
+    wallet = alive_node.storage_node.get_remote_wallet_path()
+    wallet_config = alive_node.storage_node.get_remote_wallet_config_path()
+
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config)

     result_object_nodes = cli.object.nodes(
         rpc_endpoint=endpoint,
-        wallet=wallet,
         cid=cid,
         oid=oid,
         bearer=bearer,
@@ -8,7 +8,8 @@ class ConfigAttributes:
     SHARD_CONFIG_PATH = "shard_config_path"
     LOGGER_CONFIG_PATH = "logger_config_path"
     LOCAL_WALLET_PATH = "local_wallet_path"
-    LOCAL_WALLET_CONFIG = "local_config_path"
+    LOCAL_WALLET_CONFIG = "local_wallet_config_path"
+    REMOTE_WALLET_CONFIG = "remote_wallet_config_path"
     ENDPOINT_DATA_0 = "endpoint_data0"
     ENDPOINT_DATA_1 = "endpoint_data1"
     ENDPOINT_INTERNAL = "endpoint_internal0"
@@ -114,6 +114,14 @@ class NodeBase(HumanReadableABC):
             ConfigAttributes.CONFIG_PATH,
         )

+    def get_remote_wallet_config_path(self) -> str:
+        """
+        Returns node config file path located on remote host
+        """
+        return self._get_attribute(
+            ConfigAttributes.REMOTE_WALLET_CONFIG,
+        )
+
     def get_wallet_config_path(self) -> str:
         return self._get_attribute(
             ConfigAttributes.LOCAL_WALLET_CONFIG,
@@ -125,8 +133,11 @@ class NodeBase(HumanReadableABC):
         Returns config path for logger located on remote host
         """
         config_attributes = self.host.get_service_config(self.name)
-        return self._get_attribute(
-            ConfigAttributes.LOGGER_CONFIG_PATH) if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes else None
+        return (
+            self._get_attribute(ConfigAttributes.LOGGER_CONFIG_PATH)
+            if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes
+            else None
+        )

     @property
     def config_dir(self) -> str:
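The reformatted `get_logger_config_path` return is a plain lookup-with-fallback: the attribute when present, `None` otherwise. A tiny illustration with a hypothetical attributes dict (the real method goes through `_get_attribute`):

from typing import Optional


class ServiceConfig:
    def __init__(self, attributes: dict):
        self.attributes = attributes


def get_logger_config_path(config: ServiceConfig) -> Optional[str]:
    key = "logger_config_path"
    # The conditional expression from the diff; equivalent to config.attributes.get(key).
    return config.attributes[key] if key in config.attributes else None


print(get_logger_config_path(ServiceConfig({"logger_config_path": "/etc/frostfs/logger.yaml"})))
print(get_logger_config_path(ServiceConfig({})))  # None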
@@ -136,6 +136,7 @@ class TestLoadConfig:
     def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams):
         expected_preset_args = [
             "--size '11'",
+            "--acl 'acl'",
             "--preload_obj '13'",
             "--out 'pregen_json'",
             "--workers '7'",
@@ -143,6 +144,7 @@ class TestLoadConfig:
             "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
+            "--local",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -172,6 +174,8 @@ class TestLoadConfig:
             "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
+            "--local",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -209,6 +213,7 @@ class TestLoadConfig:
             "--location 's3_location'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -240,6 +245,7 @@ class TestLoadConfig:
             "--location 's3_location'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 183900,
@@ -277,6 +283,7 @@ class TestLoadConfig:
             "--location 's3_location'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -304,6 +311,7 @@ class TestLoadConfig:

     @pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True)
     def test_argument_parsing_for_http_scenario(self, load_params: LoadParams):
+        load_params.preset.local = False
         expected_preset_args = [
             "--no-verify-ssl",
             "--size '11'",
@@ -314,6 +322,7 @@ class TestLoadConfig:
             "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -334,6 +343,7 @@ class TestLoadConfig:

     @pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True)
     def test_argument_parsing_for_local_scenario(self, load_params: LoadParams):
+        load_params.preset.local = False
         expected_preset_args = [
             "--size '11'",
             "--preload_obj '13'",
@@ -343,6 +353,7 @@ class TestLoadConfig:
             "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "CONFIG_FILE": "config_file",
@@ -395,6 +406,7 @@ class TestLoadConfig:
             "--containers '0'",
             "--policy ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -423,6 +435,7 @@ class TestLoadConfig:
             "--containers '0'",
             "--policy ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -458,6 +471,7 @@ class TestLoadConfig:
             "--buckets '0'",
             "--location ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -486,6 +500,7 @@ class TestLoadConfig:
             "--buckets '0'",
             "--location ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -521,6 +536,7 @@ class TestLoadConfig:
             "--containers '0'",
             "--policy ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -549,6 +565,7 @@ class TestLoadConfig:
             "--containers '0'",
             "--policy ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "CONFIG_FILE": "",