forked from TrueCloudLab/frostfs-testcases

Add http benchmark tests

Signed-off-by: a.lipay <a.lipay@yadro.com>

parent f80a9b7cbe
commit b8ab64e2c6
4 changed files with 251 additions and 36 deletions
@@ -9,6 +9,17 @@ from neofs_testlib.shell import Shell
 from remote_process import RemoteProcess
 
 EXIT_RESULT_CODE = 0
+LOAD_RESULTS_PATTERNS = {
+    "grpc": {
+        "write_ops": r"neofs_obj_put_total\W*\d*\W*(?P<write_ops>\d*\.\d*)",
+        "read_ops": r"neofs_obj_get_total\W*\d*\W*(?P<read_ops>\d*\.\d*)",
+    },
+    "s3": {
+        "write_ops": r"aws_obj_put_total\W*\d*\W*(?P<write_ops>\d*\.\d*)",
+        "read_ops": r"aws_obj_get_total\W*\d*\W*(?P<read_ops>\d*\.\d*)",
+    },
+    "http": {"total_ops": r"http_reqs\W*\d*\W*(?P<total_ops>\d*\.\d*)"},
+}
 
 
 @dataclass
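Each pattern above captures its value through a named group whose name matches the metric key, which is what lets the parser below look up `match.group(metric_name)` generically. A minimal sketch of the idea (the sample k6 summary line is an assumption for illustration, not output taken from this commit):

```python
import re

# Hypothetical k6 summary fragment; real k6 output formatting may differ.
sample = "neofs_obj_put_total................: 1500   25.0/s"

pattern = r"neofs_obj_put_total\W*\d*\W*(?P<write_ops>\d*\.\d*)"
match = re.search(pattern, sample)
if match:
    # The group name doubles as the metric key, e.g. "write_ops".
    print(match.group("write_ops"))  # -> "25.0"
```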
@@ -27,16 +38,11 @@ class LoadParams:
 
 @dataclass
 class LoadResults:
-    latency_write_min: float = 0.0
-    latency_write_max: float = 0.0
-    latency_write_med: float = 0.0
-    latency_write_avg: float = 0.0
-    latency_read_min: float = 0.0
-    latency_read_max: float = 0.0
-    latency_read_med: float = 0.0
-    latency_read_avg: float = 0.0
+    data_sent: float = 0.0
+    data_received: float = 0.0
     read_ops: float = 0.0
     write_ops: float = 0.0
+    total_ops: float = 0.0
 
 
 class K6:
@@ -176,17 +182,10 @@ class K6:
     def parsing_results(self) -> LoadResults:
         output = self._k6_process.stdout(full=True).replace("\n", "")
         metric_regex_map = {
-            "latency_write_min": r"neofs_obj_put_duration.*?min=(?P<latency_write_min>\d*\.\d*)",
-            "latency_write_max": r"neofs_obj_put_duration.*?max=(?P<latency_write_max>\d*\.\d*)",
-            "latency_write_med": r"neofs_obj_put_duration.*?med=(?P<latency_write_med>\d*\.\d*)",
-            "latency_write_avg": r"neofs_obj_put_duration.*?avg=(?P<latency_write_avg>\d*\.\d*)",
-            "write_ops": r"neofs_obj_put_total\W*\d*\W*(?P<write_ops>\d*\.\d*)",
-            "latency_read_min": r"neofs_obj_get_duration.*?min=(?P<latency_read_min>\d*\.\d*)",
-            "latency_read_max": r"neofs_obj_get_duration.*?max=(?P<latency_read_max>\d*\.\d*)",
-            "latency_read_med": r"neofs_obj_get_duration.*?med=(?P<latency_read_med>\d*\.\d*)",
-            "latency_read_avg": r"neofs_obj_get_duration.*?avg=(?P<latency_read_avg>\d*\.\d*)",
-            "read_ops": r"neofs_obj_get_total\W*\d*\W*(?P<read_ops>\d*\.\d*)",
+            "data_received": r"data_received\W*\d*.\d*.\w*\W*(?P<data_received>\d*)",
+            "data_sent": r"data_sent\W*\d*.\d*.\w*\W*(?P<data_sent>\d*)",
         }
+        metric_regex_map.update(LOAD_RESULTS_PATTERNS[self.load_params.load_type])
         metric_values = {}
         for metric_name, metric_regex in metric_regex_map.items():
             match = re.search(metric_regex, output)
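Taken together, this hunk reduces parsing_results() to two shared traffic counters plus the per-protocol ops counters merged in from LOAD_RESULTS_PATTERNS. A condensed sketch of the resulting flow, using the names defined in this diff (the float coercion and the LoadResults(**...) construction are assumptions about surrounding code the diff does not show):

```python
import re

def parse_load_results(output: str, load_type: str) -> "LoadResults":
    # Patterns shared by all load types, as in parsing_results() after this change.
    metric_regex_map = {
        "data_received": r"data_received\W*\d*.\d*.\w*\W*(?P<data_received>\d*)",
        "data_sent": r"data_sent\W*\d*.\d*.\w*\W*(?P<data_sent>\d*)",
    }
    # Per-protocol counters come from LOAD_RESULTS_PATTERNS (defined above).
    metric_regex_map.update(LOAD_RESULTS_PATTERNS[load_type])

    metric_values = {}
    for metric_name, metric_regex in metric_regex_map.items():
        match = re.search(metric_regex, output)
        if match:
            metric_values[metric_name] = float(match.group(metric_name))
    # Metrics absent from the output keep their 0.0 dataclass defaults.
    return LoadResults(**metric_values)
```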
@@ -9,10 +9,12 @@ from neofs_testlib.shell import SSHShell
 from k6 import K6, LoadParams, LoadResults
 
 
-@allure.title("Get storage host endpoints")
-def get_storage_host_endpoints(hosting: Hosting) -> list:
-    service_configs = hosting.find_service_configs(STORAGE_NODE_SERVICE_NAME_REGEX)
-    return [service_config.attributes["rpc_endpoint"] for service_config in service_configs]
+@allure.title("Get services endpoints")
+def get_services_endpoints(
+    hosting: Hosting, service_name_regex: str, endpoint_attribute: str
+) -> list[str]:
+    service_configs = hosting.find_service_configs(service_name_regex)
+    return [service_config.attributes[endpoint_attribute] for service_config in service_configs]
 
 
 @allure.title("Clear cache and data from storage nodes")
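The generalized helper replaces the storage-only version: the caller now supplies the service-name regex and the endpoint attribute, so one function serves storage nodes and HTTP gates alike. A usage sketch mirroring the call sites later in this diff (`hosting` is the Hosting instance the tests receive as a fixture):

```python
storage_endpoints = get_services_endpoints(
    hosting=hosting,
    service_name_regex=STORAGE_NODE_SERVICE_NAME_REGEX,  # r"s\d\d"
    endpoint_attribute="rpc_endpoint",
)
http_gate_endpoints = get_services_endpoints(
    hosting=hosting,
    service_name_regex=HTTP_GATE_SERVICE_NAME_REGEX,  # r"http-gate\d\d"
    endpoint_attribute="endpoint",
)
```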
@@ -2,13 +2,19 @@ from enum import Enum
 
 import allure
 import pytest
-from common import LOAD_NODE_SSH_PRIVATE_KEY_PATH, LOAD_NODE_SSH_USER, LOAD_NODES
+from common import (
+    LOAD_NODE_SSH_PRIVATE_KEY_PATH,
+    LOAD_NODE_SSH_USER,
+    LOAD_NODES,
+    STORAGE_NODE_SERVICE_NAME_REGEX,
+    HTTP_GATE_SERVICE_NAME_REGEX,
+)
 from neofs_testlib.hosting import Hosting
 
 from k6 import LoadParams
 from load import (
     clear_cache_and_data,
-    get_storage_host_endpoints,
+    get_services_endpoints,
     multi_node_k6_run,
     prepare_k6_instances,
 )
@@ -35,6 +41,8 @@ class TestLoad:
         "load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
     )
     @pytest.mark.parametrize("node_count", [4])
+    @pytest.mark.benchmark
+    @pytest.mark.grpc
     def test_grpc_benchmark(
         self,
         obj_size,
@@ -53,7 +61,11 @@ class TestLoad:
             f"load_time = {load_time}"
         )
         with allure.step("Get endpoints"):
-            endpoints_list = get_storage_host_endpoints(hosting=hosting)
+            endpoints_list = get_services_endpoints(
+                hosting=hosting,
+                service_name_regex=STORAGE_NODE_SERVICE_NAME_REGEX,
+                endpoint_attribute="rpc_endpoint",
+            )
         endpoints = ",".join(endpoints_list[:node_count])
         load_params = LoadParams(
             endpoint=endpoints,
@@ -68,7 +80,7 @@ class TestLoad:
             load_type="grpc",
         )
         k6_load_instances = prepare_k6_instances(
-            load_nodes=LOAD_NODES.split(","),
+            load_nodes=LOAD_NODES,
             login=LOAD_NODE_SSH_USER,
             pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
             load_params=load_params,
@@ -94,6 +106,8 @@ class TestLoad:
     @pytest.mark.parametrize(
         "load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
     )
+    @pytest.mark.benchmark
+    @pytest.mark.grpc
     def test_grpc_benchmark_write(
         self,
         obj_size,
@@ -103,13 +117,17 @@ class TestLoad:
         hosting: Hosting,
     ):
         allure.dynamic.title(
-            f" Single node benchmark write test - "
+            f"Single gate benchmark write test - "
             f"writers = {writers}, "
             f"obj_size = {obj_size}, "
             f"load_time = {load_time}"
         )
         with allure.step("Get endpoints"):
-            endpoints_list = get_storage_host_endpoints(hosting=hosting)
+            endpoints_list = get_services_endpoints(
+                hosting=hosting,
+                service_name_regex=STORAGE_NODE_SERVICE_NAME_REGEX,
+                endpoint_attribute="rpc_endpoint",
+            )
         endpoints = ",".join(endpoints_list[:1])
         load_params = LoadParams(
             endpoint=endpoints,
@@ -124,7 +142,7 @@ class TestLoad:
             load_type="grpc",
         )
         k6_load_instances = prepare_k6_instances(
-            load_nodes=LOAD_NODES.split(","),
+            load_nodes=LOAD_NODES,
             login=LOAD_NODE_SSH_USER,
             pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
             load_params=load_params,
@@ -144,6 +162,8 @@ class TestLoad:
     @pytest.mark.parametrize(
         "load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
     )
+    @pytest.mark.benchmark
+    @pytest.mark.grpc
     def test_grpc_benchmark_write_read_70_30(
         self,
         obj_size,
@@ -154,14 +174,18 @@ class TestLoad:
         hosting: Hosting,
     ):
         allure.dynamic.title(
-            f" Single node benchmark write + read (70%/30%) test - "
+            f"Single gate benchmark write + read (70%/30%) test - "
             f"writers = {writers}, "
             f"readers = {readers}, "
             f"obj_size = {obj_size}, "
             f"load_time = {load_time}"
         )
         with allure.step("Get endpoints"):
-            endpoints_list = get_storage_host_endpoints(hosting=hosting)
+            endpoints_list = get_services_endpoints(
+                hosting=hosting,
+                service_name_regex=STORAGE_NODE_SERVICE_NAME_REGEX,
+                endpoint_attribute="rpc_endpoint",
+            )
         endpoints = ",".join(endpoints_list[:1])
         load_params = LoadParams(
             endpoint=endpoints,
@@ -176,7 +200,7 @@ class TestLoad:
             load_type="grpc",
         )
         k6_load_instances = prepare_k6_instances(
-            load_nodes=LOAD_NODES.split(","),
+            load_nodes=LOAD_NODES,
             login=LOAD_NODE_SSH_USER,
             pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
             load_params=load_params,
@@ -202,6 +226,8 @@ class TestLoad:
     @pytest.mark.parametrize(
         "load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
     )
+    @pytest.mark.benchmark
+    @pytest.mark.grpc
     def test_grpc_benchmark_read(
         self,
         obj_size,
@@ -211,13 +237,17 @@ class TestLoad:
         hosting: Hosting,
     ):
         allure.dynamic.title(
-            f" Single node benchmark read test - "
+            f"Single gate benchmark read test - "
             f"readers = {readers}, "
             f"obj_size = {obj_size}, "
             f"load_time = {load_time}"
        )
         with allure.step("Get endpoints"):
-            endpoints_list = get_storage_host_endpoints(hosting=hosting)
+            endpoints_list = get_services_endpoints(
+                hosting=hosting,
+                service_name_regex=STORAGE_NODE_SERVICE_NAME_REGEX,
+                endpoint_attribute="rpc_endpoint",
+            )
         endpoints = ",".join(endpoints_list[:1])
         load_params = LoadParams(
             endpoint=endpoints,
@@ -232,7 +262,189 @@ class TestLoad:
             load_type="grpc",
         )
         k6_load_instances = prepare_k6_instances(
-            load_nodes=LOAD_NODES.split(","),
+            load_nodes=LOAD_NODES,
+            login=LOAD_NODE_SSH_USER,
+            pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
+            load_params=load_params,
+        )
+        with allure.step("Run load"):
+            multi_node_k6_run(k6_load_instances)
+
+    @pytest.mark.parametrize(
+        "obj_size, out_file, writers",
+        [
+            (4, "4kb_300.json", 300),
+            (16, "16kb_250.json", 250),
+            (64, "64kb_250.json", 250),
+            (128, "128kb_250.json", 250),
+            (512, "512kb_200.json", 200),
+            (1000, "1mb_200.json", 200),
+            (8000, "8mb_150.json", 150),
+            (32000, "32mb_150.json", 150),
+            (128000, "128mb_100.json", 100),
+            (512000, "512mb_50.json", 50),
+        ],
+    )
+    @pytest.mark.parametrize(
+        "load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
+    )
+    @pytest.mark.benchmark
+    @pytest.mark.http
+    def test_http_benchmark_write(
+        self,
+        obj_size,
+        out_file,
+        writers,
+        load_time,
+        hosting: Hosting,
+    ):
+        allure.dynamic.title(
+            f"Single gate benchmark write test - "
+            f"writers = {writers}, "
+            f"obj_size = {obj_size}, "
+            f"load_time = {load_time}"
+        )
+        with allure.step("Get endpoints"):
+            endpoints_list = get_services_endpoints(
+                hosting=hosting,
+                service_name_regex=HTTP_GATE_SERVICE_NAME_REGEX,
+                endpoint_attribute="endpoint",
+            )
+        endpoints = ",".join(endpoints_list[:1])
+        load_params = LoadParams(
+            endpoint=endpoints,
+            obj_size=obj_size,
+            containers_count=CONTAINERS_COUNT,
+            out_file=out_file,
+            obj_count=OBJ_COUNT,
+            writers=writers,
+            readers=0,
+            deleters=0,
+            load_time=load_time,
+            load_type="http",
+        )
+        k6_load_instances = prepare_k6_instances(
+            load_nodes=LOAD_NODES,
+            login=LOAD_NODE_SSH_USER,
+            pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
+            load_params=load_params,
+        )
+        with allure.step("Run load"):
+            multi_node_k6_run(k6_load_instances)
+
+    @pytest.mark.parametrize(
+        "obj_size, out_file, writers, readers",
+        [
+            (8000, "8mb_350.json", 245, 105),
+            (32000, "32mb_300.json", 210, 90),
+            (128000, "128mb_100.json", 70, 30),
+            (512000, "512mb_70.json", 49, 21),
+        ],
+    )
+    @pytest.mark.parametrize(
+        "load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
+    )
+    @pytest.mark.benchmark
+    @pytest.mark.http
+    def test_http_benchmark_write_read_70_30(
+        self,
+        obj_size,
+        out_file,
+        writers,
+        readers,
+        load_time,
+        hosting: Hosting,
+    ):
+        allure.dynamic.title(
+            f"Single gate benchmark write + read (70%/30%) test - "
+            f"writers = {writers}, "
+            f"readers = {readers}, "
+            f"obj_size = {obj_size}, "
+            f"load_time = {load_time}"
+        )
+        with allure.step("Get endpoints"):
+            endpoints_list = get_services_endpoints(
+                hosting=hosting,
+                service_name_regex=HTTP_GATE_SERVICE_NAME_REGEX,
+                endpoint_attribute="endpoint",
+            )
+        endpoints = ",".join(endpoints_list[:1])
+        load_params = LoadParams(
+            endpoint=endpoints,
+            obj_size=obj_size,
+            containers_count=CONTAINERS_COUNT,
+            out_file=out_file,
+            obj_count=500,
+            writers=writers,
+            readers=readers,
+            deleters=0,
+            load_time=load_time,
+            load_type="http",
+        )
+        k6_load_instances = prepare_k6_instances(
+            load_nodes=LOAD_NODES,
+            login=LOAD_NODE_SSH_USER,
+            pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
+            load_params=load_params,
+        )
+        with allure.step("Run load"):
+            multi_node_k6_run(k6_load_instances)
+
+    @pytest.mark.parametrize(
+        "obj_size, out_file, readers",
+        [
+            (4, "4kb_300.json", 300),
+            (16, "16kb_300.json", 300),
+            (64, "64kb_300.json", 300),
+            (128, "128kb_250.json", 250),
+            (512, "512kb_150.json", 150),
+            (1000, "1mb_150.json", 150),
+            (8000, "8mb_150.json", 150),
+            (32000, "32mb_100.json", 100),
+            (128000, "128mb_25.json", 25),
+            (512000, "512mb_25.json", 25),
+        ],
+    )
+    @pytest.mark.parametrize(
+        "load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
+    )
+    @pytest.mark.benchmark
+    @pytest.mark.http
+    def test_http_benchmark_read(
+        self,
+        obj_size,
+        out_file,
+        readers,
+        load_time,
+        hosting: Hosting,
+    ):
+        allure.dynamic.title(
+            f"Single gate benchmark read test - "
+            f"readers = {readers}, "
+            f"obj_size = {obj_size}, "
+            f"load_time = {load_time}"
+        )
+        with allure.step("Get endpoints"):
+            endpoints_list = get_services_endpoints(
+                hosting=hosting,
+                service_name_regex=HTTP_GATE_SERVICE_NAME_REGEX,
+                endpoint_attribute="endpoint",
+            )
+        endpoints = ",".join(endpoints_list[:1])
+        load_params = LoadParams(
+            endpoint=endpoints,
+            obj_size=obj_size,
+            containers_count=1,
+            out_file=out_file,
+            obj_count=500,
+            writers=0,
+            readers=readers,
+            deleters=0,
+            load_time=load_time,
+            load_type="http",
+        )
+        k6_load_instances = prepare_k6_instances(
+            load_nodes=LOAD_NODES,
             login=LOAD_NODE_SSH_USER,
             pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
             load_params=load_params,
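The mixed-load parametrization keeps each out_file's total VU count split 70%/30% between writers and readers (e.g. 8mb_350.json: 245 + 105 = 350). A quick check of the tuples above:

```python
# (total, writers, readers) taken from the parametrize table above.
cases = [(350, 245, 105), (300, 210, 90), (100, 70, 30), (70, 49, 21)]
for total, writers, readers in cases:
    # Writers and readers always sum to the total encoded in the file name,
    # and match the 70%/30% split exactly.
    assert writers + readers == total
    assert (writers, readers) == (round(total * 0.7), round(total * 0.3))
```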
@@ -35,7 +35,7 @@ DEVENV_PATH = os.getenv("DEVENV_PATH", os.path.join("..", "neofs-dev-env"))
 WALLET_PASS = os.getenv("WALLET_PASS", "")
 
 # Load node parameters
-LOAD_NODES = os.getenv("LOAD_NODES")
+LOAD_NODES = os.getenv("LOAD_NODES", "").split(",")
 LOAD_NODE_SSH_USER = os.getenv("LOAD_NODE_SSH_USER")
 LOAD_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PATH")
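Moving the split(",") into common.py simplifies every call site, but one caveat is worth noting: when LOAD_NODES is unset, "".split(",") returns [""] rather than an empty list, so callers would iterate over a single empty hostname. A defensive variant (an alternative sketch, not what this commit does):

```python
import os

# "".split(",") == [""], so filter out empty entries when the variable is unset.
LOAD_NODES = [node for node in os.getenv("LOAD_NODES", "").split(",") if node]
```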
@@ -122,6 +122,8 @@ BIN_VERSIONS_FILE = os.getenv("BIN_VERSIONS_FILE")
 
 HOSTING_CONFIG_FILE = os.getenv("HOSTING_CONFIG_FILE", ".devenv.hosting.yaml")
 STORAGE_NODE_SERVICE_NAME_REGEX = r"s\d\d"
+HTTP_GATE_SERVICE_NAME_REGEX = r"http-gate\d\d"
+S3_GATE_SERVICE_NAME_REGEX = r"s3-gate\d\d"
 
 # Generate wallet configs
 # TODO: we should move all info about wallet configs to fixtures