From a85f04a73b3a15a210cc1644779bee49e3aa2aac Mon Sep 17 00:00:00 2001
From: "a.lipay"
Date: Fri, 21 Oct 2022 13:25:16 +0300
Subject: [PATCH] Add grpc benchmark tests

Signed-off-by: a.lipay
---
 pytest_tests/testsuites/load/test_load.py | 178 +++++++++++++++++++++-
 1 file changed, 176 insertions(+), 2 deletions(-)

diff --git a/pytest_tests/testsuites/load/test_load.py b/pytest_tests/testsuites/load/test_load.py
index 5706ac3..f02ffca 100644
--- a/pytest_tests/testsuites/load/test_load.py
+++ b/pytest_tests/testsuites/load/test_load.py
@@ -1,3 +1,5 @@
+from enum import Enum
+
 import allure
 import pytest
 from common import LOAD_NODE_SSH_PRIVATE_KEY_PATH, LOAD_NODE_SSH_USER, LOAD_NODES
@@ -11,6 +13,12 @@ from load import (
     prepare_k6_instances,
 )
 
+
+class LoadTime(Enum):
+    EXPECTED_MAXIMUM = 200
+    PMI_EXPECTATION = 900
+
+
 CONTAINERS_COUNT = 1
 OBJ_COUNT = 3
 
@@ -23,7 +31,9 @@ class TestLoad:
 
     @pytest.mark.parametrize("obj_size, out_file", [(1000, "1mb_200.json")])
     @pytest.mark.parametrize("writers, readers, deleters", [(140, 60, 0), (200, 0, 0)])
-    @pytest.mark.parametrize("load_time", [200, 900])
+    @pytest.mark.parametrize(
+        "load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
+    )
     @pytest.mark.parametrize("node_count", [4])
     def test_grpc_benchmark(
         self,
@@ -58,7 +68,171 @@ class TestLoad:
             load_type="grpc",
         )
         k6_load_instances = prepare_k6_instances(
-            load_nodes=LOAD_NODES.split(','),
+            load_nodes=LOAD_NODES.split(","),
+            login=LOAD_NODE_SSH_USER,
+            pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
+            load_params=load_params,
+        )
+        with allure.step("Run load"):
+            multi_node_k6_run(k6_load_instances)
+
+    @pytest.mark.parametrize(
+        "obj_size, out_file, writers",
+        [
+            (4, "4kb_300.json", 300),
+            (16, "16kb_250.json", 250),
+            (64, "64kb_250.json", 250),
+            (128, "128kb_250.json", 250),
+            (512, "512kb_200.json", 200),
+            (1000, "1mb_200.json", 200),
+            (8000, "8mb_150.json", 150),
+            (32000, "32mb_150.json", 150),
+            (128000, "128mb_100.json", 100),
+            (512000, "512mb_50.json", 50),
+        ],
+    )
+    @pytest.mark.parametrize(
+        "load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
+    )
+    def test_grpc_benchmark_write(
+        self,
+        obj_size,
+        out_file,
+        writers,
+        load_time,
+        hosting: Hosting,
+    ):
+        allure.dynamic.title(
+            f" Single node benchmark write test - "
+            f"writers = {writers}, "
+            f"obj_size = {obj_size}, "
+            f"load_time = {load_time}"
+        )
+        with allure.step("Get endpoints"):
+            endpoints_list = get_storage_host_endpoints(hosting=hosting)
+            endpoints = ",".join(endpoints_list[:1])
+        load_params = LoadParams(
+            endpoint=endpoints,
+            obj_size=obj_size,
+            containers_count=CONTAINERS_COUNT,
+            out_file=out_file,
+            obj_count=OBJ_COUNT,
+            writers=writers,
+            readers=0,
+            deleters=0,
+            load_time=load_time,
+            load_type="grpc",
+        )
+        k6_load_instances = prepare_k6_instances(
+            load_nodes=LOAD_NODES.split(","),
+            login=LOAD_NODE_SSH_USER,
+            pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
+            load_params=load_params,
+        )
+        with allure.step("Run load"):
+            multi_node_k6_run(k6_load_instances)
+
+    @pytest.mark.parametrize(
+        "obj_size, out_file, writers, readers",
+        [
+            (8000, "8mb_350.json", 245, 105),
+            (32000, "32mb_300.json", 210, 90),
+            (128000, "128mb_100.json", 70, 30),
+            (512000, "512mb_70.json", 49, 21),
+        ],
+    )
+    @pytest.mark.parametrize(
+        "load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
+    )
+    def test_grpc_benchmark_write_read_70_30(
+        self,
+        obj_size,
+        out_file,
+        writers,
+        readers,
+        load_time,
+        hosting: Hosting,
+    ):
+        allure.dynamic.title(
+            f" Single node benchmark write + read (70%/30%) test - "
+            f"writers = {writers}, "
+            f"readers = {readers}, "
+            f"obj_size = {obj_size}, "
+            f"load_time = {load_time}"
+        )
+        with allure.step("Get endpoints"):
+            endpoints_list = get_storage_host_endpoints(hosting=hosting)
+            endpoints = ",".join(endpoints_list[:1])
+        load_params = LoadParams(
+            endpoint=endpoints,
+            obj_size=obj_size,
+            containers_count=CONTAINERS_COUNT,
+            out_file=out_file,
+            obj_count=500,
+            writers=writers,
+            readers=readers,
+            deleters=0,
+            load_time=load_time,
+            load_type="grpc",
+        )
+        k6_load_instances = prepare_k6_instances(
+            load_nodes=LOAD_NODES.split(","),
+            login=LOAD_NODE_SSH_USER,
+            pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
+            load_params=load_params,
+        )
+        with allure.step("Run load"):
+            multi_node_k6_run(k6_load_instances)
+
+    @pytest.mark.parametrize(
+        "obj_size, out_file, readers",
+        [
+            (4, "4kb_300.json", 300),
+            (16, "16kb_300.json", 300),
+            (64, "64kb_300.json", 300),
+            (128, "128kb_250.json", 250),
+            (512, "512kb_150.json", 150),
+            (1000, "1mb_150.json", 150),
+            (8000, "8mb_150.json", 150),
+            (32000, "32mb_100.json", 100),
+            (128000, "128mb_25.json", 25),
+            (512000, "512mb_25.json", 25),
+        ],
+    )
+    @pytest.mark.parametrize(
+        "load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
+    )
+    def test_grpc_benchmark_read(
+        self,
+        obj_size,
+        out_file,
+        readers,
+        load_time,
+        hosting: Hosting,
+    ):
+        allure.dynamic.title(
+            f" Single node benchmark read test - "
+            f"readers = {readers}, "
+            f"obj_size = {obj_size}, "
+            f"load_time = {load_time}"
+        )
+        with allure.step("Get endpoints"):
+            endpoints_list = get_storage_host_endpoints(hosting=hosting)
+            endpoints = ",".join(endpoints_list[:1])
+        load_params = LoadParams(
+            endpoint=endpoints,
+            obj_size=obj_size,
+            containers_count=1,
+            out_file=out_file,
+            obj_count=500,
+            writers=0,
+            readers=readers,
+            deleters=0,
+            load_time=load_time,
+            load_type="grpc",
+        )
+        k6_load_instances = prepare_k6_instances(
+            load_nodes=LOAD_NODES.split(","),
             login=LOAD_NODE_SSH_USER,
             pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
             load_params=load_params,