[#328] Remove basic-acl from metrics tests #328

Merged
abereziny merged 1 commit from abereziny/frostfs-testcases:feature-remove-basic-acl-5 into master 2024-11-22 12:51:41 +00:00
7 changed files with 179 additions and 170 deletions
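Editorial note: in short, the affected tests no longer create their own containers inline (previously via `create_container(..., basic_acl=EACL_PUBLIC_READ_WRITE)` or an inline placement policy); they declare the container they need with the new `requires_container` marker and receive it through fixtures. A rough before/after sketch of the pattern, condensed from the diffs below — the `TestExample` class and test names are placeholders, and the `container` fixture is assumed to be resolved in the suite's conftest, which is not part of this diff:

# Sketch only: condensed from the diffs below, not a verbatim excerpt of any one test.
from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase

from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container


class TestExample(ClusterTestBase):
    # Before: each test built its own container and passed a basic ACL.
    def test_metric_old_style(self, default_wallet, cluster):
        cid = create_container(
            wallet=default_wallet,
            shell=self.shell,
            endpoint=cluster.default_rpc_endpoint,
            rule="REP 1 CBF 1",
            basic_acl=EACL_PUBLIC_READ_WRITE,
        )
        ...

    # After: the container is declared via the marker (APE rules, no basic ACL) and
    # arrives through the "container" fixture, which is resolved outside this diff.
    @requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1"))
    def test_metric_new_style(self, default_wallet, cluster, container: str):
        ...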

View file

@@ -1,6 +1,7 @@
 from dataclasses import dataclass
 from functools import partial

+import pytest
 from frostfs_testlib.steps.cli.container import DEFAULT_PLACEMENT_RULE
 from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.dataclasses import ape
@@ -70,3 +71,13 @@ PUBLIC_WITH_POLICY = partial(ContainerRequest, ape_rules=APE_EVERYONE_ALLOW_ALL,
 EVERYONE_ALLOW_ALL = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="Everyone_Allow_All")
 OWNER_ALLOW_ALL = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=APE_OWNER_ALLOW_ALL, short_name="Owner_Allow_All")
 PRIVATE = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=[], short_name="Private_No_APE")
+
+
+def requires_container(container_request: None | ContainerRequest | list[ContainerRequest] = None) -> pytest.MarkDecorator:
+    if container_request is None:
+        container_request = EVERYONE_ALLOW_ALL
+
+    if not isinstance(container_request, list):
+        container_request = [container_request]
+
+    return pytest.mark.parametrize("container_request", container_request, indirect=True)
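Editorial note: the marker added above expands to `pytest.mark.parametrize("container_request", ..., indirect=True)`, so actual container creation is expected to happen in a `container_request`/`container` fixture pair defined elsewhere in the suite (not shown in this diff); when called with no argument it falls back to `EVERYONE_ALLOW_ALL`. A minimal usage sketch with placeholder test names, mirroring how the tests changed below apply it:

# Placeholder test names; the decorated style mirrors the tests changed below.
from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container


# Single request: one container parametrization for the test.
@requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"))
def test_single_policy(container: str):
    assert container  # "container" is assumed to hold the CID created for the request


# List of requests: the test runs once per container policy.
@requires_container(
    [
        PUBLIC_WITH_POLICY("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", short_name="REP 1"),
        PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"),
    ]
)
def test_each_policy(container: str):
    assert container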

View file

@@ -3,7 +3,7 @@ import math
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.steps.cli.container import create_container, delete_container, search_nodes_with_container, wait_for_container_deletion
+from frostfs_testlib.steps.cli.container import delete_container, search_nodes_with_container, wait_for_container_deletion
 from frostfs_testlib.steps.cli.object import delete_object, head_object, put_object_to_random_node
 from frostfs_testlib.steps.metrics import calc_metrics_count_from_stdout, check_metrics_counter, get_metrics_value
 from frostfs_testlib.steps.storage_policy import get_nodes_with_object
@@ -12,8 +12,9 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.parallel import parallel
-from frostfs_testlib.utils.file_utils import generate_file
+from frostfs_testlib.utils.file_utils import TestFile, generate_file

+from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest, requires_container
 from ...helpers.utility import are_numbers_similar
@@ -33,124 +34,118 @@ class TestContainerMetrics(ClusterTestBase):
         except Exception as e:
             return None

-    @allure.title("Container metrics (obj_size={object_size},policy={policy})")
-    @pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
+    @allure.title("Container metrics (obj_size={object_size}, policy={container_request})")
+    @pytest.mark.parametrize(
+        "container_request, copies",
+        [
+            (PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP"), 2),
+            (PUBLIC_WITH_POLICY("EC 1.1 CBF 1", short_name="EC"), 1),
+        ],
+        indirect=["container_request"],
+    )
     def test_container_metrics(
         self,
         object_size: ObjectSize,
         max_object_size: int,
         default_wallet: WalletInfo,
         cluster: Cluster,
-        placement_policy: str,
-        policy: str,
+        copies: int,
+        container: str,
+        test_file: TestFile,
+        container_request: ContainerRequest,
     ):
-        file_path = generate_file(object_size.value)
-        copies = 2 if policy == "REP" else 1
         object_chunks = 1
         link_object = 0

-        with reporter.step(f"Create container with policy {placement_policy}"):
-            cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
-
         if object_size.value > max_object_size:
             object_chunks = math.ceil(object_size.value / max_object_size)
-            link_object = len(search_nodes_with_container(default_wallet, cid, self.shell, cluster.default_rpc_endpoint, cluster))
+            link_object = len(search_nodes_with_container(default_wallet, container, self.shell, cluster.default_rpc_endpoint, cluster))

         with reporter.step("Put object to random node"):
-            oid = put_object_to_random_node(
-                wallet=default_wallet,
-                path=file_path,
-                cid=cid,
-                shell=self.shell,
-                cluster=cluster,
-            )
+            oid = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, cluster)

         with reporter.step("Get object nodes"):
-            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
+            object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
             object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]

         with reporter.step("Check metric appears in node where the object is located"):
             count_metrics = (object_chunks * copies) + link_object
-            if policy == "EC":
+            if container_request.short_name == "EC":
                 count_metrics = (object_chunks * 2) + link_object
-            check_metrics_counter(object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=cid, type="phy")
-            check_metrics_counter(object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=cid, type="logic")
-            check_metrics_counter(object_nodes, counter_exp=copies, command="container_objects_total", cid=cid, type="user")
+            check_metrics_counter(object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=container, type="phy")
+            check_metrics_counter(object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=container, type="logic")
+            check_metrics_counter(object_nodes, counter_exp=copies, command="container_objects_total", cid=container, type="user")

         with reporter.step("Delete file, wait until gc remove object"):
-            delete_object(default_wallet, cid, oid, self.shell, cluster.default_rpc_endpoint)
+            delete_object(default_wallet, container, oid, self.shell, cluster.default_rpc_endpoint)

         with reporter.step(f"Check container metrics 'the counter should equal {len(object_nodes)}' in object nodes"):
-            check_metrics_counter(object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=cid, type="phy")
-            check_metrics_counter(object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=cid, type="logic")
-            check_metrics_counter(object_nodes, counter_exp=0, command="container_objects_total", cid=cid, type="user")
+            check_metrics_counter(object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=container, type="phy")
+            check_metrics_counter(
+                object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=container, type="logic"
+            )
+            check_metrics_counter(object_nodes, counter_exp=0, command="container_objects_total", cid=container, type="user")

         with reporter.step("Check metrics(Phy, Logic, User) in each nodes"):
-            # Phy and Logic metrics are 4, because in rule 'CBF 2 SELECT 2 FROM', cbf2*sel2=4
-            expect_metrics = 4 if policy == "REP" else 2
-            check_metrics_counter(cluster.cluster_nodes, counter_exp=expect_metrics, command="container_objects_total", cid=cid, type="phy")
+            # Phy and Logic metrics are x2, because in rule 'CBF 2 SELECT 2 FROM', cbf2*sel2=4
+            expect_metrics = copies * 2
             check_metrics_counter(
-                cluster.cluster_nodes, counter_exp=expect_metrics, command="container_objects_total", cid=cid, type="logic"
+                cluster.cluster_nodes, counter_exp=expect_metrics, command="container_objects_total", cid=container, type="phy"
             )
-            check_metrics_counter(cluster.cluster_nodes, counter_exp=0, command="container_objects_total", cid=cid, type="user")
+            check_metrics_counter(
+                cluster.cluster_nodes, counter_exp=expect_metrics, command="container_objects_total", cid=container, type="logic"
+            )
+            check_metrics_counter(cluster.cluster_nodes, counter_exp=0, command="container_objects_total", cid=container, type="user")

-    @allure.title("Container size metrics (obj_size={object_size},policy={policy})")
-    @pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
+    @allure.title("Container size metrics (obj_size={object_size}, policy={container_request})")
+    @requires_container(
+        [PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP"), PUBLIC_WITH_POLICY("EC 1.1 CBF 1", short_name="EC")]
+    )
     def test_container_size_metrics(
         self,
         object_size: ObjectSize,
         default_wallet: WalletInfo,
-        placement_policy: str,
-        policy: str,
+        test_file: TestFile,
+        container: str,
     ):
-        file_path = generate_file(object_size.value)
-
-        with reporter.step(f"Create container with policy {policy}"):
-            cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, placement_policy)
-
         with reporter.step("Put object to random node"):
-            oid = put_object_to_random_node(
-                wallet=default_wallet,
-                path=file_path,
-                cid=cid,
-                shell=self.shell,
-                cluster=self.cluster,
-            )
+            oid = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, self.cluster)

         with reporter.step("Get object nodes"):
-            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, self.cluster.storage_nodes)
+            object_storage_nodes = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
             object_nodes = [
                 cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes
             ]

         with reporter.step("Check metric appears in all node where the object is located"):
             act_metric = sum(
-                [get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=cid) for node in object_nodes]
+                [get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=container) for node in object_nodes]
             )
             assert (act_metric // 2) == object_size.value

         with reporter.step("Delete file, wait until gc remove object"):
-            id_tombstone = delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
-            tombstone = head_object(default_wallet, cid, id_tombstone, self.shell, self.cluster.default_rpc_endpoint)
+            id_tombstone = delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
+            tombstone = head_object(default_wallet, container, id_tombstone, self.shell, self.cluster.default_rpc_endpoint)

         with reporter.step(f"Check container size metrics"):
-            act_metric = get_metrics_value(object_nodes[0], command="frostfs_node_engine_container_size_bytes", cid=cid)
+            act_metric = get_metrics_value(object_nodes[0], command="frostfs_node_engine_container_size_bytes", cid=container)
             assert act_metric == int(tombstone["header"]["payloadLength"])

     @allure.title("Container size metrics put {objects_count} objects (obj_size={object_size})")
     @pytest.mark.parametrize("objects_count", [5, 10, 20])
-    def test_container_size_metrics_more_objects(self, object_size: ObjectSize, default_wallet: WalletInfo, objects_count: int):
-        with reporter.step(f"Create container"):
-            cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint)
-
+    @requires_container
+    def test_container_size_metrics_more_objects(
+        self, object_size: ObjectSize, default_wallet: WalletInfo, objects_count: int, container: str
+    ):
         with reporter.step(f"Put {objects_count} objects"):
             files_path = [generate_file(object_size.value) for _ in range(objects_count)]
-            futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=cid)
+            futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=container)
             oids = [future.result() for future in futures]

         with reporter.step("Check metric appears in all nodes"):
             metric_values = [
-                get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=cid) for node in self.cluster.cluster_nodes
+                get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=container)
+                for node in self.cluster.cluster_nodes
             ]
             actual_value = sum(metric_values) // 2  # for policy REP 2, value divide by 2
             expected_value = object_size.value * objects_count
@@ -161,47 +156,61 @@ class TestContainerMetrics(ClusterTestBase):
         with reporter.step("Delete file, wait until gc remove object"):
             tombstones_size = 0
             for oid in oids:
-                tombstone_id = delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
-                tombstone = head_object(default_wallet, cid, tombstone_id, self.shell, self.cluster.default_rpc_endpoint)
+                tombstone_id = delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
+                tombstone = head_object(default_wallet, container, tombstone_id, self.shell, self.cluster.default_rpc_endpoint)
                 tombstones_size += int(tombstone["header"]["payloadLength"])

         with reporter.step(f"Check container size metrics, 'should be positive in all nodes'"):
-            futures = parallel(get_metrics_value, self.cluster.cluster_nodes, command="frostfs_node_engine_container_size_bytes", cid=cid)
+            futures = parallel(
+                get_metrics_value, self.cluster.cluster_nodes, command="frostfs_node_engine_container_size_bytes", cid=container
+            )
             metrics_value_nodes = [future.result() for future in futures]
             for act_metric in metrics_value_nodes:
                 assert act_metric >= 0, "Metrics value is negative"
             assert sum(metrics_value_nodes) // len(self.cluster.cluster_nodes) == tombstones_size, "tomstone size of objects not correct"

-    @allure.title("Container metrics (policy={policy})")
-    @pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
+    @allure.title("Container metrics (policy={container_request})")
+    @pytest.mark.parametrize(
+        "container_request, copies",
+        [
+            (PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP"), 2),
+            (PUBLIC_WITH_POLICY("EC 1.1 CBF 1", short_name="EC"), 1),
+        ],
+        indirect=["container_request"],
+    )
     def test_container_metrics_delete_complex_objects(
-        self, complex_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, placement_policy: str, policy: str
+        self,
+        complex_object_size: ObjectSize,
+        default_wallet: WalletInfo,
+        cluster: Cluster,
+        copies: int,
+        container: str,
+        container_request: ContainerRequest,
     ):
-        copies = 2 if policy == "REP" else 1
         objects_count = 2
         metric_name = "frostfs_node_engine_container_objects_total"

-        with reporter.step(f"Create container"):
-            cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, rule=placement_policy)
-
         with reporter.step(f"Put {objects_count} objects"):
             files_path = [generate_file(complex_object_size.value) for _ in range(objects_count)]
-            futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=cid)
+            futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=container)
             oids = [future.result() for future in futures]

         with reporter.step(f"Check metrics value in each nodes, should be {objects_count} for 'user'"):
-            check_metrics_counter(cluster.cluster_nodes, counter_exp=objects_count * copies, command=metric_name, cid=cid, type="user")
+            check_metrics_counter(
+                cluster.cluster_nodes, counter_exp=objects_count * copies, command=metric_name, cid=container, type="user"
+            )

         with reporter.step("Delete objects and container"):
             for oid in oids:
-                delete_object(default_wallet, cid, oid, self.shell, cluster.default_rpc_endpoint)
+                delete_object(default_wallet, container, oid, self.shell, cluster.default_rpc_endpoint)

-            delete_container(default_wallet, cid, self.shell, cluster.default_rpc_endpoint)
+            delete_container(default_wallet, container, self.shell, cluster.default_rpc_endpoint)

         with reporter.step("Tick epoch and check container was deleted"):
             self.tick_epoch()
-            wait_for_container_deletion(default_wallet, cid, shell=self.shell, endpoint=cluster.default_rpc_endpoint)
+            wait_for_container_deletion(default_wallet, container, shell=self.shell, endpoint=cluster.default_rpc_endpoint)

         with reporter.step(f"Check metrics value in each nodes, should not be show any result"):
-            futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name, cid=cid)
+            futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name, cid=container)
             metrics_results = [future.result() for future in futures if future.result() is not None]
             assert len(metrics_results) == 0, f"Metrics value is not empty in Prometheus, actual value in nodes: {metrics_results}"

View file

@@ -1,11 +1,9 @@
-import random
 import re

 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.steps.cli.container import create_container
-from frostfs_testlib.steps.cli.object import delete_object, put_object, put_object_to_random_node
+from frostfs_testlib.steps.cli.object import delete_object, put_object_to_random_node
 from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
 from frostfs_testlib.steps.storage_policy import get_nodes_with_object
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
@@ -15,8 +13,11 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils.file_utils import generate_file

+from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container
+

 @pytest.mark.nightly
+@pytest.mark.metrics
 class TestGarbageCollectorMetrics(ClusterTestBase):
     @wait_for_success(interval=10)
     def check_metrics_in_node(self, cluster_node: ClusterNode, counter_exp: int, **metrics_greps: str):
@@ -34,9 +35,11 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
         return sum(map(int, result))

     @allure.title("Garbage collector expire_at object")
-    def test_garbage_collector_metrics_expire_at_object(self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
+    @requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"))
+    def test_garbage_collector_metrics_expire_at_object(
+        self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, container: str
+    ):
         file_path = generate_file(simple_object_size.value)
-        placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
         metrics_step = 1

         with reporter.step("Get current garbage collector metrics for each nodes"):
@@ -44,22 +47,19 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
             for node in cluster.cluster_nodes:
                 metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_marked_for_removal_objects_total")

-        with reporter.step(f"Create container with policy {placement_policy}"):
-            cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
-
         with reporter.step("Put object to random node with expire_at"):
             current_epoch = self.get_epoch()
             oid = put_object_to_random_node(
                 default_wallet,
                 file_path,
-                cid,
+                container,
                 self.shell,
                 cluster,
                 expire_at=current_epoch + 1,
             )

         with reporter.step("Get object nodes"):
-            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
+            object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
             object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]

         with reporter.step("Tick Epoch"):
@@ -77,9 +77,11 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
             )

     @allure.title("Garbage collector delete object")
-    def test_garbage_collector_metrics_deleted_objects(self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
+    @requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"))
+    def test_garbage_collector_metrics_deleted_objects(
+        self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, container: str
+    ):
         file_path = generate_file(simple_object_size.value)
-        placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
         metrics_step = 1

         with reporter.step("Get current garbage collector metrics for each nodes"):
@@ -87,24 +89,21 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
             for node in cluster.cluster_nodes:
                 metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_deleted_objects_total")

-        with reporter.step(f"Create container with policy {placement_policy}"):
-            cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
-
         with reporter.step("Put object to random node"):
             oid = put_object_to_random_node(
                 default_wallet,
                 file_path,
-                cid,
+                container,
                 self.shell,
                 cluster,
             )

         with reporter.step("Get object nodes"):
-            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
+            object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
             object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]

         with reporter.step("Delete file, wait until gc remove object"):
-            delete_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())
+            delete_object(default_wallet, container, oid, self.shell, node.storage_node.get_rpc_endpoint())

         with reporter.step(f"Check garbage collector metrics 'the counter should increase by {metrics_step}'"):
             for node in object_nodes:

View file

@@ -19,6 +19,7 @@ from frostfs_testlib.utils.file_utils import generate_file

 @pytest.mark.nightly
+@pytest.mark.metrics
 class TestGRPCMetrics(ClusterTestBase):
     @pytest.fixture
     def disable_policer(self, cluster_state_controller: ClusterStateController):
@@ -84,22 +85,18 @@ class TestGRPCMetrics(ClusterTestBase):
     @allure.title("GRPC metrics object operations")
     def test_grpc_metrics_object_operations(
-        self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, disable_policer
+        self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, container: str, disable_policer
     ):
         file_path = generate_file(simple_object_size.value)
-        placement_policy = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"

         with reporter.step("Select random node"):
             node = random.choice(cluster.cluster_nodes)

-        with reporter.step(f"Create container with policy {placement_policy}"):
-            cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
-
         with reporter.step("Get current gRPC metrics for method 'Put'"):
             metrics_counter_put = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Put")

         with reporter.step("Put object to selected node"):
-            oid = put_object(default_wallet, file_path, cid, self.shell, node.storage_node.get_rpc_endpoint())
+            oid = put_object(default_wallet, file_path, container, self.shell, node.storage_node.get_rpc_endpoint())

         with reporter.step(f"Check gRPC metrics method 'Put', 'the counter should increase by 1'"):
             metrics_counter_put += 1
@@ -115,7 +112,7 @@ class TestGRPCMetrics(ClusterTestBase):
             metrics_counter_get = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Get")

         with reporter.step(f"Get object"):
-            get_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())
+            get_object(default_wallet, container, oid, self.shell, node.storage_node.get_rpc_endpoint())

         with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by 1'"):
             metrics_counter_get += 1
@@ -131,7 +128,7 @@ class TestGRPCMetrics(ClusterTestBase):
             metrics_counter_search = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Search")

         with reporter.step(f"Search object"):
-            search_object(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())
+            search_object(default_wallet, container, self.shell, node.storage_node.get_rpc_endpoint())

         with reporter.step(f"Check gRPC metrics method=Search, 'the counter should increase by 1'"):
             metrics_counter_search += 1
@@ -147,7 +144,7 @@ class TestGRPCMetrics(ClusterTestBase):
             metrics_counter_head = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Head")

         with reporter.step(f"Head object"):
-            head_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())
+            head_object(default_wallet, container, oid, self.shell, node.storage_node.get_rpc_endpoint())

         with reporter.step(f"Check gRPC metrics method=Head, 'the counter should increase by 1'"):
             metrics_counter_head += 1

View file

@@ -6,7 +6,7 @@ import allure
 import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.steps.metrics import get_metrics_value
-from frostfs_testlib.storage.cluster import Cluster, ClusterNode
+from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
 from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
 from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
@@ -15,6 +15,7 @@ from frostfs_testlib.testing.test_control import wait_for_success

 @pytest.mark.nightly
+@pytest.mark.metrics
 class TestLogsMetrics(ClusterTestBase):
     @pytest.fixture
     def revert_all(self, cluster_state_controller: ClusterStateController):

View file

@@ -4,73 +4,73 @@ import re
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.steps.cli.container import create_container, delete_container, search_nodes_with_container
+from frostfs_testlib.steps.cli.container import delete_container, search_nodes_with_container
 from frostfs_testlib.steps.cli.object import delete_object, lock_object, put_object, put_object_to_random_node
 from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
 from frostfs_testlib.steps.storage_policy import get_nodes_with_object
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
-from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
-from frostfs_testlib.utils.file_utils import generate_file
+from frostfs_testlib.utils.file_utils import TestFile
+
+from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest, requires_container


 @pytest.mark.nightly
+@pytest.mark.metrics
 class TestObjectMetrics(ClusterTestBase):
     @allure.title("Object metrics of removed container (obj_size={object_size})")
-    def test_object_metrics_removed_container(self, object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
-        file_path = generate_file(object_size.value)
-        placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
-        copies = 2
-
-        with reporter.step(f"Create container with policy {placement_policy}"):
-            cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
-
+    @requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X"))
+    def test_object_metrics_removed_container(self, default_wallet: WalletInfo, cluster: Cluster, container: str, test_file: TestFile):
         with reporter.step("Put object to random node"):
-            oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, cluster)
+            oid = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, cluster)

         with reporter.step("Check metric appears in node where the object is located"):
-            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
+            object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
             object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]

             check_metrics_counter(
                 object_nodes,
-                counter_exp=copies,
+                counter_exp=2,
                 command="frostfs_node_engine_container_objects_total",
-                cid=cid,
+                cid=container,
                 type="user",
             )

         with reporter.step("Delete container"):
-            delete_container(default_wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
+            delete_container(default_wallet, container, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)

         with reporter.step("Tick Epoch"):
             self.tick_epochs(epochs_to_tick=2, wait_block=2)

         with reporter.step("Check metrics of removed containers doesn't appear in the storage node"):
-            check_metrics_counter(object_nodes, counter_exp=0, command="frostfs_node_engine_container_objects_total", cid=cid, type="user")
-            check_metrics_counter(object_nodes, counter_exp=0, command="frostfs_node_engine_container_size_byte", cid=cid)
+            check_metrics_counter(
+                object_nodes, counter_exp=0, command="frostfs_node_engine_container_objects_total", cid=container, type="user"
+            )
+            check_metrics_counter(object_nodes, counter_exp=0, command="frostfs_node_engine_container_size_byte", cid=container)

             for node in object_nodes:
                 all_metrics = node.metrics.storage.get_metrics_search_by_greps(command="frostfs_node_engine_container_size_byte")
-                assert cid not in all_metrics.stdout, "metrics of removed containers shouldn't appear in the storage node"
+                assert container not in all_metrics.stdout, "metrics of removed containers shouldn't appear in the storage node"

-    @allure.title("Object metrics, locked object (obj_size={object_size}, policy={placement_policy})")
-    @pytest.mark.parametrize("placement_policy", ["REP 1 IN X CBF 1 SELECT 1 FROM * AS X", "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"])
+    @allure.title("Object metrics, locked object (obj_size={object_size}, policy={container_request})")
+    @requires_container(
+        [
+            PUBLIC_WITH_POLICY("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", short_name="REP 1"),
+            PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"),
+        ]
+    )
     def test_object_metrics_blocked_object(
-        self, object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, placement_policy: str
+        self, default_wallet: WalletInfo, cluster: Cluster, container: str, container_request: ContainerRequest, test_file: TestFile
     ):
-        file_path = generate_file(object_size.value)
-        metric_step = int(re.search(r"REP\s(\d+)", placement_policy).group(1))
-
-        with reporter.step(f"Create container with policy {placement_policy}"):
-            cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
+        metric_step = int(re.search(r"REP\s(\d+)", container_request.policy).group(1))

         with reporter.step("Search container nodes"):
             container_nodes = search_nodes_with_container(
                 wallet=default_wallet,
-                cid=cid,
+                cid=container,
                 shell=self.shell,
                 endpoint=self.cluster.default_rpc_endpoint,
                 cluster=cluster,
@@ -82,7 +82,7 @@ class TestObjectMetrics(ClusterTestBase):
                 objects_metric_counter += get_metrics_value(node, command="frostfs_node_engine_objects_total", type="user")

         with reporter.step("Put object to container node"):
-            oid = put_object(default_wallet, file_path, cid, self.shell, container_nodes[0].storage_node.get_rpc_endpoint())
+            oid = put_object(default_wallet, test_file.path, container, self.shell, container_nodes[0].storage_node.get_rpc_endpoint())

         with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
             objects_metric_counter += metric_step
@@ -96,12 +96,12 @@ class TestObjectMetrics(ClusterTestBase):
                 container_nodes,
                 counter_exp=metric_step,
                 command="frostfs_node_engine_container_objects_total",
-                cid=cid,
+                cid=container,
                 type="user",
             )

         with reporter.step("Delete object"):
-            delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
+            delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)

         with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
             objects_metric_counter -= metric_step
@@ -115,16 +115,16 @@ class TestObjectMetrics(ClusterTestBase):
                 container_nodes,
                 counter_exp=0,
                 command="frostfs_node_engine_container_objects_total",
-                cid=cid,
+                cid=container,
                 type="user",
             )

         with reporter.step("Put object and lock it to next epoch"):
-            oid = put_object(default_wallet, file_path, cid, self.shell, container_nodes[0].storage_node.get_rpc_endpoint())
+            oid = put_object(default_wallet, test_file.path, container, self.shell, container_nodes[0].storage_node.get_rpc_endpoint())
             current_epoch = self.get_epoch()
             lock_object(
                 default_wallet,
-                cid,
+                container,
                 oid,
                 self.shell,
                 container_nodes[0].storage_node.get_rpc_endpoint(),
@@ -143,7 +143,7 @@ class TestObjectMetrics(ClusterTestBase):
                 container_nodes,
                 counter_exp=metric_step,
                 command="frostfs_node_engine_container_objects_total",
-                cid=cid,
+                cid=container,
                 type="user",
             )
@@ -157,7 +157,7 @@ class TestObjectMetrics(ClusterTestBase):
             )

         with reporter.step("Delete object"):
-            delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
+            delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)

         with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
             objects_metric_counter -= metric_step
@@ -171,7 +171,7 @@ class TestObjectMetrics(ClusterTestBase):
                 container_nodes,
                 counter_exp=0,
                 command="frostfs_node_engine_container_objects_total",
-                cid=cid,
+                cid=container,
                 type="user",
             )
@@ -179,8 +179,8 @@ class TestObjectMetrics(ClusterTestBase):
             current_epoch = self.get_epoch()
             oid = put_object(
                 default_wallet,
-                file_path,
-                cid,
+                test_file.path,
+                container,
                 self.shell,
                 container_nodes[0].storage_node.get_rpc_endpoint(),
                 expire_at=current_epoch + 1,
@@ -198,7 +198,7 @@ class TestObjectMetrics(ClusterTestBase):
                 container_nodes,
                 counter_exp=metric_step,
                 command="frostfs_node_engine_container_objects_total",
-                cid=cid,
+                cid=container,
                 type="user",
             )
@@ -217,31 +217,28 @@ class TestObjectMetrics(ClusterTestBase):
                 container_nodes,
                 counter_exp=0,
                 command="frostfs_node_engine_container_objects_total",
-                cid=cid,
+                cid=container,
                 type="user",
             )

     @allure.title("Object metrics, stop the node (obj_size={object_size})")
+    @requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"))
     def test_object_metrics_stop_node(
         self,
-        object_size: ObjectSize,
         default_wallet: WalletInfo,
         cluster_state_controller: ClusterStateController,
+        container: str,
+        test_file: TestFile,
     ):
-        placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
-        file_path = generate_file(object_size.value)
         copies = 2

-        with reporter.step(f"Create container with policy {placement_policy}"):
-            cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, placement_policy)
-
         with reporter.step(f"Check object metrics in container 'should be zero'"):
             check_metrics_counter(
                 self.cluster.cluster_nodes,
                 counter_exp=0,
                 command="frostfs_node_engine_container_objects_total",
                 type="user",
-                cid=cid,
+                cid=container,
             )

         with reporter.step("Get current metrics for each nodes"):
@@ -250,10 +247,10 @@ class TestObjectMetrics(ClusterTestBase):
                 objects_metric_counter[node] = get_metrics_value(node, command="frostfs_node_engine_objects_total", type="user")

         with reporter.step("Put object"):
-            oid = put_object(default_wallet, file_path, cid, self.shell, self.cluster.default_rpc_endpoint)
+            oid = put_object(default_wallet, test_file.path, container, self.shell, self.cluster.default_rpc_endpoint)

         with reporter.step("Get object nodes"):
-            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, self.cluster.storage_nodes)
+            object_storage_nodes = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
             object_nodes = [
                 cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes
             ]
@@ -266,7 +263,7 @@ class TestObjectMetrics(ClusterTestBase):
                 counter_exp=copies,
                 command="frostfs_node_engine_container_objects_total",
                 type="user",
-                cid=cid,
+                cid=container,
             )

         with reporter.step(f"Select node to stop"):
@@ -290,5 +287,5 @@ class TestObjectMetrics(ClusterTestBase):
                 counter_exp=copies,
                 command="frostfs_node_engine_container_objects_total",
                 type="user",
-                cid=cid,
+                cid=container,
             )

View file

@@ -5,8 +5,6 @@ import allure
 import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
-from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE
-from frostfs_testlib.steps.cli.container import create_container
 from frostfs_testlib.steps.cli.object import get_object, put_object
 from frostfs_testlib.steps.metrics import check_metrics_counter
 from frostfs_testlib.steps.node_management import node_shard_list, node_shard_set_mode
@@ -18,8 +16,11 @@ from frostfs_testlib.testing import parallel, wait_for_success
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import generate_file

+from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container
+

 @pytest.mark.nightly
+@pytest.mark.metrics
 class TestShardMetrics(ClusterTestBase):
     @pytest.fixture()
     @allure.title("Get two shards for set mode")
@@ -127,28 +128,22 @@ class TestShardMetrics(ClusterTestBase):
             )

     @allure.title("Metric for error count on shard")
-    def test_shard_metrics_error_count(self, max_object_size: int, default_wallet: WalletInfo, cluster: Cluster, revert_all_shards_mode):
+    @requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1"))
+    def test_shard_metrics_error_count(
+        self, max_object_size: int, default_wallet: WalletInfo, cluster: Cluster, container: str, revert_all_shards_mode
+    ):
         file_path = generate_file(round(max_object_size * 0.8))

-        with reporter.step(f"Create container"):
-            cid = create_container(
-                wallet=default_wallet,
-                shell=self.shell,
-                endpoint=cluster.default_rpc_endpoint,
-                rule="REP 1 CBF 1",
-                basic_acl=EACL_PUBLIC_READ_WRITE,
-            )
-
         with reporter.step("Put object"):
-            oid = put_object(default_wallet, file_path, cid, self.shell, cluster.default_rpc_endpoint)
+            oid = put_object(default_wallet, file_path, container, self.shell, cluster.default_rpc_endpoint)

         with reporter.step("Get object nodes"):
-            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
+            object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
             object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
             node = random.choice(object_nodes)

         with reporter.step("Search object in system."):
-            object_path, object_name = self.get_object_path_and_name_file(oid, cid, node)
+            object_path, object_name = self.get_object_path_and_name_file(oid, container, node)

         with reporter.step("Block read file"):
             node.host.get_shell().exec(f"chmod a-r {object_path}/{object_name}")
@@ -157,7 +152,7 @@ class TestShardMetrics(ClusterTestBase):
         with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
             get_object(
                 wallet=default_wallet,
-                cid=cid,
+                cid=container,
                 oid=oid,
                 shell=self.shell,
                 endpoint=node.storage_node.get_rpc_endpoint(),