[#374] Added test for metrics of an aborted multipart object

Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
Ilyas Niyazov 2025-03-20 09:04:08 +03:00
parent 24301f4e8c
commit 2bd4019e90


@@ -12,6 +12,7 @@ from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile, generate_file, split_file
@@ -137,7 +138,7 @@ class TestObjectMetrics(ClusterTestBase):
oid,
self.shell,
container_nodes[0].storage_node.get_rpc_endpoint(),
expire_at=current_epoch + 1,
expire_at=current_epoch + 2,
)
with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
@@ -158,10 +159,10 @@ class TestObjectMetrics(ClusterTestBase):
)
with reporter.step(f"Wait until remove locking 'the counter doesn't change'"):
self.tick_epochs(epochs_to_tick=2)
self.tick_epochs(epochs_to_tick=5)
check_metrics_counter(
container_nodes,
operator="<=",
operator=">=",
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
@@ -187,15 +188,41 @@ class TestObjectMetrics(ClusterTestBase):
type="user",
)
@allure.title("Object metrics, expire_at object (obj_size={object_size}, policy={container_request})")
@requires_container(
[
PUBLIC_WITH_POLICY("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", short_name="REP 1"),
PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"),
]
)
def test_object_metrics_expire_at_object(
self, default_wallet: WalletInfo, cluster: Cluster, container: str, container_request: ContainerRequest, test_file: TestFile
):
metric_step = int(re.search(r"REP\s(\d+)", container_request.policy).group(1))
with reporter.step("Search container nodes"):
container_nodes = search_nodes_with_container(
wallet=default_wallet,
cid=container,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
cluster=cluster,
)
with reporter.step("Get current metrics for metric_type=user"):
objects_metric_counter = 0
for node in container_nodes:
objects_metric_counter += get_metrics_value(node, command="frostfs_node_engine_objects_total", type="user")
with reporter.step("Put object with expire_at"):
current_epoch = self.get_epoch()
oid = put_object(
put_object(
default_wallet,
test_file.path,
container,
self.shell,
container_nodes[0].storage_node.get_rpc_endpoint(),
expire_at=current_epoch + 1,
expire_at=current_epoch + 2,
)
with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
@@ -215,8 +242,8 @@ class TestObjectMetrics(ClusterTestBase):
type="user",
)
with reporter.step("Tick Epoch"):
self.tick_epochs(epochs_to_tick=2)
with reporter.step("Tick Epochs"):
self.tick_epochs(epochs_to_tick=5)
with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
objects_metric_counter -= metric_step
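The arithmetic behind these steps: metric_step is the replication factor parsed from the placement policy, so one object put under REP N is expected to raise the summed frostfs_node_engine_objects_total user counter by N, and once its expire_at epoch has passed (hence ticking several epochs), the counter should drop by the same N. A small self-contained sketch of that expectation, reusing the regex from the test; the numbers are illustrative only:

import re


def expected_counter_delta(placement_policy: str, objects_put: int = 1) -> int:
    # Same REP-factor extraction as metric_step in the test above.
    replicas = int(re.search(r"REP\s(\d+)", placement_policy).group(1))
    return replicas * objects_put


baseline = 100  # summed user-object counter before the put (illustrative)
step = expected_counter_delta("REP 2 IN X CBF 2 SELECT 2 FROM * AS X")

after_put = baseline + step          # the counter should increase by metric_step
after_expiration = after_put - step  # and decrease by metric_step after expire_at
assert (after_put, after_expiration) == (102, 100)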
@@ -348,3 +375,43 @@ class TestObjectMetrics(ClusterTestBase):
type="user",
cid=cid,
)
@allure.title("Check metrics object count for abort multipart object (s3_client={s3_client})")
def test_abort_multipart_object_metrics(
self,
s3_client: S3ClientWrapper,
bucket_container_resolver: BucketContainerResolver,
complex_object_size: ObjectSize,
):
with reporter.step("Create bucket"):
bucket = s3_client.create_bucket()
with reporter.step("Generate original object and split it into parts"):
parts_count = 3
original_file = generate_file(complex_object_size.value * parts_count)
file_parts = split_file(original_file, parts_count)
object_key = s3_helper.object_key_from_file_path(original_file)
with reporter.step("Create multipart upload"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
with reporter.step(f"Upload {parts_count} parts to multipart upload"):
for i, file in enumerate(file_parts, start=1):
s3_client.upload_part(bucket, object_key, upload_id, i, file)
with reporter.step("Abort multipart upload"):
s3_client.abort_multipart_upload(bucket, object_key, upload_id)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected no uploads in bucket {bucket}"
with reporter.step(f"Get related container_id for bucket"):
cid = bucket_container_resolver.resolve(self.cluster.cluster_nodes[0], bucket)
with reporter.step(f"Check objects count metric, should be zero"):
check_metrics_counter(
self.cluster.cluster_nodes,
counter_exp=0,
command="frostfs_node_engine_container_objects_total",
type="user",
cid=cid,
)
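The new test leans on standard S3 multipart semantics: parts uploaded under an upload ID never become a visible object unless the upload is completed, so aborting it should leave zero user objects in the backing FrostFS container. A rough boto3 equivalent of the same client-side flow against any S3-compatible endpoint (the endpoint URL, credentials, bucket and key names are placeholders; the engine metric check itself is outside this sketch):

import boto3

# Placeholders: point these at an S3-compatible endpoint, e.g. a FrostFS S3 gateway.
s3 = boto3.client(
    "s3",
    endpoint_url="https://s3.example.com",
    aws_access_key_id="<access-key>",
    aws_secret_access_key="<secret-key>",
)

bucket, key = "metrics-test-bucket", "multipart-object"
s3.create_bucket(Bucket=bucket)

# Start a multipart upload and push a few parts; their size only matters
# if the upload were completed, which it will not be here.
upload_id = s3.create_multipart_upload(Bucket=bucket, Key=key)["UploadId"]
for part_number in range(1, 4):
    s3.upload_part(
        Bucket=bucket,
        Key=key,
        UploadId=upload_id,
        PartNumber=part_number,
        Body=b"\0" * (5 * 1024 * 1024),
    )

# Abort instead of completing: the uploaded parts are discarded server-side.
s3.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)

uploads = s3.list_multipart_uploads(Bucket=bucket).get("Uploads", [])
assert not uploads, f"Expected no in-progress uploads in bucket {bucket}"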