diff --git a/pytest_tests/helpers/container_spec.py b/pytest_tests/helpers/container_spec.py
index b82c0193..f987ddb0 100644
--- a/pytest_tests/helpers/container_spec.py
+++ b/pytest_tests/helpers/container_spec.py
@@ -2,13 +2,23 @@ from dataclasses import dataclass
 
 from frostfs_testlib.steps.cli.container import DEFAULT_PLACEMENT_RULE
 from frostfs_testlib.storage.cluster import Cluster
+from frostfs_testlib.storage.dataclasses import ape
+
+APE_PUBLIC_READ_WRITE = [ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL)]
 
 
 @dataclass
 class ContainerSpec:
     rule: str = DEFAULT_PLACEMENT_RULE
+    # TODO: Deprecated, use ape_rules instead
     basic_acl: str = None
+    # TODO: Deprecated, use ape_rules instead
     allow_owner_via_ape: bool = False
+    ape_rules: list[ape.Rule] = None
+
+    def __post_init__(self):
+        if self.ape_rules is None:
+            self.ape_rules = []
 
     def parsed_rule(self, cluster: Cluster):
         if self.rule is None:
@@ -21,3 +31,14 @@ class ContainerSpec:
             parsed_rule = parsed_rule.replace(sub, replacement)
 
         return parsed_rule
+
+    def __repr__(self):
+        spec_info: list[str] = []
+
+        if self.rule:
+            spec_info.append(f"rule='{self.rule}'")
+        if self.ape_rules:
+            ape_rules_list = ", ".join([f"'{rule.as_string()}'" for rule in self.ape_rules])
+            spec_info.append(f"ape_rules=[{ape_rules_list}]")
+
+        return f"ContainerSpec({', '.join(spec_info)})"
diff --git a/pytest_tests/testsuites/access/conftest.py b/pytest_tests/testsuites/access/conftest.py
index b2c1ac77..6d03faa3 100644
--- a/pytest_tests/testsuites/access/conftest.py
+++ b/pytest_tests/testsuites/access/conftest.py
@@ -1,22 +1,14 @@
 import json
-import time
 
-import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
-from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
-from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
+from frostfs_testlib.steps.cli.container import search_nodes_with_container
 from frostfs_testlib.steps.cli.object import put_object_to_random_node
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses import ape
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.parallel import parallel
-from frostfs_testlib.utils import datetime_utils
-
-from ...helpers.container_spec import ContainerSpec
 
 OBJECT_COUNT = 5
 
@@ -48,81 +40,6 @@ def test_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: ape.
     return role_to_wallet_map[role]
 
 
-@pytest.fixture
-def container(
-    default_wallet: WalletInfo,
-    frostfs_cli: FrostfsCli,
-    client_shell: Shell,
-    cluster: Cluster,
-    request: pytest.FixtureRequest,
-    rpc_endpoint: str,
-) -> str:
-    container_spec = _get_container_spec(request)
-    cid = _create_container_by_spec(default_wallet, client_shell, cluster, rpc_endpoint, container_spec)
-    if container_spec.allow_owner_via_ape:
-        _allow_owner_via_ape(frostfs_cli, cluster, cid)
-
-    return cid
-
-
-def _create_container_by_spec(
-    default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, rpc_endpoint: str, container_spec: ContainerSpec
-) -> str:
-    # TODO: add container spec to step message
-    with reporter.step("Create container"):
-        cid = create_container(
-            default_wallet, client_shell, rpc_endpoint, basic_acl=container_spec.basic_acl, rule=container_spec.parsed_rule(cluster)
-        )
-
-    with reporter.step("Search nodes holding the container"):
-        container_holder_nodes = search_nodes_with_container(default_wallet, cid, client_shell, cluster.default_rpc_endpoint, cluster)
-        report_data = {node.id: node.host_ip for node in container_holder_nodes}
-
-    reporter.attach(json.dumps(report_data, indent=2), "container_nodes.json")
-
-    return cid
-
-
-def _get_container_spec(request: pytest.FixtureRequest) -> ContainerSpec:
-    container_marker = request.node.get_closest_marker("container")
-    # let default container to be public at the moment
-    container_spec = ContainerSpec(basic_acl=PUBLIC_ACL)
-
-    if container_marker:
-        if len(container_marker.args) != 1:
-            raise RuntimeError(f"Something wrong with container marker: {container_marker}")
-        container_spec = container_marker.args[0]
-
-    if "param" in request.__dict__:
-        container_spec = request.param
-
-    if not container_spec:
-        raise RuntimeError(
-            f"""Container specification is empty. 
-                            Either add @pytest.mark.container(ContainerSpec(...)) or 
-                            @pytest.mark.parametrize(\"container\", [ContainerSpec(...)], indirect=True) decorator"""
-        )
-
-    return container_spec
-
-
-def _allow_owner_via_ape(frostfs_cli: FrostfsCli, cluster: Cluster, container: str):
-    with reporter.step("Create allow APE rule for container owner"):
-        role_condition = ape.Condition.by_role(ape.Role.OWNER)
-        deny_rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, role_condition)
-
-        frostfs_cli.ape_manager.add(
-            cluster.default_rpc_endpoint,
-            deny_rule.chain_id,
-            target_name=container,
-            target_type="container",
-            rule=deny_rule.as_string(),
-        )
-
-    with reporter.step("Wait for one block"):
-        time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
-
-
 @pytest.fixture
 def objects(container: str, default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, file_path: str):
     with reporter.step("Add test objects to container"):
diff --git a/pytest_tests/testsuites/conftest.py b/pytest_tests/testsuites/conftest.py
index 31900ff2..8a331dbc 100644
--- a/pytest_tests/testsuites/conftest.py
+++ b/pytest_tests/testsuites/conftest.py
@@ -1,7 +1,9 @@
+import json
 import logging
 import os
 import random
 import shutil
+import time
 from datetime import datetime, timedelta, timezone
 from typing import Optional
 
@@ -14,15 +16,24 @@ from frostfs_testlib.credentials.interfaces import CredentialsProvider, User
 from frostfs_testlib.healthcheck.interfaces import Healthcheck
 from frostfs_testlib.hosting import Hosting
 from frostfs_testlib.resources import optionals
-from frostfs_testlib.resources.common import COMPLEX_OBJECT_CHUNKS_COUNT, COMPLEX_OBJECT_TAIL_SIZE, SIMPLE_OBJECT_SIZE
+from frostfs_testlib.resources.common import COMPLEX_OBJECT_CHUNKS_COUNT, COMPLEX_OBJECT_TAIL_SIZE, MORPH_BLOCK_TIME, SIMPLE_OBJECT_SIZE
+from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
 from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
 from frostfs_testlib.s3.interfaces import BucketContainerResolver
 from frostfs_testlib.shell import LocalShell, Shell
-from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE, FROSTFS_CLI_EXEC
+from frostfs_testlib.steps.cli.container import (
+    DEFAULT_EC_PLACEMENT_RULE,
+    DEFAULT_PLACEMENT_RULE,
+    FROSTFS_CLI_EXEC,
+    create_container,
+    search_nodes_with_container,
+)
 from frostfs_testlib.steps.cli.object import get_netmap_netinfo
+from frostfs_testlib.steps.epoch import ensure_fresh_epoch
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
+from frostfs_testlib.storage.dataclasses import ape
 from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
@@ -32,8 +43,9 @@ from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.parallel import parallel
 from frostfs_testlib.testing.test_control import run_optionally, wait_for_success
-from frostfs_testlib.utils import env_utils, string_utils, version_utils
+from frostfs_testlib.utils import datetime_utils, env_utils, string_utils, version_utils
 from frostfs_testlib.utils.file_utils import TestFile, generate_file
+from ..helpers.container_spec import ContainerSpec
 
 from ..resources.common import TEST_CYCLES_COUNT
 
@@ -158,11 +170,6 @@ def simple_object_size(max_object_size: int) -> ObjectSize:
     return ObjectSize("simple", size)
 
 
-@pytest.fixture()
-def file_path(object_size: ObjectSize) -> TestFile:
-    return generate_file(object_size.value)
-
-
 @pytest.fixture(scope="session")
 def complex_object_size(max_object_size: int) -> ObjectSize:
     size = max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)
@@ -181,6 +188,17 @@ def object_size(simple_object_size: ObjectSize, complex_object_size: ObjectSize,
     return complex_object_size
 
 
+@pytest.fixture()
+def test_file(object_size: ObjectSize) -> TestFile:
+    return generate_file(object_size.value)
+
+
+# Deprecated. Use the test_file fixture instead
+@pytest.fixture()
+def file_path(test_file: TestFile) -> TestFile:
+    return test_file
+
+
 @pytest.fixture(scope="session")
 def rep_placement_policy() -> PlacementPolicy:
     return PlacementPolicy("rep", DEFAULT_PLACEMENT_RULE)
@@ -469,3 +487,108 @@ def bucket_container_resolver(node_under_test: ClusterNode) -> BucketContainerRe
     resolver_cls = plugins.load_plugin("frostfs.testlib.bucket_cid_resolver", node_under_test.host.config.product)
     resolver: BucketContainerResolver = resolver_cls()
     return resolver
+
+
+@pytest.fixture
+def container(
+    default_wallet: WalletInfo,
+    frostfs_cli: FrostfsCli,
+    client_shell: Shell,
+    cluster: Cluster,
+    request: pytest.FixtureRequest,
+    rpc_endpoint: str,
+) -> str:
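+    """Create a container from the ContainerSpec provided via the `container` marker or indirect parametrization."""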
+    with reporter.step("Get container specification for test"):
+        container_spec = _get_container_spec(request)
+
+    with reporter.step("Create container"):
+        cid = _create_container_by_spec(default_wallet, client_shell, cluster, rpc_endpoint, container_spec)
+    # TODO: deprecate allow_owner_via_ape in favor of the generic ContainerSpec.ape_rules parameter
+    if container_spec.allow_owner_via_ape:
+        with reporter.step("Allow owner via APE on container"):
+            _allow_owner_via_ape(frostfs_cli, cluster, cid)
+
+    with reporter.step("Apply APE rules for container"):
+        if container_spec.ape_rules:
+            _apply_ape_rules(frostfs_cli, cluster, cid, container_spec.ape_rules)
+
+    return cid
+
+
+def _apply_ape_rules(frostfs_cli: FrostfsCli, cluster: Cluster, container: str, ape_rules: list[ape.Rule]):
+    for ape_rule in ape_rules:
+        rule_str = ape_rule.as_string()
+        with reporter.step(f"Apply APE rule '{rule_str}' for container {container}"):
+            frostfs_cli.ape_manager.add(
+                cluster.default_rpc_endpoint,
+                ape_rule.chain_id,
+                target_name=container,
+                target_type="container",
+                rule=rule_str,
+            )
+
+    with reporter.step("Wait for one block"):
+        time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
+
+
+def _create_container_by_spec(
+    default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, rpc_endpoint: str, container_spec: ContainerSpec
+) -> str:
+    with reporter.step(f"Create container {container_spec}"):
+        cid = create_container(
+            default_wallet, client_shell, rpc_endpoint, basic_acl=container_spec.basic_acl, rule=container_spec.parsed_rule(cluster)
+        )
+
+    with reporter.step("Search nodes holding the container"):
+        container_holder_nodes = search_nodes_with_container(default_wallet, cid, client_shell, cluster.default_rpc_endpoint, cluster)
+        report_data = {node.id: node.host_ip for node in container_holder_nodes}
+
+    reporter.attach(json.dumps(report_data, indent=2), "container_nodes.json")
+
+    return cid
+
+
+def _get_container_spec(request: pytest.FixtureRequest) -> ContainerSpec:
+    container_marker = request.node.get_closest_marker("container")
+    # Default to a public container for now
+    container_spec = ContainerSpec(basic_acl=PUBLIC_ACL)
+
+    if container_marker:
+        if len(container_marker.args) != 1:
+            raise RuntimeError(f"Something wrong with container marker: {container_marker}")
+        container_spec = container_marker.args[0]
+
+    if "param" in request.__dict__:
+        container_spec = request.param
+
+    if not container_spec:
+        raise RuntimeError(
+            f"""Container specification is empty. 
+                            Either add @pytest.mark.container(ContainerSpec(...)) or 
+                            @pytest.mark.parametrize(\"container\", [ContainerSpec(...)], indirect=True) decorator"""
+        )
+
+    return container_spec
+
+
+def _allow_owner_via_ape(frostfs_cli: FrostfsCli, cluster: Cluster, container: str):
+    with reporter.step("Create allow APE rule for container owner"):
+        role_condition = ape.Condition.by_role(ape.Role.OWNER)
+        ape_rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, role_condition)
+
+        frostfs_cli.ape_manager.add(
+            cluster.default_rpc_endpoint,
+            ape_rule.chain_id,
+            target_name=container,
+            target_type="container",
+            rule=ape_rule.as_string(),
+        )
+
+    with reporter.step("Wait for one block"):
+        time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
+
+
+@pytest.fixture()
+def new_epoch(client_shell: Shell, cluster: Cluster) -> int:
+    return ensure_fresh_epoch(client_shell, cluster)
diff --git a/pytest_tests/testsuites/object/test_object_tombstone.py b/pytest_tests/testsuites/object/test_object_tombstone.py
new file mode 100644
index 00000000..941fb9bf
--- /dev/null
+++ b/pytest_tests/testsuites/object/test_object_tombstone.py
@@ -0,0 +1,65 @@
+import allure
+import pytest
+from frostfs_testlib import reporter
+from frostfs_testlib.resources.common import EXPIRATION_EPOCH_ATTRIBUTE
+from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
+from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
+from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
+from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
+from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
+from frostfs_testlib.utils.file_utils import TestFile
+
+from ...helpers.container_spec import APE_PUBLIC_READ_WRITE, ContainerSpec
+
+
+class TestObjectTombstone(ClusterTestBase):
+    @pytest.fixture()
+    @allure.title("Change tombstone lifetime")
+    def tombstone_lifetime(self, cluster_state_controller: ClusterStateController, request: pytest.FixtureRequest):
+        config_manager = cluster_state_controller.manager(ConfigStateManager)
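+        # Override tombstone lifetime on all storage nodes with the value from the test parametrization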
+        config_manager.set_on_all_nodes(StorageNode, {"object:delete:tombstone_lifetime": request.param}, True)
+
+        yield f"Tombstone lifetime was changed to {request.param}"
+
+        config_manager.revert_all(True)
+
+    @pytest.mark.container(ContainerSpec(ape_rules=APE_PUBLIC_READ_WRITE))
+    @pytest.mark.parametrize("object_size, tombstone_lifetime", [("simple", 2)], indirect=True)
+    @allure.title("Tombstone object should be removed after expiration")
+    def test_tombstone_lifetime(
+        self,
+        new_epoch: int,
+        container: str,
+        grpc_client: GrpcClientWrapper,
+        test_file: TestFile,
+        rpc_endpoint: str,
+        tombstone_lifetime: str,
+    ):
+        allure.dynamic.description(tombstone_lifetime)
+
+        with reporter.step("Put object"):
+            oid = grpc_client.object.put(test_file.path, container, rpc_endpoint)
+
+        with reporter.step("Remove object"):
+            tombstone_oid = grpc_client.object.delete(container, oid, rpc_endpoint)
+
+        with reporter.step("Get tombstone object lifetime"):
+            tombstone_info = grpc_client.object.head(container, tombstone_oid, rpc_endpoint)
+            tombstone_expiration_epoch = tombstone_info["header"]["attributes"][EXPIRATION_EPOCH_ATTRIBUTE]
+
+        with reporter.step("Tombstone lifetime should be <= 3"):
+            epochs_to_skip = int(tombstone_expiration_epoch) - new_epoch + 1
+            assert epochs_to_skip <= 3
+
+        with reporter.step("Wait for tombstone expiration"):
+            self.tick_epochs(epochs_to_skip)
+
+        with reporter.step("Tombstone should be removed after expiration"):
+            with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
+                grpc_client.object.head(container, tombstone_oid, rpc_endpoint)
+
+            with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
+                grpc_client.object.get(container, tombstone_oid, rpc_endpoint)
diff --git a/pytest_tests/testsuites/services/http_gate/test_http_system_header.py b/pytest_tests/testsuites/services/http_gate/test_http_system_header.py
index b06029b4..50446a59 100644
--- a/pytest_tests/testsuites/services/http_gate/test_http_system_header.py
+++ b/pytest_tests/testsuites/services/http_gate/test_http_system_header.py
@@ -23,7 +23,10 @@ from frostfs_testlib.utils.file_utils import generate_file
 
 logger = logging.getLogger("NeoLogger")
 EXPIRATION_TIMESTAMP_HEADER = "__SYSTEM__EXPIRATION_TIMESTAMP"
+
+# TODO: Deprecated. Use EXPIRATION_EPOCH_ATTRIBUTE from testlib instead
 EXPIRATION_EPOCH_HEADER = "__SYSTEM__EXPIRATION_EPOCH"
+
 EXPIRATION_DURATION_HEADER = "__SYSTEM__EXPIRATION_DURATION"
 EXPIRATION_EXPIRATION_RFC = "__SYSTEM__EXPIRATION_RFC3339"
 SYSTEM_EXPIRATION_EPOCH = "System-Expiration-Epoch"
@@ -151,9 +154,7 @@ class Test_http_system_header(ClusterTestBase):
     def test_unable_put_negative_duration(self, user_container: str, simple_object_size: ObjectSize):
         headers = attr_into_str_header_curl({"System-Expiration-Duration": "-1h"})
         file_path = generate_file(simple_object_size.value)
-        with reporter.step(
-            "Put object using HTTP with attribute System-Expiration-Duration where duration is negative"
-        ):
+        with reporter.step("Put object using HTTP with attribute System-Expiration-Duration where duration is negative"):
             upload_via_http_gate_curl(
                 cid=user_container,
                 filepath=file_path,
@@ -166,9 +167,7 @@ class Test_http_system_header(ClusterTestBase):
     def test_unable_put_expired_timestamp(self, user_container: str, simple_object_size: ObjectSize):
         headers = attr_into_str_header_curl({"System-Expiration-Timestamp": "1635075727"})
         file_path = generate_file(simple_object_size.value)
-        with reporter.step(
-            "Put object using HTTP with attribute System-Expiration-Timestamp where duration is in the past"
-        ):
+        with reporter.step("Put object using HTTP with attribute System-Expiration-Timestamp where duration is in the past"):
             upload_via_http_gate_curl(
                 cid=user_container,
                 filepath=file_path,
@@ -177,9 +176,7 @@ class Test_http_system_header(ClusterTestBase):
                 error_pattern=f"{EXPIRATION_TIMESTAMP_HEADER} must be in the future",
             )
 
-    @allure.title(
-        "[NEGATIVE] Put object using HTTP with attribute System-Expiration-RFC3339 where duration is in the past"
-    )
+    @allure.title("[NEGATIVE] Put object using HTTP with attribute System-Expiration-RFC3339 where duration is in the past")
     def test_unable_put_expired_rfc(self, user_container: str, simple_object_size: ObjectSize):
         headers = attr_into_str_header_curl({"System-Expiration-RFC3339": "2021-11-22T09:55:49Z"})
         file_path = generate_file(simple_object_size.value)
@@ -204,9 +201,7 @@ class Test_http_system_header(ClusterTestBase):
         with reporter.step(
             f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
         ):
-            oid, head_info = self.oid_header_info_for_object(
-                file_path=file_path, attributes=attributes, user_container=user_container
-            )
+            oid, head_info = self.oid_header_info_for_object(file_path=file_path, attributes=attributes, user_container=user_container)
             self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
         with reporter.step("Check that object becomes unavailable when epoch is expired"):
             for _ in range(0, epoch_count + 1):
@@ -243,9 +238,7 @@ class Test_http_system_header(ClusterTestBase):
         with reporter.step(
             f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
         ):
-            oid, head_info = self.oid_header_info_for_object(
-                file_path=file_path, attributes=attributes, user_container=user_container
-            )
+            oid, head_info = self.oid_header_info_for_object(file_path=file_path, attributes=attributes, user_container=user_container)
             self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
         with reporter.step("Check that object becomes unavailable when epoch is expired"):
             for _ in range(0, epoch_count + 1):
@@ -276,17 +269,13 @@ class Test_http_system_header(ClusterTestBase):
         )
         attributes = {
             SYSTEM_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=2),
-            SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(
-                epoch_duration=epoch_duration, epoch=1, rfc3339=True
-            ),
+            SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=1, rfc3339=True),
         }
         file_path = generate_file(object_size.value)
         with reporter.step(
             f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
         ):
-            oid, head_info = self.oid_header_info_for_object(
-                file_path=file_path, attributes=attributes, user_container=user_container
-            )
+            oid, head_info = self.oid_header_info_for_object(file_path=file_path, attributes=attributes, user_container=user_container)
             self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
         with reporter.step("Check that object becomes unavailable when epoch is expired"):
             for _ in range(0, epoch_count + 1):
@@ -314,20 +303,14 @@ class Test_http_system_header(ClusterTestBase):
         ["simple"],
         indirect=True,
     )
-    def test_http_rfc_object_unavailable_after_expir(
-        self, user_container: str, object_size: ObjectSize, epoch_duration: int
-    ):
+    def test_http_rfc_object_unavailable_after_expir(self, user_container: str, object_size: ObjectSize, epoch_duration: int):
         self.tick_epoch()
         epoch_count = 2
         expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
         logger.info(
             f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
         )
-        attributes = {
-            SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(
-                epoch_duration=epoch_duration, epoch=2, rfc3339=True
-            )
-        }
+        attributes = {SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=2, rfc3339=True)}
         file_path = generate_file(object_size.value)
         with reporter.step(
             f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"