Compare commits


1 commit

7103289a3a  [#365] Change import CliWrapper class  (2025-02-03 14:09:17 +03:00)
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
12 changed files with 164 additions and 168 deletions

File 1 of 12

@@ -67,6 +67,7 @@ markers =
failover_data_loss: failover tests in case of data loss
metabase_loss: tests for metadata loss
write_cache_loss: tests for write cache loss
time: time tests
replication: replication tests
ec_replication: replication EC
static_session_container: tests for a static session in a container
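
Registering "time" under markers = documents the new test category and keeps pytest from warning about an unknown mark; tests can then be tagged with it and selected via -m. A minimal, hypothetical usage sketch (the new TestTime class added later in this commit is actually tagged with failover and order, so the test below is illustrative only):

import pytest

@pytest.mark.time
def test_clock_related_behaviour():
    # Selected with: pytest -m time
    assert True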

File 2 of 12

@@ -410,6 +410,13 @@ def collect_binary_versions(hosting: Hosting, client_shell: Shell, request: pyte
env_utils.save_env_properties(file_path, all_versions)
@reporter.step("[Autouse/Session] Test session start time")
@pytest.fixture(scope="session", autouse=True)
def session_start_time(configure_testlib):
start_time = datetime.utcnow()
return start_time
@pytest.fixture(scope="session")
def rpc_endpoint(cluster: Cluster):
return cluster.default_rpc_endpoint
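
The new session_start_time fixture is autouse and session-scoped, so it runs once per test session; any test or fixture can still request it by name to read the recorded start time. A hypothetical consumer, shown only as a sketch (the test name and assertion are not part of this change):

from datetime import datetime

def test_session_started_in_the_past(session_start_time):
    # session_start_time is the datetime captured by the autouse fixture above
    elapsed = datetime.utcnow() - session_start_time
    assert elapsed.total_seconds() >= 0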

File 3 of 12

@@ -0,0 +1,79 @@
import datetime
from time import sleep
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.steps.cli.object import neo_go_query_height
from frostfs_testlib.storage.controllers import ClusterStateController
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import datetime_utils
@pytest.mark.order(20)
@pytest.mark.failover
class TestTime(ClusterTestBase):
@reporter.step("Neo-go should continue to release blocks")
def check_nodes_block(self, cluster_state_controller: ClusterStateController):
count_blocks = {}
with reporter.step("Get current block id"):
for cluster_node in self.cluster.cluster_nodes:
cluster_state_controller.get_node_date(cluster_node)
count_blocks[cluster_node] = neo_go_query_height(
shell=cluster_node.host.get_shell(), endpoint=cluster_node.morph_chain.get_http_endpoint()
)["Latest block"]
with reporter.step("Wait for 3 blocks"):
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 3)
with reporter.step("Current block id should be higher than before"):
for cluster_node in self.cluster.cluster_nodes:
shell = cluster_node.host.get_shell()
now_block = neo_go_query_height(shell=shell, endpoint=cluster_node.morph_chain.get_http_endpoint())[
"Latest block"
]
assert count_blocks[cluster_node] < now_block
@pytest.fixture()
def node_time_synchronizer(self, cluster_state_controller: ClusterStateController) -> None:
cluster_state_controller.set_sync_date_all_nodes(status="inactive")
yield
cluster_state_controller.set_sync_date_all_nodes(status="active")
@allure.title("Changing hardware and system time")
def test_system_time(self, cluster_state_controller: ClusterStateController, node_time_synchronizer: None):
cluster_nodes = self.cluster.cluster_nodes
timezone_utc = datetime.timezone.utc
node_1, node_2, node_3 = cluster_nodes[0:3]
with reporter.step("On node 1, move the system time forward by 5 days"):
cluster_state_controller.change_node_date(
node_1, (datetime.datetime.now(timezone_utc) + datetime.timedelta(days=5))
)
self.check_nodes_block(cluster_state_controller)
with reporter.step("On node 2, move the system time back 5 days."):
cluster_state_controller.change_node_date(
node_2, (datetime.datetime.now(timezone_utc) - datetime.timedelta(days=5))
)
self.check_nodes_block(cluster_state_controller)
with reporter.step("On node 3, move the system time forward by 10 days"):
cluster_state_controller.change_node_date(
node_3, (datetime.datetime.now(timezone_utc) + datetime.timedelta(days=10))
)
self.check_nodes_block(cluster_state_controller)
with reporter.step("Return the time on all nodes to the current one"):
for cluster_node in self.cluster.cluster_nodes:
cluster_state_controller.restore_node_date(cluster_node)
self.check_nodes_block(cluster_state_controller)
with reporter.step("Reboot all nodes"):
cluster_state_controller.shutdown_cluster(mode="soft")
cluster_state_controller.start_stopped_hosts()
self.check_nodes_block(cluster_state_controller)
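
At its core, check_nodes_block is a generic "the chain keeps producing blocks" assertion: record the latest block height per node, wait roughly three block intervals, then require every height to have grown. A standalone sketch of that pattern, assuming a query_height(node) callable that returns the latest block number (the real test uses neo_go_query_height against each node's morph endpoint):

from time import sleep

def assert_blocks_keep_growing(nodes, query_height, block_time_sec: float, blocks: int = 3):
    # Remember the current height of every node.
    before = {node: query_height(node) for node in nodes}
    # Give the chain time to produce a few blocks.
    sleep(block_time_sec * blocks)
    # Every node must have advanced past its previous height.
    for node in nodes:
        assert query_height(node) > before[node], f"{node}: block height did not increase"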

File 4 of 12

@@ -18,7 +18,7 @@ from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, PUBLIC_WITH_POLICY, REP_2_2_2_PUBLIC, ContainerRequest, requires_container
from ...helpers.container_request import PUBLIC_WITH_POLICY, REP_2_2_2_PUBLIC, requires_container
logger = logging.getLogger("NeoLogger")
STORAGE_NODE_COMMUNICATION_PORT = "8080"
@@ -354,11 +354,6 @@ class TestFailoverNetwork(ClusterTestBase):
@pytest.mark.interfaces
@pytest.mark.failover_baremetal
@pytest.mark.parametrize(
"container_request",
[ContainerRequest(f"REP %NODE_COUNT% IN X CBF 1 SELECT %NODE_COUNT% FROM * AS X", APE_EVERYONE_ALLOW_ALL)],
indirect=True,
)
@pytest.mark.parametrize("interface", [Interfaces.INTERNAL_0, Interfaces.INTERNAL_1])
@allure.title("Down internal interfaces to all nodes(interface={interface})")
def test_down_internal_interface(
@@ -369,7 +364,6 @@ class TestFailoverNetwork(ClusterTestBase):
simple_object_size: ObjectSize,
restore_down_interfaces: None,
interface: Interfaces,
container: str,
):
cluster_nodes = self.cluster.cluster_nodes
latest_block = {}
@@ -386,21 +380,29 @@ class TestFailoverNetwork(ClusterTestBase):
with reporter.step("Tick 1 epoch and wait 2 block for sync all nodes"):
self.tick_epochs(1, alive_node=cluster_nodes[0].storage_node, wait_block=2)
with reporter.step("Create container"):
cid = create_container(
wallet=default_wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule="REP 4 CBF 1",
)
with reporter.step(f"Put object, after down {interface}"):
file_path = generate_file(simple_object_size.value)
oid = put_object(
wallet=default_wallet,
path=file_path,
cid=container,
cid=cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with reporter.step("Get object"):
get_object(
file_get_path = get_object(
wallet=default_wallet,
cid=container,
cid=cid,
oid=oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
@@ -413,7 +415,7 @@ class TestFailoverNetwork(ClusterTestBase):
now_block[cluster_node] = neo_go_query_height(
shell=cluster_node.host.get_shell(), endpoint=cluster_node.morph_chain.get_http_endpoint()
)
with reporter.step("Compare block"):
with reporter.step(f"Compare block"):
for cluster_node, items in now_block.items():
with reporter.step(
f"Node - {cluster_node.host_ip}, old block - {latest_block[cluster_node]['Latest block']}, "

File 5 of 12

@@ -12,14 +12,13 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.file_utils import TestFile, generate_file
from ...helpers.container_request import PUBLIC_WITH_POLICY, REP_2_1_4_PUBLIC, ContainerRequest, requires_container
from ...helpers.utility import are_numbers_similar
@pytest.mark.order(-10)
@pytest.mark.order(-5)
@pytest.mark.nightly
@pytest.mark.metrics
class TestContainerMetrics(ClusterTestBase):
@@ -36,14 +35,6 @@ class TestContainerMetrics(ClusterTestBase):
except Exception as e:
return None
@wait_for_success(max_wait_time=300, interval=30)
def check_metrics_value_by_approx(self, cluster_nodes: list[ClusterNode], metric_name: str, cid: str, expected_value: int, copies: int):
futures = parallel(get_metrics_value, cluster_nodes, command=metric_name, cid=cid)
metric_values = [future.result() for future in futures if future.result()]
actual_value = sum(metric_values) // copies
assert are_numbers_similar(actual_value, expected_value, tolerance_percentage=2), "metric container size bytes value not correct"
@allure.title("Container metrics (obj_size={object_size}, policy={container_request})")
@pytest.mark.parametrize(
"container_request, copies",
@@ -128,13 +119,10 @@ class TestContainerMetrics(ClusterTestBase):
]
with reporter.step("Check metric appears in all node where the object is located"):
self.check_metrics_value_by_approx(
object_nodes,
metric_name="frostfs_node_engine_container_size_bytes",
cid=container,
expected_value=object_size.value,
copies=2, # for policy REP 2, actual metric value divide by 2
act_metric = sum(
[get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=container) for node in object_nodes]
)
assert (act_metric // 2) == object_size.value
with reporter.step("Delete file, wait until gc remove object"):
id_tombstone = delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
@@ -156,13 +144,15 @@ class TestContainerMetrics(ClusterTestBase):
oids = [future.result() for future in futures]
with reporter.step("Check metric appears in all nodes"):
self.check_metrics_value_by_approx(
self.cluster.cluster_nodes,
metric_name="frostfs_node_engine_container_size_bytes",
cid=container,
expected_value=object_size.value * objects_count,
copies=2, # for policy REP 2, actual metric value divide by 2
)
metric_values = [
get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=container)
for node in self.cluster.cluster_nodes
]
actual_value = sum(metric_values) // 2 # for policy REP 2, value divide by 2
expected_value = object_size.value * objects_count
assert are_numbers_similar(
actual_value, expected_value, tolerance_percentage=2
), "metric container size bytes value not correct"
with reporter.step("Delete file, wait until gc remove object"):
tombstones_size = 0
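
The inlined assertions above rest on two pieces of arithmetic: under REP 2 every object is stored on two nodes, so summing frostfs_node_engine_container_size_bytes across nodes counts each byte twice, and the comparison then allows a small relative tolerance (are_numbers_similar with tolerance_percentage=2 presumably implements a percentage-based closeness check). A worked example with illustrative numbers:

object_size = 1_000_000       # logical payload size in bytes (illustrative)
objects_count = 5
copies = 2                    # REP 2 keeps two replicas of every object

metric_sum_over_nodes = object_size * objects_count * copies  # total reported by the nodes
actual_value = metric_sum_over_nodes // copies                # value derived by the test
expected_value = object_size * objects_count

# With a 2% tolerance, actual and expected may differ by up to 0.02 * expected.
assert abs(actual_value - expected_value) <= 0.02 * expected_value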

File 6 of 12

@@ -1,13 +1,13 @@
import allure
from frostfs_testlib.testing import parallel
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.metrics import get_metrics_value
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.testing import parallel
from frostfs_testlib.storage.cluster import ClusterNode, Cluster
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
@pytest.mark.order(-12)
@pytest.mark.order(-7)
@pytest.mark.nightly
@pytest.mark.metrics
class TestEpochMetrics(ClusterTestBase):
@@ -28,11 +28,11 @@ class TestEpochMetrics(ClusterTestBase):
with reporter.step("Check that the metric values are the same in all nodes"):
assert len(set(metrics_results)) == 1, f"Metric {metric_name} values aren't same in all nodes"
assert len(metrics_results) == len(cluster.cluster_nodes), "Metrics are not available in some nodes"
with reporter.step("Tick epoch"):
self.tick_epoch(wait_block=2)
with reporter.step("Check that metric value increase"):
with reporter.step('Check that metric value increase'):
futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name)
new_metrics_results = [future.result() for future in futures if future.result() is not None]
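
The two assertions kept in this test rely on a simple set-based check: if every node reports the same epoch value, the set of results collapses to a single element, and the list length confirms that every node actually answered. A worked illustration:

cluster_size = 4
metrics_results = [42, 42, 42, 42]    # epoch value reported by each node

assert len(set(metrics_results)) == 1, "epoch metric differs between nodes"
assert len(metrics_results) == cluster_size, "some nodes did not report the metric"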

File 7 of 12

@@ -16,7 +16,7 @@ from frostfs_testlib.utils.file_utils import generate_file
from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container
@pytest.mark.order(-13)
@pytest.mark.order(-9)
@pytest.mark.nightly
@pytest.mark.metrics
class TestGarbageCollectorMetrics(ClusterTestBase):

File 8 of 12

@@ -18,7 +18,7 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.order(-11)
@pytest.mark.order(-6)
@pytest.mark.nightly
@pytest.mark.metrics
class TestGRPCMetrics(ClusterTestBase):

File 9 of 12

@@ -14,7 +14,7 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
@pytest.mark.order(-14)
@pytest.mark.order(-10)
@pytest.mark.nightly
@pytest.mark.metrics
class TestLogsMetrics(ClusterTestBase):
@@ -30,13 +30,13 @@ class TestLogsMetrics(ClusterTestBase):
config_manager.csc.start_services_of_type(StorageNode)
return restart_time
@wait_for_success(max_wait_time=300, interval=30)
@wait_for_success(interval=10)
def check_metrics_in_node(self, cluster_node: ClusterNode, restart_time: datetime, log_priority: str = None, **metrics_greps):
current_time = datetime.now(timezone.utc)
counter_metrics = get_metrics_value(cluster_node, **metrics_greps)
counter_logs = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time, current_time, log_priority)
assert counter_logs == pytest.approx(
counter_metrics, rel=0.05
counter_metrics, rel=0.02
), f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"
@staticmethod
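
The tolerance change tightens the log-vs-metric comparison from 5% to 2% relative error; pytest.approx(expected, rel=r) accepts values within r * expected of the expected value. A quick illustration of what the new bound admits (assuming pytest is importable):

import pytest

counter_metrics = 1000
# rel=0.02 accepts log counters within 2% of the metric value (980..1020 here),
# where the previous rel=0.05 accepted anything in 950..1050.
assert 985 == pytest.approx(counter_metrics, rel=0.02)
assert 1049 != pytest.approx(counter_metrics, rel=0.02)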

File 10 of 12

@@ -4,8 +4,6 @@ import re
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.clients.s3.interfaces import BucketContainerResolver, S3ClientWrapper
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.steps.cli.container import delete_container, search_nodes_with_container
from frostfs_testlib.steps.cli.object import delete_object, lock_object, put_object, put_object_to_random_node
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
@@ -14,12 +12,12 @@ from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile, generate_file, split_file
from frostfs_testlib.utils.file_utils import TestFile
from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest, requires_container
@pytest.mark.order(-16)
@pytest.mark.order(-11)
@pytest.mark.nightly
@pytest.mark.metrics
class TestObjectMetrics(ClusterTestBase):
@@ -54,12 +52,8 @@ class TestObjectMetrics(ClusterTestBase):
)
check_metrics_counter(object_nodes, counter_exp=0, command="frostfs_node_engine_container_size_byte", cid=container)
with reporter.step("Check removed containers shouldn't appear in the storage node"):
for node in object_nodes:
try:
all_metrics = node.metrics.storage.get_metrics_search_by_greps(command="frostfs_node_engine_container_size_byte")
except:
continue
all_metrics = node.metrics.storage.get_metrics_search_by_greps(command="frostfs_node_engine_container_size_byte")
assert container not in all_metrics.stdout, "metrics of removed containers shouldn't appear in the storage node"
@allure.title("Object metrics, locked object (obj_size={object_size}, policy={container_request})")
@@ -161,7 +155,7 @@ class TestObjectMetrics(ClusterTestBase):
self.tick_epochs(epochs_to_tick=2)
check_metrics_counter(
container_nodes,
operator="<=",
operator=">=",
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
@@ -303,48 +297,3 @@ class TestObjectMetrics(ClusterTestBase):
type="user",
cid=container,
)
@allure.title("Multipart object metrics, PUT over S3, check part counts (s3_client={s3_client}, parts_count={parts_count})")
@pytest.mark.parametrize("parts_count", [4, 5, 10])
def test_multipart_object_metrics_check_parts_count(
self, s3_client: S3ClientWrapper, bucket_container_resolver: BucketContainerResolver, parts_count: int
):
parts = []
object_size_10_mb = 10 * 1024 * 1024
original_size = object_size_10_mb * parts_count
with reporter.step("Create public container"):
bucket = s3_client.create_bucket()
with reporter.step("Generate original object and split it into parts"):
original_file = generate_file(original_size)
file_parts = split_file(original_file, parts_count)
object_key = s3_helper.object_key_from_file_path(original_file)
with reporter.step("Create multipart and upload parts"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
for part_id, file_path in enumerate(file_parts, start=1):
etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
with reporter.step("Check all parts are visible in bucket"):
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
assert len(got_parts) == len(file_parts), f"Expected {parts_count} parts, got:\n{got_parts}"
with reporter.step("Complete multipart upload"):
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
with reporter.step(f"Get related container_id for bucket"):
cid = bucket_container_resolver.resolve(self.cluster.cluster_nodes[0], bucket)
with reporter.step(f"Check metric count object should be equal count parts of multipart object"):
# plus 1, because creating an additional object when calling CompleteMultipart
# multiply by 2 because default location constraint is REP 2
expected_user_metric = (parts_count + 1) * 2
check_metrics_counter(
self.cluster.cluster_nodes,
counter_exp=expected_user_metric,
command="frostfs_node_engine_container_objects_total",
type="user",
cid=cid,
)
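
The expectation in the removed multipart test is plain arithmetic: completing a multipart upload creates one extra object on top of the uploaded parts, and the default REP 2 placement doubles every object in the per-container user-object counter. Worked through for the smallest parametrized case:

parts_count = 4
replicas = 2                                          # default location constraint is REP 2
extra_objects = 1                                     # object created by CompleteMultipartUpload
expected_user_metric = (parts_count + extra_objects) * replicas
assert expected_user_metric == 10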

File 11 of 12

@@ -19,7 +19,7 @@ from frostfs_testlib.utils.file_utils import generate_file
from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container
@pytest.mark.order(-15)
@pytest.mark.order(-8)
@pytest.mark.nightly
@pytest.mark.metrics
class TestShardMetrics(ClusterTestBase):
@@ -88,7 +88,6 @@ class TestShardMetrics(ClusterTestBase):
@allure.title("Metric for shard mode")
def test_shard_metrics_set_mode(self, two_shards_and_node: tuple[str, str, ClusterNode]):
metrics_counter = 1
metric_name_blobstore = "frostfs_node_blobstore_mode"
shard1, shard2, node = two_shards_and_node
with reporter.step("Shard1 set to mode 'read-only'"):
@@ -103,15 +102,6 @@ class TestShardMetrics(ClusterTestBase):
shard_id=shard1,
)
with reporter.step(f"Check {metric_name_blobstore} metrics, 'the mode will change to 'READ_ONLY'"):
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command=metric_name_blobstore,
mode="READ_ONLY",
shard_id=shard1,
)
with reporter.step("Shard2 set to mode 'degraded-read-only'"):
node_shard_set_mode(node.storage_node, shard2, "degraded-read-only")
@@ -124,15 +114,6 @@ class TestShardMetrics(ClusterTestBase):
shard_id=shard2,
)
with reporter.step(f"Check {metric_name_blobstore} metrics, 'the mode will save 'READ_ONLY'"):
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command=metric_name_blobstore,
mode="READ_ONLY",
shard_id=shard2,
)
with reporter.step("Both shards set to mode 'read-write'"):
for shard in [shard1, shard2]:
node_shard_set_mode(node.storage_node, shard, "read-write")
@@ -147,16 +128,6 @@ class TestShardMetrics(ClusterTestBase):
shard_id=shard,
)
with reporter.step(f"Check {metric_name_blobstore} metrics, 'the mode will change to 'READ_WRITE'"):
for shard in [shard1, shard2]:
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command=metric_name_blobstore,
mode="READ_WRITE",
shard_id=shard,
)
@allure.title("Metric for error count on shard")
@requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1"))
def test_shard_metrics_error_count(

File 12 of 12

@@ -1,61 +1,58 @@
import logging
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import STORAGE_GC_TIME
from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED, OBJECT_NOT_FOUND
from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE
from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.steps.cli.object import get_object_from_random_node, head_object, put_object_to_random_node
from frostfs_testlib.steps.epoch import get_epoch
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import TestFile
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest
from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
@wait_for_success(datetime_utils.parse_time(STORAGE_GC_TIME) * 5, datetime_utils.parse_time(STORAGE_GC_TIME))
def wait_for_object_status_change_to(status: str, grpc_client: GrpcClientWrapper, cid: str, oid: str, endpoint: str) -> None:
with pytest.raises(Exception, match=status):
grpc_client.object.head(cid, oid, endpoint)
logger = logging.getLogger("NeoLogger")
@pytest.mark.nightly
@pytest.mark.sanity
@pytest.mark.grpc_api
class TestObjectApiLifetime(ClusterTestBase):
@allure.title("Object is removed when lifetime expired (obj_size={object_size}, policy={container_request.short_name})")
@pytest.mark.parametrize(
"container_request",
[
ContainerRequest(DEFAULT_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "REP 2"),
ContainerRequest(DEFAULT_EC_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "EC 3.1"),
],
)
def test_object_api_lifetime(self, grpc_client: GrpcClientWrapper, container: str, test_file: TestFile):
@allure.title("Object is removed when lifetime expired (obj_size={object_size})")
def test_object_api_lifetime(self, container: str, test_file: TestFile, default_wallet: WalletInfo):
"""
Test object deleted after expiration epoch.
"""
with reporter.step("Get current epoch"):
current_epoch = self.get_epoch()
last_active_epoch = current_epoch + 1
wallet = default_wallet
with reporter.step("Put object to random node"):
oid = grpc_client.object.put_to_random_node(test_file, container, self.cluster, expire_at=last_active_epoch)
epoch = get_epoch(self.shell, self.cluster)
with reporter.step("Ensure that expiration of object has expected value"):
object_info: dict = grpc_client.object.head(container, oid, self.cluster.default_rpc_endpoint)
expiration_epoch = int(object_info["header"]["attributes"]["__SYSTEM__EXPIRATION_EPOCH"])
assert expiration_epoch == last_active_epoch, f"Expiration time set for object is not expected: {expiration_epoch}"
oid = put_object_to_random_node(wallet, test_file.path, container, self.shell, self.cluster, expire_at=epoch + 1)
with expect_not_raises():
head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step("Tick two epoch for object expiration"):
with reporter.step("Tick two epochs"):
self.tick_epochs(2)
with reporter.step("Wait until GC marks object as 'already removed' or 'not found'"):
wait_for_object_status_change_to(
f"{OBJECT_ALREADY_REMOVED}|{OBJECT_NOT_FOUND}", grpc_client, container, oid, self.cluster.default_rpc_endpoint
)
# Wait for GC, because object with expiration is counted as alive until GC removes it
wait_for_gc_pass_on_storage_nodes()
with reporter.step("Try to get object from random node and make sure it is really deleted"):
with pytest.raises(Exception, match=f"{OBJECT_ALREADY_REMOVED}|{OBJECT_NOT_FOUND}"):
grpc_client.object.get_from_random_node(container, oid, self.cluster)
with reporter.step("Check object deleted because it expires on epoch"):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(wallet, container, oid, self.shell, self.cluster)
with reporter.step("Tick additional epoch"):
self.tick_epoch()
wait_for_gc_pass_on_storage_nodes()
with reporter.step("Check object deleted because it expires on previous epoch"):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(wallet, container, oid, self.shell, self.cluster)
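
Both versions of this lifetime test hinge on the same epoch arithmetic: an object stored with expire_at = current_epoch + 1 stays readable through that epoch and only becomes removable once the chain moves past it, after which a garbage-collection pass actually deletes it. A compact sketch of that timeline (numbers are illustrative):

current_epoch = 10
expire_at = current_epoch + 1     # object remains valid through epoch 11

# While the chain has not passed expire_at, HEAD/GET must still succeed.
assert current_epoch <= expire_at

# After ticking two epochs the chain is past the expiration epoch, so once GC runs
# the object must be gone and lookups fail with OBJECT_NOT_FOUND / OBJECT_ALREADY_REMOVED.
epoch_after_ticks = current_epoch + 2
assert epoch_after_ticks > expire_at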