Compare commits

7 commits

f8785fa299 [#368] Extend test_object_api_lifetime test with EC 3.1 policy
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-02-14 08:07:02 +00:00
b03f5f46b2 [#369] Added test node-blobstore metrics
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-02-09 14:56:17 +03:00
99aa2a547a [#367] Delete Time test
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2025-02-04 10:08:21 +03:00
a375423a4e [#365] Change import CliWrapper class
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2025-01-30 14:42:57 +03:00
3b120643ad [#364] Fixed epoch adn object metrics tests
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-01-30 12:50:30 +03:00
35f60af47d [#363] Updates for sanity scope
Signed-off-by: a.berezin <a.berezin@yadro.com>
2025-01-29 08:30:08 +00:00
a841251e06 [#362] Rename S3 object patch suite
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-01-28 10:27:24 +03:00
23 changed files with 106 additions and 221 deletions

View file

@@ -10,6 +10,7 @@ markers =
     staging: test to be excluded from run in verifier/pr-validation/sanity jobs and run test in staging job
     sanity: test runs in sanity testrun
     smoke: test runs in smoke testrun
+    exclude_sanity: tests which should not be in sanity scope
     # controlling markers
     order: manual control of test order
     logs_after_session: Make the last test in session
@@ -66,7 +67,6 @@ markers =
     failover_data_loss: failover tests in case of data loss
     metabase_loss: tests for metadata loss
     write_cache_loss: tests for write cache loss
-    time: time tests
     replication: replication tests
     ec_replication: replication EC
     static_session_container: tests for a static session in a container
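
Note: the exclude_sanity marker registered above is intended to be combined with the existing sanity marker when selecting the sanity scope. A minimal usage sketch; the test name and the deselect expression below are illustrative assumptions, not part of this changeset:

    # Hypothetical test: carries both markers, so a sanity job that selects
    # tests with `-m "sanity and not exclude_sanity"` will skip it, while full
    # testruns still execute it.
    import pytest

    @pytest.mark.sanity
    @pytest.mark.exclude_sanity
    def test_long_running_scenario():
        ...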

View file

@@ -7,7 +7,7 @@ from frostfs_testlib.steps.cli.object import put_object_to_random_node
 from frostfs_testlib.steps.node_management import drop_object
 from frostfs_testlib.storage.dataclasses import ape
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils import wallet_utils
 from frostfs_testlib.utils.failover_utils import wait_object_replication

View file

@@ -5,7 +5,7 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
 from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED
 from frostfs_testlib.storage.dataclasses import ape
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import TestFile

View file

@@ -11,7 +11,7 @@ from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses import ape
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.parallel import parallel

View file

@@ -3,7 +3,7 @@ import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
 from frostfs_testlib.resources.error_patterns import NO_RULE_FOUND_OBJECT
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import expect_not_raises
 from frostfs_testlib.utils.file_utils import TestFile

View file

@@ -3,7 +3,7 @@ import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
 from frostfs_testlib.resources.error_patterns import RULE_ACCESS_DENIED_OBJECT
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import expect_not_raises
 from frostfs_testlib.utils.file_utils import TestFile

View file

@@ -1,11 +1,10 @@
 import logging
 import random
-from datetime import datetime, timedelta, timezone
+from datetime import datetime
 from typing import Optional

 import allure
 import pytest
-from dateutil import parser
 from frostfs_testlib import plugins, reporter
 from frostfs_testlib.cli import FrostfsCli
 from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, S3HttpClient
@@ -22,15 +21,14 @@ from frostfs_testlib.steps.cli.object import get_netmap_netinfo
 from frostfs_testlib.steps.epoch import ensure_fresh_epoch
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
-from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.parallel import parallel
-from frostfs_testlib.testing.test_control import cached_fixture, run_optionally, wait_for_success
+from frostfs_testlib.testing.test_control import cached_fixture, run_optionally
 from frostfs_testlib.utils import env_utils, string_utils, version_utils
 from frostfs_testlib.utils.file_utils import TestFile, generate_file
@@ -40,7 +38,6 @@ from ..resources.common import TEST_CYCLES_COUNT
 logger = logging.getLogger("NeoLogger")

-SERVICE_ACTIVE_TIME = 20
 WALLTETS_IN_POOL = 2
@@ -155,7 +152,11 @@ def complex_object_size(max_object_size: int) -> ObjectSize:
 # By default we want all tests to be executed with both object sizes
 # This can be overriden in choosen tests if needed
 @pytest.fixture(
-    scope="session", params=[pytest.param("simple", marks=pytest.mark.simple), pytest.param("complex", marks=pytest.mark.complex)]
+    scope="session",
+    params=[
+        pytest.param("simple", marks=[pytest.mark.simple, pytest.mark.exclude_sanity]),
+        pytest.param("complex", marks=pytest.mark.complex),
+    ],
 )
 def object_size(simple_object_size: ObjectSize, complex_object_size: ObjectSize, request: pytest.FixtureRequest) -> ObjectSize:
     if request.param == "simple":
@@ -291,7 +292,7 @@ def credentials_provider(cluster: Cluster) -> CredentialsProvider:
 @pytest.fixture(
     scope="session",
     params=[
-        pytest.param(AwsCliClient, marks=[pytest.mark.aws, pytest.mark.weekly]),
+        pytest.param(AwsCliClient, marks=[pytest.mark.aws, pytest.mark.weekly, pytest.mark.exclude_sanity]),
         pytest.param(Boto3ClientWrapper, marks=[pytest.mark.boto3, pytest.mark.nightly]),
     ],
 )
@@ -416,44 +417,11 @@ def session_start_time(configure_testlib):
     return start_time


-@allure.title("[Autouse/Session] After deploy healthcheck")
-@pytest.fixture(scope="session", autouse=True)
-@run_optionally(optionals.OPTIONAL_AUTOUSE_FIXTURES_ENABLED)
-def after_deploy_healthcheck(cluster: Cluster):
-    with reporter.step("Wait for cluster readiness after deploy"):
-        parallel(readiness_on_node, cluster.cluster_nodes)
-
-
 @pytest.fixture(scope="session")
 def rpc_endpoint(cluster: Cluster):
     return cluster.default_rpc_endpoint


-@wait_for_success(60 * SERVICE_ACTIVE_TIME * 3, 60, title="Wait for {cluster_node} readiness")
-def readiness_on_node(cluster_node: ClusterNode):
-    if "skip_readiness_check" in cluster_node.host.config.attributes and cluster_node.host.config.attributes["skip_readiness_check"]:
-        return
-
-    # TODO: Move to healtcheck classes
-    svc_name = cluster_node.service(StorageNode).get_service_systemctl_name()
-    with reporter.step(f"Check service {svc_name} is active"):
-        result = cluster_node.host.get_shell().exec(f"systemctl is-active {svc_name}")
-        assert "active" == result.stdout.strip(), f"Service {svc_name} should be in active state"
-
-    with reporter.step(f"Check service {svc_name} is active more than {SERVICE_ACTIVE_TIME} minutes"):
-        result = cluster_node.host.get_shell().exec(f"systemctl show {svc_name} --property ActiveEnterTimestamp | cut -d '=' -f 2")
-        start_time = parser.parse(result.stdout.strip())
-        current_time = datetime.now(tz=timezone.utc)
-        active_time = current_time - start_time
-
-        active_minutes = active_time.seconds // 60
-        active_seconds = active_time.seconds - active_minutes * 60
-        assert active_time > timedelta(
-            minutes=SERVICE_ACTIVE_TIME
-        ), f"Service should be in active state more than {SERVICE_ACTIVE_TIME} minutes, current {active_minutes}m:{active_seconds}s"
-
-
 @reporter.step("Prepare default user with wallet")
 @pytest.fixture(scope="session")
 @cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)

View file

@@ -59,7 +59,7 @@ class TestContainer(ClusterTestBase):
             self.tick_epoch()
             wait_for_container_deletion(wallet, cid, self.shell, rpc_endpoint)

-    @allure.title("Delete container without force (name={name})")
+    @allure.title("Delete container without force")
     @pytest.mark.smoke
     def test_container_deletion_no_force(self, container: str, default_wallet: WalletInfo, rpc_endpoint: str):
         with reporter.step("Delete container and check it was deleted"):
@@ -68,6 +68,7 @@ class TestContainer(ClusterTestBase):
             wait_for_container_deletion(default_wallet, container, self.shell, rpc_endpoint)

     @allure.title("Parallel container creation and deletion")
+    @pytest.mark.exclude_sanity
     def test_container_creation_deletion_parallel(self, default_wallet: WalletInfo, rpc_endpoint: str):
         containers_count = 3
         wallet = default_wallet

View file

@@ -1,79 +0,0 @@
-import datetime
-from time import sleep
-
-import allure
-import pytest
-from frostfs_testlib import reporter
-from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
-from frostfs_testlib.steps.cli.object import neo_go_query_height
-from frostfs_testlib.storage.controllers import ClusterStateController
-from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
-from frostfs_testlib.utils import datetime_utils
-
-
-@pytest.mark.order(20)
-@pytest.mark.failover
-class TestTime(ClusterTestBase):
-    @reporter.step("Neo-go should continue to release blocks")
-    def check_nodes_block(self, cluster_state_controller: ClusterStateController):
-        count_blocks = {}
-        with reporter.step("Get current block id"):
-            for cluster_node in self.cluster.cluster_nodes:
-                cluster_state_controller.get_node_date(cluster_node)
-                count_blocks[cluster_node] = neo_go_query_height(
-                    shell=cluster_node.host.get_shell(), endpoint=cluster_node.morph_chain.get_http_endpoint()
-                )["Latest block"]
-        with reporter.step("Wait for 3 blocks"):
-            sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 3)
-        with reporter.step("Current block id should be higher than before"):
-            for cluster_node in self.cluster.cluster_nodes:
-                shell = cluster_node.host.get_shell()
-                now_block = neo_go_query_height(shell=shell, endpoint=cluster_node.morph_chain.get_http_endpoint())[
-                    "Latest block"
-                ]
-                assert count_blocks[cluster_node] < now_block
-
-    @pytest.fixture()
-    def node_time_synchronizer(self, cluster_state_controller: ClusterStateController) -> None:
-        cluster_state_controller.set_sync_date_all_nodes(status="inactive")
-        yield
-        cluster_state_controller.set_sync_date_all_nodes(status="active")
-
-    @allure.title("Changing hardware and system time")
-    def test_system_time(self, cluster_state_controller: ClusterStateController, node_time_synchronizer: None):
-        cluster_nodes = self.cluster.cluster_nodes
-        timezone_utc = datetime.timezone.utc
-        node_1, node_2, node_3 = cluster_nodes[0:3]
-
-        with reporter.step("On node 1, move the system time forward by 5 days"):
-            cluster_state_controller.change_node_date(
-                node_1, (datetime.datetime.now(timezone_utc) + datetime.timedelta(days=5))
-            )
-
-        self.check_nodes_block(cluster_state_controller)
-
-        with reporter.step("On node 2, move the system time back 5 days."):
-            cluster_state_controller.change_node_date(
-                node_2, (datetime.datetime.now(timezone_utc) - datetime.timedelta(days=5))
-            )
-
-        self.check_nodes_block(cluster_state_controller)
-
-        with reporter.step("On node 3, move the system time forward by 10 days"):
-            cluster_state_controller.change_node_date(
-                node_3, (datetime.datetime.now(timezone_utc) + datetime.timedelta(days=10))
-            )
-
-        self.check_nodes_block(cluster_state_controller)
-
-        with reporter.step("Return the time on all nodes to the current one"):
-            for cluster_node in self.cluster.cluster_nodes:
-                cluster_state_controller.restore_node_date(cluster_node)
-
-        self.check_nodes_block(cluster_state_controller)
-
-        with reporter.step("Reboot all nodes"):
-            cluster_state_controller.shutdown_cluster(mode="soft")
-            cluster_state_controller.start_stopped_hosts()
-
-        self.check_nodes_block(cluster_state_controller)

View file

@@ -424,7 +424,6 @@ class TestMaintenanceMode(ClusterTestBase):
         with pytest.raises(RuntimeError, match=node_under_maintenance_error):
             put_object(default_wallet, file_path, cid, self.shell, endpoint)

-    @pytest.mark.sanity
     @allure.title("MAINTENANCE and OFFLINE mode transitions")
     def test_mode_transitions(
         self,

View file

@@ -3,11 +3,11 @@ from frostfs_testlib.testing import parallel
 import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.steps.metrics import get_metrics_value
-from frostfs_testlib.storage.cluster import ClusterNode
+from frostfs_testlib.storage.cluster import ClusterNode, Cluster
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase


-@pytest.mark.order(-11)
+@pytest.mark.order(-7)
 @pytest.mark.nightly
 @pytest.mark.metrics
 class TestEpochMetrics(ClusterTestBase):
@@ -19,21 +19,21 @@ class TestEpochMetrics(ClusterTestBase):
         return None

     @allure.title("Check changes in metric frostfs_node_ir_epoch value")
-    def test_check_increase_epoch_metric(self):
+    def test_check_increase_epoch_metric(self, cluster: Cluster):
         metric_name = "frostfs_node_ir_epoch"

         with reporter.step("Get current value of metric: {metric_name} from each nodes"):
-            futures = parallel(self.get_metrics_search_by_greps_parallel, self.cluster.cluster_nodes, command=metric_name)
+            futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name)
             metrics_results = [future.result() for future in futures if future.result() is not None]

         with reporter.step("Check that the metric values are the same in all nodes"):
             assert len(set(metrics_results)) == 1, f"Metric {metric_name} values aren't same in all nodes"
-            assert len(metrics_results) == len(self.cluster.cluster_nodes), "Metrics are not available in some nodes"
+            assert len(metrics_results) == len(cluster.cluster_nodes), "Metrics are not available in some nodes"

         with reporter.step("Tick epoch"):
             self.tick_epoch(wait_block=2)

         with reporter.step('Check that metric value increase'):
-            futures = parallel(self.get_metrics_search_by_greps_parallel, self.cluster.cluster_nodes, command=metric_name)
+            futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name)
             new_metrics_results = [future.result() for future in futures if future.result() is not None]
             assert len(set(new_metrics_results)) == 1, f"Metric {metric_name} values aren't same in all nodes"

View file

@@ -17,7 +17,7 @@ from frostfs_testlib.utils.file_utils import TestFile
 from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest, requires_container


-@pytest.mark.order(-7)
+@pytest.mark.order(-11)
 @pytest.mark.nightly
 @pytest.mark.metrics
 class TestObjectMetrics(ClusterTestBase):
@@ -89,6 +89,7 @@ class TestObjectMetrics(ClusterTestBase):
             objects_metric_counter += metric_step
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
@@ -108,6 +109,7 @@ class TestObjectMetrics(ClusterTestBase):
             objects_metric_counter -= metric_step
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
@@ -136,6 +138,7 @@ class TestObjectMetrics(ClusterTestBase):
             objects_metric_counter += metric_step
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
@@ -152,6 +155,7 @@ class TestObjectMetrics(ClusterTestBase):
             self.tick_epochs(epochs_to_tick=2)
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
@@ -164,6 +168,7 @@ class TestObjectMetrics(ClusterTestBase):
             objects_metric_counter -= metric_step
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
@@ -191,6 +196,7 @@ class TestObjectMetrics(ClusterTestBase):
             objects_metric_counter += metric_step
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
@@ -210,6 +216,7 @@ class TestObjectMetrics(ClusterTestBase):
             objects_metric_counter -= metric_step
             check_metrics_counter(
                 container_nodes,
+                operator=">=",
                 counter_exp=objects_metric_counter,
                 command="frostfs_node_engine_objects_total",
                 type="user",
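
Note: each hunk above adds operator=">=" to check_metrics_counter, turning an exact-equality assertion on frostfs_node_engine_objects_total into a lower-bound check. A rough, self-contained sketch of the comparison this implies; the helper below is an illustration, not the frostfs-testlib implementation:

    import operator as op

    # Maps the operator string to a comparison; ">=" tolerates counters that
    # temporarily exceed the expected value while replication or GC settles.
    _OPERATORS = {"==": op.eq, ">=": op.ge, "<=": op.le, ">": op.gt, "<": op.lt}

    def compare_counter(actual: int, expected: int, operator: str = "==") -> bool:
        return _OPERATORS[operator](actual, expected)

    assert compare_counter(actual=12, expected=10, operator=">=")
    assert not compare_counter(actual=12, expected=10, operator="==")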

View file

@@ -88,6 +88,7 @@ class TestShardMetrics(ClusterTestBase):
     @allure.title("Metric for shard mode")
     def test_shard_metrics_set_mode(self, two_shards_and_node: tuple[str, str, ClusterNode]):
         metrics_counter = 1
+        metric_name_blobstore = "frostfs_node_blobstore_mode"
         shard1, shard2, node = two_shards_and_node

         with reporter.step("Shard1 set to mode 'read-only'"):
@@ -102,6 +103,15 @@ class TestShardMetrics(ClusterTestBase):
                 shard_id=shard1,
             )

+        with reporter.step(f"Check {metric_name_blobstore} metrics, 'the mode will change to 'READ_ONLY'"):
+            check_metrics_counter(
+                [node],
+                counter_exp=metrics_counter,
+                command=metric_name_blobstore,
+                mode="READ_ONLY",
+                shard_id=shard1,
+            )
+
         with reporter.step("Shard2 set to mode 'degraded-read-only'"):
             node_shard_set_mode(node.storage_node, shard2, "degraded-read-only")
@@ -114,6 +124,15 @@ class TestShardMetrics(ClusterTestBase):
                 shard_id=shard2,
             )

+        with reporter.step(f"Check {metric_name_blobstore} metrics, 'the mode will save 'READ_ONLY'"):
+            check_metrics_counter(
+                [node],
+                counter_exp=metrics_counter,
+                command=metric_name_blobstore,
+                mode="READ_ONLY",
+                shard_id=shard2,
+            )
+
         with reporter.step("Both shards set to mode 'read-write'"):
             for shard in [shard1, shard2]:
                 node_shard_set_mode(node.storage_node, shard, "read-write")
@@ -128,6 +147,16 @@ class TestShardMetrics(ClusterTestBase):
                     shard_id=shard,
                 )

+        with reporter.step(f"Check {metric_name_blobstore} metrics, 'the mode will change to 'READ_WRITE'"):
+            for shard in [shard1, shard2]:
+                check_metrics_counter(
+                    [node],
+                    counter_exp=metrics_counter,
+                    command=metric_name_blobstore,
+                    mode="READ_WRITE",
+                    shard_id=shard,
+                )
+
     @allure.title("Metric for error count on shard")
     @requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1"))
     def test_shard_metrics_error_count(

View file

@@ -314,6 +314,7 @@ class TestObjectApi(ClusterTestBase):
             assert sorted(expected_oids) == sorted(result)

     @allure.title("Search objects with removed items (obj_size={object_size})")
+    @pytest.mark.exclude_sanity
     def test_object_search_should_return_tombstone_items(
         self,
         default_wallet: WalletInfo,

View file

@@ -17,7 +17,7 @@ from frostfs_testlib.storage.dataclasses import ape
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import expect_not_raises
 from pytest import FixtureRequest

View file

@@ -7,7 +7,7 @@ from frostfs_testlib.storage.constants import PlacementRule
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import generate_file, get_file_hash

View file

@@ -1,58 +1,61 @@
-import logging
-
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
-from frostfs_testlib.steps.cli.object import get_object_from_random_node, head_object, put_object_to_random_node
-from frostfs_testlib.steps.epoch import get_epoch
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.resources.common import STORAGE_GC_TIME
+from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED, OBJECT_NOT_FOUND
+from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
-from frostfs_testlib.testing.test_control import expect_not_raises
+from frostfs_testlib.testing.test_control import wait_for_success
+from frostfs_testlib.utils import datetime_utils
 from frostfs_testlib.utils.file_utils import TestFile

-from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
+from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest

-logger = logging.getLogger("NeoLogger")
+
+@wait_for_success(datetime_utils.parse_time(STORAGE_GC_TIME) * 5, datetime_utils.parse_time(STORAGE_GC_TIME))
+def wait_for_object_status_change_to(status: str, grpc_client: GrpcClientWrapper, cid: str, oid: str, endpoint: str) -> None:
+    with pytest.raises(Exception, match=status):
+        grpc_client.object.head(cid, oid, endpoint)


 @pytest.mark.nightly
 @pytest.mark.sanity
 @pytest.mark.grpc_api
 class TestObjectApiLifetime(ClusterTestBase):
-    @allure.title("Object is removed when lifetime expired (obj_size={object_size})")
-    def test_object_api_lifetime(self, container: str, test_file: TestFile, default_wallet: WalletInfo):
+    @allure.title("Object is removed when lifetime expired (obj_size={object_size}, policy={container_request.short_name})")
+    @pytest.mark.parametrize(
+        "container_request",
+        [
+            ContainerRequest(DEFAULT_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "REP 2"),
+            ContainerRequest(DEFAULT_EC_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "EC 3.1"),
+        ],
+    )
+    def test_object_api_lifetime(self, grpc_client: GrpcClientWrapper, container: str, test_file: TestFile):
         """
         Test object deleted after expiration epoch.
         """
-        wallet = default_wallet
-
-        epoch = get_epoch(self.shell, self.cluster)
-
-        oid = put_object_to_random_node(wallet, test_file.path, container, self.shell, self.cluster, expire_at=epoch + 1)
-        with expect_not_raises():
-            head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
+        with reporter.step("Get current epoch"):
+            current_epoch = self.get_epoch()
+            last_active_epoch = current_epoch + 1
+
+        with reporter.step("Put object to random node"):
+            oid = grpc_client.object.put_to_random_node(test_file, container, self.cluster, expire_at=last_active_epoch)
+
+        with reporter.step("Ensure that expiration of object has expected value"):
+            object_info: dict = grpc_client.object.head(container, oid, self.cluster.default_rpc_endpoint)
+            expiration_epoch = int(object_info["header"]["attributes"]["__SYSTEM__EXPIRATION_EPOCH"])
+            assert expiration_epoch == last_active_epoch, f"Expiration time set for object is not expected: {expiration_epoch}"

-        with reporter.step("Tick two epochs"):
+        with reporter.step("Tick two epoch for object expiration"):
             self.tick_epochs(2)

-        # Wait for GC, because object with expiration is counted as alive until GC removes it
-        wait_for_gc_pass_on_storage_nodes()
+        with reporter.step("Wait until GC marks object as 'already removed' or 'not found'"):
+            wait_for_object_status_change_to(
+                f"{OBJECT_ALREADY_REMOVED}|{OBJECT_NOT_FOUND}", grpc_client, container, oid, self.cluster.default_rpc_endpoint
+            )

-        with reporter.step("Check object deleted because it expires on epoch"):
-            with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
-                head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
-            with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
-                get_object_from_random_node(wallet, container, oid, self.shell, self.cluster)
-
-        with reporter.step("Tick additional epoch"):
-            self.tick_epoch()
-
-        wait_for_gc_pass_on_storage_nodes()
-
-        with reporter.step("Check object deleted because it expires on previous epoch"):
-            with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
-                head_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
-            with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
-                get_object_from_random_node(wallet, container, oid, self.shell, self.cluster)
+        with reporter.step("Try to get object from random node and make sure it is really deleted"):
+            with pytest.raises(Exception, match=f"{OBJECT_ALREADY_REMOVED}|{OBJECT_NOT_FOUND}"):
+                grpc_client.object.get_from_random_node(container, oid, self.cluster)
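
Note: the rewritten assertions accept either removal status by passing a regex alternation to pytest.raises(match=...), which pytest applies with re.search against the exception text. A self-contained illustration of the pattern; the error strings below are placeholders, not the real frostfs messages:

    import pytest

    OBJECT_ALREADY_REMOVED = "object already removed"  # placeholder pattern
    OBJECT_NOT_FOUND = "object not found"  # placeholder pattern

    def head_expired_object():
        # Stand-in for a client call that fails once the object has expired.
        raise RuntimeError("status: object not found")

    # "A|B" matches either message, so the check passes whichever status GC reports.
    with pytest.raises(RuntimeError, match=f"{OBJECT_ALREADY_REMOVED}|{OBJECT_NOT_FOUND}"):
        head_expired_object()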

View file

@@ -6,7 +6,7 @@ from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
 from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
 from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import TestFile

View file

@@ -16,7 +16,7 @@ from frostfs_testlib.storage.controllers import ClusterStateController
 from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk
-from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
+from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils import datetime_utils

View file

@@ -22,7 +22,6 @@ OBJECT_ATTRIBUTES = {"common_key": "common_value"}
 WAIT_FOR_REPLICATION = 60

 # Adding failover mark because it may make cluster unhealthy
-@pytest.mark.sanity
 @pytest.mark.failover
 @pytest.mark.replication
 class TestReplication(ClusterTestBase):

View file

@@ -1,7 +1,6 @@
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.steps.cli.object import put_object_to_random_node
 from frostfs_testlib.steps.epoch import get_epoch
 from frostfs_testlib.steps.http_gate import (
     attr_into_header,
@@ -19,55 +18,12 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils.file_utils import TestFile, generate_file, get_file_hash

-from ....helpers.container_request import REP_1_1_1_PUBLIC, REP_2_2_2_PUBLIC, requires_container
+from ....helpers.container_request import REP_2_2_2_PUBLIC, requires_container
 from ....helpers.utility import wait_for_gc_pass_on_storage_nodes

 OBJECT_NOT_FOUND_ERROR = "not found"


-@allure.link(
-    "https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#frostfs-http-gateway",
-    name="frostfs-http-gateway",
-)
-@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#uploading", name="uploading")
-@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
-@pytest.mark.nightly
-@pytest.mark.sanity
-@pytest.mark.http_gate
-class TestHttpGate(ClusterTestBase):
-    @allure.title("Put over gRPC, Get over HTTP (object_size={object_size})")
-    @requires_container(REP_1_1_1_PUBLIC)
-    def test_put_grpc_get_http(self, default_wallet: WalletInfo, container: str, test_file: TestFile):
-        """
-        Test that object can be put using gRPC interface and get using HTTP.
-
-        Steps:
-        1. Create object.
-        2. Put object using gRPC (frostfs-cli).
-        3. Download object using HTTP gate (https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading).
-        4. Get object using gRPC (frostfs-cli).
-        5. Compare hashes for got object.
-        6. Compare hashes for got and original objects.
-
-        Expected result:
-        Hashes must be the same.
-        """
-        with reporter.step("Put object using gRPC"):
-            object_id = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, self.cluster)
-
-        with reporter.step("Get object and check hash"):
-            verify_object_hash(
-                object_id,
-                test_file.path,
-                default_wallet,
-                container,
-                self.shell,
-                self.cluster.storage_nodes,
-                self.cluster.cluster_nodes[0],
-            )
-
-
 @allure.link(
     "https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#frostfs-http-gateway",
     name="frostfs-http-gateway",

View file

@@ -115,7 +115,7 @@ def another_bucket(another_s3_client: S3ClientWrapper, versioning_status: Versio

 @pytest.mark.nightly
 @pytest.mark.s3_gate
-class TestS3GateHttpObject(ClusterTestBase):
+class TestS3ObjectPatch(ClusterTestBase):
     @allure.title("Patch simple object payload (range={patch_range}, s3_client={s3_client}, policy={placement_policy})")
     @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
     @pytest.mark.parametrize(

View file

@@ -18,6 +18,7 @@ def _check_version_format(version):

 @allure.title("Check binaries versions")
+@pytest.mark.nightly
 @pytest.mark.check_binaries
 def test_binaries_versions(hosting: Hosting):
     """