import logging
import os
import shutil
from datetime import datetime
from typing import Optional

import allure
import pytest
import yaml
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.reporter import AllureHandler, get_reporter
from frostfs_testlib.resources.common import (
    ASSETS_DIR,
    COMPLEX_OBJECT_CHUNKS_COUNT,
    COMPLEX_OBJECT_TAIL_SIZE,
    DEFAULT_WALLET_PASS,
    SIMPLE_OBJECT_SIZE,
)
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import LocalShell, Shell
from frostfs_testlib.steps.cli.container import list_containers
from frostfs_testlib.steps.cli.object import get_netmap_netinfo
from frostfs_testlib.steps.node_management import storage_node_healthcheck
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import env_utils, version_utils

from pytest_tests.resources.common import HOSTING_CONFIG_FILE, TEST_CYCLES_COUNT

logger = logging.getLogger("NeoLogger")


# Keep the logs check test in the selection even if it does not match the user-supplied mark expression
def pytest_configure(config: pytest.Config):
    markers = config.option.markexpr
    if markers != "":
        config.option.markexpr = f"logs_after_session or ({markers})"


# pytest hook. Do not rename
def pytest_collection_modifyitems(items: list[pytest.Item]):
    # Run node management tests (@pytest.mark.node_mgmt) after the rest and the logs check test last of all
    def priority(item: pytest.Item) -> int:
        is_node_mgmt_test = 1 if item.get_closest_marker("node_mgmt") else 0
        is_logs_check_test = 100 if item.get_closest_marker("logs_after_session") else 0
        return is_node_mgmt_test + is_logs_check_test

    items.sort(key=lambda item: priority(item))


# Repeat each test TEST_CYCLES_COUNT times by parametrizing it with a "cycle" argument,
# unless the test is marked logs_after_session or no_cycles
def pytest_generate_tests(metafunc: pytest.Metafunc):
    if (
        TEST_CYCLES_COUNT <= 1
        or metafunc.definition.get_closest_marker("logs_after_session")
        or metafunc.definition.get_closest_marker("no_cycles")
    ):
        return

    metafunc.fixturenames.append("cycle")
    metafunc.parametrize(
        "cycle",
        range(1, TEST_CYCLES_COUNT + 1),
        ids=[f"cycle {cycle}" for cycle in range(1, TEST_CYCLES_COUNT + 1)],
    )


@pytest.fixture(scope="session")
def configure_testlib():
    get_reporter().register_handler(AllureHandler())
    logging.getLogger("paramiko").setLevel(logging.INFO)
    yield


@pytest.fixture(scope="session")
def client_shell(configure_testlib) -> Shell:
    yield LocalShell()


@pytest.fixture(scope="session")
def hosting(configure_testlib) -> Hosting:
    with open(HOSTING_CONFIG_FILE, "r") as file:
        hosting_config = yaml.full_load(file)

    hosting_instance = Hosting()
    hosting_instance.configure(hosting_config)

    yield hosting_instance


@pytest.fixture(scope="session")
def require_multiple_hosts(hosting: Hosting):
    """Designates tests that require an environment with multiple hosts.

    These tests will be skipped on an environment that has only one host.
    """
    if len(hosting.hosts) <= 1:
        pytest.skip("Test only works with multiple hosts")
    yield


@pytest.fixture(scope="session")
def max_object_size(cluster: Cluster, client_shell: Shell) -> int:
    storage_node = cluster.storage_nodes[0]
    net_info = get_netmap_netinfo(
        wallet=storage_node.get_wallet_path(),
        wallet_config=storage_node.get_wallet_config_path(),
        endpoint=storage_node.get_rpc_endpoint(),
        shell=client_shell,
    )
    yield net_info["maximum_object_size"]


@pytest.fixture(scope="session")
def simple_object_size(max_object_size: int) -> int:
    yield int(SIMPLE_OBJECT_SIZE) if int(SIMPLE_OBJECT_SIZE) < max_object_size else max_object_size


@pytest.fixture(scope="session")
def complex_object_size(max_object_size: int) -> int:
    return max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)


@pytest.fixture(scope="session")
def wallet_factory(temp_directory: str, client_shell: Shell, cluster: Cluster) -> WalletFactory:
    return WalletFactory(temp_directory, client_shell, cluster)


@pytest.fixture(scope="session")
def cluster(temp_directory: str, hosting: Hosting, client_shell: Shell) -> Cluster:
    cluster = Cluster(hosting)
    if cluster.is_local_devenv():
        cluster.create_wallet_configs(hosting)

    ClusterTestBase.shell = client_shell
    ClusterTestBase.cluster = cluster
    yield cluster


@allure.step("[Class]: Provide S3 policy")
@pytest.fixture(scope="class")
def s3_policy(request: pytest.FixtureRequest):
    policy = None
    if "param" in request.__dict__:
        policy = request.param

    return policy


@pytest.fixture(scope="session")
def cluster_state_controller(client_shell: Shell, cluster: Cluster) -> ClusterStateController:
    controller = ClusterStateController(client_shell, cluster)
    yield controller


@allure.step("[Class]: Create S3 client")
@pytest.fixture(scope="class")
def s3_client(
    default_wallet: str,
    client_shell: Shell,
    s3_policy: Optional[str],
    cluster: Cluster,
    request: pytest.FixtureRequest,
) -> S3ClientWrapper:
    wallet = WalletInfo(path=default_wallet, password=DEFAULT_WALLET_PASS)

    (cid, access_key_id, secret_access_key) = s3_helper.init_s3_credentials(
        wallet,
        client_shell,
        cluster,
        s3gates=[cluster_node.s3_gate for cluster_node in cluster.cluster_nodes],
        policy=s3_policy,
    )
    containers_list = list_containers(
        wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
    )
    assert cid in containers_list, f"Expected cid {cid} in {containers_list}"

    s3_client_cls = request.param
    client = s3_client_cls(access_key_id, secret_access_key, cluster.default_s3_gate_endpoint)
    yield client


@allure.step("Create/delete bucket")
@pytest.fixture
def bucket(s3_client: S3ClientWrapper, request: pytest.FixtureRequest):
    bucket_name = s3_client.create_bucket()

    versioning_status: Optional[VersioningStatus] = None
    if "param" in request.__dict__:
        versioning_status = request.param

    if versioning_status:
        s3_helper.set_bucket_versioning(s3_client, bucket_name, versioning_status)

    yield bucket_name

    s3_helper.delete_bucket_with_objects(s3_client, bucket_name)


@allure.step("Create two buckets")
@pytest.fixture
def two_buckets(s3_client: S3ClientWrapper):
    bucket_1 = s3_client.create_bucket()
    bucket_2 = s3_client.create_bucket()
    yield bucket_1, bucket_2

    for bucket_name in [bucket_1, bucket_2]:
        s3_helper.delete_bucket_with_objects(s3_client, bucket_name)


@allure.step("Check binary versions")
@pytest.fixture(scope="session", autouse=True)
def check_binary_versions(hosting: Hosting, client_shell: Shell, request: pytest.FixtureRequest):
    # Collect local and remote binary versions and save them to the Allure
    # environment.properties file when an Allure report directory is configured
    local_versions = version_utils.get_local_binaries_versions(client_shell)
    remote_versions = version_utils.get_remote_binaries_versions(hosting)

    all_versions = {**local_versions, **remote_versions}

    environment_dir = request.config.getoption("--alluredir")
    if not environment_dir:
        return None

    file_path = f"{environment_dir}/environment.properties"
    env_utils.save_env_properties(file_path, all_versions)


@allure.step("Prepare tmp directory")
@pytest.fixture(scope="session")
def temp_directory():
    with allure.step("Prepare tmp directory"):
        full_path = os.path.join(os.getcwd(), ASSETS_DIR)
        shutil.rmtree(full_path, ignore_errors=True)
        os.mkdir(full_path)

    yield full_path

    with allure.step("Remove tmp directory"):
        shutil.rmtree(full_path)


@allure.step("[Autouse/Session] Test session start time")
@pytest.fixture(scope="session", autouse=True)
def session_start_time():
    start_time = datetime.utcnow()
    return start_time


@allure.step("Run health check for all storage nodes")
@pytest.fixture(scope="session", autouse=True)
def run_health_check(session_start_time, cluster: Cluster):
    failed_nodes = []
    for node in cluster.storage_nodes:
        health_check = storage_node_healthcheck(node)
        if health_check.health_status != "READY" or health_check.network_status != "ONLINE":
            failed_nodes.append(node)

    if failed_nodes:
        raise AssertionError(f"Nodes {failed_nodes} are not healthy")


@allure.step("Prepare wallet and deposit")
@pytest.fixture(scope="session")
def default_wallet(wallet_factory: WalletFactory) -> str:
    wallet = wallet_factory.create_wallet(password=DEFAULT_WALLET_PASS)
    allure.attach.file(wallet.path, os.path.basename(wallet.path), allure.attachment_type.JSON)

    return wallet.path
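

# Illustrative usage only (hypothetical test, not part of this conftest): session fixtures
# such as `default_wallet` and `simple_object_size` are requested by name in test functions,
# while `ClusterTestBase` exposes the shell and cluster wired up by the `cluster` fixture above.
#
#   class TestExample(ClusterTestBase):
#       def test_wallet_is_prepared(self, default_wallet: str, simple_object_size: int):
#           assert os.path.exists(default_wallet)
#           assert simple_object_size > 0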