Merge branch 'master' into ape

ChEktn 2024-10-31 10:06:24 +00:00
commit d60e3c1c30
31 changed files with 108 additions and 54 deletions

__init__.py Normal file
View file

@@ -0,0 +1,3 @@
import os
TESTS_BASE_PATH = os.path.dirname(os.path.relpath(__file__))

View file

@@ -6,7 +6,7 @@ from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from pytest_tests.helpers.object_access import (
from ..helpers.object_access import (
can_delete_object,
can_get_head_object,
can_get_object,

View file

@@ -1,5 +1,8 @@
import os
from .. import TESTS_BASE_PATH
TEST_CYCLES_COUNT = int(os.getenv("TEST_CYCLES_COUNT", "1"))
DEVENV_PATH = os.getenv("DEVENV_PATH", os.path.join("..", "frostfs-dev-env"))
S3_POLICY_FILE_LOCATION = os.path.join(TESTS_BASE_PATH, "resources/files/policy.json")
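
Together with the new pytest_tests/__init__.py above, this hunk replaces the hard-coded "pytest_tests/resources/files/policy.json" strings used elsewhere in the suite with a path derived from the package location. A minimal sketch of what the constant resolves to, assuming pytest is invoked from the repository root on a POSIX system (the names below only restate what this diff shows):

```python
import os

# os.path.relpath(__file__) for pytest_tests/__init__.py, evaluated from the
# repository root, is "pytest_tests/__init__.py", so its dirname is "pytest_tests".
TESTS_BASE_PATH = "pytest_tests"
S3_POLICY_FILE_LOCATION = os.path.join(TESTS_BASE_PATH, "resources/files/policy.json")

assert S3_POLICY_FILE_LOCATION == "pytest_tests/resources/files/policy.json"
```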

View file

@@ -8,8 +8,7 @@ from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.container_access import assert_full_access_to_container, assert_no_access_to_container, assert_read_only_container
from ....helpers.container_access import assert_full_access_to_container, assert_no_access_to_container, assert_read_only_container
from ....helpers.container_spec import ContainerSpec

View file

@@ -12,13 +12,12 @@ from frostfs_testlib.utils import wallet_utils
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import TestFile
from pytest_tests.helpers.container_access import (
from ....helpers.container_access import (
ALL_OBJECT_OPERATIONS,
assert_access_to_container,
assert_full_access_to_container,
assert_no_access_to_container,
)
from ....helpers.container_spec import ContainerSpec

View file

@@ -10,17 +10,16 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import TestFile
from pytest_tests.helpers.bearer_token import create_bearer_token
from pytest_tests.helpers.container_access import (
from ....helpers.bearer_token import create_bearer_token
from ....helpers.container_access import (
ALL_OBJECT_OPERATIONS,
FULL_ACCESS,
assert_access_to_container,
assert_full_access_to_container,
assert_no_access_to_container,
)
from pytest_tests.helpers.object_access import OBJECT_ACCESS_DENIED
from ....helpers.container_spec import ContainerSpec
from ....helpers.object_access import OBJECT_ACCESS_DENIED
@pytest.mark.nightly

View file

@@ -7,8 +7,8 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile
from pytest_tests.helpers.bearer_token import create_bearer_token
from pytest_tests.helpers.container_access import (
from ....helpers.bearer_token import create_bearer_token
from ....helpers.container_access import (
ALL_OBJECT_OPERATIONS,
assert_access_to_container,
assert_full_access_to_container,

View file

@@ -35,7 +35,7 @@ from frostfs_testlib.testing.test_control import run_optionally, wait_for_succes
from frostfs_testlib.utils import env_utils, string_utils, version_utils
from frostfs_testlib.utils.file_utils import TestFile, generate_file
from pytest_tests.resources.common import TEST_CYCLES_COUNT
from ..resources.common import TEST_CYCLES_COUNT
logger = logging.getLogger("NeoLogger")

View file

@@ -13,7 +13,7 @@ from frostfs_testlib.steps.cli.container import (
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.utility import placement_policy_from_container
from ...helpers.utility import placement_policy_from_container
@pytest.mark.nightly

View file

@@ -16,8 +16,8 @@ from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.cli_utils import parse_netmap_output
from frostfs_testlib.utils.file_utils import generate_file
from pytest_tests.helpers.utility import placement_policy_from_container
from pytest_tests.resources.policy_error_patterns import NOT_ENOUGH_TO_SELECT, NOT_FOUND_FILTER, NOT_FOUND_SELECTOR, NOT_PARSE_POLICY
from ...helpers.utility import placement_policy_from_container
from ...resources.policy_error_patterns import NOT_ENOUGH_TO_SELECT, NOT_FOUND_FILTER, NOT_FOUND_SELECTOR, NOT_PARSE_POLICY
@pytest.mark.nightly

View file

@@ -34,6 +34,8 @@ from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_keeper import FileKeeper
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from ...resources.common import S3_POLICY_FILE_LOCATION
logger = logging.getLogger("NeoLogger")
stopped_nodes: list[StorageNode] = []
@@ -95,9 +97,7 @@ class TestFailoverStorage(ClusterTestBase):
)
with reporter.step("Check object data is not corrupted"):
got_file_path = get_object(
wallet, cid, oid, endpoint=replicated_nodes[0].get_rpc_endpoint(), shell=self.shell
)
got_file_path = get_object(wallet, cid, oid, endpoint=replicated_nodes[0].get_rpc_endpoint(), shell=self.shell)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
with reporter.step("Return all hosts"):
@@ -105,12 +105,10 @@ class TestFailoverStorage(ClusterTestBase):
with reporter.step("Check object data is not corrupted"):
replicated_nodes = wait_object_replication(cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
got_file_path = get_object(
wallet, cid, oid, shell=self.shell, endpoint=replicated_nodes[0].get_rpc_endpoint()
)
got_file_path = get_object(wallet, cid, oid, shell=self.shell, endpoint=replicated_nodes[0].get_rpc_endpoint())
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
@pytest.mark.parametrize("s3_policy", ["pytest_tests/resources/files/policy.json"], indirect=True)
@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
@allure.title("Do not ignore unhealthy tree endpoints (s3_client={s3_client})")
def test_unhealthy_tree(
self,
@@ -151,7 +149,7 @@ class TestFailoverStorage(ClusterTestBase):
wallet=default_wallet,
shell=self.shell,
endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(),
bucket_container_resolver=bucket_container_resolver
bucket_container_resolver=bucket_container_resolver,
)[0]
with reporter.step("Turn off all storage nodes except bucket node"):
@@ -282,9 +280,7 @@ class TestEmptyMap(ClusterTestBase):
cluster_state_controller.stop_services_of_type(StorageNode)
with reporter.step("Remove all nodes from network map"):
remove_nodes_from_map_morph(
shell=self.shell, cluster=self.cluster, remove_nodes=self.cluster.services(StorageNode)
)
remove_nodes_from_map_morph(shell=self.shell, cluster=self.cluster, remove_nodes=self.cluster.services(StorageNode))
with reporter.step("Return all storage nodes to network map"):
self.return_nodes_after_stop_with_check_empty_map(cluster_state_controller)
@@ -465,9 +461,7 @@ class TestStorageDataLoss(ClusterTestBase):
s3_client.put_object(bucket, complex_object_path)
with reporter.step("Check objects are in bucket"):
s3_helper.check_objects_in_bucket(
s3_client, bucket, expected_objects=[simple_object_key, complex_object_key]
)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[simple_object_key, complex_object_key])
with reporter.step("Stop storage services on all nodes"):
cluster_state_controller.stop_services_of_type(StorageNode)
@@ -581,17 +575,13 @@ class TestStorageDataLoss(ClusterTestBase):
exception_messages.append(f"Shard {shard} changed status to {status}")
with reporter.step("No related errors should be in log"):
if node_under_test.host.is_message_in_logs(
message_regex=r"\Wno such file or directory\W", since=test_start_time
):
if node_under_test.host.is_message_in_logs(message_regex=r"\Wno such file or directory\W", since=test_start_time):
exception_messages.append(f"Node {node_under_test} have shard errors in logs")
with reporter.step("Pass test if no errors found"):
assert not exception_messages, "\n".join(exception_messages)
@allure.title(
"Loss of one node should trigger use of tree and storage service in another node (s3_client={s3_client})"
)
@allure.title("Loss of one node should trigger use of tree and storage service in another node (s3_client={s3_client})")
def test_s3_one_endpoint_loss(
self,
bucket,
@@ -613,7 +603,7 @@ class TestStorageDataLoss(ClusterTestBase):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
@pytest.mark.parametrize("s3_policy", ["pytest_tests/resources/files/policy.json"], indirect=True)
@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
@allure.title("After Pilorama.db loss on one node object is retrievable (s3_client={s3_client})")
def test_s3_one_pilorama_loss(
self,

View file

@@ -44,7 +44,7 @@ from frostfs_testlib.utils import string_utils
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import generate_file
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
logger = logging.getLogger("NeoLogger")
check_nodes: list[StorageNode] = []

View file

@@ -4,11 +4,11 @@ import allure
from frostfs_testlib.testing.parallel import parallel
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
from frostfs_testlib.steps.cli.container import create_container, delete_container, search_nodes_with_container, wait_for_container_deletion
from frostfs_testlib.steps.cli.object import delete_object, head_object, put_object_to_random_node
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
from frostfs_testlib.steps.metrics import calc_metrics_count_from_stdout, check_metrics_counter, get_metrics_value
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
@@ -24,6 +24,14 @@ class TestContainerMetrics(ClusterTestBase):
def put_object_parallel(self, file_path: str, wallet: WalletInfo, cid: str):
oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
return oid
@reporter.step("Get metrics value from node")
def get_metrics_search_by_greps_parallel(self, node: ClusterNode, **greps):
try:
content_stdout = node.metrics.storage.get_metrics_search_by_greps(greps)
return calc_metrics_count_from_stdout(content_stdout)
except Exception as e:
return None
@allure.title("Container metrics (obj_size={object_size},policy={policy})")
@pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
@@ -164,3 +172,43 @@
for act_metric in metrics_value_nodes:
assert act_metric >= 0, "Metrics value is negative"
assert sum(metrics_value_nodes) // len(self.cluster.cluster_nodes) == tombstones_size, "tomstone size of objects not correct"
@allure.title("Container metrics (policy={policy})")
@pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
def test_container_metrics_delete_complex_objects(
self,
complex_object_size: ObjectSize,
default_wallet: WalletInfo,
cluster: Cluster,
placement_policy: str,
policy: str
):
copies = 2 if policy == "REP" else 1
objects_count = 2
metric_name = "frostfs_node_engine_container_objects_total"
with reporter.step(f"Create container"):
cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, rule=placement_policy)
with reporter.step(f"Put {objects_count} objects"):
files_path = [generate_file(complex_object_size.value) for _ in range(objects_count)]
futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=cid)
oids = [future.result() for future in futures]
with reporter.step(f"Check metrics value in each nodes, should be {objects_count} for 'user'"):
check_metrics_counter(cluster.cluster_nodes, counter_exp=objects_count * copies, command=metric_name, cid=cid, type="user")
with reporter.step("Delete objects and container"):
for oid in oids:
delete_object(default_wallet, cid, oid, self.shell, cluster.default_rpc_endpoint)
delete_container(default_wallet, cid, self.shell, cluster.default_rpc_endpoint)
with reporter.step("Tick epoch and check container was deleted"):
self.tick_epoch()
wait_for_container_deletion(default_wallet, cid, shell=self.shell, endpoint=cluster.default_rpc_endpoint)
with reporter.step(f"Check metrics value in each nodes, should not be show any result"):
futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name, cid=cid)
metrics_results = [future.result() for future in futures if future.result() is not None]
assert len(metrics_results) == 0, f"Metrics value is not empty in Prometheus, actual value in nodes: {metrics_results}"
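
The new metrics test leans on get_metrics_search_by_greps_parallel returning None when a node raises while being queried, so after filtering out the Nones an empty list means no node still reports the per-container counter. A rough standalone illustration of that pattern, using concurrent.futures and a made-up read_metric accessor instead of the testlib's parallel() helper:

```python
from concurrent.futures import ThreadPoolExecutor


def metric_or_none(node):
    try:
        # read_metric is a hypothetical stand-in for
        # node.metrics.storage.get_metrics_search_by_greps(...).
        return node.read_metric("frostfs_node_engine_container_objects_total")
    except Exception:
        return None  # "metric not found on this node" counts as success here


def assert_metric_absent(nodes):
    with ThreadPoolExecutor() as pool:
        leftovers = [value for value in pool.map(metric_or_none, nodes) if value is not None]
    assert not leftovers, f"Metric still reported on some nodes: {leftovers}"
```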

View file

@@ -21,8 +21,8 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from pytest import FixtureRequest
from pytest_tests.helpers.bearer_token import create_bearer_token
from pytest_tests.helpers.container_access import assert_full_access_to_container
from ...helpers.bearer_token import create_bearer_token
from ...helpers.container_access import assert_full_access_to_container
@pytest.fixture(scope="session")

View file

@@ -12,7 +12,7 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
logger = logging.getLogger("NeoLogger")

View file

@@ -31,7 +31,7 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises, wait_for_success
from frostfs_testlib.utils import datetime_utils, string_utils
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
logger = logging.getLogger("NeoLogger")

View file

@@ -22,6 +22,8 @@ from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from ...resources.common import S3_POLICY_FILE_LOCATION
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
if "ec_policy" not in metafunc.fixturenames:
@@ -39,7 +41,8 @@ def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
100: ["EC 12.4", "EC 8.4", "EC 5.3", "EC 4.4"],
}
metafunc.parametrize("ec_policy, node_count", ((ec_policy, node_count) for ec_policy in ec_map[node_count]))
nearest_node_count = ([4] + (list(filter(lambda x: x <= node_count, ec_map.keys()))))[-1]
metafunc.parametrize("ec_policy, node_count", ((ec_policy, node_count) for ec_policy in ec_map[nearest_node_count]))
@allure.title("Initialized remote FrostfsAdm")
@@ -677,7 +680,7 @@ class TestECReplication(ClusterTestBase):
)
@allure.title("Create bucket with EC policy (s3_client={s3_client})")
@pytest.mark.parametrize("s3_policy, s3_client", [("pytest_tests/resources/files/policy.json", AwsCliClient)], indirect=True)
@pytest.mark.parametrize("s3_policy, s3_client", [(S3_POLICY_FILE_LOCATION, AwsCliClient)], indirect=True)
def test_create_bucket_with_ec_location(
self, s3_client: S3ClientWrapper, bucket_container_resolver: BucketContainerResolver, grpc_client: GrpcClientWrapper
) -> None:
@@ -692,7 +695,7 @@ class TestECReplication(ClusterTestBase):
assert container
@allure.title("Bucket object count chunks (s3_client={s3_client}, size={object_size})")
@pytest.mark.parametrize("s3_policy, s3_client", [("pytest_tests/resources/files/policy.json", AwsCliClient)], indirect=True)
@pytest.mark.parametrize("s3_policy, s3_client", [(S3_POLICY_FILE_LOCATION, AwsCliClient)], indirect=True)
def test_count_chunks_bucket_with_ec_location(
self,
s3_client: S3ClientWrapper,

View file

@@ -15,7 +15,7 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
from pytest_tests.helpers.bearer_token import create_bearer_token
from ....helpers.bearer_token import create_bearer_token
logger = logging.getLogger("NeoLogger")

View file

@@ -20,7 +20,7 @@ from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
from ....helpers.utility import wait_for_gc_pass_on_storage_nodes
OBJECT_NOT_FOUND_ERROR = "not found"

View file

@@ -1,5 +1,4 @@
import json
import os
import allure
import pytest
@@ -15,10 +14,12 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import generate_file
from ....resources.common import S3_POLICY_FILE_LOCATION
@pytest.mark.nightly
@pytest.mark.s3_gate
@pytest.mark.parametrize("s3_policy", ["pytest_tests/resources/files/policy.json"], indirect=True)
@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
class TestS3GatePolicy(ClusterTestBase):
@allure.title("Bucket creation with retention policy applied (s3_client={s3_client})")
def test_s3_bucket_location(
@@ -100,7 +101,6 @@ class TestS3GatePolicy(ClusterTestBase):
s3_client.get_bucket_policy(bucket)
with reporter.step("Put new policy"):
custom_policy = f"file://{os.getcwd()}/pytest_tests/resources/files/bucket_policy.json"
custom_policy = {
"Version": "2012-10-17",
"Id": "aaaa-bbbb-cccc-dddd",

View file

@@ -31,7 +31,17 @@ class TestLogs:
if not os.path.exists(logs_dir):
os.makedirs(logs_dir)
issues_regex = r"\bpanic\b|\boom\b|too many|insufficient funds|insufficient amount of gas|cannot assign requested address|\bunable to process\b"
regexes = [
r"\bpanic\b",
r"\boom\b",
r"too many",
r"insufficient funds",
r"insufficient amount of gas",
r"cannot assign requested address",
r"\bunable to process\b",
r"\bmaximum number of subscriptions is reached\b",
]
issues_regex = "|".join(regexes)
exclude_filter = r"too many requests"
log_level_priority = "3" # will include 0-3 priority logs (0: emergency 1: alerts 2: critical 3: errors)
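
Splitting the alternation into a list keeps each pattern reviewable and makes it easy to append new ones, such as the subscriptions-limit message added here. A rough sketch of how the joined pattern interacts with exclude_filter; the test itself applies these patterns to collected host logs, not to an in-memory list:

```python
import re

regexes = [
    r"\bpanic\b",
    r"too many",
    r"\bmaximum number of subscriptions is reached\b",
]
issues_regex = "|".join(regexes)
exclude_filter = r"too many requests"

log_lines = [
    "node-1: panic: runtime error: invalid memory address",  # reported
    "node-2: dropping client: too many requests",            # matched, then excluded
    "node-3: maximum number of subscriptions is reached",    # reported
    "node-4: epoch tick processed",                           # ignored
]
issues = [line for line in log_lines if re.search(issues_regex, line) and not re.search(exclude_filter, line)]

assert len(issues) == 2
```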

View file

@@ -1,7 +1,7 @@
allure-pytest==2.13.2
allure-python-commons==2.13.2
base58==2.1.0
boto3==1.16.33
boto3==1.35.30
botocore==1.19.33
configobj==5.0.6
neo-mamba==1.0.0