Compare commits

10 commits

Author SHA1 Message Date
a7e905099b [] Update revision allure-validator
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-09-02 12:12:28 +03:00
65955a6b06 [#293] Integrate allure-validator into pre-commit hook
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-08-27 16:25:23 +00:00
19a690361d [#294] Fixed test metrics garbage collector 2024-08-27 08:33:55 +03:00
0a5ce7f21a [#292] Skip failing APE tests
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-08-16 17:18:27 +03:00
c8b95d98f4 [#291] Change error message for network test
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2024-08-16 12:07:50 +03:00
ed19a83068 [#290] Fixed tests logs metrics 2024-08-15 07:31:43 +00:00
f1fb95b40c [#285] Add missing titles to tests
Added titles to the following tests:
- `test_static_session_token_container_create`
- `test_static_session_token_container_create_with_other_verb`
- `test_static_session_token_container_create_with_other_wallet`
- `test_static_session_token_container_delete`
- `test_put_with_bearer_when_eacl_restrict`
- `test_shard_errors`

Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-08-14 11:08:01 +03:00
108aae59dd [#284] Add required parameters to test titles
Added `object_size` to `test_object_put_get_bucketname_key`

Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-08-12 12:14:04 +00:00
9e1e4610a8 [#288] Add static title for test test_container_creation
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-08-12 12:12:57 +00:00
b6aeb97193 [#289] Remove duplicate test test_more_one_ec_policy
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-08-12 14:28:44 +03:00
11 changed files with 69 additions and 70 deletions

View file

@@ -9,6 +9,16 @@ repos:
hooks:
- id: isort
name: isort (python)
- repo: https://git.frostfs.info/TrueCloudLab/allure-validator
rev: 1.1.0
hooks:
- id: allure-validator
args: [
"pytest_tests/",
"--plugins",
"frostfs[-_]testlib*",
]
pass_filenames: false
ci:
autofix_prs: false

View file

@@ -106,6 +106,7 @@ class TestApeFilters(ClusterTestBase):
@pytest.mark.sanity
@allure.title("Operations with request filter (match_type={match_type}, obj_size={object_size})")
@pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
@pytest.mark.skip("https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1243")
def test_ape_filters_request(
self,
frostfs_cli: FrostfsCli,
@@ -161,6 +162,7 @@ class TestApeFilters(ClusterTestBase):
@allure.title("Operations with deny user headers filter (match_type={match_type}, obj_size={object_size})")
@pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
@pytest.mark.skip("https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1300")
def test_ape_deny_filters_object(
self,
frostfs_cli: FrostfsCli,
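
Both skips above follow the same pattern: the tracking-issue URL is passed as the skip reason, so it shows up verbatim in the pytest and Allure reports. A minimal standalone sketch of the pattern (the test name is illustrative; the URL is the one from the first hunk):

import pytest

# The first positional argument of pytest.mark.skip is the reason string;
# putting the tracker URL here makes the skipped result self-explanatory.
@pytest.mark.skip("https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1243")
def test_ape_filter_placeholder():
    ...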

View file

@@ -19,12 +19,10 @@ from pytest_tests.helpers.utility import placement_policy_from_container
@pytest.mark.container
@pytest.mark.sanity
class TestContainer(ClusterTestBase):
@allure.title("Create container (name={name})")
@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
@pytest.mark.smoke
def test_container_creation(self, default_wallet: WalletInfo, name: str):
scenario_title = "with name" if name else "without name"
allure.dynamic.title(f"Create container {scenario_title}")
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
@@ -59,9 +57,7 @@ class TestContainer(ClusterTestBase):
with reporter.step("Check container has correct information"):
expected_policy = placement_rule.casefold()
actual_policy = placement_policy_from_container(container_info)
assert (
actual_policy == expected_policy
), f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
assert actual_policy == expected_policy, f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
for info in info_to_check:
expected_info = info.casefold()
@@ -112,10 +108,6 @@ class TestContainer(ClusterTestBase):
with reporter.step("Delete containers and check they were deleted"):
for cid in cids:
delete_container(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, await_mode=True
)
containers_list = list_containers(
wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
delete_container(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, await_mode=True)
containers_list = list_containers(wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
assert cid not in containers_list, "Container not deleted"
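
The change above replaces an allure.dynamic.title call inside the test body with a parametrized static title, the convention that allure-validator enforces: allure-pytest substitutes {name} with each parametrized value, so every case still gets a distinct report title. A standalone sketch of the pattern (the wallet fixtures of the real test are omitted):

import allure
import pytest

# {name} in the title is filled in from the parametrize value for each case.
@allure.title("Create container (name={name})")
@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
def test_container_creation_title_sketch(name: str):
    ...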

View file

@@ -208,7 +208,7 @@ class TestFailoverNetwork(ClusterTestBase):
self.tick_epochs(1, alive_node=nodes_without_an_object[0].storage_node, wait_block=2)
with reporter.step("Get object for target nodes to data interfaces, expect false"):
with pytest.raises(RuntimeError, match="return code: 1"):
with pytest.raises(RuntimeError, match="can't create API client: can't init SDK client: gRPC dial: context deadline exceeded"):
get_object(
wallet=default_wallet,
cid=storage_object.cid,
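
The tightened expectation relies on pytest.raises(match=...), which runs re.search against the string form of the raised exception, so a distinctive substring of the real gRPC error is enough. A self-contained illustration of those semantics (the raised message mimics the error text from the hunk above):

import pytest

# match= is a regular expression applied with re.search, not an exact match,
# so a plain substring of the expected error message is sufficient.
def test_match_is_a_regex_search():
    with pytest.raises(RuntimeError, match="gRPC dial: context deadline exceeded"):
        raise RuntimeError("can't create API client: can't init SDK client: gRPC dial: context deadline exceeded")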

View file

@@ -33,9 +33,7 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
return sum(map(int, result))
@allure.title("Garbage collector expire_at object")
def test_garbage_collector_metrics_expire_at_object(
self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster
):
def test_garbage_collector_metrics_expire_at_object(self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
file_path = generate_file(simple_object_size.value)
placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
metrics_step = 1
@@ -43,9 +41,7 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
with reporter.step("Get current garbage collector metrics for each nodes"):
metrics_counter = {}
for node in cluster.cluster_nodes:
metrics_counter[node] = get_metrics_value(
node, command="frostfs_node_garbage_collector_marked_for_removal_objects_total"
)
metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_marked_for_removal_objects_total")
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
@@ -63,18 +59,12 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
with reporter.step("Get object nodes"):
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
object_nodes = [
cluster_node
for cluster_node in cluster.cluster_nodes
if cluster_node.storage_node in object_storage_nodes
]
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
with reporter.step("Tick Epoch"):
self.tick_epochs(epochs_to_tick=2, wait_block=2)
with reporter.step(
f"Check garbage collector metrics 'the counter should increase by {metrics_step}' in object nodes"
):
with reporter.step(f"Check garbage collector metrics 'the counter should increase by {metrics_step}' in object nodes"):
for node in object_nodes:
metrics_counter[node] += metrics_step
@@ -86,30 +76,38 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
)
@allure.title("Garbage collector delete object")
def test_garbage_collector_metrics_deleted_objects(
self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster
):
def test_garbage_collector_metrics_deleted_objects(self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
file_path = generate_file(simple_object_size.value)
placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
metrics_step = 1
with reporter.step("Select random node"):
node = random.choice(cluster.cluster_nodes)
with reporter.step("Get current garbage collector metrics for selected node"):
metrics_counter = get_metrics_value(node, command="frostfs_node_garbage_collector_deleted_objects_total")
with reporter.step("Get current garbage collector metrics for each nodes"):
metrics_counter = {}
for node in cluster.cluster_nodes:
metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_deleted_objects_total")
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
with reporter.step("Put object to selected node"):
oid = put_object(default_wallet, file_path, cid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step("Put object to random node"):
oid = put_object_to_random_node(
default_wallet,
file_path,
cid,
self.shell,
cluster,
)
with reporter.step("Get object nodes"):
object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
with reporter.step("Delete file, wait until gc remove object"):
delete_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check garbage collector metrics 'the counter should increase by {metrics_step}'"):
metrics_counter += metrics_step
for node in object_nodes:
exp_metrics_counter = metrics_counter[node] + metrics_step
check_metrics_counter(
[node], counter_exp=metrics_counter, command="frostfs_node_garbage_collector_deleted_objects_total"
[node], counter_exp=exp_metrics_counter, command="frostfs_node_garbage_collector_deleted_objects_total"
)
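
The rewritten test captures a per-node baseline before the deletion and then expects the counter on every node that actually stores the object to grow by exactly metrics_step, instead of checking a single pre-selected node. A minimal sketch of that baseline-plus-delta assertion, with plain integers standing in for the metric reads (node names and values are made up):

# Counters captured before the object is deleted, and again after the GC ran.
metrics_step = 1
baseline = {"node-1": 10, "node-2": 7}
after_gc = {"node-1": 11, "node-2": 8}
for node, before in baseline.items():
    assert after_gc[node] == before + metrics_step, f"unexpected GC counter on {node}"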

View file

@@ -16,37 +16,42 @@ from frostfs_testlib.testing.test_control import wait_for_success
class TestLogsMetrics(ClusterTestBase):
@pytest.fixture
def revert_all(self, cluster_state_controller: ClusterStateController):
yield
cluster_state_controller.manager(ConfigStateManager).revert_all()
def restart_storage_service(self, cluster_state_controller: ClusterStateController) -> datetime:
config_manager = cluster_state_controller.manager(ConfigStateManager)
config_manager.csc.stop_services_of_type(StorageNode)
restart_time = datetime.now(timezone.utc)
config_manager.csc.start_services_of_type(StorageNode)
yield restart_time
cluster_state_controller.manager(ConfigStateManager).revert_all()
return restart_time
@wait_for_success(interval=10)
def check_metrics_in_node(self, cluster_node: ClusterNode, restart_time: datetime, log_priority: str = None, **metrics_greps):
counter_logs = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time, log_priority)
current_time = datetime.now(timezone.utc)
counter_metrics = get_metrics_value(cluster_node, **metrics_greps)
counter_logs = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time, current_time, log_priority)
assert counter_logs == counter_metrics, f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"
@staticmethod
def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, log_priority: str):
def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, until_time: datetime, log_priority: str):
count_logs = 0
try:
logs = cluster_node.host.get_filtered_logs(log_level, unit="frostfs-storage", since=after_time, priority=log_priority)
result = re.findall(rf"\s+{log_level}\s+", logs)
logs = cluster_node.host.get_filtered_logs(
log_level, unit="frostfs-storage", since=after_time, until=until_time, priority=log_priority
)
result = re.findall(rf"Z\s+{log_level}\s+", logs)
count_logs += len(result)
except RuntimeError as e:
...
return count_logs
@allure.title("Metrics for the log counter")
def test_log_counter_metrics(self, cluster: Cluster, restart_storage_service: datetime):
restart_time = restart_storage_service
def test_log_counter_metrics(self, cluster_state_controller: ClusterStateController, revert_all):
restart_time = self.restart_storage_service(cluster_state_controller)
with reporter.step("Select random node"):
node = random.choice(cluster.cluster_nodes)
node = random.choice(self.cluster.cluster_nodes)
with reporter.step(f"Check metrics count logs with level 'info'"):
self.check_metrics_in_node(
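
The stricter regex anchors the level keyword to the "Z" that closes an ISO-8601 UTC timestamp, so level words that merely appear inside a message body are no longer counted. A small self-contained check of that behaviour (the log lines are made up, assuming the timestamp-then-level layout used by the storage service logs):

import re

# One real record per level plus a message that merely mentions the word "info".
logs = (
    "2024-08-15T07:31:43.001Z\tinfo\tengine\tshard started\n"
    "2024-08-15T07:31:44.002Z\tdebug\tpolicer\tskipping info block\n"
)
assert len(re.findall(r"Z\s+info\s+", logs)) == 1   # new pattern counts only real info records
assert len(re.findall(r"\s+info\s+", logs)) == 2    # old pattern over-counts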

View file

@@ -846,15 +846,6 @@ class TestECReplication(ClusterTestBase):
).stdout
assert container
@allure.title("[NEGATIVE] Don`t create more 1 EC policy")
def test_more_one_ec_policy(
self,
frostfs_cli: FrostfsCli,
) -> None:
with reporter.step("Create container with policy - 'EC 2.1 EC 1.1'"):
with pytest.raises(RuntimeError, match="can't parse placement policy"):
self.create_container(frostfs_cli, self.cluster.default_rpc_endpoint, "EC 2.1 EC 1.1 CBF 1 SELECT 4 FROM *")
@allure.title("Bucket object count chunks (s3_client={s3_client}, size={object_size})")
@pytest.mark.parametrize("s3_policy, s3_client", [("pytest_tests/resources/files/policy.json", AwsCliClient)], indirect=True)
def test_count_chunks_bucket_with_ec_location(

View file

@@ -60,6 +60,7 @@ class Test_http_bearer(ClusterTestBase):
error_pattern="access to object operation denied",
)
@allure.title("Put object via HTTP using bearer token (object_size={object_size})")
def test_put_with_bearer_when_eacl_restrict(
self,
object_size: ObjectSize,

View file

@@ -125,7 +125,7 @@ class Test_http_object(ClusterTestBase):
http_request_path=request,
)
@allure.title("Put over s3, Get over HTTP with bucket name and key")
@allure.title("Put over s3, Get over HTTP with bucket name and key (object_size={object_size})")
@pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
def test_object_put_get_bucketname_key(self, object_size: ObjectSize, s3_client: S3ClientWrapper):
"""

View file

@@ -1,3 +1,4 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.shell import Shell
@@ -22,6 +23,7 @@ class TestSessionTokenContainer(ClusterTestBase):
"""
return {verb: get_container_signed_token(owner_wallet, user_wallet, verb, client_shell, temp_directory) for verb in ContainerVerb}
@allure.title("Static session with create operation")
def test_static_session_token_container_create(
self,
owner_wallet: WalletInfo,
@@ -46,6 +48,7 @@ class TestSessionTokenContainer(ClusterTestBase):
assert cid not in list_containers(user_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
assert cid in list_containers(owner_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
@allure.title("[NEGATIVE] Static session without create operation")
def test_static_session_token_container_create_with_other_verb(
self,
user_wallet: WalletInfo,
@@ -65,6 +68,7 @@ class TestSessionTokenContainer(ClusterTestBase):
wait_for_creation=False,
)
@allure.title("[NEGATIVE] Static session with create operation for other wallet")
def test_static_session_token_container_create_with_other_wallet(
self,
stranger_wallet: WalletInfo,
@@ -83,6 +87,7 @@ class TestSessionTokenContainer(ClusterTestBase):
wait_for_creation=False,
)
@allure.title("Static session with delete operation")
def test_static_session_token_container_delete(
self,
owner_wallet: WalletInfo,

View file

@@ -31,9 +31,7 @@ class TestControlShard(ClusterTestBase):
data_path = node.storage_node.get_data_directory()
all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip()
for data_dir in all_datas.replace(".", "").strip().split("\n"):
check_dir = node_shell.exec(
f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0"
).stdout
check_dir = node_shell.exec(f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0").stdout
if "1" in check_dir:
object_path = f"{data_path}/data/{data_dir}/data/{oid_path}"
object_name = f"{oid[4:]}.{cid}"
@@ -66,9 +64,7 @@ class TestControlShard(ClusterTestBase):
basic_acl=EACL_PUBLIC_READ_WRITE,
)
file = generate_file(round(max_object_size * 0.8))
oid = put_object(
wallet=default_wallet, path=file, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
oid = put_object(wallet=default_wallet, path=file, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
with reporter.step("Search node with object"):
nodes = get_object_nodes(cluster=self.cluster, cid=cid, oid=oid, alive_node=self.cluster.cluster_nodes[0])
@@ -76,9 +72,7 @@ class TestControlShard(ClusterTestBase):
object_path, object_name = self.get_object_path_and_name_file(oid, cid, nodes[0])
nodes[0].host.get_shell().exec(f"chmod +r {object_path}/{object_name}")
delete_object(
wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
delete_container(wallet=default_wallet, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
@staticmethod
@@ -117,6 +111,7 @@ class TestControlShard(ClusterTestBase):
assert set(shards_from_config) == set(shards_from_cli)
@allure.title("Shard become read-only when errors exceeds threshold")
@pytest.mark.failover
def test_shard_errors(
self,