forked from TrueCloudLab/frostfs-testcases
Compare commits
10 commits: 6a372cc1c0 ... a7e905099b
| Author | SHA1 | Date |
| --- | --- | --- |
|  | a7e905099b |  |
|  | 65955a6b06 |  |
|  | 19a690361d |  |
|  | 0a5ce7f21a |  |
|  | c8b95d98f4 |  |
|  | ed19a83068 |  |
|  | f1fb95b40c |  |
|  | 108aae59dd |  |
|  | 9e1e4610a8 |  |
|  | b6aeb97193 |  |
11 changed files with 69 additions and 70 deletions
```diff
@@ -9,6 +9,16 @@ repos:
     hooks:
       - id: isort
         name: isort (python)
+  - repo: https://git.frostfs.info/TrueCloudLab/allure-validator
+    rev: 1.1.0
+    hooks:
+      - id: allure-validator
+        args: [
+          "pytest_tests/",
+          "--plugins",
+          "frostfs[-_]testlib*",
+        ]
+        pass_filenames: false
 
 ci:
   autofix_prs: false
```
```diff
@@ -106,6 +106,7 @@ class TestApeFilters(ClusterTestBase):
     @pytest.mark.sanity
     @allure.title("Operations with request filter (match_type={match_type}, obj_size={object_size})")
     @pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
+    @pytest.mark.skip("https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1243")
     def test_ape_filters_request(
         self,
         frostfs_cli: FrostfsCli,
@@ -161,6 +162,7 @@ class TestApeFilters(ClusterTestBase):
 
     @allure.title("Operations with deny user headers filter (match_type={match_type}, obj_size={object_size})")
     @pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
+    @pytest.mark.skip("https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1300")
     def test_ape_deny_filters_object(
         self,
         frostfs_cli: FrostfsCli,
```
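A side note on the two markers added above: `pytest.mark.skip` takes the reason as its first positional argument, so passing the tracker URL directly is equivalent to `reason=...` and the URL shows up verbatim in the skip report. A minimal standalone sketch:

```python
import pytest


@pytest.mark.skip("https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1243")
def test_blocked_by_known_issue():
    # Never runs; the issue URL above is reported as the skip reason.
    raise AssertionError("unreachable")
```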
```diff
@@ -19,12 +19,10 @@ from pytest_tests.helpers.utility import placement_policy_from_container
 @pytest.mark.container
 @pytest.mark.sanity
 class TestContainer(ClusterTestBase):
+    @allure.title("Create container (name={name})")
     @pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
     @pytest.mark.smoke
     def test_container_creation(self, default_wallet: WalletInfo, name: str):
-        scenario_title = "with name" if name else "without name"
-        allure.dynamic.title(f"Create container {scenario_title}")
-
         wallet = default_wallet
 
         placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
@@ -59,9 +57,7 @@ class TestContainer(ClusterTestBase):
         with reporter.step("Check container has correct information"):
             expected_policy = placement_rule.casefold()
             actual_policy = placement_policy_from_container(container_info)
-            assert (
-                actual_policy == expected_policy
-            ), f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
+            assert actual_policy == expected_policy, f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
 
             for info in info_to_check:
                 expected_info = info.casefold()
@@ -112,10 +108,6 @@ class TestContainer(ClusterTestBase):
 
         with reporter.step("Delete containers and check they were deleted"):
             for cid in cids:
-                delete_container(
-                    wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, await_mode=True
-                )
-                containers_list = list_containers(
-                    wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
-                )
+                delete_container(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, await_mode=True)
+                containers_list = list_containers(wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
                 assert cid not in containers_list, "Container not deleted"
```
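The first hunk above (and the similar title changes in other files) relies on allure-pytest substituting `{param}` placeholders in a static `@allure.title` with the test's argument values, which makes the `allure.dynamic.title` call redundant. A minimal sketch of the pattern (standalone test, hypothetical body):

```python
import allure
import pytest


@allure.title("Create container (name={name})")
@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
def test_container_creation(name: str):
    # allure-pytest fills {name} from the parametrized argument, so each
    # variant gets a distinct report title without allure.dynamic.title().
    assert isinstance(name, str)
```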
```diff
@@ -208,7 +208,7 @@ class TestFailoverNetwork(ClusterTestBase):
         self.tick_epochs(1, alive_node=nodes_without_an_object[0].storage_node, wait_block=2)
 
         with reporter.step("Get object for target nodes to data interfaces, expect false"):
-            with pytest.raises(RuntimeError, match="return code: 1"):
+            with pytest.raises(RuntimeError, match="can't create API client: can't init SDK client: gRPC dial: context deadline exceeded"):
                 get_object(
                     wallet=default_wallet,
                     cid=storage_object.cid,
```
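Worth noting for the tightened expectation above: `pytest.raises(match=...)` applies `re.search` against the string form of the raised exception, so the pattern only needs to match a fragment of the error text. A self-contained illustration (synthetic error message mirroring the one in the diff):

```python
import pytest


def test_match_is_a_regex_search():
    # match= is re.search'd against str(exception); a distinctive fragment
    # of the gRPC dial error is enough, no full-message match required.
    with pytest.raises(RuntimeError, match="gRPC dial: context deadline exceeded"):
        raise RuntimeError("can't create API client: can't init SDK client: gRPC dial: context deadline exceeded")
```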
```diff
@@ -33,9 +33,7 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
         return sum(map(int, result))
 
     @allure.title("Garbage collector expire_at object")
-    def test_garbage_collector_metrics_expire_at_object(
-        self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster
-    ):
+    def test_garbage_collector_metrics_expire_at_object(self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
         file_path = generate_file(simple_object_size.value)
         placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
         metrics_step = 1
@@ -43,9 +41,7 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
         with reporter.step("Get current garbage collector metrics for each nodes"):
             metrics_counter = {}
             for node in cluster.cluster_nodes:
-                metrics_counter[node] = get_metrics_value(
-                    node, command="frostfs_node_garbage_collector_marked_for_removal_objects_total"
-                )
+                metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_marked_for_removal_objects_total")
 
         with reporter.step(f"Create container with policy {placement_policy}"):
             cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)
@@ -63,18 +59,12 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
 
         with reporter.step("Get object nodes"):
             object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
-            object_nodes = [
-                cluster_node
-                for cluster_node in cluster.cluster_nodes
-                if cluster_node.storage_node in object_storage_nodes
-            ]
+            object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
 
         with reporter.step("Tick Epoch"):
             self.tick_epochs(epochs_to_tick=2, wait_block=2)
 
-        with reporter.step(
-            f"Check garbage collector metrics 'the counter should increase by {metrics_step}' in object nodes"
-        ):
+        with reporter.step(f"Check garbage collector metrics 'the counter should increase by {metrics_step}' in object nodes"):
             for node in object_nodes:
                 metrics_counter[node] += metrics_step
 
@@ -86,30 +76,38 @@ class TestGarbageCollectorMetrics(ClusterTestBase):
         )
 
     @allure.title("Garbage collector delete object")
-    def test_garbage_collector_metrics_deleted_objects(
-        self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster
-    ):
+    def test_garbage_collector_metrics_deleted_objects(self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster):
         file_path = generate_file(simple_object_size.value)
         placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
         metrics_step = 1
 
-        with reporter.step("Select random node"):
-            node = random.choice(cluster.cluster_nodes)
-
-        with reporter.step("Get current garbage collector metrics for selected node"):
-            metrics_counter = get_metrics_value(node, command="frostfs_node_garbage_collector_deleted_objects_total")
+        with reporter.step("Get current garbage collector metrics for each nodes"):
+            metrics_counter = {}
+            for node in cluster.cluster_nodes:
+                metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_deleted_objects_total")
 
         with reporter.step(f"Create container with policy {placement_policy}"):
             cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
 
-        with reporter.step("Put object to selected node"):
-            oid = put_object(default_wallet, file_path, cid, self.shell, node.storage_node.get_rpc_endpoint())
+        with reporter.step("Put object to random node"):
+            oid = put_object_to_random_node(
+                default_wallet,
+                file_path,
+                cid,
+                self.shell,
+                cluster,
+            )
+
+        with reporter.step("Get object nodes"):
+            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
+            object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
 
         with reporter.step("Delete file, wait until gc remove object"):
             delete_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())
 
         with reporter.step(f"Check garbage collector metrics 'the counter should increase by {metrics_step}'"):
-            metrics_counter += metrics_step
-            check_metrics_counter(
-                [node], counter_exp=metrics_counter, command="frostfs_node_garbage_collector_deleted_objects_total"
-            )
+            for node in object_nodes:
+                exp_metrics_counter = metrics_counter[node] + metrics_step
+                check_metrics_counter(
+                    [node], counter_exp=exp_metrics_counter, command="frostfs_node_garbage_collector_deleted_objects_total"
+                )
```
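The last hunk replaces a single pre-selected node with a baseline-per-node pattern: capture each node's counter before the operation, then expect only the nodes actually holding the object to advance by the step. A generic sketch of that pattern with hypothetical helpers (`read_counter` stands in for `get_metrics_value`; not the testlib API):

```python
from typing import Callable, Dict, Hashable, List


def capture_baselines(nodes: List[Hashable], read_counter: Callable[[Hashable], int]) -> Dict[Hashable, int]:
    # Snapshot the counter on every node before the operation under test.
    return {node: read_counter(node) for node in nodes}


def expected_counters(baselines: Dict[Hashable, int], affected: List[Hashable], step: int = 1) -> Dict[Hashable, int]:
    # Only nodes that held the object are expected to advance by `step`.
    return {node: baselines[node] + step for node in affected}
```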
```diff
@@ -16,37 +16,42 @@ from frostfs_testlib.testing.test_control import wait_for_success
 
 class TestLogsMetrics(ClusterTestBase):
     @pytest.fixture
+    def revert_all(self, cluster_state_controller: ClusterStateController):
+        yield
+        cluster_state_controller.manager(ConfigStateManager).revert_all()
+
     def restart_storage_service(self, cluster_state_controller: ClusterStateController) -> datetime:
         config_manager = cluster_state_controller.manager(ConfigStateManager)
         config_manager.csc.stop_services_of_type(StorageNode)
         restart_time = datetime.now(timezone.utc)
         config_manager.csc.start_services_of_type(StorageNode)
-        yield restart_time
-
-        cluster_state_controller.manager(ConfigStateManager).revert_all()
+        return restart_time
 
     @wait_for_success(interval=10)
     def check_metrics_in_node(self, cluster_node: ClusterNode, restart_time: datetime, log_priority: str = None, **metrics_greps):
-        counter_logs = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time, log_priority)
+        current_time = datetime.now(timezone.utc)
         counter_metrics = get_metrics_value(cluster_node, **metrics_greps)
+        counter_logs = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time, current_time, log_priority)
         assert counter_logs == counter_metrics, f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"
 
     @staticmethod
-    def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, log_priority: str):
+    def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, until_time: datetime, log_priority: str):
         count_logs = 0
         try:
-            logs = cluster_node.host.get_filtered_logs(log_level, unit="frostfs-storage", since=after_time, priority=log_priority)
-            result = re.findall(rf"\s+{log_level}\s+", logs)
+            logs = cluster_node.host.get_filtered_logs(
+                log_level, unit="frostfs-storage", since=after_time, until=until_time, priority=log_priority
+            )
+            result = re.findall(rf"Z\s+{log_level}\s+", logs)
             count_logs += len(result)
         except RuntimeError as e:
             ...
         return count_logs
 
     @allure.title("Metrics for the log counter")
-    def test_log_counter_metrics(self, cluster: Cluster, restart_storage_service: datetime):
-        restart_time = restart_storage_service
+    def test_log_counter_metrics(self, cluster_state_controller: ClusterStateController, revert_all):
+        restart_time = self.restart_storage_service(cluster_state_controller)
         with reporter.step("Select random node"):
-            node = random.choice(cluster.cluster_nodes)
+            node = random.choice(self.cluster.cluster_nodes)
 
         with reporter.step(f"Check metrics count logs with level 'info'"):
             self.check_metrics_in_node(
```
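The regex change above (prefixing the level pattern with `Z`) anchors the match to the trailing `Z` of the UTC timestamp that opens each journald line, so a level word recurring inside a message body is not counted twice. A quick standalone check (synthetic log line, assumed timestamp format):

```python
import re

# Synthetic journald-style line: UTC timestamp ending in "Z", then the level.
line = "2024-01-01T00:00:00.000000000Z\tinfo\tgc: marked info level objects for removal"

loose = re.findall(r"\s+info\s+", line)      # also hits "info" inside the message body
anchored = re.findall(r"Z\s+info\s+", line)  # only the level column after the timestamp

assert len(loose) == 2 and len(anchored) == 1
```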
```diff
@@ -846,15 +846,6 @@ class TestECReplication(ClusterTestBase):
         ).stdout
         assert container
 
-    @allure.title("[NEGATIVE] Don`t create more 1 EC policy")
-    def test_more_one_ec_policy(
-        self,
-        frostfs_cli: FrostfsCli,
-    ) -> None:
-        with reporter.step("Create container with policy - 'EC 2.1 EC 1.1'"):
-            with pytest.raises(RuntimeError, match="can't parse placement policy"):
-                self.create_container(frostfs_cli, self.cluster.default_rpc_endpoint, "EC 2.1 EC 1.1 CBF 1 SELECT 4 FROM *")
-
     @allure.title("Bucket object count chunks (s3_client={s3_client}, size={object_size})")
     @pytest.mark.parametrize("s3_policy, s3_client", [("pytest_tests/resources/files/policy.json", AwsCliClient)], indirect=True)
     def test_count_chunks_bucket_with_ec_location(
```
```diff
@@ -60,6 +60,7 @@ class Test_http_bearer(ClusterTestBase):
             error_pattern="access to object operation denied",
         )
 
+    @allure.title("Put object via HTTP using bearer token (object_size={object_size})")
     def test_put_with_bearer_when_eacl_restrict(
         self,
         object_size: ObjectSize,
```
```diff
@@ -125,7 +125,7 @@ class Test_http_object(ClusterTestBase):
             http_request_path=request,
         )
 
-    @allure.title("Put over s3, Get over HTTP with bucket name and key")
+    @allure.title("Put over s3, Get over HTTP with bucket name and key (object_size={object_size})")
     @pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
     def test_object_put_get_bucketname_key(self, object_size: ObjectSize, s3_client: S3ClientWrapper):
         """
```
```diff
@@ -1,3 +1,4 @@
+import allure
 import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.shell import Shell
@@ -22,6 +23,7 @@ class TestSessionTokenContainer(ClusterTestBase):
         """
         return {verb: get_container_signed_token(owner_wallet, user_wallet, verb, client_shell, temp_directory) for verb in ContainerVerb}
 
+    @allure.title("Static session with create operation")
     def test_static_session_token_container_create(
         self,
         owner_wallet: WalletInfo,
@@ -46,6 +48,7 @@ class TestSessionTokenContainer(ClusterTestBase):
         assert cid not in list_containers(user_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
         assert cid in list_containers(owner_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
 
+    @allure.title("[NEGATIVE] Static session without create operation")
     def test_static_session_token_container_create_with_other_verb(
         self,
         user_wallet: WalletInfo,
@@ -65,6 +68,7 @@ class TestSessionTokenContainer(ClusterTestBase):
             wait_for_creation=False,
         )
 
+    @allure.title("[NEGATIVE] Static session with create operation for other wallet")
     def test_static_session_token_container_create_with_other_wallet(
         self,
         stranger_wallet: WalletInfo,
@@ -83,6 +87,7 @@ class TestSessionTokenContainer(ClusterTestBase):
             wait_for_creation=False,
         )
 
+    @allure.title("Static session with delete operation")
    def test_static_session_token_container_delete(
         self,
         owner_wallet: WalletInfo,
```
```diff
@@ -31,9 +31,7 @@ class TestControlShard(ClusterTestBase):
         data_path = node.storage_node.get_data_directory()
         all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip()
         for data_dir in all_datas.replace(".", "").strip().split("\n"):
-            check_dir = node_shell.exec(
-                f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0"
-            ).stdout
+            check_dir = node_shell.exec(f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0").stdout
             if "1" in check_dir:
                 object_path = f"{data_path}/data/{data_dir}/data/{oid_path}"
                 object_name = f"{oid[4:]}.{cid}"
@@ -66,9 +64,7 @@ class TestControlShard(ClusterTestBase):
             basic_acl=EACL_PUBLIC_READ_WRITE,
         )
         file = generate_file(round(max_object_size * 0.8))
-        oid = put_object(
-            wallet=default_wallet, path=file, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
-        )
+        oid = put_object(wallet=default_wallet, path=file, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
         with reporter.step("Search node with object"):
             nodes = get_object_nodes(cluster=self.cluster, cid=cid, oid=oid, alive_node=self.cluster.cluster_nodes[0])
 
@@ -76,9 +72,7 @@ class TestControlShard(ClusterTestBase):
 
         object_path, object_name = self.get_object_path_and_name_file(oid, cid, nodes[0])
         nodes[0].host.get_shell().exec(f"chmod +r {object_path}/{object_name}")
-        delete_object(
-            wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
-        )
+        delete_object(wallet=default_wallet, cid=cid, oid=oid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
         delete_container(wallet=default_wallet, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
 
     @staticmethod
@@ -117,6 +111,7 @@ class TestControlShard(ClusterTestBase):
 
         assert set(shards_from_config) == set(shards_from_cli)
 
+    @allure.title("Shard become read-only when errors exceeds threshold")
     @pytest.mark.failover
     def test_shard_errors(
         self,
```