forked from TrueCloudLab/frostfs-testcases

Compare commits: master ... support/v0 (21 commits)

Commits (SHA1):
ba79bf46cb, b9ee97eb24, 7b1094a810, 6c0dfe510d, 3a1e67863b, 6094f06d1f, 02773ef94e,
1f182dfa97, c4bfcd531a, a47634be5a, 86b6810cb5, 2f4efa94a2, c930496716, a7dea66608,
6025d97f0d, 97a39810ca, bc14897919, ae3e1a848d, 4db9d8a4a6, b5ce51a72e, 9eac53965a

24 changed files with 1394 additions and 302 deletions
@@ -12,6 +12,7 @@ markers =
     smoke: test runs in smoke testrun
     # controlling markers
     no_healthcheck: skip healthcheck for this test
+    logs_after_session: Make the last test in session
     # functional markers
     maintenance: tests for change mode node
     container: tests for container creation
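Once registered under markers =, the new logs_after_session marker is selectable like any other pytest marker. A minimal sketch of how such a marker is applied (test name and body are illustrative, not from the repo):

import pytest

@pytest.mark.logs_after_session
def test_collect_session_logs():
    # selected with `pytest -m logs_after_session`,
    # deselected with `pytest -m "not logs_after_session"`
    assert True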
@@ -12,9 +12,7 @@ from frostfs_testlib.steps.cli.object import put_object_to_random_node
 from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.dataclasses.acl import EACLRole
 from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, StorageNode
-from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
-from frostfs_testlib.utils.file_utils import generate_file


 OBJECT_COUNT = 5
@@ -60,11 +58,6 @@ def wallets(default_wallet: WalletInfo, credentials_provider: CredentialsProvide
     return wallets_collection


-@pytest.fixture()
-def file_path(object_size: ObjectSize) -> str:
-    yield generate_file(object_size.value)
-
-
 @pytest.fixture(scope="function")
 def eacl_container_with_objects(
     wallets: Wallets, client_shell: Shell, cluster: Cluster, file_path: str
@@ -92,7 +92,7 @@ class TestEACLContainer(ClusterTestBase):
             cluster=self.cluster,
         )

-        with reporter.step(f"Check {not_deny_role_wallet} has full access to eACL public container"):
+        with reporter.step(f"Check {not_deny_role_str} has full access to eACL public container"):
             check_full_access_to_container(
                 not_deny_role_wallet,
                 cid,
@@ -387,6 +387,7 @@ class TestEACLFilters(ClusterTestBase):

     @allure.title("Operations with allow eACL user headers filters (match_type={match_type}, obj_size={object_size})")
     @pytest.mark.parametrize("match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL])
+    @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
     def test_extended_acl_allow_filters_object(
         self,
         wallets: Wallets,
@@ -23,7 +23,7 @@ from frostfs_testlib.resources.common import (
 )
 from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
 from frostfs_testlib.shell import LocalShell, Shell
-from frostfs_testlib.steps.cli.container import DEFAULT_PLACEMENT_RULE, DEFAULT_EC_PLACEMENT_RULE
+from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE
 from frostfs_testlib.steps.cli.object import get_netmap_netinfo
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage import get_service_registry

@@ -37,6 +37,7 @@ from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.parallel import parallel
 from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils import env_utils, version_utils
+from frostfs_testlib.utils.file_utils import generate_file

 from pytest_tests.resources.common import HOSTING_CONFIG_FILE, TEST_CYCLES_COUNT
@@ -190,6 +191,11 @@ def simple_object_size(max_object_size: int) -> ObjectSize:
     return ObjectSize("simple", size)


+@pytest.fixture()
+def file_path(object_size: ObjectSize) -> str:
+    yield generate_file(object_size.value)
+
+
 @pytest.fixture(scope="session")
 def complex_object_size(max_object_size: int) -> ObjectSize:
     size = max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)
@@ -210,6 +216,7 @@ def object_size(

     return complex_object_size


 @pytest.fixture(scope="session")
 def rep_placement_policy() -> PlacementPolicy:
     return PlacementPolicy("rep", DEFAULT_PLACEMENT_RULE)
@@ -222,10 +229,7 @@ def ec_placement_policy() -> PlacementPolicy:

 # By default we want all tests to be executed with both storage policies.
 # This can be overridden in chosen tests if needed.
-@pytest.fixture(
-    scope="session",
-    params=[pytest.param("rep", marks=pytest.mark.rep), pytest.param("ec", marks=pytest.mark.ec)],
-)
+@pytest.fixture(scope="session", params=[pytest.param("rep", marks=pytest.mark.rep)])
 def placement_policy(
     rep_placement_policy: PlacementPolicy, ec_placement_policy: PlacementPolicy, request: pytest.FixtureRequest
 ) -> PlacementPolicy:
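The decorator change above narrows the parametrized session fixture to the rep policy only; the ec param is dropped. For reference, the general pattern, a session fixture whose params fan every dependent test out once per value, looks like this minimal sketch (names here are illustrative, not from the repo):

import pytest

@pytest.fixture(scope="session", params=[pytest.param("rep", marks=pytest.mark.rep)])
def policy_name(request: pytest.FixtureRequest) -> str:
    # request.param carries the current parameter; every test using this
    # fixture runs once per listed param, tagged with the attached mark
    return request.param

def test_uses_policy(policy_name: str):
    assert policy_name == "rep"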
@@ -234,6 +238,7 @@ def placement_policy(

     return ec_placement_policy


 @pytest.fixture(scope="session")
 def cluster(temp_directory: str, hosting: Hosting, client_shell: Shell) -> Cluster:
     cluster = Cluster(hosting)
@@ -267,9 +272,18 @@ def healthcheck(cluster: Cluster) -> Healthcheck:


 @pytest.fixture(scope="session")
-def cluster_state_controller(client_shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> ClusterStateController:
+def cluster_state_controller_session(
+    client_shell: Shell, cluster: Cluster, healthcheck: Healthcheck
+) -> ClusterStateController:
     controller = ClusterStateController(client_shell, cluster, healthcheck)
-    yield controller
+    return controller
+
+
+@pytest.fixture
+def cluster_state_controller(cluster_state_controller_session: ClusterStateController) -> ClusterStateController:
+    yield cluster_state_controller_session
+    cluster_state_controller_session.start_stopped_hosts()
+    cluster_state_controller_session.start_all_stopped_services()


 @pytest.fixture(scope="session")
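This refactor is a common pytest layering: one controller is built per session, while a function-scoped wrapper yields it to each test and restores cluster state afterwards, so every test starts from healthy nodes without rebuilding the controller. A stripped-down sketch of the pattern (the Controller class is a stand-in for illustration, not the testlib one):

import pytest

class Controller:
    def __init__(self):
        self.stopped = []

    def stop(self, host: str):
        self.stopped.append(host)

    def start_all_stopped(self):
        self.stopped.clear()

@pytest.fixture(scope="session")
def controller_session() -> Controller:
    return Controller()  # built once for the whole run

@pytest.fixture
def controller(controller_session: Controller) -> Controller:
    yield controller_session  # each test gets the shared instance
    controller_session.start_all_stopped()  # per-test cleanup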
@@ -337,15 +351,20 @@ def two_buckets(s3_client: S3ClientWrapper, request: pytest.FixtureRequest):
     s3_helper.delete_bucket_with_objects(s3_client, bucket_name)


-@allure.title("[Autouse/Session] Check binary versions")
+@allure.title("[Autouse/Session] Collect binary versions")
 @pytest.fixture(scope="session", autouse=True)
-def check_binary_versions(hosting: Hosting, client_shell: Shell, request: pytest.FixtureRequest):
+def collect_binary_versions(hosting: Hosting, client_shell: Shell, request: pytest.FixtureRequest):
     local_versions = version_utils.get_local_binaries_versions(client_shell)
-    remote_versions, exсeptions_remote_binaries_versions = version_utils.get_remote_binaries_versions(hosting)
+    remote_versions = version_utils.get_remote_binaries_versions(hosting)
+    remote_versions_keys = list(remote_versions.keys())

     all_versions = {
         **local_versions,
-        **{binary_name: binary["version"] for binary_name, binary in remote_versions.items()},
+        **{
+            f"{name}_{remote_versions_keys.index(host) + 1:02d}": version
+            for host, versions in remote_versions.items()
+            for name, version in versions.items()
+        },
     }

     environment_dir = request.config.getoption("--alluredir")
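The nested comprehension above flattens a per-host mapping into one dict, suffixing each binary name with the 1-based, zero-padded index of its host, so the same binary reported by several hosts yields distinct keys. A small self-contained illustration with made-up data:

remote_versions = {
    "host-a": {"frostfs-node": "0.39.0"},
    "host-b": {"frostfs-node": "0.39.1"},
}
hosts = list(remote_versions.keys())

flat = {
    f"{name}_{hosts.index(host) + 1:02d}": version
    for host, versions in remote_versions.items()
    for name, version in versions.items()
}
print(flat)  # {'frostfs-node_01': '0.39.0', 'frostfs-node_02': '0.39.1'}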
@@ -39,10 +39,10 @@ class TestPolicy(ClusterTestBase):
         return True

     @pytest.fixture(scope="module")
-    def fill_field_price(self, cluster_state_controller: ClusterStateController):
+    def fill_field_price(self, cluster_state_controller_session: ClusterStateController):
         prices = ["15", "10", "65", "55"]

-        config_manager = cluster_state_controller.manager(ConfigStateManager)
+        config_manager = cluster_state_controller_session.manager(ConfigStateManager)
         for i in zip(self.cluster.cluster_nodes, prices):
             config_manager.set_on_node(i[0], StorageNode, {"node:attribute_5": f"Price:{i[1]}"})
@@ -51,7 +51,7 @@ class TestPolicy(ClusterTestBase):

         yield

-        cluster_state_controller.manager(ConfigStateManager).revert_all()
+        cluster_state_controller_session.manager(ConfigStateManager).revert_all()

     @allure.title("[NEGATIVE] Placement policy: Can't parse placement policy")
     def test_placement_policy_negative(self, default_wallet):
@@ -6,7 +6,7 @@ import pytest
 from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.storage.controllers import ShardsWatcher
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
-from frostfs_testlib.utils.file_utils import generate_file
+from frostfs_testlib.utils.file_utils import TestFile, generate_file


 @pytest.fixture()
@@ -25,7 +25,6 @@ def test_start_time() -> datetime:

 @pytest.fixture()
 @allure.title("Generate simple size file")
-def simple_file(simple_object_size: ObjectSize) -> str:
+def simple_file(simple_object_size: ObjectSize) -> TestFile:
     path_file = generate_file(size=simple_object_size.value)
-    yield path_file
-    os.remove(path_file)
+    return path_file
@@ -387,6 +387,7 @@ class TestMaintenanceMode(ClusterTestBase):
             expected_status == node_snapshot.node_status
         ), f"{node_under_test} status should be {expected_status}, but was {node_snapshot.node_status}. See netmap:\n{netmap}"

+    @pytest.mark.skip(reason="The test is not stable for 0.39")
     @allure.title("Test of basic node operations in maintenance mode")
     def test_maintenance_mode(
         self,
@@ -456,6 +457,7 @@

         os.remove(file_path)

+    @pytest.mark.skip(reason="The test is not stable for 0.39")
     @pytest.mark.sanity
     @allure.title("MAINTENANCE and OFFLINE mode transitions")
     def test_mode_transitions(
@@ -558,6 +560,7 @@
         with reporter.step("Check node status is 'maintenance'"):
             self.check_node_status(NodeStatus.MAINTENANCE, node_under_test, frostfs_cli, alive_rpc_endpoint)

+    @pytest.mark.skip(reason="The test is not stable for 0.39")
     @allure.title("A node cannot go into maintenance if maintenance is prohibited globally in the network")
     def test_maintenance_globally_forbidden(
         self,
pytest_tests/testsuites/metrics/test_container_metrics.py (new file, 87 lines)
@@ -0,0 +1,87 @@
import math

import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import delete_object, put_object_to_random_node
from frostfs_testlib.steps.metrics import check_metrics_counter
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file


@pytest.mark.container
class TestContainerMetrics(ClusterTestBase):
    @allure.title("Container metrics (obj_size={object_size})")
    def test_container_metrics(
        self, object_size: ObjectSize, max_object_size: int, default_wallet: WalletInfo, cluster: Cluster
    ):
        file_path = generate_file(object_size.value)
        placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
        copies = 2
        object_chunks = 0
        head_object = 1
        link_object = 0
        if object_size.value > max_object_size:
            object_chunks = math.ceil(object_size.value / max_object_size)
            link_object = 1

        with reporter.step(f"Create container with policy {placement_policy}"):
            cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)

        with reporter.step("Put object to random node"):
            oid = put_object_to_random_node(
                wallet=default_wallet,
                path=file_path,
                cid=cid,
                shell=self.shell,
                cluster=cluster,
            )

        with reporter.step("Get object nodes"):
            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
            object_nodes = [
                cluster_node
                for cluster_node in cluster.cluster_nodes
                if cluster_node.storage_node in object_storage_nodes
            ]

        with reporter.step("Check metric appears in node where the object is located"):
            count_metrics = (object_chunks + head_object + link_object) * copies
            check_metrics_counter(
                object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=cid, type="phy"
            )
            check_metrics_counter(
                object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=cid, type="logic"
            )
            check_metrics_counter(
                object_nodes, counter_exp=copies, command="container_objects_total", cid=cid, type="user"
            )

        with reporter.step("Delete file, wait until gc remove object"):
            delete_object(default_wallet, cid, oid, self.shell, cluster.default_rpc_endpoint)

        with reporter.step(f"Check container metrics 'the counter should equal {len(object_nodes)}' in object nodes"):
            check_metrics_counter(
                object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=cid, type="phy"
            )
            check_metrics_counter(
                object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=cid, type="logic"
            )
            check_metrics_counter(object_nodes, counter_exp=0, command="container_objects_total", cid=cid, type="user")

        with reporter.step("Check metrics(Phy, Logic, User) in each nodes"):
            # Phy and Logic metrics are 4, because in rule 'CBF 2 SELECT 2 FROM', cbf2*sel2=4
            check_metrics_counter(
                cluster.cluster_nodes, counter_exp=4, command="container_objects_total", cid=cid, type="phy"
            )
            check_metrics_counter(
                cluster.cluster_nodes, counter_exp=4, command="container_objects_total", cid=cid, type="logic"
            )
            check_metrics_counter(
                cluster.cluster_nodes, counter_exp=0, command="container_objects_total", cid=cid, type="user"
            )
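The expected counter in this test follows from how FrostFS splits large payloads: an object larger than max_object_size is stored as ceil(size / max) chunk objects plus one link object, plus the head object itself, all multiplied by the replica count. A quick worked example (the numbers are illustrative, not node defaults):

import math

max_object_size = 64 * 1024 * 1024   # 64 MiB, example node setting
object_size = 150 * 1024 * 1024      # 150 MiB complex object
copies = 2                           # REP 2

object_chunks = math.ceil(object_size / max_object_size)  # 3 chunks
head_object, link_object = 1, 1

count_metrics = (object_chunks + head_object + link_object) * copies
print(count_metrics)  # (3 + 1 + 1) * 2 = 10 physical/logical objects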
@@ -0,0 +1,115 @@
import random
import re

import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import delete_object, put_object, put_object_to_random_node
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.file_utils import generate_file


class TestGarbageCollectorMetrics(ClusterTestBase):
    @wait_for_success(interval=10)
    def check_metrics_in_node(self, cluster_node: ClusterNode, counter_exp: int, **metrics_greps: str):
        counter_act = 0
        try:
            metric_result = cluster_node.metrics.storage.get_metrics_search_by_greps(**metrics_greps)
            counter_act += self.calc_metrics_count_from_stdout(metric_result.stdout)
        except RuntimeError as e:
            ...
        assert counter_act == counter_exp, f"Expected: {counter_exp}, Actual: {counter_act} in node: {cluster_node}"

    @staticmethod
    def calc_metrics_count_from_stdout(metric_result_stdout: str):
        result = re.findall(r"}\s(\d+)", metric_result_stdout)
        return sum(map(int, result))

    @allure.title("Garbage collector expire_at object")
    def test_garbage_collector_metrics_expire_at_object(
        self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster
    ):
        file_path = generate_file(simple_object_size.value)
        placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
        metrics_step = 1

        with reporter.step("Get current garbage collector metrics for each nodes"):
            metrics_counter = {}
            for node in cluster.cluster_nodes:
                metrics_counter[node] = get_metrics_value(
                    node, command="frostfs_node_garbage_collector_marked_for_removal_objects_total"
                )

        with reporter.step(f"Create container with policy {placement_policy}"):
            cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)

        with reporter.step("Put object to random node with expire_at"):
            current_epoch = self.get_epoch()
            oid = put_object_to_random_node(
                default_wallet,
                file_path,
                cid,
                self.shell,
                cluster,
                expire_at=current_epoch + 1,
            )

        with reporter.step("Get object nodes"):
            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
            object_nodes = [
                cluster_node
                for cluster_node in cluster.cluster_nodes
                if cluster_node.storage_node in object_storage_nodes
            ]

        with reporter.step("Tick Epoch"):
            self.tick_epochs(epochs_to_tick=2, wait_block=2)

        with reporter.step(
            f"Check garbage collector metrics 'the counter should increase by {metrics_step}' in object nodes"
        ):
            for node in object_nodes:
                metrics_counter[node] += metrics_step

            for node, counter in metrics_counter.items():
                check_metrics_counter(
                    [node],
                    counter_exp=counter,
                    command="frostfs_node_garbage_collector_marked_for_removal_objects_total",
                )

    @allure.title("Garbage collector delete object")
    def test_garbage_collector_metrics_deleted_objects(
        self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster
    ):
        file_path = generate_file(simple_object_size.value)
        placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
        metrics_step = 1

        with reporter.step("Select random node"):
            node = random.choice(cluster.cluster_nodes)

        with reporter.step("Get current garbage collector metrics for selected node"):
            metrics_counter = get_metrics_value(node, command="frostfs_node_garbage_collector_deleted_objects_total")

        with reporter.step(f"Create container with policy {placement_policy}"):
            cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)

        with reporter.step("Put object to selected node"):
            oid = put_object(default_wallet, file_path, cid, self.shell, node.storage_node.get_rpc_endpoint())

        with reporter.step("Delete file, wait until gc remove object"):
            delete_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())

        with reporter.step(f"Check garbage collector metrics 'the counter should increase by {metrics_step}'"):
            metrics_counter += metrics_step
            check_metrics_counter(
                [node], counter_exp=metrics_counter, command="frostfs_node_garbage_collector_deleted_objects_total"
            )
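calc_metrics_count_from_stdout above relies on the Prometheus text exposition format, where each sample line ends with "} <value>" after its label set; the regex grabs every trailing integer and sums them across label combinations. A standalone illustration with mocked scrape output:

import re

stdout = (
    'frostfs_node_engine_container_objects_total{cid="abc",type="phy"} 6\n'
    'frostfs_node_engine_container_objects_total{cid="abc",type="logic"} 6\n'
)

values = re.findall(r"}\s(\d+)", stdout)
print(sum(map(int, values)))  # 12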
pytest_tests/testsuites/metrics/test_grpc_metrics.py (new file, 224 lines)
@@ -0,0 +1,224 @@
import random

import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.steps.cli.container import create_container, get_container, list_containers
from frostfs_testlib.steps.cli.object import get_object, head_object, put_object, search_object
from frostfs_testlib.steps.cli.tree import get_tree_list
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file


class TestGRPCMetrics(ClusterTestBase):
    @pytest.fixture
    def disable_policer(self, cluster_state_controller: ClusterStateController):
        config_manager = cluster_state_controller.manager(ConfigStateManager)
        config_manager.set_on_all_nodes(StorageNode, {"policer:unsafe_disable": "true"})
        yield
        cluster_state_controller.manager(ConfigStateManager).revert_all()

    @allure.title("GRPC metrics container operations")
    def test_grpc_metrics_container_operations(self, default_wallet: WalletInfo, cluster: Cluster):
        placement_policy = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"

        with reporter.step("Select random node"):
            node = random.choice(cluster.cluster_nodes)

        with reporter.step("Get current gRPC metrics for method 'Put'"):
            metrics_counter_put = get_metrics_value(
                node, command="grpc_server_handled_total", service="ContainerService", method="Put"
            )

        with reporter.step(f"Create container with policy {placement_policy}"):
            cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)

        with reporter.step(f"Check gRPC metrics method 'Put', 'the counter should increase by 1'"):
            metrics_counter_put += 1
            check_metrics_counter(
                [node],
                counter_exp=metrics_counter_put,
                command="grpc_server_handled_total",
                service="ContainerService",
                method="Put",
            )

        with reporter.step("Get current gRPC metrics for method 'Get'"):
            metrics_counter_get = get_metrics_value(
                node, command="grpc_server_handled_total", service="ContainerService", method="Get"
            )

        with reporter.step(f"Get container"):
            get_container(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())

        with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by 1'"):
            metrics_counter_get += 1
            check_metrics_counter(
                [node],
                counter_exp=metrics_counter_get,
                command="grpc_server_handled_total",
                service="ContainerService",
                method="Get",
            )

        with reporter.step("Get current gRPC metrics for method 'List'"):
            metrics_counter_list = get_metrics_value(
                node, command="grpc_server_handled_total", service="ContainerService", method="List"
            )

        with reporter.step(f"Get container list"):
            list_containers(default_wallet, self.shell, node.storage_node.get_rpc_endpoint())

        with reporter.step(f"Check gRPC metrics method=List, 'the counter should increase by 1'"):
            metrics_counter_list += 1
            check_metrics_counter(
                [node],
                counter_exp=metrics_counter_list,
                command="grpc_server_handled_total",
                service="ContainerService",
                method="List",
            )

    @allure.title("GRPC metrics object operations")
    def test_grpc_metrics_object_operations(
        self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, disable_policer
    ):
        file_path = generate_file(simple_object_size.value)
        placement_policy = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"

        with reporter.step("Select random node"):
            node = random.choice(cluster.cluster_nodes)

        with reporter.step(f"Create container with policy {placement_policy}"):
            cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)

        with reporter.step("Get current gRPC metrics for method 'Put'"):
            metrics_counter_put = get_metrics_value(
                node, command="grpc_server_handled_total", service="ObjectService", method="Put"
            )

        with reporter.step("Put object to selected node"):
            oid = put_object(default_wallet, file_path, cid, self.shell, node.storage_node.get_rpc_endpoint())

        with reporter.step(f"Check gRPC metrics method 'Put', 'the counter should increase by 1'"):
            metrics_counter_put += 1
            check_metrics_counter(
                [node],
                counter_exp=metrics_counter_put,
                command="grpc_server_handled_total",
                service="ObjectService",
                method="Put",
            )

        with reporter.step("Get current gRPC metrics for method 'Get'"):
            metrics_counter_get = get_metrics_value(
                node, command="grpc_server_handled_total", service="ObjectService", method="Get"
            )

        with reporter.step(f"Get object"):
            get_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())

        with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by 1'"):
            metrics_counter_get += 1
            check_metrics_counter(
                [node],
                counter_exp=metrics_counter_get,
                command="grpc_server_handled_total",
                service="ObjectService",
                method="Get",
            )

        with reporter.step("Get current gRPC metrics for method 'Search'"):
            metrics_counter_search = get_metrics_value(
                node, command="grpc_server_handled_total", service="ObjectService", method="Search"
            )

        with reporter.step(f"Search object"):
            search_object(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())

        with reporter.step(f"Check gRPC metrics method=Search, 'the counter should increase by 1'"):
            metrics_counter_search += 1
            check_metrics_counter(
                [node],
                counter_exp=metrics_counter_search,
                command="grpc_server_handled_total",
                service="ObjectService",
                method="Search",
            )

        with reporter.step("Get current gRPC metrics for method 'Head'"):
            metrics_counter_head = get_metrics_value(
                node, command="grpc_server_handled_total", service="ObjectService", method="Head"
            )

        with reporter.step(f"Head object"):
            head_object(default_wallet, cid, oid, self.shell, node.storage_node.get_rpc_endpoint())

        with reporter.step(f"Check gRPC metrics method=Head, 'the counter should increase by 1'"):
            metrics_counter_head += 1
            check_metrics_counter(
                [node],
                counter_exp=metrics_counter_head,
                command="grpc_server_handled_total",
                service="ObjectService",
                method="Head",
            )

    @allure.title("GRPC metrics Tree healthcheck")
    def test_grpc_metrics_tree_service(self, cluster: Cluster, healthcheck: Healthcheck):
        with reporter.step("Select random node"):
            node = random.choice(cluster.cluster_nodes)

        with reporter.step("Get current gRPC metrics for Healthcheck"):
            metrics_counter = get_metrics_value(
                node, command="grpc_server_handled_total", service="TreeService", method="Healthcheck"
            )

        with reporter.step("Query Tree healthcheck status"):
            healthcheck.tree_healthcheck(node)

        with reporter.step(f"Check gRPC metrics for Healthcheck, 'the counter should increase'"):
            check_metrics_counter(
                [node],
                ">",
                metrics_counter,
                command="grpc_server_handled_total",
                service="TreeService",
                method="Healthcheck",
            )

    @allure.title("GRPC metrics Tree list")
    def test_grpc_metrics_tree_list(self, default_wallet: WalletInfo, cluster: Cluster):
        placement_policy = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"

        with reporter.step("Select random node"):
            node = random.choice(cluster.cluster_nodes)

        with reporter.step(f"Create container with policy {placement_policy}"):
            cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)

        with reporter.step("Get current gRPC metrics for Tree List"):
            metrics_counter = get_metrics_value(
                node, command="grpc_server_handled_total", service="TreeService", method="TreeList"
            )

        with reporter.step("Query Tree List"):
            get_tree_list(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())

        with reporter.step(f"Check gRPC metrics for Tree List, 'the counter should increase by 1'"):
            metrics_counter += 1
            check_metrics_counter(
                [node],
                counter_exp=metrics_counter,
                command="grpc_server_handled_total",
                service="TreeService",
                method="TreeList",
            )
pytest_tests/testsuites/metrics/test_logs_metrics.py (new file, 72 lines)
@@ -0,0 +1,72 @@
import random
import re
from datetime import datetime, timezone

import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.metrics import get_metrics_value
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success


class TestLogsMetrics(ClusterTestBase):
    @pytest.fixture
    def restart_storage_service(self, cluster_state_controller: ClusterStateController) -> datetime:
        config_manager = cluster_state_controller.manager(ConfigStateManager)
        config_manager.csc.stop_services_of_type(StorageNode)
        restart_time = datetime.now(timezone.utc)
        config_manager.csc.start_services_of_type(StorageNode)
        yield restart_time

        cluster_state_controller.manager(ConfigStateManager).revert_all()

    @wait_for_success(interval=10)
    def check_metrics_in_node(
        self, cluster_node: ClusterNode, restart_time: datetime, log_priority: str = None, **metrics_greps
    ):
        counter_logs = self.get_count_logs_by_level(
            cluster_node, metrics_greps.get("level"), restart_time, log_priority
        )
        counter_metrics = get_metrics_value(cluster_node, **metrics_greps)
        assert (
            counter_logs == counter_metrics
        ), f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"

    @staticmethod
    def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, log_priority: str):
        count_logs = 0
        try:
            logs = cluster_node.host.get_filtered_logs(
                log_level, unit="frostfs-storage", since=after_time, priority=log_priority
            )
            result = re.findall(rf"\s+{log_level}\s+", logs)
            count_logs += len(result)
        except RuntimeError as e:
            ...
        return count_logs

    @allure.title("Metrics for the log counter")
    def test_log_counter_metrics(self, cluster: Cluster, restart_storage_service: datetime):
        restart_time = restart_storage_service
        with reporter.step("Select random node"):
            node = random.choice(cluster.cluster_nodes)

        with reporter.step(f"Check metrics count logs with level 'info'"):
            self.check_metrics_in_node(
                node,
                restart_time,
                log_priority="6..6",
                command="frostfs_node_logger_entry_count",
                level="info",
                dropped="false",
            )

        with reporter.step(f"Check metrics count logs with level 'error'"):
            self.check_metrics_in_node(
                node, restart_time, command="frostfs_node_logger_entry_count", level="error", dropped="false"
            )
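The comparison hinges on counting level markers in journal output: priority="6..6" narrows journald to informational records (syslog priority 6), and the regex then counts whitespace-delimited level tokens. A standalone sketch with fabricated log lines (the exact line shape is illustrative):

import re

logs = """
Jan 01 00:00:01 node frostfs-storage[1]: 2024-01-01T00:00:01Z    info    shard opened
Jan 01 00:00:02 node frostfs-storage[1]: 2024-01-01T00:00:02Z    info    gc cycle started
Jan 01 00:00:03 node frostfs-storage[1]: 2024-01-01T00:00:03Z    error    connection lost
"""

log_level = "info"
count = len(re.findall(rf"\s+{log_level}\s+", logs))
print(count)  # 2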
pytest_tests/testsuites/metrics/test_object_metrics.py (new file, 323 lines)
@@ -0,0 +1,323 @@
import random
import re

import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import create_container, delete_container, search_nodes_with_container
from frostfs_testlib.steps.cli.object import delete_object, lock_object, put_object, put_object_to_random_node
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file


class TestObjectMetrics(ClusterTestBase):
    @allure.title("Object metrics of removed container (obj_size={object_size})")
    def test_object_metrics_removed_container(
        self, object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster
    ):
        file_path = generate_file(object_size.value)
        placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
        copies = 2

        with reporter.step(f"Create container with policy {placement_policy}"):
            cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)

        with reporter.step("Put object to random node"):
            oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, cluster)

        with reporter.step("Check metric appears in node where the object is located"):
            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
            object_nodes = [
                cluster_node
                for cluster_node in cluster.cluster_nodes
                if cluster_node.storage_node in object_storage_nodes
            ]

            check_metrics_counter(
                object_nodes,
                counter_exp=copies,
                command="frostfs_node_engine_container_objects_total",
                cid=cid,
                type="user",
            )

        with reporter.step("Delete container"):
            delete_container(default_wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)

        with reporter.step("Tick Epoch"):
            self.tick_epochs(epochs_to_tick=2, wait_block=2)

        with reporter.step("Check metrics of removed containers doesn't appear in the storage node"):
            check_metrics_counter(
                object_nodes, counter_exp=0, command="frostfs_node_engine_container_objects_total", cid=cid, type="user"
            )
            check_metrics_counter(
                object_nodes, counter_exp=0, command="frostfs_node_engine_container_size_byte", cid=cid
            )

            for node in object_nodes:
                all_metrics = node.metrics.storage.get_metrics_search_by_greps(
                    command="frostfs_node_engine_container_size_byte"
                )
                assert (
                    cid not in all_metrics.stdout
                ), "metrics of removed containers shouldn't appear in the storage node"

    @allure.title("Object metrics, locked object (obj_size={object_size}, policy={placement_policy})")
    @pytest.mark.parametrize(
        "placement_policy", ["REP 1 IN X CBF 1 SELECT 1 FROM * AS X", "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"]
    )
    def test_object_metrics_blocked_object(
        self, object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, placement_policy: str
    ):
        file_path = generate_file(object_size.value)
        metric_step = int(re.search(r"REP\s(\d+)", placement_policy).group(1))

        with reporter.step(f"Create container with policy {placement_policy}"):
            cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, placement_policy)

        with reporter.step("Search container nodes"):
            container_nodes = search_nodes_with_container(
                wallet=default_wallet,
                cid=cid,
                shell=self.shell,
                endpoint=self.cluster.default_rpc_endpoint,
                cluster=cluster,
            )

        with reporter.step("Get current metrics for metric_type=user"):
            objects_metric_counter = 0
            for node in container_nodes:
                objects_metric_counter += get_metrics_value(
                    node, command="frostfs_node_engine_objects_total", type="user"
                )

        with reporter.step("Put object to container node"):
            oid = put_object(
                default_wallet, file_path, cid, self.shell, container_nodes[0].storage_node.get_rpc_endpoint()
            )

        with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
            objects_metric_counter += metric_step
            check_metrics_counter(
                container_nodes,
                counter_exp=objects_metric_counter,
                command="frostfs_node_engine_objects_total",
                type="user",
            )
            check_metrics_counter(
                container_nodes,
                counter_exp=metric_step,
                command="frostfs_node_engine_container_objects_total",
                cid=cid,
                type="user",
            )

        with reporter.step("Delete object"):
            delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)

        with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
            objects_metric_counter -= metric_step
            check_metrics_counter(
                container_nodes,
                counter_exp=objects_metric_counter,
                command="frostfs_node_engine_objects_total",
                type="user",
            )
            check_metrics_counter(
                container_nodes,
                counter_exp=0,
                command="frostfs_node_engine_container_objects_total",
                cid=cid,
                type="user",
            )

        with reporter.step("Put object and lock it to next epoch"):
            oid = put_object(
                default_wallet, file_path, cid, self.shell, container_nodes[0].storage_node.get_rpc_endpoint()
            )
            current_epoch = self.get_epoch()
            lock_object(
                default_wallet,
                cid,
                oid,
                self.shell,
                container_nodes[0].storage_node.get_rpc_endpoint(),
                expire_at=current_epoch + 1,
            )

        with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
            objects_metric_counter += metric_step
            check_metrics_counter(
                container_nodes,
                counter_exp=objects_metric_counter,
                command="frostfs_node_engine_objects_total",
                type="user",
            )
            check_metrics_counter(
                container_nodes,
                counter_exp=metric_step,
                command="frostfs_node_engine_container_objects_total",
                cid=cid,
                type="user",
            )

        with reporter.step(f"Wait until remove locking 'the counter doesn't change'"):
            self.tick_epochs(epochs_to_tick=2)
            check_metrics_counter(
                container_nodes,
                counter_exp=objects_metric_counter,
                command="frostfs_node_engine_objects_total",
                type="user",
            )

        with reporter.step("Delete object"):
            delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)

        with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
            objects_metric_counter -= metric_step
            check_metrics_counter(
                container_nodes,
                counter_exp=objects_metric_counter,
                command="frostfs_node_engine_objects_total",
                type="user",
            )
            check_metrics_counter(
                container_nodes,
                counter_exp=0,
                command="frostfs_node_engine_container_objects_total",
                cid=cid,
                type="user",
            )

        with reporter.step("Put object with expire_at"):
            current_epoch = self.get_epoch()
            oid = put_object(
                default_wallet,
                file_path,
                cid,
                self.shell,
                container_nodes[0].storage_node.get_rpc_endpoint(),
                expire_at=current_epoch + 1,
            )

        with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
            objects_metric_counter += metric_step
            check_metrics_counter(
                container_nodes,
                counter_exp=objects_metric_counter,
                command="frostfs_node_engine_objects_total",
                type="user",
            )
            check_metrics_counter(
                container_nodes,
                counter_exp=metric_step,
                command="frostfs_node_engine_container_objects_total",
                cid=cid,
                type="user",
            )

        with reporter.step("Tick Epoch"):
            self.tick_epochs(epochs_to_tick=2)

        with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
            objects_metric_counter -= metric_step
            check_metrics_counter(
                container_nodes,
                counter_exp=objects_metric_counter,
                command="frostfs_node_engine_objects_total",
                type="user",
            )
            check_metrics_counter(
                container_nodes,
                counter_exp=0,
                command="frostfs_node_engine_container_objects_total",
                cid=cid,
                type="user",
            )

    @allure.title("Object metrics, stop the node (obj_size={object_size})")
    def test_object_metrics_stop_node(
        self,
        object_size: ObjectSize,
        default_wallet: WalletInfo,
        cluster_state_controller: ClusterStateController,
    ):
        placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
        file_path = generate_file(object_size.value)
        copies = 2

        with reporter.step(f"Create container with policy {placement_policy}"):
            cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint, placement_policy)

        with reporter.step(f"Check object metrics in container 'should be zero'"):
            check_metrics_counter(
                self.cluster.cluster_nodes,
                counter_exp=0,
                command="frostfs_node_engine_container_objects_total",
                type="user",
                cid=cid,
            )

        with reporter.step("Get current metrics for each nodes"):
            objects_metric_counter: dict[ClusterNode, int] = {}
            for node in self.cluster.cluster_nodes:
                objects_metric_counter[node] = get_metrics_value(
                    node, command="frostfs_node_engine_objects_total", type="user"
                )

        with reporter.step("Put object"):
            oid = put_object(default_wallet, file_path, cid, self.shell, self.cluster.default_rpc_endpoint)

        with reporter.step("Get object nodes"):
            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, self.cluster.storage_nodes)
            object_nodes = [
                cluster_node
                for cluster_node in self.cluster.cluster_nodes
                if cluster_node.storage_node in object_storage_nodes
            ]

        with reporter.step(f"Check metrics in object nodes 'the counter should increase by {copies}'"):
            counter_exp = sum(objects_metric_counter[node] for node in object_nodes) + copies
            check_metrics_counter(
                object_nodes, counter_exp=counter_exp, command="frostfs_node_engine_objects_total", type="user"
            )
            check_metrics_counter(
                object_nodes,
                counter_exp=copies,
                command="frostfs_node_engine_container_objects_total",
                type="user",
                cid=cid,
            )

        with reporter.step(f"Select node to stop"):
            node_to_stop = random.choice(object_nodes)
            alive_nodes = set(object_nodes).difference({node_to_stop})

        with reporter.step(f"Stop the node, wait until the object is replicated to another node"):
            cluster_state_controller.stop_node_host(node_to_stop, "hard")
            objects_metric_counter[node_to_stop] += 1

        with reporter.step(f"Check metric in alive nodes 'the counter should increase'"):
            counter_exp = sum(objects_metric_counter[node] for node in alive_nodes)
            check_metrics_counter(
                alive_nodes, ">=", counter_exp, command="frostfs_node_engine_objects_total", type="user"
            )

        with reporter.step("Start node"):
            cluster_state_controller.start_node_host(node_to_stop)

        with reporter.step(f"Check metric in restarted node, 'the counter doesn't change'"):
            check_metrics_counter(
                object_nodes,
                counter_exp=copies,
                command="frostfs_node_engine_container_objects_total",
                type="user",
                cid=cid,
            )
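metric_step in test_object_metrics_blocked_object above is derived straight from the policy string: the replica count after REP determines how much each put or delete moves the user-object counters. A tiny standalone check of the extraction:

import re

for policy in ["REP 1 IN X CBF 1 SELECT 1 FROM * AS X", "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"]:
    metric_step = int(re.search(r"REP\s(\d+)", policy).group(1))
    print(policy, "->", metric_step)  # 1 and 2 replicas respectively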
pytest_tests/testsuites/metrics/test_shard_metrics.py (new file, 177 lines)
@ -0,0 +1,177 @@
import random
import re

import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import get_object, put_object
from frostfs_testlib.steps.metrics import check_metrics_counter
from frostfs_testlib.steps.node_management import node_shard_list, node_shard_set_mode
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers import ShardsWatcher
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import parallel, wait_for_success
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file


class TestShardMetrics(ClusterTestBase):
    @pytest.fixture()
    @allure.title("Get two shards for set mode")
    def two_shards_and_node(self, cluster: Cluster) -> tuple[str, str, ClusterNode]:
        node = random.choice(cluster.cluster_nodes)
        shards = node_shard_list(node.storage_node)
        two_shards = random.sample(shards, k=2)

        yield two_shards[0], two_shards[1], node

        for shard in two_shards:
            node_shard_set_mode(node.storage_node, shard, "read-write")

        node_shard_list(node.storage_node)

    @pytest.fixture()
    @allure.title("Revert all shards mode")
    def revert_all_shards_mode(self):
        yield
        parallel(self.set_shard_rw_mode, self.cluster.cluster_nodes)

    def set_shard_rw_mode(self, node: ClusterNode):
        watcher = ShardsWatcher(node)
        shards = watcher.get_shards()
        for shard in shards:
            watcher.set_shard_mode(shard["shard_id"], mode="read-write")
        watcher.await_for_all_shards_status(status="read-write")

    @staticmethod
    def get_error_count_from_logs(cluster_node: ClusterNode, object_path: str, object_name: str):
        error_count = 0
        try:
            logs = cluster_node.host.get_filtered_logs("error count", unit="frostfs-storage")
            # Search the error logs for lines related to the current object
            for error_line in logs.split("\n"):
                if object_path in error_line and object_name in error_line:
                    result = re.findall(r'"error\scount":\s(\d+)', error_line)
                    error_count += sum(map(int, result))
        except RuntimeError:
            ...

        return error_count

    @staticmethod
    @wait_for_success(180, 30)
    def get_object_path_and_name_file(oid: str, cid: str, node: ClusterNode) -> tuple[str, str]:
        oid_path = f"{oid[0]}/{oid[1]}/{oid[2]}/{oid[3]}"
        object_path = None

        with reporter.step("Search object file"):
            node_shell = node.storage_node.host.get_shell()
            data_path = node.storage_node.get_data_directory()
            all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip()
            for data_dir in all_datas.replace(".", "").strip().split("\n"):
                check_dir = node_shell.exec(
                    f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0"
                ).stdout
                if "1" in check_dir:
                    object_path = f"{data_path}/data/{data_dir}/data/{oid_path}"
                    object_name = f"{oid[4:]}.{cid}"
                    break

        assert object_path is not None, f"{oid} object not found in directory - {data_path}/data"
        return object_path, object_name

    @allure.title("Metric for shard mode")
    def test_shard_metrics_set_mode(self, two_shards_and_node: tuple[str, str, ClusterNode]):
        metrics_counter = 1
        shard1, shard2, node = two_shards_and_node

        with reporter.step("Shard1 set to mode 'read-only'"):
            node_shard_set_mode(node.storage_node, shard1, "read-only")

        with reporter.step("Check shard metrics, 'the mode will change to READ_ONLY'"):
            check_metrics_counter(
                [node],
                counter_exp=metrics_counter,
                command="frostfs_node_engine_mode_info",
                mode="READ_ONLY",
                shard_id=shard1,
            )

        with reporter.step("Shard2 set to mode 'degraded-read-only'"):
            node_shard_set_mode(node.storage_node, shard2, "degraded-read-only")

        with reporter.step("Check shard metrics, 'the mode will change to DEGRADED_READ_ONLY'"):
            check_metrics_counter(
                [node],
                counter_exp=metrics_counter,
                command="frostfs_node_engine_mode_info",
                mode="DEGRADED_READ_ONLY",
                shard_id=shard2,
            )

        with reporter.step("Both shards set to mode 'read-write'"):
            for shard in [shard1, shard2]:
                node_shard_set_mode(node.storage_node, shard, "read-write")

        with reporter.step("Check shard metrics, 'the mode will change to READ_WRITE'"):
            for shard in [shard1, shard2]:
                check_metrics_counter(
                    [node],
                    counter_exp=metrics_counter,
                    command="frostfs_node_engine_mode_info",
                    mode="READ_WRITE",
                    shard_id=shard,
                )

    @allure.title("Metric for error count on shard")
    def test_shard_metrics_error_count(
        self, max_object_size: int, default_wallet: WalletInfo, cluster: Cluster, revert_all_shards_mode
    ):
        file_path = generate_file(round(max_object_size * 0.8))

        with reporter.step("Create container"):
            cid = create_container(
                wallet=default_wallet,
                shell=self.shell,
                endpoint=cluster.default_rpc_endpoint,
                rule="REP 1 CBF 1",
                basic_acl=EACL_PUBLIC_READ_WRITE,
            )

        with reporter.step("Put object"):
            oid = put_object(default_wallet, file_path, cid, self.shell, cluster.default_rpc_endpoint)

        with reporter.step("Get object nodes"):
            object_storage_nodes = get_nodes_with_object(cid, oid, self.shell, cluster.storage_nodes)
            object_nodes = [
                cluster_node
                for cluster_node in cluster.cluster_nodes
                if cluster_node.storage_node in object_storage_nodes
            ]
            node = random.choice(object_nodes)

        with reporter.step("Search object in system"):
            object_path, object_name = self.get_object_path_and_name_file(oid, cid, node)

        with reporter.step("Block read access to the object file"):
            node.host.get_shell().exec(f"chmod a-r {object_path}/{object_name}")

        with reporter.step("Get object, expect error"):
            with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
                get_object(
                    wallet=default_wallet,
                    cid=cid,
                    oid=oid,
                    shell=self.shell,
                    endpoint=node.storage_node.get_rpc_endpoint(),
                )

        with reporter.step("Get shard error count from logs"):
            counter = self.get_error_count_from_logs(node, object_path, object_name)

        with reporter.step("Check shard error metrics"):
            check_metrics_counter([node], counter_exp=counter, command="frostfs_node_engine_errors_total")
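Editor's note: `get_error_count_from_logs` depends on the storage log rendering the shard error counter as an `"error count": N` field. The regex can be sanity-checked in isolation; the sample log line below is invented purely to exercise the pattern:

    import re

    sample = '{"level":"warn","shard_id":"abc","error count": 3,"path":"/srv/frostfs/data"}'
    assert re.findall(r'"error\scount":\s(\d+)', sample) == ["3"]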
@@ -13,19 +13,20 @@ from frostfs_testlib.resources.error_patterns import (
     OUT_OF_RANGE,
 )
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.steps.cli.container import create_container
+from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
 from frostfs_testlib.steps.cli.object import (
     get_object_from_random_node,
     get_range,
     get_range_hash,
     head_object,
+    put_object,
     put_object_to_random_node,
     search_object,
 )
 from frostfs_testlib.steps.complex_object_actions import get_complex_object_split_ranges
 from frostfs_testlib.steps.storage_object import delete_objects
 from frostfs_testlib.steps.storage_policy import get_complex_object_copies, get_simple_object_copies
-from frostfs_testlib.storage.cluster import Cluster
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
 from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
@@ -89,16 +90,45 @@
     return file_ranges_to_test


+@pytest.fixture(scope="module")
+def common_container(default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster) -> str:
+    rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
+    with reporter.step(f"Create container with {rule} and put object"):
+        cid = create_container(default_wallet, client_shell, cluster.default_rpc_endpoint, rule)
+
+    return cid
+
+
+@pytest.fixture(scope="module")
+def container_nodes(
+    default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, common_container: str
+) -> list[ClusterNode]:
+    return search_nodes_with_container(
+        default_wallet, common_container, client_shell, cluster.default_rpc_endpoint, cluster
+    )
+
+
+@pytest.fixture(scope="module")
+def non_container_nodes(cluster: Cluster, container_nodes: list[ClusterNode]) -> list[ClusterNode]:
+    return list(set(cluster.cluster_nodes) - set(container_nodes))
+
+
 @pytest.fixture(
     # Scope session to upload/delete each files set only once
     scope="module"
 )
 def storage_objects(
-    default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, object_size: ObjectSize, placement_policy: PlacementPolicy
+    default_wallet: WalletInfo,
+    client_shell: Shell,
+    cluster: Cluster,
+    object_size: ObjectSize,
+    placement_policy: PlacementPolicy,
 ) -> list[StorageObjectInfo]:
     wallet = default_wallet
     # Separate containers for complex/simple objects to avoid side-effects
-    cid = create_container(wallet, shell=client_shell, rule=placement_policy.value, endpoint=cluster.default_rpc_endpoint)
+    cid = create_container(
+        wallet, shell=client_shell, rule=placement_policy.value, endpoint=cluster.default_rpc_endpoint
+    )
+
     file_path = generate_file(object_size.value)
     file_hash = get_file_hash(file_path)
@@ -461,6 +491,80 @@ class TestObjectApi(ClusterTestBase):
                 range_cut=range_cut,
             )

+    @allure.title("Get range from container and non-container nodes (object_size={object_size})")
+    def test_get_range_from_different_node(
+        self,
+        default_wallet: str,
+        common_container: str,
+        container_nodes: list[ClusterNode],
+        non_container_nodes: list[ClusterNode],
+        file_path: str,
+    ):
+        with reporter.step("Put object to container"):
+            container_node = random.choice(container_nodes)
+            oid = put_object(
+                default_wallet, file_path, common_container, self.shell, container_node.storage_node.get_rpc_endpoint()
+            )
+
+        with reporter.step("Get range from container node endpoint"):
+            get_range(
+                default_wallet,
+                common_container,
+                oid,
+                "0:10",
+                self.shell,
+                container_node.storage_node.get_rpc_endpoint(),
+            )
+
+        with reporter.step("Get range from non-container node endpoint"):
+            non_container_node = random.choice(non_container_nodes)
+            get_range(
+                default_wallet,
+                common_container,
+                oid,
+                "0:10",
+                self.shell,
+                non_container_node.storage_node.get_rpc_endpoint(),
+            )
+
+    @allure.title("Get range hash from container and non-container nodes (object_size={object_size})")
+    def test_get_range_hash_from_different_node(
+        self,
+        default_wallet: str,
+        common_container: str,
+        container_nodes: list[ClusterNode],
+        non_container_nodes: list[ClusterNode],
+        file_path: str,
+    ):
+        with reporter.step("Put object to container"):
+            container_node = random.choice(container_nodes)
+            oid = put_object(
+                default_wallet, file_path, common_container, self.shell, container_node.storage_node.get_rpc_endpoint()
+            )
+
+        with reporter.step("Get range hash from container node endpoint"):
+            get_range_hash(
+                default_wallet,
+                common_container,
+                oid,
+                "0:10",
+                self.shell,
+                container_node.storage_node.get_rpc_endpoint(),
+            )
+
+        with reporter.step("Get range hash from non-container node endpoint"):
+            non_container_node = random.choice(non_container_nodes)
+            get_range_hash(
+                default_wallet,
+                common_container,
+                oid,
+                "0:10",
+                self.shell,
+                non_container_node.storage_node.get_rpc_endpoint(),
+            )
+
     def check_header_is_presented(self, head_info: dict, object_header: dict) -> None:
         for key_to_check, val_to_check in object_header.items():
             assert key_to_check in head_info["header"]["attributes"], f"Key {key_to_check} is not found in {head_info}"
@@ -1,6 +1,8 @@
 import allure
 import pytest
 from frostfs_testlib import reporter
+from frostfs_testlib.resources.error_patterns import S3_BUCKET_DOES_NOT_ALLOW_ACL
+from frostfs_testlib.resources.s3_acl_grants import PRIVATE_GRANTS, PUBLIC_READ_GRANTS, PUBLIC_READ_WRITE_GRANTS
 from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
@@ -16,47 +18,31 @@ class TestS3GateACL:
         file_path = generate_file(simple_object_size.value)
         file_name = s3_helper.object_key_from_file_path(file_path)

-        with reporter.step("Put object into bucket, Check ACL is empty"):
+        with reporter.step("Put object into bucket"):
             s3_client.put_object(bucket, file_path)
-            obj_acl = s3_client.get_object_acl(bucket, file_name)
-            assert obj_acl == [], f"Expected ACL is empty, got {obj_acl}"

-        with reporter.step("Put object ACL = public-read"):
-            s3_client.put_object_acl(bucket, file_name, "public-read")
-            obj_acl = s3_client.get_object_acl(bucket, file_name)
-            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
+        with reporter.step("Verify private ACL is default"):
+            object_grants = s3_client.get_object_acl(bucket, file_name)
+            s3_helper.verify_acl_permissions(object_grants, PRIVATE_GRANTS)

-        with reporter.step("Put object ACL = private"):
-            s3_client.put_object_acl(bucket, file_name, "private")
-            obj_acl = s3_client.get_object_acl(bucket, file_name)
-            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
-
-        with reporter.step("Put object with grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"):
-            s3_client.put_object_acl(
-                bucket,
-                file_name,
-                grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
-            )
-            obj_acl = s3_client.get_object_acl(bucket, file_name)
-            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
+        with reporter.step("Verify put object ACL is restricted"):
+            with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
+                object_grants = s3_client.put_object_acl(bucket, file_name, acl="public-read")

     @allure.title("Bucket ACL (s3_client={s3_client})")
     @pytest.mark.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
     def test_s3_bucket_ACL(self, s3_client: S3ClientWrapper):
-        with reporter.step("Create bucket with ACL = public-read-write"):
+        with reporter.step("Create bucket with public-read-write ACL"):
             bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read-write")
-            bucket_acl = s3_client.get_bucket_acl(bucket)
-            s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
+            bucket_grants = s3_client.get_bucket_acl(bucket)
+            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_WRITE_GRANTS)

         with reporter.step("Change bucket ACL to private"):
             s3_client.put_bucket_acl(bucket, acl="private")
-            bucket_acl = s3_client.get_bucket_acl(bucket)
-            s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser")
+            bucket_grants = s3_client.get_bucket_acl(bucket)
+            s3_helper.verify_acl_permissions(bucket_grants, PRIVATE_GRANTS)

-        with reporter.step("Change bucket acl to --grant-write uri=http://acs.amazonaws.com/groups/global/AllUsers"):
-            s3_client.put_bucket_acl(
-                bucket,
-                grant_write="uri=http://acs.amazonaws.com/groups/global/AllUsers",
-            )
-            bucket_acl = s3_client.get_bucket_acl(bucket)
-            s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
+        with reporter.step("Change bucket ACL to public-read"):
+            s3_client.put_bucket_acl(bucket, acl="public-read")
+            bucket_grants = s3_client.get_bucket_acl(bucket)
+            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_GRANTS)
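Editor's note: `verify_acl_permissions` and the `*_GRANTS` constants come from frostfs-testlib and are not shown in this diff. As a rough mental model only — the real shapes live in the library — S3 `get_*_acl` calls return a list of grant dicts, and the check reduces to comparing grantee/permission pairs:

    # Illustrative sketch, not the library implementation.
    def verify_acl_permissions(actual: list[dict], expected: list[dict]) -> None:
        def key(grant: dict) -> tuple:
            # Compare by (grantee type, permission) so ordering and owner IDs don't matter.
            return grant.get("Grantee", {}).get("Type", ""), grant.get("Permission", "")

        assert sorted(map(key, actual)) == sorted(map(key, expected)), "Permissions grants are not equal"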
@@ -3,6 +3,7 @@ from datetime import datetime, timedelta
 import allure
 import pytest
 from frostfs_testlib import reporter
+from frostfs_testlib.resources.s3_acl_grants import PRIVATE_GRANTS, PUBLIC_READ_GRANTS, PUBLIC_READ_WRITE_GRANTS
 from frostfs_testlib.s3 import S3ClientWrapper
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
@@ -17,50 +18,18 @@ class TestS3GateBucket:

         with reporter.step("Create bucket with ACL private"):
             bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="private")
-            bucket_acl = s3_client.get_bucket_acl(bucket)
-            s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser")
+            bucket_grants = s3_client.get_bucket_acl(bucket)
+            s3_helper.verify_acl_permissions(bucket_grants, PRIVATE_GRANTS)

-        with reporter.step("Create bucket with ACL = public-read"):
-            bucket_1 = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read")
-            bucket_acl_1 = s3_client.get_bucket_acl(bucket_1)
-            s3_helper.assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers")
+        with reporter.step("Create bucket with ACL public-read"):
+            read_bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read")
+            bucket_grants = s3_client.get_bucket_acl(read_bucket)
+            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_GRANTS)

         with reporter.step("Create bucket with ACL public-read-write"):
-            bucket_2 = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read-write")
-            bucket_acl_2 = s3_client.get_bucket_acl(bucket_2)
-            s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")
-
-        with reporter.step("Create bucket with ACL = authenticated-read"):
-            bucket_3 = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="authenticated-read")
-            bucket_acl_3 = s3_client.get_bucket_acl(bucket_3)
-            s3_helper.assert_s3_acl(acl_grants=bucket_acl_3, permitted_users="AllUsers")
-
-    @allure.title("Create Bucket with different ACL by grant (s3_client={s3_client})")
-    def test_s3_create_bucket_with_grands(self, s3_client: S3ClientWrapper):
-
-        with reporter.step("Create bucket with --grant-read"):
-            bucket = s3_client.create_bucket(
-                object_lock_enabled_for_bucket=True,
-                grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
-            )
-            bucket_acl = s3_client.get_bucket_acl(bucket)
-            s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
-
-        with reporter.step("Create bucket with --grant-wtite"):
-            bucket_1 = s3_client.create_bucket(
-                object_lock_enabled_for_bucket=True,
-                grant_write="uri=http://acs.amazonaws.com/groups/global/AllUsers",
-            )
-            bucket_acl_1 = s3_client.get_bucket_acl(bucket_1)
-            s3_helper.assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers")
-
-        with reporter.step("Create bucket with --grant-full-control"):
-            bucket_2 = s3_client.create_bucket(
-                object_lock_enabled_for_bucket=True,
-                grant_full_control="uri=http://acs.amazonaws.com/groups/global/AllUsers",
-            )
-            bucket_acl_2 = s3_client.get_bucket_acl(bucket_2)
-            s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")
+            public_rw_bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read-write")
+            bucket_grants = s3_client.get_bucket_acl(public_rw_bucket)
+            s3_helper.verify_acl_permissions(bucket_grants, PUBLIC_READ_WRITE_GRANTS)

     @allure.title("Create bucket with object lock (s3_client={s3_client})")
     def test_s3_bucket_object_lock(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
@@ -6,13 +6,14 @@
 import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.resources.common import ASSETS_DIR
-from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
+from frostfs_testlib.s3 import AwsCliClient, S3ClientWrapper, VersioningStatus
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.epoch import tick_epoch
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.utils.file_utils import (
+    TestFile,
     generate_file,
     generate_file_with_content,
     get_file_content,
@@ -140,14 +141,14 @@ class TestS3Gate:
         """
         Test checks sync directory with AWS CLI utility.
         """
-        file_path_1 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1")
-        file_path_2 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2")
-        key_to_path = {"test_file_1": file_path_1, "test_file_2": file_path_2}
+        test_file_1 = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1"))
+        test_file_2 = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2"))
+        key_to_path = {"test_file_1": test_file_1.path, "test_file_2": test_file_2.path}

-        generate_file_with_content(simple_object_size.value, file_path=file_path_1)
-        generate_file_with_content(simple_object_size.value, file_path=file_path_2)
+        generate_file_with_content(simple_object_size.value, test_file_1)
+        generate_file_with_content(simple_object_size.value, test_file_2)

-        s3_client.sync(bucket=bucket, dir_path=os.path.dirname(file_path_1))
+        s3_client.sync(bucket, os.path.dirname(test_file_1))

         with reporter.step("Check objects are synced"):
             objects = s3_client.list_objects(bucket)
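Editor's note: `s3_client.sync` here wraps the AWS CLI `aws s3 sync` command. Stripped of the testlib plumbing, the equivalent call looks roughly like this; the local directory, bucket name, and endpoint URL are placeholders:

    import subprocess

    subprocess.run(
        ["aws", "s3", "sync", "/tmp/test_sync", "s3://example-bucket",
         "--endpoint-url", "http://127.0.0.1:8080"],
        check=True,  # raise CalledProcessError if the CLI exits non-zero
    )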
@@ -17,15 +17,23 @@ PART_SIZE = 5 * 1024 * 1024
 class TestS3GateMultipart(ClusterTestBase):
     NO_SUCH_UPLOAD = "The upload ID may be invalid, or the upload may have been aborted or completed."

-    @allure.title("Object Multipart API (s3_client={s3_client})")
-    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
-    def test_s3_object_multipart(self, s3_client: S3ClientWrapper, bucket: str):
+    @allure.title("Object Multipart API (s3_client={s3_client}, bucket versioning = {versioning_status})")
+    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED, VersioningStatus.UNDEFINED], indirect=True)
+    def test_s3_object_multipart(
+        self, s3_client: S3ClientWrapper, bucket: str, default_wallet: WalletInfo, versioning_status: str
+    ):
         parts_count = 5
         file_name_large = generate_file(PART_SIZE * parts_count)  # 5Mb - min part
         object_key = s3_helper.object_key_from_file_path(file_name_large)
         part_files = split_file(file_name_large, parts_count)
         parts = []

+        with reporter.step(f"Get related container_id for bucket"):
+            for cluster_node in self.cluster.cluster_nodes:
+                container_id = search_container_by_name(bucket, cluster_node)
+                if container_id:
+                    break
+
         with reporter.step("Upload first part"):
             upload_id = s3_client.create_multipart_upload(bucket, object_key)
             uploads = s3_client.list_multipart_uploads(bucket)
@@ -39,7 +47,11 @@ class TestS3GateMultipart(ClusterTestBase):
             etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
             parts.append((part_id, etag))
         got_parts = s3_client.list_parts(bucket, object_key, upload_id)
-        s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
+        response = s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
+
+        version_id = None
+        if versioning_status == VersioningStatus.ENABLED:
+            version_id = response["VersionId"]
         assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"

         with reporter.step("Check upload list is empty"):
@@ -50,6 +62,21 @@ class TestS3GateMultipart(ClusterTestBase):
             got_object = s3_client.get_object(bucket, object_key)
             assert get_file_hash(got_object) == get_file_hash(file_name_large)

+        if version_id:
+            with reporter.step("Delete the object version"):
+                s3_client.delete_object(bucket, object_key, version_id)
+        else:
+            with reporter.step("Delete the object"):
+                s3_client.delete_object(bucket, object_key)
+
+        with reporter.step("List objects in the bucket, expect to be empty"):
+            objects_list = s3_client.list_objects(bucket)
+            assert not objects_list, f"Expected empty bucket, got {objects_list}"
+
+        with reporter.step("List objects in the container via rpc, expect to be empty"):
+            objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
+            assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"
+
     @allure.title("Abort Multipart Upload (s3_client={s3_client})")
     @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
     def test_s3_abort_multipart(
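Editor's note: the wrapper calls above follow the plain S3 multipart flow. For reference, the same sequence with bare boto3 (bucket name, key, and part files are placeholders; the testlib wrappers add reporting and retries on top of this):

    import boto3

    s3 = boto3.client("s3")  # credentials/endpoint come from the environment

    upload = s3.create_multipart_upload(Bucket="example-bucket", Key="big-object")
    upload_id = upload["UploadId"]

    parts = []
    # Every part except the last must be at least 5 MiB, hence PART_SIZE above.
    for part_number, chunk_path in enumerate(["part1.bin", "part2.bin"], start=1):
        with open(chunk_path, "rb") as f:
            resp = s3.upload_part(
                Bucket="example-bucket", Key="big-object",
                PartNumber=part_number, UploadId=upload_id, Body=f,
            )
        parts.append({"PartNumber": part_number, "ETag": resp["ETag"]})

    # CompleteMultipartUpload returns VersionId only when bucket versioning is
    # enabled, which is exactly what the test above branches on.
    result = s3.complete_multipart_upload(
        Bucket="example-bucket", Key="big-object",
        UploadId=upload_id, MultipartUpload={"Parts": parts},
    )
    print(result.get("VersionId"))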
@@ -9,13 +9,20 @@
 import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
-from frostfs_testlib.resources.error_patterns import S3_MALFORMED_XML_REQUEST
+from frostfs_testlib.resources.error_patterns import S3_BUCKET_DOES_NOT_ALLOW_ACL, S3_MALFORMED_XML_REQUEST
+from frostfs_testlib.resources.s3_acl_grants import PRIVATE_GRANTS
 from frostfs_testlib.s3 import AwsCliClient, S3ClientWrapper, VersioningStatus
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.testing.test_control import expect_not_raises
 from frostfs_testlib.utils import wallet_utils
-from frostfs_testlib.utils.file_utils import concat_files, generate_file, generate_file_with_content, get_file_hash
+from frostfs_testlib.utils.file_utils import (
+    TestFile,
+    concat_files,
+    generate_file,
+    generate_file_with_content,
+    get_file_hash,
+)


 @pytest.mark.s3_gate
@@ -115,20 +122,23 @@ class TestS3GateObject:

     @allure.title("Copy with acl (s3_client={s3_client})")
     def test_s3_copy_acl(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
-        version_1_content = "Version 1"
-        file_name_simple = generate_file_with_content(simple_object_size.value, content=version_1_content)
-        obj_key = os.path.basename(file_name_simple)
+        file_path = generate_file_with_content(simple_object_size.value)
+        file_name = os.path.basename(file_path)

         s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)

-        with reporter.step("Put several versions of object into bucket"):
-            s3_client.put_object(bucket, file_name_simple)
-            s3_helper.check_objects_in_bucket(s3_client, bucket, [obj_key])
+        with reporter.step("Put object into bucket"):
+            s3_client.put_object(bucket, file_path)
+            s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])

-        with reporter.step("Copy object and check acl attribute"):
-            copy_obj_path = s3_client.copy_object(bucket, obj_key, acl="public-read-write")
-            obj_acl = s3_client.get_object_acl(bucket, copy_obj_path)
-            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
+        with reporter.step("[NEGATIVE] Copy object with public-read-write ACL"):
+            with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
+                copy_path = s3_client.copy_object(bucket, file_name, acl="public-read-write")
+
+        with reporter.step("Copy object with private ACL"):
+            copy_path = s3_client.copy_object(bucket, file_name, acl="private")
+            object_grants = s3_client.get_object_acl(bucket, copy_path)
+            s3_helper.verify_acl_permissions(object_grants, PRIVATE_GRANTS)

     @allure.title("Copy object with metadata (s3_client={s3_client})")
     def test_s3_copy_metadate(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
@@ -233,9 +243,7 @@ class TestS3GateObject:

         with reporter.step("Put several versions of object into bucket"):
             version_id_1 = s3_client.put_object(bucket, file_name_simple)
-            file_name_1 = generate_file_with_content(
-                simple_object_size.value, file_path=file_name_simple, content=version_2_content
-            )
+            file_name_1 = generate_file_with_content(simple_object_size.value, file_name_simple, version_2_content)
             version_id_2 = s3_client.put_object(bucket, file_name_1)

         with reporter.step("Check bucket shows all versions"):
@@ -254,16 +262,16 @@ class TestS3GateObject:
             assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"

         with reporter.step("Delete second version of object"):
-            delete_obj = s3_client.delete_object(bucket, obj_key, version_id=version_id_2)
+            delete_obj = s3_client.delete_object(bucket, obj_key, version_id_2)
             versions = s3_client.list_objects_versions(bucket)
             obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == obj_key}
             assert not obj_versions, "Expected object not found"
             assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"

         with reporter.step("Put new object into bucket"):
-            file_name_simple = generate_file(complex_object_size.value)
-            obj_key = os.path.basename(file_name_simple)
-            s3_client.put_object(bucket, file_name_simple)
+            file_name_complex = generate_file(complex_object_size.value)
+            obj_key = os.path.basename(file_name_complex)
+            s3_client.put_object(bucket, file_name_complex)

         with reporter.step("Delete last object"):
             delete_obj = s3_client.delete_object(bucket, obj_key)
@@ -284,17 +292,11 @@ class TestS3GateObject:

         with reporter.step("Put several versions of object into bucket"):
             version_id_1 = s3_client.put_object(bucket, file_name_1)
-            file_name_2 = generate_file_with_content(
-                simple_object_size.value, file_path=file_name_1, content=version_2_content
-            )
+            file_name_2 = generate_file_with_content(simple_object_size.value, file_name_1, version_2_content)
             version_id_2 = s3_client.put_object(bucket, file_name_2)
-            file_name_3 = generate_file_with_content(
-                simple_object_size.value, file_path=file_name_1, content=version_3_content
-            )
+            file_name_3 = generate_file_with_content(simple_object_size.value, file_name_1, version_3_content)
             version_id_3 = s3_client.put_object(bucket, file_name_3)
-            file_name_4 = generate_file_with_content(
-                simple_object_size.value, file_path=file_name_1, content=version_4_content
-            )
+            file_name_4 = generate_file_with_content(simple_object_size.value, file_name_1, version_4_content)
             version_id_4 = s3_client.put_object(bucket, file_name_4)
             version_ids = {version_id_1, version_id_2, version_id_3, version_id_4}
@@ -638,71 +640,38 @@ class TestS3GateObject:
         simple_object_size: ObjectSize,
         second_wallet_public_key: str,
     ):
-        file_path_1 = generate_file(complex_object_size.value)
-        file_name = s3_helper.object_key_from_file_path(file_path_1)
-        if bucket_versioning == "ENABLED":
-            status = VersioningStatus.ENABLED
-        elif bucket_versioning == "SUSPENDED":
-            status = VersioningStatus.SUSPENDED
-        s3_helper.set_bucket_versioning(s3_client, bucket, status)
+        file_path = generate_file(complex_object_size.value)
+        file_name = s3_helper.object_key_from_file_path(file_path)
+        s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus[bucket_versioning])

         with reporter.step("Put object with acl private"):
-            s3_client.put_object(bucket, file_path_1, acl="private")
-            obj_acl = s3_client.get_object_acl(bucket, file_name)
-            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
-            object_1 = s3_client.get_object(bucket, file_name)
-            assert get_file_hash(file_path_1) == get_file_hash(object_1), "Hashes must be the same"
+            s3_client.put_object(bucket, file_path, acl="private")
+            object_grants = s3_client.get_object_acl(bucket, file_name)
+            s3_helper.verify_acl_permissions(object_grants, PRIVATE_GRANTS)
+            object = s3_client.get_object(bucket, file_name)
+            assert get_file_hash(file_path) == get_file_hash(object), "Hashes must be the same"

-        with reporter.step("Put object with acl public-read"):
-            file_path_2 = generate_file_with_content(simple_object_size.value, file_path=file_path_1)
-            s3_client.put_object(bucket, file_path_2, acl="public-read")
-            obj_acl = s3_client.get_object_acl(bucket, file_name)
-            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
-            object_2 = s3_client.get_object(bucket, file_name)
-            assert get_file_hash(file_path_2) == get_file_hash(object_2), "Hashes must be the same"
+        with reporter.step("[NEGATIVE] Put object with acl public-read"):
+            generate_file_with_content(simple_object_size.value, file_path)
+            with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
+                s3_client.put_object(bucket, file_path, acl="public-read")

-        with reporter.step("Put object with acl public-read-write"):
-            file_path_3 = generate_file_with_content(simple_object_size.value, file_path=file_path_1)
-            s3_client.put_object(bucket, file_path_3, acl="public-read-write")
-            obj_acl = s3_client.get_object_acl(bucket, file_name)
-            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
-            object_3 = s3_client.get_object(bucket, file_name)
-            assert get_file_hash(file_path_3) == get_file_hash(object_3), "Hashes must be the same"
+        with reporter.step("[NEGATIVE] Put object with acl public-read-write"):
+            generate_file_with_content(simple_object_size.value, file_path)
+            with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
+                s3_client.put_object(bucket, file_path, acl="public-read-write")

-        with reporter.step("Put object with acl authenticated-read"):
-            file_path_4 = generate_file_with_content(simple_object_size.value, file_path=file_path_1)
-            s3_client.put_object(bucket, file_path_4, acl="authenticated-read")
-            obj_acl = s3_client.get_object_acl(bucket, file_name)
-            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
-            object_4 = s3_client.get_object(bucket, file_name)
-            assert get_file_hash(file_path_4) == get_file_hash(object_4), "Hashes must be the same"
+        with reporter.step("[NEGATIVE] Put object with --grant-full-control id=mycanonicaluserid"):
+            with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
+                s3_client.put_object(bucket, file_path, grant_full_control=f"id={second_wallet_public_key}")

-        file_path_5 = generate_file(complex_object_size.value)
-        file_name_5 = s3_helper.object_key_from_file_path(file_path_5)
-
-        with reporter.step("Put object with --grant-full-control id=mycanonicaluserid"):
-            generate_file_with_content(simple_object_size.value, file_path=file_path_5)
-            s3_client.put_object(
-                bucket,
-                file_path_5,
-                grant_full_control=f"id={second_wallet_public_key}",
-            )
-            obj_acl = s3_client.get_object_acl(bucket, file_name_5)
-            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
-            object_5 = s3_client.get_object(bucket, file_name_5)
-            assert get_file_hash(file_path_5) == get_file_hash(object_5), "Hashes must be the same"
-
-        with reporter.step("Put object with --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"):
-            generate_file_with_content(simple_object_size.value, file_path=file_path_5)
-            s3_client.put_object(
-                bucket,
-                file_path_5,
-                grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
-            )
-            obj_acl = s3_client.get_object_acl(bucket, file_name_5)
-            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
-            object_6 = s3_client.get_object(bucket, file_name_5)
-            assert get_file_hash(file_path_5) == get_file_hash(object_6), "Hashes must be the same"
+        with reporter.step(
+            "[NEGATIVE] Put object with --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"
+        ):
+            with pytest.raises(Exception, match=S3_BUCKET_DOES_NOT_ALLOW_ACL):
+                s3_client.put_object(
+                    bucket, file_path, grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers"
+                )

     @allure.title("Put object with lock-mode (s3_client={s3_client})")
     def test_s3_put_object_lock_mode(
@@ -787,29 +756,19 @@ class TestS3GateObject:
         bucket: str,
         simple_object_size: ObjectSize,
     ):
-        file_path_1 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1")
-        file_path_2 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2")
+        test_file_1 = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1"))
+        test_file_2 = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2"))
         object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
-        key_to_path = {"test_file_1": file_path_1, "test_file_2": file_path_2}
+        key_to_path = {"test_file_1": test_file_1.path, "test_file_2": test_file_2.path}

-        generate_file_with_content(simple_object_size.value, file_path=file_path_1)
-        generate_file_with_content(simple_object_size.value, file_path=file_path_2)
+        generate_file_with_content(simple_object_size.value, test_file_1)
+        generate_file_with_content(simple_object_size.value, test_file_2)
         s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
-        # TODO: return ACL, when https://github.com/nspcc-dev/neofs-s3-gw/issues/685 will be closed
         if sync_type == "sync":
-            s3_client.sync(
-                bucket=bucket,
-                dir_path=os.path.dirname(file_path_1),
-                # acl="public-read-write",
-                metadata=object_metadata,
-            )
+            s3_client.sync(bucket, os.path.dirname(test_file_1), metadata=object_metadata)
         elif sync_type == "cp":
-            s3_client.cp(
-                bucket=bucket,
-                dir_path=os.path.dirname(file_path_1),
-                # acl="public-read-write",
-                metadata=object_metadata,
-            )
+            s3_client.cp(bucket, os.path.dirname(test_file_1), metadata=object_metadata)

         with reporter.step("Check objects are synced"):
             objects = s3_client.list_objects(bucket)
@@ -823,9 +782,8 @@ class TestS3GateObject:
                 ), "Expected hashes are the same"
                 obj_head = s3_client.head_object(bucket, obj_key)
                 assert obj_head.get("Metadata") == object_metadata, f"Metadata of object is {object_metadata}"
-                # Uncomment after https://github.com/nspcc-dev/neofs-s3-gw/issues/685 is solved
-                # obj_acl = s3_client.get_object_acl(bucket, obj_key)
-                # s3_helper.assert_s3_acl(acl_grants = obj_acl, permitted_users = "AllUsers")
+                object_grants = s3_client.get_object_acl(bucket, obj_key)
+                s3_helper.verify_acl_permissions(object_grants, PRIVATE_GRANTS)

     @allure.title("Put 10 nested level object (s3_client={s3_client})")
     def test_s3_put_10_folder(
@@ -836,7 +794,7 @@ class TestS3GateObject:
         simple_object_size: ObjectSize,
     ):
         path = "/".join(["".join(choices(string.ascii_letters, k=3)) for _ in range(10)])
-        file_path_1 = os.path.join(temp_directory, path, "test_file_1")
+        file_path_1 = TestFile(os.path.join(temp_directory, path, "test_file_1"))
         generate_file_with_content(simple_object_size.value, file_path=file_path_1)
         file_name = s3_helper.object_key_from_file_path(file_path_1)
         objects_list = s3_client.list_objects(bucket)
@@ -1,7 +1,9 @@
+import json
 import os

 import allure
 import pytest
+from botocore.exceptions import ClientError
 from frostfs_testlib import reporter
 from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
 from frostfs_testlib.steps.cli.container import search_container_by_name
@@ -87,23 +89,24 @@ class TestS3GatePolicy(ClusterTestBase):

     @allure.title("Bucket policy (s3_client={s3_client})")
     def test_s3_bucket_policy(self, s3_client: S3ClientWrapper):
-        with reporter.step("Create bucket with default policy"):
+        with reporter.step("Create bucket"):
             bucket = s3_client.create_bucket()
             s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)

         with reporter.step("GetBucketPolicy"):
-            s3_client.get_bucket_policy(bucket)
+            with pytest.raises((RuntimeError, ClientError)):
+                s3_client.get_bucket_policy(bucket)

         with reporter.step("Put new policy"):
             custom_policy = f"file://{os.getcwd()}/pytest_tests/resources/files/bucket_policy.json"
             custom_policy = {
-                "Version": "2008-10-17",
+                "Version": "2012-10-17",
                 "Id": "aaaa-bbbb-cccc-dddd",
                 "Statement": [
                     {
                         "Sid": "AddPerm",
                         "Effect": "Allow",
-                        "Principal": {"AWS": "*"},
+                        "Principal": "*",
                         "Action": ["s3:GetObject"],
                         "Resource": [f"arn:aws:s3:::{bucket}/*"],
                     }
@@ -112,8 +115,16 @@ class TestS3GatePolicy(ClusterTestBase):

             s3_client.put_bucket_policy(bucket, custom_policy)
         with reporter.step("GetBucketPolicy"):
-            policy_1 = s3_client.get_bucket_policy(bucket)
-            print(policy_1)
+            returned_policy = json.loads(s3_client.get_bucket_policy(bucket))
+
+            assert returned_policy == custom_policy, "Wrong policy was received"
+
+        with reporter.step("Delete the policy"):
+            s3_client.delete_bucket_policy(bucket)
+
+        with reporter.step("GetBucketPolicy"):
+            with pytest.raises((RuntimeError, ClientError)):
+                s3_client.get_bucket_policy(bucket)

     @allure.title("Bucket CORS (s3_client={s3_client})")
     def test_s3_cors(self, s3_client: S3ClientWrapper):
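Editor's note on the policy changes above: "Version": "2012-10-17" is the current policy-language version ("2008-10-17" is the legacy grammar), and in the 2012 grammar "Principal": "*" is the canonical spelling of the anonymous principal, equivalent to {"AWS": "*"}. The new round-trip assertion works because get-bucket-policy hands the document back as a JSON string; a minimal illustration of that check in isolation:

    import json

    stored = {"Version": "2012-10-17", "Statement": []}
    returned = json.dumps(stored)          # what a get-policy call hands back
    assert json.loads(returned) == stored  # order-insensitive dict comparison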
@@ -1,77 +1,42 @@
 import logging
-import os
-from http import HTTPStatus
-from re import fullmatch, match
+from re import fullmatch
 
 import allure
 import pytest
-import requests
 from frostfs_testlib import reporter
 from frostfs_testlib.hosting import Hosting
-from frostfs_testlib.resources.common import ASSETS_DIR
-from frostfs_testlib.utils.env_utils import read_env_properties, save_env_properties
 from frostfs_testlib.utils.version_utils import get_remote_binaries_versions
-from pytest import FixtureRequest
 
 logger = logging.getLogger("NeoLogger")
 
+VERSION_REGEX = r"^([a-zA-Z0-9]*/)?\d+\.\d+\.\d+(-.*)?(?<!dirty)"
+VERSION_ERROR_MSG = "{name} [{host}]: Actual version doesn't conform to format '0.0.0-000-aaaaaaa': {version}"
+
+
+def _check_version_format(version):
+    return fullmatch(VERSION_REGEX, version)
+
 
 @allure.title("Check binaries versions")
 @pytest.mark.check_binaries
-def test_binaries_versions(request: FixtureRequest, hosting: Hosting):
+def test_binaries_versions(hosting: Hosting):
     """
     Compare binaries versions from external source (url) and deployed on servers.
     """
     with reporter.step("Get binaries versions from servers"):
-        got_versions, exсeptions_remote_binaries_versions = get_remote_binaries_versions(hosting)
+        versions_by_host = get_remote_binaries_versions(hosting)
 
-    environment_dir = request.config.getoption("--alluredir") or ASSETS_DIR
-    env_file = os.path.join(environment_dir, "environment.properties")
-    env_properties = read_env_properties(env_file)
-
-    # compare versions from servers and file
     exсeptions = []
-    additional_env_properties = {}
 
-    for binary_name, binary in got_versions.items():
-        version = binary["version"]
-        requires_check = binary["check"]
-        if requires_check and not fullmatch(r"^\d+\.\d+\.\d+(-.*)?(?<!dirty)", version):
-            exсeptions.append(f"{binary_name}: Actual version doesn't conform to format '0.0.0-000-aaaaaaa': {version}")
+    last_host, versions_on_last_host = versions_by_host.popitem()
+    for name, version in versions_on_last_host.items():
+        for host, versions_on_host in versions_by_host.items():
+            if versions_on_host[name] != version:
+                exсeptions.append(f"Binary of {name} has inconsistent version {versions_on_host[name]} on host {host}")
+            if not _check_version_format(versions_on_host[name]):
+                exсeptions.append(VERSION_ERROR_MSG.format(name=name, host=host, version=version))
 
-        # If some binary was not listed in the env properties file, let's add it
-        # so that we have full information about versions in allure report
-        if env_properties and binary_name not in env_properties:
-            additional_env_properties[binary_name] = version
+        if not _check_version_format(version):
+            exсeptions.append(VERSION_ERROR_MSG.format(name=name, host=last_host, version=version))
 
-    if env_properties and additional_env_properties:
-        save_env_properties(env_file, additional_env_properties)
-
-    exсeptions.extend(exсeptions_remote_binaries_versions)
-
-    # create clear beautiful error with aggregation info
-    if exсeptions:
-        msg = "\n".join(exсeptions)
-        raise AssertionError(f"Found binaries with unexpected versions:\n{msg}")
-
-
-@reporter.step("Download versions info from {url}")
-def download_versions_info(url: str) -> dict:
-    binaries_to_version = {}
-
-    response = requests.get(url)
-
-    assert response.status_code == HTTPStatus.OK, f"Got {response.status_code} code. Content {response.json()}"
-
-    content = response.text
-    assert content, f"Expected file with content, got {response}"
-
-    for line in content.split("\n"):
-        m = match("(.*)=(.*)", line)
-        if not m:
-            logger.warning(f"Could not get binary/version from {line}")
-            continue
-        bin_name, bin_version = m.group(1), m.group(2)
-        binaries_to_version[bin_name] = bin_version
-
-    return binaries_to_version
+    assert not exсeptions, "\n".join(exсeptions)
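For reference, here is a minimal, self-contained sketch of what the new `VERSION_REGEX` accepts and rejects. The sample version strings are illustrative only; the negative lookbehind `(?<!dirty)` is what filters out locally modified builds.

```python
from re import fullmatch

VERSION_REGEX = r"^([a-zA-Z0-9]*/)?\d+\.\d+\.\d+(-.*)?(?<!dirty)"

# Accepted: plain semver, suffixed release builds, and an optional "name/" prefix.
assert fullmatch(VERSION_REGEX, "0.37.0")
assert fullmatch(VERSION_REGEX, "0.37.0-17-gabcdef1")
assert fullmatch(VERSION_REGEX, "frostfs/1.2.3")

# Rejected: builds stamped as dirty and strings without a full x.y.z core.
assert not fullmatch(VERSION_REGEX, "0.37.0-17-gabcdef1-dirty")
assert not fullmatch(VERSION_REGEX, "v0.37")
```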
@@ -144,5 +144,5 @@ class TestControlShard(ClusterTestBase):
         for shard in ShardsWatcher(node).get_shards():
             if shard["blobstor"][1]["path"] in object_path:
                 with reporter.step(f"Shard - {shard['shard_id']} to {node.host_ip}, mode - {shard['mode']}"):
-                    assert shard["mode"] == "degraded-read-only"
+                    assert shard["mode"] == "read-only"
                 break
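The assertion above relies on the shard listing returned by `ShardsWatcher.get_shards()`, where each entry carries a `shard_id`, a `mode`, and a `blobstor` list whose second component holds the blobstor path. A rough sketch of that lookup pattern against fabricated data (the paths, IDs, and modes below are invented for illustration, not real cluster output):

```python
# Find the shard whose blobstor path is part of the stored object's path,
# then check that shard's mode.
shards = [
    {"shard_id": "shard-1", "mode": "read-write", "blobstor": [{"path": "/srv/meta1"}, {"path": "/srv/blob1"}]},
    {"shard_id": "shard-2", "mode": "read-only", "blobstor": [{"path": "/srv/meta2"}, {"path": "/srv/blob2"}]},
]
object_path = "/srv/blob2/AB/CD/object"

for shard in shards:
    if shard["blobstor"][1]["path"] in object_path:
        assert shard["mode"] == "read-only", f"Unexpected mode for {shard['shard_id']}"
        break
```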
@@ -1,7 +1,7 @@
 import os
 import shutil
 import time
-from datetime import datetime
+from datetime import datetime, timezone
 
 import allure
 import pytest
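The import change above supports the move from `datetime.utcnow()` to `datetime.now(timezone.utc)` later in this file: the former returns a naive timestamp, the latter a timezone-aware one. A quick illustration of the difference:

```python
from datetime import datetime, timezone

naive = datetime.utcnow()           # tzinfo is None; deprecated since Python 3.12
aware = datetime.now(timezone.utc)  # tzinfo is timezone.utc

assert naive.tzinfo is None
assert aware.tzinfo is timezone.utc

# Ordering comparisons between naive and aware datetimes raise TypeError,
# so the bounds passed to log filtering must consistently use one kind.
```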
@@ -20,24 +20,72 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
     )
 
 
-class TestLogs:
-    @allure.title("Check logs from frostfs-testcases with marks '{request.config.option.markexpr}'")
-    @pytest.mark.logs_after_session
-    @pytest.mark.no_healthcheck
-    def test_logs_after_session(
+@pytest.mark.logs_after_session
+class TestLogs:
+    @pytest.mark.order(1000)
+    @allure.title("Check logs from frostfs-testcases with marks '{request.config.option.markexpr}' - search errors")
+    def test_logs_search_errors(
         self, temp_directory: str, cluster: Cluster, session_start_time: datetime, request: pytest.FixtureRequest
     ):
-        """
-        This test automatically added to any test run to check logs from cluster for critical errors.
-
-        """
-
-        end_time = datetime.utcnow()
+        end_time = datetime.now(timezone.utc)
         logs_dir = os.path.join(temp_directory, "logs")
-        os.makedirs(logs_dir)
-        # Using \b here because 'oom' and 'panic' can sometimes be found in OID or CID
-        issues_regex = r"\bpanic\b|\boom\b|too many|insufficient funds|insufficient amount of gas|wallet passwd|secret \bkey\b|access \bkey\b|cannot assign requested address"
+        if not os.path.exists(logs_dir):
+            os.makedirs(logs_dir)
+        issues_regex = r"\bpanic\b|\boom\b|too many|insufficient funds|insufficient amount of gas|cannot assign requested address|\bunable to process\b"
         exclude_filter = r"too many requests"
+        log_level_priority = "3"  # will include 0-3 priority logs (0: emergency 1: alerts 2: critical 3: errors)
 
+        time.sleep(2)
+
+        futures = parallel(
+            self._collect_logs_on_host,
+            cluster.hosts,
+            logs_dir,
+            issues_regex,
+            session_start_time,
+            end_time,
+            exclude_filter,
+            priority=log_level_priority,
+        )
+
+        hosts_with_problems = [
+            future.result() for future in futures if not future.exception() and future.result() is not None
+        ]
+        if hosts_with_problems:
+            self._attach_logs(logs_dir)
+
+        assert (
+            not hosts_with_problems
+        ), f"The following hosts contains critical errors in system logs: {', '.join(hosts_with_problems)}"
+
+    @pytest.mark.order(1001)
+    @allure.title(
+        "Check logs from frostfs-testcases with marks '{request.config.option.markexpr}' - identify sensitive data"
+    )
+    def test_logs_identify_sensitive_data(
+        self, temp_directory: str, cluster: Cluster, session_start_time: datetime, request: pytest.FixtureRequest
+    ):
+        end_time = datetime.now(timezone.utc)
+        logs_dir = os.path.join(temp_directory, "logs")
+        if not os.path.exists(logs_dir):
+            os.makedirs(logs_dir)
+
+        _regex = {
+            "authorization_basic": r"basic [a-zA-Z0-9=:_\+\/-]{16,100}",
+            "authorization_bearer": r"bearer [a-zA-Z0-9_\-\.=:_\+\/]{16,100}",
+            "access_token": r"\"access_token\":\"[0-9a-z]{16}\$[0-9a-f]{32}\"",
+            "api_token": r"\"api_token\":\"(xox[a-zA-Z]-[a-zA-Z0-9-]+)\"",
+            "yadro_access_token": r"[a-zA-Z0-9_-]*:[a-zA-Z0-9_\-]+@yadro\.com*",
+            "SSH_privKey": r"([-]+BEGIN [^\s]+ PRIVATE KEY[-]+[\s]*[^-]*[-]+END [^\s]+ PRIVATE KEY[-]+)",
+            "possible_Creds": r"(?i)("
+            r"password\s*[`=:]+\s*[^\s]+|"
+            r"password is\s*[`=:]+\s*[^\s]+|"
+            r"passwd\s*[`=:]+\s*[^\s]+)",
+        }
+
+        issues_regex = "|".join(_regex.values())
+        exclude_filter = r"COMMAND=\|--\sBoot\s"
+
         time.sleep(2)
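The `_regex` table above is collapsed into a single alternation with `"|".join(...)`. A small sketch of how that combined pattern flags log lines; the sample lines and credential values here are fabricated for demonstration:

```python
import re

# Two of the patterns from the test, joined the same way the test joins them.
_regex = {
    "authorization_basic": r"basic [a-zA-Z0-9=:_\+\/-]{16,100}",
    "possible_Creds": r"(?i)(password\s*[`=:]+\s*[^\s]+|passwd\s*[`=:]+\s*[^\s]+)",
}
issues_regex = re.compile("|".join(_regex.values()))

# Lines carrying credentials match; an ordinary log line does not.
assert issues_regex.search("Authorization: basic dXNlcjpzZWNyZXQtZXhhbXBsZQ==")
assert issues_regex.search("login failed: password=hunter2-example")
assert issues_regex.search("request completed in 12ms") is None
```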
@@ -59,13 +107,22 @@ class TestLogs:
 
         assert (
             not hosts_with_problems
-        ), f"The following hosts contains contain critical errors in system logs: {', '.join(hosts_with_problems)}"
+        ), f"The following hosts contains sensitive data in system logs: {', '.join(hosts_with_problems)}"
 
     def _collect_logs_on_host(
-        self, host: Host, logs_dir: str, regex: str, since: datetime, until: datetime, exclude_filter: str
+        self,
+        host: Host,
+        logs_dir: str,
+        regex: str,
+        since: datetime,
+        until: datetime,
+        exclude_filter: str,
+        priority: str = None,
     ):
         with reporter.step(f"Get logs from {host.config.address}"):
-            logs = host.get_filtered_logs(filter_regex=regex, since=since, until=until, exclude_filter=exclude_filter)
+            logs = host.get_filtered_logs(
+                filter_regex=regex, since=since, until=until, exclude_filter=exclude_filter, priority=priority
+            )
 
         if not logs:
             return None
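The new `priority` parameter is forwarded to `host.get_filtered_logs`. On systemd-based hosts, a priority of "3" plausibly corresponds to journalctl's `-p/--priority` flag, which keeps syslog levels 0-3 (emerg, alert, crit, err) and drops warnings and below. A minimal sketch of such a mapping, assuming a journalctl backend; `build_journalctl_cmd` is a hypothetical helper, not frostfs-testlib's actual implementation:

```python
from datetime import datetime


def build_journalctl_cmd(since: datetime, until: datetime, priority: str = None) -> list:
    # Hypothetical helper: assemble a journalctl invocation whose --priority
    # value bounds the maximum syslog level to collect (0=emerg .. 3=err).
    cmd = [
        "journalctl",
        "--since", since.strftime("%Y-%m-%d %H:%M:%S"),
        "--until", until.strftime("%Y-%m-%d %H:%M:%S"),
    ]
    if priority is not None:
        cmd.extend(["--priority", priority])
    return cmd
```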