[#161] Improve logging

Signed-off-by: Andrey Berezin <a.berezin@yadro.com>

parent 3b071f02f7
commit 873d6e3d14

40 changed files with 980 additions and 950 deletions
@@ -10,10 +10,10 @@ import allure
 import pytest
 import yaml
 from dateutil import parser
-from frostfs_testlib import plugins
+from frostfs_testlib import plugins, reporter
 from frostfs_testlib.healthcheck.interfaces import Healthcheck
 from frostfs_testlib.hosting import Hosting
-from frostfs_testlib.reporter import AllureHandler, get_reporter
+from frostfs_testlib.reporter import AllureHandler, StepsLogger
 from frostfs_testlib.resources.common import (
     ASSETS_DIR,
     COMPLEX_OBJECT_CHUNKS_COUNT,
@@ -48,6 +48,9 @@ def pytest_configure(config: pytest.Config):
     config.option.markexpr = f"logs_after_session or ({markers})"
 
 
+number_key = pytest.StashKey[str]()
+start_time = pytest.StashKey[int]()
+test_outcome = pytest.StashKey[str]()
 # pytest hook. Do not rename
 def pytest_collection_modifyitems(items: list[pytest.Item]):
     # Make network tests last based on @pytest.mark.node_mgmt and logs_test to be latest
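For context: pytest.StashKey (pytest >= 7.0) is a typed, collision-free slot on item.stash for carrying per-test data between hook calls, which is exactly what the three keys above are for. A minimal sketch of the mechanism, separate from this commit (the key name is hypothetical):

    import pytest

    note_key = pytest.StashKey[str]()  # one shared, typed key for all hooks below

    def pytest_runtest_setup(item: pytest.Item):
        # Write under the key during setup...
        item.stash[note_key] = f"setup ran for {item.name}"

    def pytest_runtest_teardown(item: pytest.Item):
        # ...and read it back in any later hook for the same item.
        print(item.stash[note_key])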
@@ -60,6 +63,35 @@ def pytest_collection_modifyitems(items: list[pytest.Item]):
     items.sort(key=lambda item: priority(item))
 
 
+# pytest hook. Do not rename
+def pytest_collection_finish(session: pytest.Session):
+    items_total = len(session.items)
+    for number, item in enumerate(session.items, 1):
+        item.stash[number_key] = f"[{number}/{items_total}]"
+        item.stash[test_outcome] = ""
+
+
+# pytest hook. Do not rename
+def pytest_runtest_setup(item: pytest.Item):
+    item.stash[start_time] = int(datetime.now().timestamp())
+    logger.info(f"STARTED {item.stash[number_key]}: {item.name}")
+
+
+# pytest hook. Do not rename
+def pytest_runtest_makereport(item: pytest.Item, call: pytest.CallInfo):
+    if call.excinfo is not None:
+        item.stash[test_outcome] += f"FAILED on {call.when}; "
+
+    if call.when == "teardown":
+        duration = int(datetime.now().timestamp()) - item.stash[start_time]
+        if not item.stash[test_outcome]:
+            outcome = "PASSED "
+        else:
+            outcome = item.stash[test_outcome]
+        logger.info(f"ENDED {item.stash[number_key]}: {item.name}: {outcome}(duration={duration}s)")
+
+
 # pytest hook. Do not rename
 def pytest_generate_tests(metafunc: pytest.Metafunc):
     if (
         TEST_CYCLES_COUNT <= 1
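Taken together, the three new hooks number every collected test and bracket its execution in the log, accumulating a failure note per phase. For a session of, say, 120 collected items the output would read (names and timings illustrative):

    STARTED [3/120]: test_put_object
    ENDED [3/120]: test_put_object: PASSED (duration=12s)
    STARTED [4/120]: test_delete_bucket
    ENDED [4/120]: test_delete_bucket: FAILED on call; (duration=45s)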
@@ -78,7 +110,9 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
 
 @pytest.fixture(scope="session")
 def configure_testlib():
-    get_reporter().register_handler(AllureHandler())
+    reporter.get_reporter().register_handler(AllureHandler())
+    reporter.get_reporter().register_handler(StepsLogger())
+
     logging.getLogger("paramiko").setLevel(logging.INFO)
 
     # Register Services for cluster
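Registering StepsLogger next to AllureHandler is the heart of the change: the reporter singleton fans each step out to every registered handler, so one annotation now reaches both the Allure report and the plain log. A minimal sketch of that fan-out pattern (hypothetical handler interface, not the frostfs_testlib source):

    from contextlib import contextmanager

    class Reporter:
        def __init__(self):
            self.handlers = []

        def register_handler(self, handler):
            self.handlers.append(handler)

        @contextmanager
        def step(self, name: str):
            # Every registered handler observes the same step boundaries.
            for handler in self.handlers:
                handler.begin_step(name)
            try:
                yield
            finally:
                for handler in self.handlers:
                    handler.end_step(name)

Note also that temp_directory and session_start_time further down now depend on this fixture, which guarantees handlers are registered before any step fires.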
@@ -186,7 +220,7 @@ def cluster(temp_directory: str, hosting: Hosting, client_shell: Shell) -> Clust
     yield cluster
 
 
-@allure.step("[Class]: Provide S3 policy")
+@reporter.step("[Class]: Provide S3 policy")
 @pytest.fixture(scope="class")
 def s3_policy(request: pytest.FixtureRequest):
     policy = None
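From here the diff is largely mechanical: each @allure.step becomes @reporter.step, which the changed call sites use both as a decorator and as a context manager. Both shapes, as they appear in this file (the nested step is illustrative):

    @reporter.step("[Class]: Provide S3 policy")         # decorator form
    @pytest.fixture(scope="class")
    def s3_policy(request: pytest.FixtureRequest):
        with reporter.step("Resolve requested policy"):  # context-manager form
            ...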
@@ -212,7 +246,7 @@ def cluster_state_controller(client_shell: Shell, cluster: Cluster, healthcheck:
     yield controller
 
 
-@allure.step("[Class]: Create S3 client")
+@reporter.step("[Class]: Create S3 client")
 @pytest.fixture(
     scope="class",
     params=[
@@ -254,7 +288,7 @@ def versioning_status(request: pytest.FixtureRequest) -> VersioningStatus:
     return VersioningStatus.UNDEFINED
 
 
-@allure.step("Create/delete bucket")
+@reporter.step("Create/delete bucket")
 @pytest.fixture
 def bucket(s3_client: S3ClientWrapper, versioning_status: VersioningStatus, request: pytest.FixtureRequest):
@@ -269,7 +303,7 @@ def bucket(s3_client: S3ClientWrapper, versioning_status: VersioningStatus, requ
     s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
 
 
-@allure.step("Create two buckets")
+@reporter.step("Create two buckets")
 @pytest.fixture
 def two_buckets(s3_client: S3ClientWrapper, request: pytest.FixtureRequest):
     bucket_1 = s3_client.create_bucket()
@@ -281,13 +315,16 @@ def two_buckets(s3_client: S3ClientWrapper, request: pytest.FixtureRequest):
     s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
 
 
-@allure.step("[Autouse/Session] Check binary versions")
+@reporter.step("[Autouse/Session] Check binary versions")
 @pytest.fixture(scope="session", autouse=True)
 def check_binary_versions(hosting: Hosting, client_shell: Shell, request: pytest.FixtureRequest):
     local_versions = version_utils.get_local_binaries_versions(client_shell)
     remote_versions = version_utils.get_remote_binaries_versions(hosting)
 
-    all_versions = {**local_versions, **remote_versions}
+    all_versions = {
+        **local_versions,
+        **{binary_name: binary["version"] for binary_name, binary in remote_versions.items()},
+    }
 
     environment_dir = request.config.getoption("--alluredir")
     if not environment_dir:
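The merge changes shape because get_remote_binaries_versions now returns a record per binary rather than a bare version string, so only the "version" field is folded into the flat name-to-version map. Illustratively (hypothetical values):

    remote_versions = {"frostfs-node": {"version": "0.37.0"}}  # record per binary
    # after the comprehension above: all_versions["frostfs-node"] == "0.37.0"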
@@ -297,23 +334,23 @@ def check_binary_versions(hosting: Hosting, client_shell: Shell, request: pytest
     env_utils.save_env_properties(file_path, all_versions)
 
 
-@allure.step("Prepare tmp directory")
+@reporter.step("Prepare tmp directory")
 @pytest.fixture(scope="session")
-def temp_directory():
-    with allure.step("Prepare tmp directory"):
+def temp_directory(configure_testlib):
+    with reporter.step("Prepare tmp directory"):
         full_path = os.path.join(os.getcwd(), ASSETS_DIR)
         shutil.rmtree(full_path, ignore_errors=True)
         os.mkdir(full_path)
 
     yield full_path
 
-    with allure.step("Remove tmp directory"):
+    with reporter.step("Remove tmp directory"):
         shutil.rmtree(full_path)
 
 
-@allure.step("[Autouse/Session] Test session start time")
+@reporter.step("[Autouse/Session] Test session start time")
 @pytest.fixture(scope="session", autouse=True)
-def session_start_time():
+def session_start_time(configure_testlib):
     start_time = datetime.utcnow()
     return start_time
 
@@ -321,23 +358,22 @@ def session_start_time():
 @allure.title("[Autouse/Session] After deploy healthcheck")
 @pytest.fixture(scope="session", autouse=True)
 def after_deploy_healthcheck(cluster: Cluster):
-    with allure.step("Wait for cluster readiness after deploy"):
+    with reporter.step("Wait for cluster readiness after deploy"):
         parallel(readiness_on_node, cluster.cluster_nodes)
 
 
 SERVICE_ACTIVE_TIME = 20
 
 
-@wait_for_success(60 * SERVICE_ACTIVE_TIME * 3, 60)
-@allure.step("Check readiness on node {cluster_node}")
+@wait_for_success(60 * SERVICE_ACTIVE_TIME * 3, 60, title="Wait for {cluster_node} readiness")
 def readiness_on_node(cluster_node: ClusterNode):
+    # TODO: Move to healtcheck classes
     svc_name = cluster_node.service(StorageNode).get_service_systemctl_name()
-    with allure.step(f"Check service {svc_name} is active"):
+    with reporter.step(f"Check service {svc_name} is active"):
         result = cluster_node.host.get_shell().exec(f"systemctl is-active {svc_name}")
         assert "active" == result.stdout.strip(), f"Service {svc_name} should be in active state"
 
-    with allure.step(f"Check service {svc_name} is active more than {SERVICE_ACTIVE_TIME} minutes"):
+    with reporter.step(f"Check service {svc_name} is active more than {SERVICE_ACTIVE_TIME} minutes"):
         result = cluster_node.host.get_shell().exec(
             f"systemctl show {svc_name} --property ActiveEnterTimestamp | cut -d '=' -f 2"
         )
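With the title argument, wait_for_success itself owns the step description, interpolating {cluster_node} from the wrapped function's arguments, so the separate @allure.step decorator becomes redundant. A minimal sketch of a retry decorator with that shape (an assumption for illustration, not the frostfs_testlib implementation):

    import inspect
    import logging
    import time
    from functools import wraps

    def wait_for_success(max_wait: int, interval: int, title: str = None):
        def decorator(fn):
            @wraps(fn)
            def wrapper(*args, **kwargs):
                # Resolve "{cluster_node}"-style placeholders from the call's
                # bound arguments to produce the logged step name.
                bound = inspect.signature(fn).bind(*args, **kwargs)
                step_name = title.format(**bound.arguments) if title else fn.__name__
                logging.info(f"Waiting: {step_name}")
                deadline = time.time() + max_wait
                while True:  # retry until success or until max_wait elapses
                    try:
                        return fn(*args, **kwargs)
                    except Exception:
                        if time.time() >= deadline:
                            raise
                        time.sleep(interval)
            return wrapper
        return decorator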
@@ -362,15 +398,15 @@ def run_health_check(healthcheck: Healthcheck, cluster: Cluster, request: pytest
     parallel(healthcheck.storage_healthcheck, cluster.cluster_nodes)
 
 
-@allure.step("Prepare wallet and deposit")
+@reporter.step("Prepare wallet and deposit")
 @pytest.fixture(scope="session")
 def default_wallet(wallet_factory: WalletFactory) -> str:
     wallet = wallet_factory.create_wallet(password=DEFAULT_WALLET_PASS)
-    allure.attach.file(wallet.path, os.path.basename(wallet.path), allure.attachment_type.JSON)
+    reporter.attach(wallet.path, os.path.basename(wallet.path))
     return wallet.path
 
 
-@allure.step("[Class]: Container placement policy for keys")
+@reporter.step("[Class]: Container placement policy for keys")
 @pytest.fixture(scope="class")
 def auth_container_placement_policy(cluster: Cluster, request: pytest.FixtureRequest):
     placeholders = {
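The attachment API also gets simpler: reporter.attach takes just a body (or a file path) and a display name, where allure.attach / allure.attach.file required an explicit attachment_type as well. Both call shapes from this diff:

    reporter.attach(wallet.path, os.path.basename(wallet.path))  # attach a file by path
    reporter.attach(f"{selected_node}", "Selected node")         # attach inline text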
@@ -389,5 +425,5 @@ def auth_container_placement_policy(cluster: Cluster, request: pytest.FixtureReq
 @allure.title("Select random node for testing")
 def node_under_test(cluster: Cluster) -> ClusterNode:
     selected_node = random.choice(cluster.cluster_nodes)
-    allure.attach(f"{selected_node}", "Selected node", allure.attachment_type.TEXT)
+    reporter.attach(f"{selected_node}", "Selected node")
     return selected_node