scripts/tests/twister/test_harness.py (146 changes: 144 additions & 2 deletions)
@@ -12,6 +12,7 @@
import pytest
import re
import logging as logger
import textwrap

# ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
from conftest import ZEPHYR_BASE
@@ -30,7 +31,7 @@
    Test,
)
from twisterlib.statuses import TwisterStatus
-from twisterlib.testsuite import TestSuite
+from twisterlib.testsuite import TestSuite, TestCase
from twisterlib.testinstance import TestInstance

GTEST_START_STATE = " RUN "
@@ -608,7 +609,7 @@ def test_pytest__generate_parameters_for_hardware(tmp_path, pty_value, hardware_
assert "--twister-fixture=fixture2" in command


-def test__update_command_with_env_dependencies():
+def test_pytest__update_command_with_env_dependencies():
    cmd = ["cmd"]
    pytest_test = Pytest()
    mock.patch.object(Pytest, "PYTEST_PLUGIN_INSTALLED", False)
@@ -662,6 +663,147 @@ def test_pytest_run(tmp_path, caplog):
    assert exp_out in caplog.text


class FakeTestInstance:
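    """Minimal stand-in for TestInstance, exposing only what the Pytest harness needs."""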

    def __init__(self):
        self.testcases = []
        self.reason = ""

    def add_testcase(self, name):
        tc = TestCase(name)
        self.testcases.append(tc)
        return tc


def get_test_case_by_name(testcases, name):
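    """Return the first test case with the given name, or None if there is no match."""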
    for tc in testcases:
        if tc.name == name:
            return tc


@pytest.fixture
def pytest_harness():
    py_harness = Pytest()
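    # The harness id becomes the "tests.test_foobar" prefix of every parsed test case name.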
py_harness.id = "tests.test_foobar"
py_harness.instance = FakeTestInstance()
return py_harness


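# A minimal pytest module exercising every outcome the harness must map:
# a pass, a failed assertion, an error raised during fixture setup, and a skip.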
EXAMPLE_TESTS = textwrap.dedent("""\
import pytest

@pytest.fixture
def raise_exception():
    raise Exception("Something went wrong")

def test_pass():
    assert 1

def test_fail():
    assert 0, "Not True"

def test_error(raise_exception):
    assert 1

@pytest.mark.skip("WIP")
def test_skip():
    assert 1
""")


def test_if_pytest_harness_parses_report_with_all_kinds_of_statuses(tmp_path, testdir, pytest_harness):
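    # `testdir` is pytest's pytester fixture: it runs EXAMPLE_TESTS in a temporary
    # directory, producing a genuine JUnit XML report for the harness to parse.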
    # Create JunitXml report
    report_xml = tmp_path / "results.xml"
    testdir.makepyfile(EXAMPLE_TESTS)
    testdir.runpytest("--junitxml", str(report_xml))

    pytest_harness._parse_report_file(report_xml)

    assert pytest_harness.status == "failed"
    assert pytest_harness.instance.reason == "1/4 pytest scenario(s) failed"
    assert len(pytest_harness.instance.testcases) == 4
    assert {tc.name for tc in pytest_harness.instance.testcases} == {
        "tests.test_foobar.test_pass",
        "tests.test_foobar.test_fail",
        "tests.test_foobar.test_error",
        "tests.test_foobar.test_skip"
    }

    passed_tc = get_test_case_by_name(pytest_harness.instance.testcases, "tests.test_foobar.test_pass")
    assert passed_tc.status == "passed"
    assert passed_tc.reason is None
    assert passed_tc.output == ""
    assert isinstance(passed_tc.duration, float)

    failed_tc = get_test_case_by_name(pytest_harness.instance.testcases, "tests.test_foobar.test_fail")
    assert failed_tc.status == "failed"
    assert failed_tc.reason == "AssertionError: Not True\nassert 0"
    assert failed_tc.output != ""
    assert isinstance(failed_tc.duration, float)

    error_tc = get_test_case_by_name(pytest_harness.instance.testcases, "tests.test_foobar.test_error")
    assert error_tc.status == "error"
    assert error_tc.reason == 'failed on setup with "Exception: Something went wrong"'
    assert error_tc.output != ""
    assert isinstance(error_tc.duration, float)

    skipped_tc = get_test_case_by_name(pytest_harness.instance.testcases, "tests.test_foobar.test_skip")
    assert skipped_tc.status == "skipped"
    assert skipped_tc.reason == "WIP"
    assert skipped_tc.output != ""
    assert isinstance(skipped_tc.duration, float)


def test_if_pytest_harness_parses_report_with_passed_and_skipped_tests(tmp_path, testdir, pytest_harness):
    # Create JunitXml report
    report_xml = tmp_path / "results.xml"
    testdir.makepyfile(EXAMPLE_TESTS)
    testdir.runpytest("-k", "(test_pass or test_skip)", "--junitxml", str(report_xml))

    pytest_harness._parse_report_file(report_xml)

    assert pytest_harness.status == "passed"
    assert pytest_harness.instance.reason == ""
    assert len(pytest_harness.instance.testcases) == 2
    assert {tc.name for tc in pytest_harness.instance.testcases} == {
        "tests.test_foobar.test_pass",
        "tests.test_foobar.test_skip"
    }


def test_if_pytest_harness_parses_report_with_passed_and_error_tests(tmp_path, testdir, pytest_harness):
    # Create JunitXml report
    report_xml = tmp_path / "results.xml"
    testdir.makepyfile(EXAMPLE_TESTS)
    testdir.runpytest("-k", "(test_pass or test_error)", "--junitxml", str(report_xml))

    pytest_harness._parse_report_file(report_xml)

    assert pytest_harness.status == "error"
    assert pytest_harness.instance.reason == "Error during pytest execution"
    assert len(pytest_harness.instance.testcases) == 2
    assert {tc.name for tc in pytest_harness.instance.testcases} == {
        "tests.test_foobar.test_pass",
        "tests.test_foobar.test_error"
    }


def test_if_pytest_harness_parses_report_with_skipped_tests_only(tmp_path, testdir, pytest_harness):
    # Create JunitXml report
    report_xml = tmp_path / "results.xml"
    testdir.makepyfile(EXAMPLE_TESTS)
    testdir.runpytest("-k", "test_skip", "--junitxml", str(report_xml))

    pytest_harness._parse_report_file(report_xml)

    assert pytest_harness.status == "skipped"
    assert pytest_harness.instance.reason == ""
    assert len(pytest_harness.instance.testcases) == 1
    assert {tc.name for tc in pytest_harness.instance.testcases} == {
        "tests.test_foobar.test_skip"
    }


TEST_DATA_6 = [(None), ("Test")]

