12 | 12 | import pytest
13 | 13 | import re
14 | 14 | import logging as logger
   | 15 | +import textwrap
15 | 16 |
16 | 17 | # ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
17 | 18 | from conftest import ZEPHYR_BASE

30 | 31 |     Test,
31 | 32 | )
32 | 33 | from twisterlib.statuses import TwisterStatus
33 |    | -from twisterlib.testsuite import TestSuite
   | 34 | +from twisterlib.testsuite import TestSuite, TestCase
34 | 35 | from twisterlib.testinstance import TestInstance
35 | 36 |
36 | 37 | GTEST_START_STATE = " RUN "
@@ -608,7 +609,7 @@ def test_pytest__generate_parameters_for_hardware(tmp_path, pty_value, hardware_
608 | 609 |     assert "--twister-fixture=fixture2" in command
609 | 610 |
610 | 611 |
611 |     | -def test__update_command_with_env_dependencies():
    | 612 | +def test_pytest__update_command_with_env_dependencies():
612 | 613 |     cmd = ["cmd"]
613 | 614 |     pytest_test = Pytest()
614 | 615 |     mock.patch.object(Pytest, "PYTEST_PLUGIN_INSTALLED", False)
@@ -662,6 +663,147 @@ def test_pytest_run(tmp_path, caplog):
662 | 663 |     assert exp_out in caplog.text
663 | 664 |
664 | 665 |
    | 666 | +
    | 667 | +class FakeTestInstance:
    | 668 | +
    | 669 | +    def __init__(self):
    | 670 | +        self.testcases = []
    | 671 | +        self.reason = ""
    | 672 | +
    | 673 | +    def add_testcase(self, name):
    | 674 | +        tc = TestCase(name)
    | 675 | +        self.testcases.append(tc)
    | 676 | +        return tc
    | 677 | +
    | 678 | +
    | 679 | +def get_test_case_by_name(testcases, name):
    | 680 | +    for tc in testcases:
    | 681 | +        if tc.name == name:
    | 682 | +            return tc
    | 683 | +
    | 684 | +
    | 685 | +@pytest.fixture
    | 686 | +def pytest_harness():
    | 687 | +    py_harness = Pytest()
    | 688 | +    py_harness.id = "tests.test_foobar"
    | 689 | +    py_harness.instance = FakeTestInstance()
    | 690 | +    return py_harness
    | 691 | +
    | 692 | +
    | 693 | +EXAMPLE_TESTS = textwrap.dedent("""\
    | 694 | +    import pytest
    | 695 | +
    | 696 | +    @pytest.fixture
    | 697 | +    def raise_exception():
    | 698 | +        raise Exception("Something went wrong")
    | 699 | +
    | 700 | +    def test_pass():
    | 701 | +        assert 1
    | 702 | +
    | 703 | +    def test_fail():
    | 704 | +        assert 0, "Not True"
    | 705 | +
    | 706 | +    def test_error(raise_exception):
    | 707 | +        assert 1
    | 708 | +
    | 709 | +    @pytest.mark.skip("WIP")
    | 710 | +    def test_skip():
    | 711 | +        assert 1
    | 712 | +""")
    | 713 | +
    | 714 | +
    | 715 | +def test_if_pytest_harness_parses_report_with_all_kinds_of_statuses(tmp_path, testdir, pytest_harness):
    | 716 | +    # Create JunitXml report
    | 717 | +    report_xml = tmp_path / "results.xml"
    | 718 | +    testdir.makepyfile(EXAMPLE_TESTS)
    | 719 | +    testdir.runpytest("--junitxml", str(report_xml))
    | 720 | +
    | 721 | +    pytest_harness._parse_report_file(report_xml)
    | 722 | +
    | 723 | +    assert pytest_harness.status == "failed"
    | 724 | +    assert pytest_harness.instance.reason == "1/4 pytest scenario(s) failed"
    | 725 | +    assert len(pytest_harness.instance.testcases) == 4
    | 726 | +    assert {tc.name for tc in pytest_harness.instance.testcases} == {
    | 727 | +        "tests.test_foobar.test_pass",
    | 728 | +        "tests.test_foobar.test_fail",
    | 729 | +        "tests.test_foobar.test_error",
    | 730 | +        "tests.test_foobar.test_skip"
    | 731 | +    }
    | 732 | +
    | 733 | +    passed_tc = get_test_case_by_name(pytest_harness.instance.testcases, "tests.test_foobar.test_pass")
    | 734 | +    assert passed_tc.status == "passed"
    | 735 | +    assert passed_tc.reason is None
    | 736 | +    assert passed_tc.output == ""
    | 737 | +    assert isinstance(passed_tc.duration, float)
    | 738 | +
    | 739 | +    failed_tc = get_test_case_by_name(pytest_harness.instance.testcases, "tests.test_foobar.test_fail")
    | 740 | +    assert failed_tc.status == "failed"
    | 741 | +    assert failed_tc.reason == "AssertionError: Not True\nassert 0"
    | 742 | +    assert failed_tc.output != ""
    | 743 | +    assert isinstance(failed_tc.duration, float)
    | 744 | +
    | 745 | +    error_tc = get_test_case_by_name(pytest_harness.instance.testcases, "tests.test_foobar.test_error")
    | 746 | +    assert error_tc.status == "error"
    | 747 | +    assert error_tc.reason == 'failed on setup with "Exception: Something went wrong"'
    | 748 | +    assert error_tc.output != ""
    | 749 | +    assert isinstance(error_tc.duration, float)
    | 750 | +
    | 751 | +    skipped_tc = get_test_case_by_name(pytest_harness.instance.testcases, "tests.test_foobar.test_skip")
    | 752 | +    assert skipped_tc.status == "skipped"
    | 753 | +    assert skipped_tc.reason == 'WIP'
    | 754 | +    assert skipped_tc.output != ""
    | 755 | +    assert isinstance(skipped_tc.duration, float)
    | 756 | +
    | 757 | +
    | 758 | +def test_if_pytest_harness_parses_report_with_passed_and_skipped_tests(tmp_path, testdir, pytest_harness):
    | 759 | +    # Create JunitXml report
    | 760 | +    report_xml = tmp_path / "results.xml"
    | 761 | +    testdir.makepyfile(EXAMPLE_TESTS)
    | 762 | +    testdir.runpytest("-k", "(test_pass or test_skip)", "--junitxml", str(report_xml))
    | 763 | +
    | 764 | +    pytest_harness._parse_report_file(report_xml)
    | 765 | +
    | 766 | +    assert pytest_harness.status == "passed"
    | 767 | +    assert pytest_harness.instance.reason == ""
    | 768 | +    assert len(pytest_harness.instance.testcases) == 2
    | 769 | +    assert {tc.name for tc in pytest_harness.instance.testcases} == {
    | 770 | +        "tests.test_foobar.test_pass",
    | 771 | +        "tests.test_foobar.test_skip"
    | 772 | +    }
    | 773 | +
    | 774 | +
    | 775 | +def test_if_pytest_harness_parses_report_with_passed_and_error_tests(tmp_path, testdir, pytest_harness):
    | 776 | +    # Create JunitXml report
    | 777 | +    report_xml = tmp_path / "results.xml"
    | 778 | +    testdir.makepyfile(EXAMPLE_TESTS)
    | 779 | +    testdir.runpytest("-k", "(test_pass or test_error)", "--junitxml", str(report_xml))
    | 780 | +
    | 781 | +    pytest_harness._parse_report_file(report_xml)
    | 782 | +
    | 783 | +    assert pytest_harness.status == "error"
    | 784 | +    assert pytest_harness.instance.reason == "Error during pytest execution"
    | 785 | +    assert len(pytest_harness.instance.testcases) == 2
    | 786 | +    assert {tc.name for tc in pytest_harness.instance.testcases} == {
    | 787 | +        "tests.test_foobar.test_pass",
    | 788 | +        "tests.test_foobar.test_error"
    | 789 | +    }
    | 790 | +
    | 791 | +def test_if_pytest_harness_parses_report_with_skipped_tests_only(tmp_path, testdir, pytest_harness):
    | 792 | +    # Create JunitXml report
    | 793 | +    report_xml = tmp_path / "results.xml"
    | 794 | +    testdir.makepyfile(EXAMPLE_TESTS)
    | 795 | +    testdir.runpytest("-k", "test_skip", "--junitxml", str(report_xml))
    | 796 | +
    | 797 | +    pytest_harness._parse_report_file(report_xml)
    | 798 | +
    | 799 | +    assert pytest_harness.status == "skipped"
    | 800 | +    assert pytest_harness.instance.reason == ""
    | 801 | +    assert len(pytest_harness.instance.testcases) == 1
    | 802 | +    assert {tc.name for tc in pytest_harness.instance.testcases} == {
    | 803 | +        "tests.test_foobar.test_skip"
    | 804 | +    }
    | 805 | +
    | 806 | +
665 | 807 | TEST_DATA_6 = [(None), ("Test")]
666 | 808 |
667 | 809 |
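Taken together, the new tests pin down how the pytest harness rolls per-test junit outcomes up into an overall status and instance reason. The sketch below summarizes that aggregation rule as implied by the assertions above; it is not the actual Pytest._parse_report_file implementation, and the helper name summarize_junit_statuses is hypothetical:

# Illustrative sketch only (hypothetical helper, not twisterlib code):
# derive an overall (status, reason) pair from per-test junit statuses,
# mirroring the behaviour the assertions above expect.
def summarize_junit_statuses(statuses):
    total = len(statuses)
    failures = sum(1 for s in statuses if s == "failed")
    errors = sum(1 for s in statuses if s == "error")
    if failures:
        # e.g. pass/fail/error/skip -> ("failed", "1/4 pytest scenario(s) failed")
        return "failed", f"{failures}/{total} pytest scenario(s) failed"
    if errors:
        # e.g. pass/error -> ("error", "Error during pytest execution")
        return "error", "Error during pytest execution"
    if statuses and all(s == "skipped" for s in statuses):
        # e.g. skip only -> ("skipped", "")
        return "skipped", ""
    # e.g. pass/skip -> ("passed", "")
    return "passed", ""

Note that a failure takes precedence over a setup error in this reading: the mixed-status test expects an overall "failed" status with reason "1/4 pytest scenario(s) failed" even though one of the four tests also errors during fixture setup.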
|
|