
Commit bd0a57f

twister: Add support for Cpputest
Similar to gTest, CppUTest is a C++ framework for unit tests. This commit adds support for detecting CppUTest console output when verbose mode is enabled (-v).

Signed-off-by: Victor Chavez <[email protected]>
1 parent: 4d0a8c1

4 files changed: 218 additions, 11 deletions

doc/develop/test/twister.rst

Lines changed: 11 additions & 9 deletions
@@ -435,15 +435,17 @@ harness: <string>
 - pytest
 - gtest
 - robot
-
-Harnesses ``ztest``, ``gtest`` and ``console`` are based on parsing of the
-output and matching certain phrases. ``ztest`` and ``gtest`` harnesses look
-for pass/fail/etc. frames defined in those frameworks. Use ``gtest``
-harness if you've already got tests written in the gTest framework and do
-not wish to update them to zTest. The ``console`` harness tells Twister to
-parse a test's text output for a regex defined in the test's YAML file.
-The ``robot`` harness is used to execute Robot Framework test suites
-in the Renode simulation framework.
+- cpputest
+
+Harnesses ``ztest``, ``gtest``, ``cpputest`` and ``console`` are based on parsing of the
+output and matching certain phrases. ``ztest``, ``gtest`` and ``cpputest`` harnesses look
+for pass/fail/etc. frames defined in those frameworks. Use ``gtest`` or ``cpputest``
+harness if you've already got tests written with either of these frameworks and do
+not wish to update them to zTest. If using ``cpputest`` be sure to pass the
+verbose argument ``-v`` to ``CommandLineTestRunner::RunAllTests``.
+The ``console`` harness tells Twister to parse a test's text output
+for a regex defined in the test's YAML file. The ``robot`` harness is used
+to execute Robot Framework test suites in the Renode simulation framework.

 Some widely used harnesses that are not supported yet:
scripts/pylib/twister/twisterlib/harness.py

Lines changed: 76 additions & 0 deletions
@@ -718,6 +718,82 @@ def handle(self, line):
             tc.reason = "Test failure"


+class Cpputest(Harness):
+    TEST_START_PATTERN = r".*(?<!Failure in )TEST\((?P<suite_name>[^,]+), (?P<test_name>[^\)]+)\)"
+    TEST_FAIL_PATTERN = r".*Failure in TEST\((?P<suite_name>[^,]+), (?P<test_name>[^\)]+)\).*"
+    FINISHED_PATTERN = r".*(OK|Errors) \(\d+ tests, \d+ ran, \d+ checks, \d+ ignored, \d+ filtered out, \d+ ms\)"
+
+    def __init__(self):
+        super().__init__()
+        self.tc = None
+        self.has_failures = False
+
+    def handle(self, line):
+        if self.state:
+            return
+
+        # Check if a new test starts
+        test_start_match = re.search(self.TEST_START_PATTERN, line)
+        if test_start_match:
+            # If a new test starts and there is an unfinished test, mark it as passed
+            if self.tc is not None:
+                self.tc.status = "passed"
+                self.tc.output = self.testcase_output
+                self.testcase_output = ""
+                self.tc = None
+
+            suite_name = test_start_match.group("suite_name")
+            test_name = test_start_match.group("test_name")
+            if suite_name not in self.detected_suite_names:
+                self.detected_suite_names.append(suite_name)
+
+            name = "{}.{}.{}".format(self.id, suite_name, test_name)
+
+            tc = self.instance.get_case_by_name(name)
+            assert tc is None, "CppUTest error, {} running twice".format(name)
+
+            tc = self.instance.get_case_or_create(name)
+            self.tc = tc
+            self.tc.status = "started"
+            self.testcase_output += line + "\n"
+            self._match = True
+
+        # Check if a test failure occurred
+        test_fail_match = re.search(self.TEST_FAIL_PATTERN, line)
+        if test_fail_match:
+            suite_name = test_fail_match.group("suite_name")
+            test_name = test_fail_match.group("test_name")
+            name = "{}.{}.{}".format(self.id, suite_name, test_name)
+
+            tc = self.instance.get_case_by_name(name)
+            if tc is not None:
+                tc.status = "failed"
+                self.has_failures = True
+                tc.output = self.testcase_output
+                self.testcase_output = ""
+                self.tc = None
+            return
+
+        # Check if the test run finished
+        finished_match = re.search(self.FINISHED_PATTERN, line)
+        if finished_match:
+            # No need to check result if previously there was a failure
+            # or no tests were run
+            if self.has_failures or self.tc is None:
+                return
+
+            tc = self.instance.get_case_or_create(self.tc.name)
+
+            finish_result = finished_match.group(1)
+            if finish_result == "OK":
+                self.state = "passed"
+                tc.status = "passed"
+            else:
+                self.state = "failed"
+                tc.status = "failed"
+            return
+
+
 class Ztest(Test):
     pass
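
One detail worth calling out in the new ``Cpputest`` harness: the negative lookbehind ``(?<!Failure in )`` in ``TEST_START_PATTERN`` keeps CppUTest's ``Failure in TEST(suite, test)`` report from being mistaken for the start of a new test, so a failure line is only ever handled by ``TEST_FAIL_PATTERN``. A quick standalone check of that behaviour; the log lines and test names here are illustrative, not from a real run:

    import re

    # Same patterns as the Cpputest class above.
    TEST_START_PATTERN = r".*(?<!Failure in )TEST\((?P<suite_name>[^,]+), (?P<test_name>[^\)]+)\)"
    TEST_FAIL_PATTERN = r".*Failure in TEST\((?P<suite_name>[^,]+), (?P<test_name>[^\)]+)\).*"
    FINISHED_PATTERN = (r".*(OK|Errors) \(\d+ tests, \d+ ran, \d+ checks, "
                        r"\d+ ignored, \d+ filtered out, \d+ ms\)")

    start_line = "TEST(MathSuite, AddsTwoNumbers)"
    fail_line = "Failure in TEST(MathSuite, AddsTwoNumbers)"
    ok_summary = "OK (2 tests, 2 ran, 4 checks, 0 ignored, 0 filtered out, 5 ms)"

    assert re.search(TEST_START_PATTERN, start_line)          # a plain TEST(...) line starts a test case
    assert re.search(TEST_START_PATTERN, fail_line) is None   # the lookbehind rejects failure reports
    assert re.search(TEST_FAIL_PATTERN, fail_line).group("suite_name") == "MathSuite"
    assert re.search(FINISHED_PATTERN, ok_summary).group(1) == "OK"  # final summary ends the run
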

scripts/pylib/twister/twisterlib/testinstance.py

Lines changed: 1 addition & 1 deletion
@@ -176,7 +176,7 @@ def get_case_or_create(self, name):
     def testsuite_runnable(testsuite, fixtures):
         can_run = False
         # console harness allows us to run the test and capture data.
-        if testsuite.harness in [ 'console', 'ztest', 'pytest', 'test', 'gtest', 'robot']:
+        if testsuite.harness in [ 'console', 'ztest', 'pytest', 'test', 'gtest', 'robot', 'cpputest']:
             can_run = True
         # if we have a fixture that is also being supplied on the
         # command-line, then we need to run the test, not just build it.
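
The effect of this one-line change can be checked directly. A minimal sketch, assuming a Zephyr checkout with ZEPHYR_BASE set (as the unit tests below also require), assuming ``testsuite_runnable`` is reachable as a static method on ``TestInstance`` (as the surrounding hunk suggests), and assuming it returns the ``can_run`` flag it computes:

    import os
    import sys
    from unittest import mock

    # Hypothetical quick check: a suite declaring harness 'cpputest' should now be runnable.
    # Assumes ZEPHYR_BASE is set and twister's Python dependencies are installed.
    sys.path.insert(0, os.path.join(os.getenv("ZEPHYR_BASE"), "scripts/pylib/twister"))
    from twisterlib.testinstance import TestInstance

    suite = mock.Mock()
    suite.harness = "cpputest"
    suite.harness_config = {}

    print(TestInstance.testsuite_runnable(suite, fixtures=[]))  # expected: True
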

scripts/tests/twister/test_harness.py

Lines changed: 130 additions & 1 deletion
@@ -16,7 +16,7 @@
 ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
 sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))

-from twisterlib.harness import Gtest, Bsim
+from twisterlib.harness import Gtest, Bsim, Cpputest
 from twisterlib.harness import Harness
 from twisterlib.harness import Robot
 from twisterlib.harness import Test
@@ -38,6 +38,17 @@
     "[00:00:00.000,000] <inf> label: [==========] Done running all tests."
 )

+SAMPLE_CPPUTEST_NO_TESTS = (
+    "Errors (ran nothing, 0 tests, 0 ran, 0 checks, 0 ignored, 0 filtered out, 0 ms)")
+SAMPLE_CPPUTEST_START_FMT = "[00:00:00.000,000] <inf> label: TEST({suite}, {test})"
+SAMPLE_CPPUTEST_END_PASS_FMT = "[00:00:00.000,000] <inf> label: OK ({tests} tests" \
+    ", {ran} ran, {checks} checks, {ignored} ignored," \
+    " {filtered} filtered out, {time} ms)"
+SAMPLE_CPPUTEST_FAIL_FMT = "[00:00:00.000,000] <inf> label: Failure in TEST({suite}, {test})"
+SAMPLE_CPPUTEST_END_FAIL_FMT = "[00:00:00.000,000] <inf> label: Errors({failures} failures" \
+    ", {tests} tests, {ran} ran, {checks} checks, {ignored} ignored," \
+    " {filtered} filtered out, {time} ms)"
+

 def process_logs(harness, logs):
     for line in logs:
@@ -795,6 +806,124 @@ def test_gtest_repeated_run(gtest):
     )


+@pytest.fixture
+def cpputest(tmp_path):
+    mock_platform = mock.Mock()
+    mock_platform.name = "mock_platform"
+    mock_platform.normalized_name = "mock_platform"
+    mock_testsuite = mock.Mock()
+    mock_testsuite.name = "mock_testsuite"
+    mock_testsuite.detailed_test_id = True
+    mock_testsuite.id = "id"
+    mock_testsuite.testcases = []
+    mock_testsuite.harness_config = {}
+    outdir = tmp_path / 'cpputest_out'
+    outdir.mkdir()
+
+    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)
+
+    harness = Cpputest()
+    harness.configure(instance)
+    return harness
+
+
+def test_cpputest_start_test_no_suites_detected(cpputest):
+    process_logs(cpputest, [SAMPLE_CPPUTEST_NO_TESTS])
+    assert len(cpputest.detected_suite_names) == 0
+    assert cpputest.state is None
+
+
+def test_cpputest_start_test(cpputest):
+    process_logs(
+        cpputest,
+        [
+            SAMPLE_CPPUTEST_START_FMT.format(
+                suite="suite_name", test="test_name"
+            ),
+        ],
+    )
+    assert cpputest.state is None
+    assert len(cpputest.detected_suite_names) == 1
+    assert cpputest.detected_suite_names[0] == "suite_name"
+    assert cpputest.instance.get_case_by_name("id.suite_name.test_name") is not None
+    assert (
+        cpputest.instance.get_case_by_name("id.suite_name.test_name").status == "started"
+    )
+
+
+def test_cpputest_one_test_passed(cpputest):
+    process_logs(
+        cpputest,
+        [
+            SAMPLE_CPPUTEST_START_FMT.format(
+                suite="suite_name", test="test_name"
+            ),
+            SAMPLE_CPPUTEST_END_PASS_FMT.format(
+                tests=1, ran=1, checks=5, ignored=0, filtered=0, time=10
+            )
+        ],
+    )
+    assert len(cpputest.detected_suite_names) == 1
+    assert cpputest.detected_suite_names[0] == "suite_name"
+    assert cpputest.instance.get_case_by_name("id.suite_name.test_name") is not None
+    assert cpputest.instance.get_case_by_name("id.suite_name.test_name").status == "passed"
+
+
+def test_cpputest_multiple_test_passed(cpputest):
+    logs = []
+    total_passed_tests = 5
+    for i in range(0, total_passed_tests):
+        logs.append(SAMPLE_CPPUTEST_START_FMT.format(suite="suite_name",
+                                                     test="test_name_%d" % i))
+    logs.append(SAMPLE_CPPUTEST_END_PASS_FMT.format(
+        tests=total_passed_tests, ran=total_passed_tests, checks=5, ignored=0, filtered=0, time=10
+    ))
+    process_logs(cpputest, logs)
+    assert len(cpputest.detected_suite_names) == 1
+    assert cpputest.detected_suite_names[0] == "suite_name"
+    for i in range(0, total_passed_tests):
+        test_name = "id.suite_name.test_name_%d" % i
+        assert cpputest.instance.get_case_by_name(test_name) is not None
+        assert cpputest.instance.get_case_by_name(test_name).status == "passed"
+
+
+def test_cpputest_test_failed(cpputest):
+    process_logs(
+        cpputest,
+        [
+            SAMPLE_CPPUTEST_START_FMT.format(
+                suite="suite_name", test="test_name"
+            ),
+            SAMPLE_CPPUTEST_FAIL_FMT.format(
+                suite="suite_name", test="test_name"
+            )
+        ],
+    )
+    assert cpputest.state is None
+    assert len(cpputest.detected_suite_names) == 1
+    assert cpputest.detected_suite_names[0] == "suite_name"
+    assert cpputest.instance.get_case_by_name("id.suite_name.test_name") is not None
+    assert cpputest.instance.get_case_by_name("id.suite_name.test_name").status == "failed"
+
+
+def test_cpputest_test_repeated(cpputest):
+    with pytest.raises(
+        AssertionError,
+        match=r"CppUTest error, id.suite_name.test_name running twice",
+    ):
+        process_logs(
+            cpputest,
+            [
+                SAMPLE_CPPUTEST_START_FMT.format(
+                    suite="suite_name", test="test_name"
+                ),
+                SAMPLE_CPPUTEST_START_FMT.format(
+                    suite="suite_name", test="test_name"
+                ),
+            ],
+        )
+
+
 def test_bsim_build(monkeypatch, tmp_path):
     mocked_instance = mock.Mock()
     build_dir = tmp_path / 'build_dir'
