Skip to content

Commit 14684ad

Browse files
committed
separate subtest failures to give a better indication in test analysis
Signed-off-by: Roni Kishner <[email protected]>
1 parent be5c7ed commit 14684ad

File tree

6 files changed

+74
-27
lines changed

6 files changed

+74
-27
lines changed

AUTHORS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -395,6 +395,7 @@ Roland Puntaier
395395
Romain Dorgueil
396396
Roman Bolshakov
397397
Ronny Pfannschmidt
398+
Roni Kishner
398399
Ross Lawley
399400
Ruaridh Williamson
400401
Russel Winder

changelog/13986.bugfix.rst

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
Show subtest failures separately from normal test failures in the final test summary.
2+
3+
Subtest failures are now reported separately as "subtests failed" instead of being counted as regular "failed" tests, providing clearer statistics.
4+
5+
For example, a test with 3 subtests where 1 fails and 2 pass now shows:
6+
``1 failed, 2 subtests passed, 1 subtests failed`` instead of ``2 failed, 2 subtests passed``.

src/_pytest/pytester.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -510,8 +510,9 @@ def _config_for_test() -> Generator[Config]:
510510

511511
# Regex to match the session duration string in the summary: "74.34s".
512512
rex_session_duration = re.compile(r"\d+\.\d\ds")
513-
# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped".
514-
rex_outcome = re.compile(r"(\d+) (\w+)")
513+
# Regex to match all the counts and phrases in the summary line:
514+
# "34 passed, 111 skipped, 3 subtests passed, 1 subtests failed".
515+
rex_outcome = re.compile(r"(\d+) ([\w\s]+?)(?=,| in|$)")
515516

516517

517518
@final
@@ -578,14 +579,17 @@ def parse_summary_nouns(cls, lines) -> dict[str, int]:
578579
for line in reversed(lines):
579580
if rex_session_duration.search(line):
580581
outcomes = rex_outcome.findall(line)
581-
ret = {noun: int(count) for (count, noun) in outcomes}
582+
ret = {noun.strip(): int(count) for (count, noun) in outcomes}
582583
break
583584
else:
584585
raise ValueError("Pytest terminal summary report not found")
585586

586587
to_plural = {
587588
"warning": "warnings",
588589
"error": "errors",
590+
"subtest failed": "subtests failed",
591+
"subtest passed": "subtests passed",
592+
"subtest skipped": "subtests skipped",
589593
}
590594
return {to_plural.get(k, k): v for k, v in ret.items()}
591595

src/_pytest/subtests.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -387,7 +387,7 @@ def pytest_report_teststatus(
387387
return category, short, f"{status}{description}"
388388

389389
if report.failed:
390-
return outcome, "u", f"SUBFAILED{description}"
390+
return "subtests failed", "u", f"SUBFAILED{description}"
391391
else:
392392
if report.passed:
393393
if quiet:

src/_pytest/terminal.py

Lines changed: 38 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88

99
import argparse
1010
from collections import Counter
11+
from collections import defaultdict
1112
from collections.abc import Callable
1213
from collections.abc import Generator
1314
from collections.abc import Mapping
@@ -1167,6 +1168,35 @@ def summary_failures(self) -> None:
11671168
style = self.config.option.tbstyle
11681169
self.summary_failures_combined("failed", "FAILURES", style=style)
11691170

1171+
def _add_subtests_to_failed_reports(
1172+
self, failed_reports: list[BaseReport]
1173+
) -> list[BaseReport]:
1174+
"""Combine failed reports with subtest failed reports, ordering subtests before main tests.
1175+
1176+
For each test nodeid, subtest failures are shown before the main test failure.
1177+
"""
1178+
subtest_failed_reports = self.getreports("subtests failed")
1179+
subtest_reports_by_nodeid: dict[str, list[BaseReport]] = defaultdict(list)
1180+
for rep in subtest_failed_reports:
1181+
subtest_reports_by_nodeid[rep.nodeid].append(rep)
1182+
1183+
ordered_reports: list[BaseReport] = []
1184+
seen_nodeids: set[str] = set()
1185+
1186+
for rep in failed_reports:
1187+
nodeid = rep.nodeid
1188+
if nodeid not in seen_nodeids:
1189+
seen_nodeids.add(nodeid)
1190+
if nodeid in subtest_reports_by_nodeid:
1191+
ordered_reports.extend(subtest_reports_by_nodeid[nodeid])
1192+
ordered_reports.append(rep)
1193+
1194+
for nodeid, subtests in subtest_reports_by_nodeid.items():
1195+
if nodeid not in seen_nodeids:
1196+
ordered_reports.extend(subtests)
1197+
1198+
return ordered_reports
1199+
11701200
def summary_xfailures(self) -> None:
11711201
show_tb = self.config.option.xfail_tb
11721202
style = self.config.option.tbstyle if show_tb else "no"
@@ -1183,6 +1213,10 @@ def summary_failures_combined(
11831213
if style != "no":
11841214
if not needed_opt or self.hasopt(needed_opt):
11851215
reports: list[BaseReport] = self.getreports(which_reports)
1216+
1217+
if which_reports == "failed":
1218+
reports = self._add_subtests_to_failed_reports(reports)
1219+
11861220
if not reports:
11871221
return
11881222
self.write_sep("=", sep_title)
@@ -1272,6 +1306,9 @@ def show_simple(lines: list[str], *, stat: str) -> None:
12721306
if not failed:
12731307
return
12741308
config = self.config
1309+
# For failed reports, also include subtests failed reports
1310+
if stat == "failed":
1311+
failed = self._add_subtests_to_failed_reports(failed)
12751312
for rep in failed:
12761313
color = _color_for_type.get(stat, _color_for_type_default)
12771314
line = _get_line_with_reprcrash_message(
@@ -1380,7 +1417,7 @@ def _get_main_color(self) -> tuple[str, list[str]]:
13801417

13811418
def _determine_main_color(self, unknown_type_seen: bool) -> str:
13821419
stats = self.stats
1383-
if "failed" in stats or "error" in stats:
1420+
if "failed" in stats or "error" in stats or "subtests failed" in stats:
13841421
main_color = "red"
13851422
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
13861423
main_color = "yellow"

testing/test_subtests.py

Lines changed: 21 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ def test_zaz(subtests):
5555
[
5656
"test_*.py uFuF. * [[]100%[]]",
5757
*summary_lines,
58-
"* 4 failed, 1 passed in *",
58+
"* 2 failed, 1 passed, 2 subtests failed in *",
5959
]
6060
)
6161

@@ -69,7 +69,7 @@ def test_zaz(subtests):
6969
"test_*.py::test_zaz SUBPASSED[[]zaz subtest[]] * [[]100%[]]",
7070
"test_*.py::test_zaz PASSED * [[]100%[]]",
7171
*summary_lines,
72-
"* 4 failed, 1 passed, 1 subtests passed in *",
72+
"* 2 failed, 1 passed, 1 subtests passed, 2 subtests failed in *",
7373
]
7474
)
7575
pytester.makeini(
@@ -87,7 +87,7 @@ def test_zaz(subtests):
8787
"test_*.py::test_bar FAILED * [[] 66%[]]",
8888
"test_*.py::test_zaz PASSED * [[]100%[]]",
8989
*summary_lines,
90-
"* 4 failed, 1 passed in *",
90+
"* 2 failed, 2 subtests failed, 1 passed in *",
9191
]
9292
)
9393
result.stdout.no_fnmatch_line("test_*.py::test_zaz SUBPASSED[[]zaz subtest[]]*")
@@ -307,7 +307,7 @@ def test_foo(subtests, x):
307307
"*.py::test_foo[[]1[]] SUBFAILED[[]custom[]] (i=1) *[[]100%[]]",
308308
"*.py::test_foo[[]1[]] FAILED *[[]100%[]]",
309309
"contains 1 failed subtest",
310-
"* 4 failed, 4 subtests passed in *",
310+
"* 2 failed, 4 subtests passed, 2 subtests failed in *",
311311
]
312312
)
313313

@@ -325,7 +325,7 @@ def test_foo(subtests, x):
325325
"*.py::test_foo[[]1[]] SUBFAILED[[]custom[]] (i=1) *[[]100%[]]",
326326
"*.py::test_foo[[]1[]] FAILED *[[]100%[]]",
327327
"contains 1 failed subtest",
328-
"* 4 failed in *",
328+
"* 2 failed, 2 subtests failed in *",
329329
]
330330
)
331331

@@ -344,7 +344,7 @@ def test_foo(subtests):
344344
result = pytester.runpytest("-v")
345345
result.stdout.fnmatch_lines(
346346
[
347-
"* 2 failed, 2 subtests passed in *",
347+
"* 1 failed, 2 subtests passed, 1 subtests failed in *",
348348
]
349349
)
350350

@@ -365,7 +365,7 @@ def test_foo(subtests):
365365
result.stdout.fnmatch_lines(
366366
[
367367
"*AssertionError: top-level failure",
368-
"* 2 failed, 2 subtests passed in *",
368+
"* 1 failed, 2 subtests passed, 1 subtests failed in *",
369369
]
370370
)
371371

@@ -386,14 +386,14 @@ def test_foo(subtests):
386386
result = pytester.runpytest("-v")
387387
result.stdout.fnmatch_lines(
388388
[
389-
"* 2 failed, 2 subtests passed in *",
389+
"* 1 failed, 2 subtests passed, 1 subtests failed in *",
390390
]
391391
)
392392

393393
result = pytester.runpytest("-v", flag)
394394
result.stdout.fnmatch_lines(
395395
[
396-
"* 2 failed, 2 subtests passed in *",
396+
"* 1 failed, 2 subtests passed, 1 subtests failed in *",
397397
]
398398
)
399399

@@ -427,7 +427,7 @@ def test_zaz(self):
427427
result = pytester.runpytest()
428428
result.stdout.fnmatch_lines(
429429
[
430-
"* 3 failed, 2 passed in *",
430+
"* 1 failed, 2 passed, 1 subtests passed, 2 subtests failed in *",
431431
]
432432
)
433433

@@ -578,9 +578,7 @@ def test_foo(self):
578578
result.stdout.fnmatch_lines(
579579
[
580580
"*.py u. * [[]100%[]]",
581-
"*=== short test summary info ===*",
582-
"SUBFAILED[[]subtest 2[]] *.py::T::test_foo - AssertionError: fail subtest 2",
583-
"* 1 failed, 1 passed in *",
581+
"* 1 passed, 1 subtests failed in *",
584582
]
585583
)
586584

@@ -590,9 +588,9 @@ def test_foo(self):
590588
"*.py::T::test_foo SUBSKIPPED[[]subtest 1[]] (skip subtest 1) * [[]100%[]]",
591589
"*.py::T::test_foo SUBFAILED[[]subtest 2[]] * [[]100%[]]",
592590
"*.py::T::test_foo PASSED * [[]100%[]]",
591+
"*=== short test summary info ===*",
593592
"SUBSKIPPED[[]subtest 1[]] [[]1[]] *.py:*: skip subtest 1",
594-
"SUBFAILED[[]subtest 2[]] *.py::T::test_foo - AssertionError: fail subtest 2",
595-
"* 1 failed, 1 passed, 1 skipped in *",
593+
"* 1 passed, 1 skipped, 1 subtests failed in *",
596594
]
597595
)
598596

@@ -607,9 +605,7 @@ def test_foo(self):
607605
[
608606
"*.py::T::test_foo SUBFAILED[[]subtest 2[]] * [[]100%[]]",
609607
"*.py::T::test_foo PASSED * [[]100%[]]",
610-
"*=== short test summary info ===*",
611-
r"SUBFAILED[[]subtest 2[]] *.py::T::test_foo - AssertionError: fail subtest 2",
612-
r"* 1 failed, 1 passed in *",
608+
"* 1 passed, 1 subtests failed in *",
613609
]
614610
)
615611
result.stdout.no_fnmatch_line(
@@ -814,7 +810,7 @@ def test(subtests):
814810
result = pytester.runpytest("-p no:logging")
815811
result.stdout.fnmatch_lines(
816812
[
817-
"*2 failed in*",
813+
"*1 failed, 1 subtests failed in*",
818814
]
819815
)
820816
result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*")
@@ -899,12 +895,15 @@ def test_foo(subtests):
899895
"""
900896
)
901897
result = pytester.runpytest("--exitfirst")
902-
assert result.parseoutcomes()["failed"] == 2
898+
outcomes = result.parseoutcomes()
899+
assert outcomes["failed"] == 1
900+
assert outcomes["subtests failed"] == 1
903901
result.stdout.fnmatch_lines(
904902
[
905-
"SUBFAILED*[[]sub1[]] *.py::test_foo - assert False*",
903+
"*=== short test summary info ===*",
906904
"FAILED *.py::test_foo - assert False",
907-
"* stopping after 2 failures*",
905+
"*stopping after 2 failures*",
906+
"*1 failed, 1 subtests failed*",
908907
],
909908
consecutive=True,
910909
)

0 commit comments

Comments
 (0)