Skip to content

Commit 46b3844

Browse files
committed
separate subtest failures to give a better indication during test analysis
Signed-off-by: Roni Kishner <[email protected]>
1 parent ee652d6 commit 46b3844

File tree

6 files changed

+37
-27
lines changed

6 files changed

+37
-27
lines changed

AUTHORS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -395,6 +395,7 @@ Roland Puntaier
395395
Romain Dorgueil
396396
Roman Bolshakov
397397
Ronny Pfannschmidt
398+
Roni Kishner
398399
Ross Lawley
399400
Ruaridh Williamson
400401
Russel Winder

changelog/13986.bugfix.rst

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
Show subtest failures separately from normal test failures in the final test summary.
2+
3+
Subtest failures are now reported separately as "subtests failed" instead of being counted as regular "failed" tests, providing clearer statistics.
4+
5+
For example, a test with 3 subtests where 1 fails and 2 pass now shows:
6+
``1 failed, 2 subtests passed, 1 subtests failed`` instead of ``2 failed, 2 subtests passed``.

src/_pytest/pytester.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -510,8 +510,9 @@ def _config_for_test() -> Generator[Config]:
510510

511511
# Regex to match the session duration string in the summary: "74.34s".
512512
rex_session_duration = re.compile(r"\d+\.\d\ds")
513-
# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped".
514-
rex_outcome = re.compile(r"(\d+) (\w+)")
513+
# Regex to match all the counts and phrases in the summary line:
514+
# "34 passed, 111 skipped, 3 subtests passed, 1 subtests failed".
515+
rex_outcome = re.compile(r"(\d+) ([\w\s]+?)(?=,| in|$)")
515516

516517

517518
@final
@@ -578,14 +579,17 @@ def parse_summary_nouns(cls, lines) -> dict[str, int]:
578579
for line in reversed(lines):
579580
if rex_session_duration.search(line):
580581
outcomes = rex_outcome.findall(line)
581-
ret = {noun: int(count) for (count, noun) in outcomes}
582+
ret = {noun.strip(): int(count) for (count, noun) in outcomes}
582583
break
583584
else:
584585
raise ValueError("Pytest terminal summary report not found")
585586

586587
to_plural = {
587588
"warning": "warnings",
588589
"error": "errors",
590+
"subtest failed": "subtests failed",
591+
"subtest passed": "subtests passed",
592+
"subtest skipped": "subtests skipped",
589593
}
590594
return {to_plural.get(k, k): v for k, v in ret.items()}
591595

src/_pytest/subtests.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -387,7 +387,7 @@ def pytest_report_teststatus(
387387
return category, short, f"{status}{description}"
388388

389389
if report.failed:
390-
return outcome, "u", f"SUBFAILED{description}"
390+
return "subtests failed", "u", f"SUBFAILED{description}"
391391
else:
392392
if report.passed:
393393
if quiet:

src/_pytest/terminal.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1380,7 +1380,7 @@ def _get_main_color(self) -> tuple[str, list[str]]:
13801380

13811381
def _determine_main_color(self, unknown_type_seen: bool) -> str:
13821382
stats = self.stats
1383-
if "failed" in stats or "error" in stats:
1383+
if "failed" in stats or "error" in stats or "subtests failed" in stats:
13841384
main_color = "red"
13851385
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
13861386
main_color = "yellow"

testing/test_subtests.py

Lines changed: 21 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ def test_zaz(subtests):
5555
[
5656
"test_*.py uFuF. * [[]100%[]]",
5757
*summary_lines,
58-
"* 4 failed, 1 passed in *",
58+
"* 2 failed, 1 passed, 2 subtests failed in *",
5959
]
6060
)
6161

@@ -69,7 +69,7 @@ def test_zaz(subtests):
6969
"test_*.py::test_zaz SUBPASSED[[]zaz subtest[]] * [[]100%[]]",
7070
"test_*.py::test_zaz PASSED * [[]100%[]]",
7171
*summary_lines,
72-
"* 4 failed, 1 passed, 1 subtests passed in *",
72+
"* 2 failed, 1 passed, 1 subtests passed, 2 subtests failed in *",
7373
]
7474
)
7575
pytester.makeini(
@@ -87,7 +87,7 @@ def test_zaz(subtests):
8787
"test_*.py::test_bar FAILED * [[] 66%[]]",
8888
"test_*.py::test_zaz PASSED * [[]100%[]]",
8989
*summary_lines,
90-
"* 4 failed, 1 passed in *",
90+
"* 2 failed, 2 subtests failed, 1 passed in *",
9191
]
9292
)
9393
result.stdout.no_fnmatch_line("test_*.py::test_zaz SUBPASSED[[]zaz subtest[]]*")
@@ -307,7 +307,7 @@ def test_foo(subtests, x):
307307
"*.py::test_foo[[]1[]] SUBFAILED[[]custom[]] (i=1) *[[]100%[]]",
308308
"*.py::test_foo[[]1[]] FAILED *[[]100%[]]",
309309
"contains 1 failed subtest",
310-
"* 4 failed, 4 subtests passed in *",
310+
"* 2 failed, 4 subtests passed, 2 subtests failed in *",
311311
]
312312
)
313313

@@ -325,7 +325,7 @@ def test_foo(subtests, x):
325325
"*.py::test_foo[[]1[]] SUBFAILED[[]custom[]] (i=1) *[[]100%[]]",
326326
"*.py::test_foo[[]1[]] FAILED *[[]100%[]]",
327327
"contains 1 failed subtest",
328-
"* 4 failed in *",
328+
"* 2 failed, 2 subtests failed in *",
329329
]
330330
)
331331

@@ -344,7 +344,7 @@ def test_foo(subtests):
344344
result = pytester.runpytest("-v")
345345
result.stdout.fnmatch_lines(
346346
[
347-
"* 2 failed, 2 subtests passed in *",
347+
"* 1 failed, 2 subtests passed, 1 subtests failed in *",
348348
]
349349
)
350350

@@ -365,7 +365,7 @@ def test_foo(subtests):
365365
result.stdout.fnmatch_lines(
366366
[
367367
"*AssertionError: top-level failure",
368-
"* 2 failed, 2 subtests passed in *",
368+
"* 1 failed, 2 subtests passed, 1 subtests failed in *",
369369
]
370370
)
371371

@@ -386,14 +386,14 @@ def test_foo(subtests):
386386
result = pytester.runpytest("-v")
387387
result.stdout.fnmatch_lines(
388388
[
389-
"* 2 failed, 2 subtests passed in *",
389+
"* 1 failed, 2 subtests passed, 1 subtests failed in *",
390390
]
391391
)
392392

393393
result = pytester.runpytest("-v", flag)
394394
result.stdout.fnmatch_lines(
395395
[
396-
"* 2 failed, 2 subtests passed in *",
396+
"* 1 failed, 2 subtests passed, 1 subtests failed in *",
397397
]
398398
)
399399

@@ -427,7 +427,7 @@ def test_zaz(self):
427427
result = pytester.runpytest()
428428
result.stdout.fnmatch_lines(
429429
[
430-
"* 3 failed, 2 passed in *",
430+
"* 1 failed, 2 passed, 1 subtests passed, 2 subtests failed in *",
431431
]
432432
)
433433

@@ -578,9 +578,7 @@ def test_foo(self):
578578
result.stdout.fnmatch_lines(
579579
[
580580
"*.py u. * [[]100%[]]",
581-
"*=== short test summary info ===*",
582-
"SUBFAILED[[]subtest 2[]] *.py::T::test_foo - AssertionError: fail subtest 2",
583-
"* 1 failed, 1 passed in *",
581+
"* 1 passed, 1 subtests failed in *",
584582
]
585583
)
586584

@@ -590,9 +588,9 @@ def test_foo(self):
590588
"*.py::T::test_foo SUBSKIPPED[[]subtest 1[]] (skip subtest 1) * [[]100%[]]",
591589
"*.py::T::test_foo SUBFAILED[[]subtest 2[]] * [[]100%[]]",
592590
"*.py::T::test_foo PASSED * [[]100%[]]",
591+
"*=== short test summary info ===*",
593592
"SUBSKIPPED[[]subtest 1[]] [[]1[]] *.py:*: skip subtest 1",
594-
"SUBFAILED[[]subtest 2[]] *.py::T::test_foo - AssertionError: fail subtest 2",
595-
"* 1 failed, 1 passed, 1 skipped in *",
593+
"* 1 passed, 1 skipped, 1 subtests failed in *",
596594
]
597595
)
598596

@@ -607,9 +605,7 @@ def test_foo(self):
607605
[
608606
"*.py::T::test_foo SUBFAILED[[]subtest 2[]] * [[]100%[]]",
609607
"*.py::T::test_foo PASSED * [[]100%[]]",
610-
"*=== short test summary info ===*",
611-
r"SUBFAILED[[]subtest 2[]] *.py::T::test_foo - AssertionError: fail subtest 2",
612-
r"* 1 failed, 1 passed in *",
608+
"* 1 passed, 1 subtests failed in *",
613609
]
614610
)
615611
result.stdout.no_fnmatch_line(
@@ -814,7 +810,7 @@ def test(subtests):
814810
result = pytester.runpytest("-p no:logging")
815811
result.stdout.fnmatch_lines(
816812
[
817-
"*2 failed in*",
813+
"*1 failed, 1 subtests failed in*",
818814
]
819815
)
820816
result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*")
@@ -899,12 +895,15 @@ def test_foo(subtests):
899895
"""
900896
)
901897
result = pytester.runpytest("--exitfirst")
902-
assert result.parseoutcomes()["failed"] == 2
898+
outcomes = result.parseoutcomes()
899+
assert outcomes["failed"] == 1
900+
assert outcomes["subtests failed"] == 1
903901
result.stdout.fnmatch_lines(
904902
[
905-
"SUBFAILED*[[]sub1[]] *.py::test_foo - assert False*",
903+
"*=== short test summary info ===*",
906904
"FAILED *.py::test_foo - assert False",
907-
"* stopping after 2 failures*",
905+
"*stopping after 2 failures*",
906+
"*1 failed, 1 subtests failed*",
908907
],
909908
consecutive=True,
910909
)

0 commit comments

Comments
 (0)