Skip to content

Commit 0fda296

Browse files
committed
Separate subtest failures to give a better indication in test analysis
Signed-off-by: Roni Kishner <[email protected]>
1 parent 6b6502e commit 0fda296

File tree

6 files changed

+28
-19
lines changed

6 files changed

+28
-19
lines changed

AUTHORS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -395,6 +395,7 @@ Roland Puntaier
395395
Romain Dorgueil
396396
Roman Bolshakov
397397
Ronny Pfannschmidt
398+
Roni Kishner
398399
Ross Lawley
399400
Ruaridh Williamson
400401
Russel Winder

changelog/13986.bugfix.rst

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
Fixed double-counting of subtest failures in the final test summary. Subtest failures are now reported separately as "subtests failed" instead of being counted as regular "failed" tests, providing clearer statistics. For example, a test with 3 subtests where 1 fails and 2 pass now shows "1 failed, 1 subtests failed, 2 subtests passed" instead of "2 failed, 2 subtests passed".
2+

src/_pytest/pytester.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -510,8 +510,8 @@ def _config_for_test() -> Generator[Config]:
510510

511511
# Regex to match the session duration string in the summary: "74.34s".
512512
rex_session_duration = re.compile(r"\d+\.\d\ds")
513-
# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped".
514-
rex_outcome = re.compile(r"(\d+) (\w+)")
513+
# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped, 3 subtests passed, 1 subtests failed".
514+
rex_outcome = re.compile(r"(\d+) ([\w\s]+?)(?=,| in|$)")
515515

516516

517517
@final
@@ -578,14 +578,17 @@ def parse_summary_nouns(cls, lines) -> dict[str, int]:
578578
for line in reversed(lines):
579579
if rex_session_duration.search(line):
580580
outcomes = rex_outcome.findall(line)
581-
ret = {noun: int(count) for (count, noun) in outcomes}
581+
ret = {noun.strip(): int(count) for (count, noun) in outcomes}
582582
break
583583
else:
584584
raise ValueError("Pytest terminal summary report not found")
585585

586586
to_plural = {
587587
"warning": "warnings",
588588
"error": "errors",
589+
"subtest failed": "subtests failed",
590+
"subtest passed": "subtests passed",
591+
"subtest skipped": "subtests skipped",
589592
}
590593
return {to_plural.get(k, k): v for k, v in ret.items()}
591594

src/_pytest/subtests.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -387,7 +387,7 @@ def pytest_report_teststatus(
387387
return category, short, f"{status}{description}"
388388

389389
if report.failed:
390-
return outcome, "u", f"SUBFAILED{description}"
390+
return "subtests failed", "u", f"SUBFAILED{description}"
391391
else:
392392
if report.passed:
393393
if quiet:

src/_pytest/terminal.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1387,7 +1387,7 @@ def _get_main_color(self) -> tuple[str, list[str]]:
13871387

13881388
def _determine_main_color(self, unknown_type_seen: bool) -> str:
13891389
stats = self.stats
1390-
if "failed" in stats or "error" in stats:
1390+
if "failed" in stats or "error" in stats or "subtests failed" in stats:
13911391
main_color = "red"
13921392
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
13931393
main_color = "yellow"

testing/test_subtests.py

Lines changed: 17 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ def test_zaz(subtests):
5555
[
5656
"test_*.py uFuF. * [[]100%[]]",
5757
*summary_lines,
58-
"* 4 failed, 1 passed in *",
58+
"* 2 failed, 1 passed, 2 subtests failed in *",
5959
]
6060
)
6161

@@ -69,7 +69,7 @@ def test_zaz(subtests):
6969
"test_*.py::test_zaz SUBPASSED[[]zaz subtest[]] * [[]100%[]]",
7070
"test_*.py::test_zaz PASSED * [[]100%[]]",
7171
*summary_lines,
72-
"* 4 failed, 1 passed, 1 subtests passed in *",
72+
"* 2 failed, 1 passed, 1 subtests passed, 2 subtests failed in *",
7373
]
7474
)
7575
pytester.makeini(
@@ -87,7 +87,7 @@ def test_zaz(subtests):
8787
"test_*.py::test_bar FAILED * [[] 66%[]]",
8888
"test_*.py::test_zaz PASSED * [[]100%[]]",
8989
*summary_lines,
90-
"* 4 failed, 1 passed in *",
90+
"* 2 failed, 2 subtests failed, 1 passed in *",
9191
]
9292
)
9393
result.stdout.no_fnmatch_line("test_*.py::test_zaz SUBPASSED[[]zaz subtest[]]*")
@@ -307,7 +307,7 @@ def test_foo(subtests, x):
307307
"*.py::test_foo[[]1[]] SUBFAILED[[]custom[]] (i=1) *[[]100%[]]",
308308
"*.py::test_foo[[]1[]] FAILED *[[]100%[]]",
309309
"contains 1 failed subtest",
310-
"* 4 failed, 4 subtests passed in *",
310+
"* 2 failed, 4 subtests passed, 2 subtests failed in *",
311311
]
312312
)
313313

@@ -325,7 +325,7 @@ def test_foo(subtests, x):
325325
"*.py::test_foo[[]1[]] SUBFAILED[[]custom[]] (i=1) *[[]100%[]]",
326326
"*.py::test_foo[[]1[]] FAILED *[[]100%[]]",
327327
"contains 1 failed subtest",
328-
"* 4 failed in *",
328+
"* 2 failed, 2 subtests failed in *",
329329
]
330330
)
331331

@@ -344,7 +344,7 @@ def test_foo(subtests):
344344
result = pytester.runpytest("-v")
345345
result.stdout.fnmatch_lines(
346346
[
347-
"* 2 failed, 2 subtests passed in *",
347+
"* 1 failed, 2 subtests passed, 1 subtests failed in *",
348348
]
349349
)
350350

@@ -365,7 +365,7 @@ def test_foo(subtests):
365365
result.stdout.fnmatch_lines(
366366
[
367367
"*AssertionError: top-level failure",
368-
"* 2 failed, 2 subtests passed in *",
368+
"* 1 failed, 2 subtests passed, 1 subtests failed in *",
369369
]
370370
)
371371

@@ -386,7 +386,7 @@ def test_foo(subtests):
386386
result = pytester.runpytest("-v")
387387
result.stdout.fnmatch_lines(
388388
[
389-
"* 2 failed, 2 subtests passed in *",
389+
"* 1 failed, 2 subtests passed, 1 subtests failed in *",
390390
]
391391
)
392392

@@ -427,7 +427,7 @@ def test_zaz(self):
427427
result = pytester.runpytest()
428428
result.stdout.fnmatch_lines(
429429
[
430-
"* 3 failed, 2 passed in *",
430+
"* 1 failed, 2 passed, 1 subtests passed, 2 subtests failed in *",
431431
]
432432
)
433433

@@ -814,7 +814,7 @@ def test(subtests):
814814
result = pytester.runpytest("-p no:logging")
815815
result.stdout.fnmatch_lines(
816816
[
817-
"*2 failed in*",
817+
"*1 failed, 1 subtests failed in*",
818818
]
819819
)
820820
result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*")
@@ -899,12 +899,15 @@ def test_foo(subtests):
899899
"""
900900
)
901901
result = pytester.runpytest("--exitfirst")
902-
assert result.parseoutcomes()["failed"] == 2
902+
outcomes = result.parseoutcomes()
903+
assert outcomes["failed"] == 1
904+
assert outcomes["subtests failed"] == 1
903905
result.stdout.fnmatch_lines(
904906
[
905-
"SUBFAILED*[[]sub1[]] *.py::test_foo - assert False*",
906-
"FAILED *.py::test_foo - assert False",
907-
"* stopping after 2 failures*",
907+
"*=== short test summary info ===*",
908+
"*FAILED*test_foo*",
909+
"*stopping after 2 failures*",
910+
"*1 failed, 1 subtests failed*",
908911
],
909912
consecutive=True,
910913
)

0 commit comments

Comments
 (0)