From c3dc9f48293bc614b88af1e8d07b0a8481586685 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 20 Sep 2025 08:42:53 -0300 Subject: [PATCH 1/6] Add pytest-subtests files changes In addition, enable the plugin in `pytest/__init__.py` and `config/__init__.py`. --- src/_pytest/config/__init__.py | 1 + src/_pytest/subtests.py | 502 ++++++++++++++++++++ src/pytest/__init__.py | 2 + testing/test_subtests.py | 839 +++++++++++++++++++++++++++++++++ 4 files changed, 1344 insertions(+) create mode 100644 src/_pytest/subtests.py create mode 100644 testing/test_subtests.py diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index 38fb1ee6d27..ff776cb98d5 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -271,6 +271,7 @@ def directory_arg(path: str, optname: str) -> str: "logging", "reports", "faulthandler", + "subtests", ) builtin_plugins = { diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py new file mode 100644 index 00000000000..d4be44f2d4e --- /dev/null +++ b/src/_pytest/subtests.py @@ -0,0 +1,502 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterator +from collections.abc import Mapping +from contextlib import contextmanager +from contextlib import ExitStack +from contextlib import nullcontext +import sys +import time +from typing import Any +from typing import ContextManager +from typing import TYPE_CHECKING +from unittest import TestCase + +import attr +import pluggy + +from _pytest._code import ExceptionInfo +from _pytest.capture import CaptureFixture +from _pytest.capture import FDCapture +from _pytest.capture import SysCapture +from _pytest.fixtures import SubRequest +from _pytest.logging import catching_logs +from _pytest.logging import LogCaptureHandler +from _pytest.reports import TestReport +from _pytest.runner import CallInfo +from _pytest.runner import check_interactive_exception +from _pytest.unittest import TestCaseFunction +import pytest + + +if TYPE_CHECKING: + from types import TracebackType + from typing import Literal + + +def pytest_addoption(parser: pytest.Parser) -> None: + group = parser.getgroup("subtests") + group.addoption( + "--no-subtests-shortletter", + action="store_true", + dest="no_subtests_shortletter", + default=False, + help="Disables subtest output 'dots' in non-verbose mode (EXPERIMENTAL)", + ) + group.addoption( + "--no-subtests-reports", + action="store_true", + dest="no_subtests_reports", + default=False, + help="Disables subtest output unless it's a failed subtest (EXPERIMENTAL)", + ) + + +@attr.s +class SubTestContext: + msg: str | None = attr.ib() + kwargs: dict[str, Any] = attr.ib() + + +@attr.s(init=False) +class SubTestReport(TestReport): # type: ignore[misc] + context: SubTestContext = attr.ib() + + @property + def head_line(self) -> str: + _, _, domain = self.location + return f"{domain} {self.sub_test_description()}" + + def sub_test_description(self) -> str: + parts = [] + if isinstance(self.context.msg, str): + parts.append(f"[{self.context.msg}]") + if self.context.kwargs: + params_desc = ", ".join( + f"{k}={v!r}" for (k, v) in sorted(self.context.kwargs.items()) + ) + parts.append(f"({params_desc})") + return " ".join(parts) or "()" + + def _to_json(self) -> dict: + data = super()._to_json() + del data["context"] + data["_report_type"] = "SubTestReport" + data["_subtest.context"] = attr.asdict(self.context) + return data + + @classmethod + def _from_json(cls, 
reportdict: dict[str, Any]) -> SubTestReport: + report = super()._from_json(reportdict) + context_data = reportdict["_subtest.context"] + report.context = SubTestContext( + msg=context_data["msg"], kwargs=context_data["kwargs"] + ) + return report + + @classmethod + def _from_test_report(cls, test_report: TestReport) -> SubTestReport: + return super()._from_json(test_report._to_json()) + + +def _addSkip(self: TestCaseFunction, testcase: TestCase, reason: str) -> None: + from unittest.case import _SubTest # type: ignore[attr-defined] + + if isinstance(testcase, _SubTest): + self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] + if self._excinfo is not None: + exc_info = self._excinfo[-1] + self.addSubTest(testcase.test_case, testcase, exc_info) # type: ignore[attr-defined] + else: + # For python < 3.11: the non-subtest skips have to be added by `_originaladdSkip` only after all subtest + # failures are processed by `_addSubTest`. (`self.instance._outcome` has no attribute `skipped/errors` anymore.) + # For python < 3.11, we also need to check if `self.instance._outcome` is `None` (this happens if the test + # class/method is decorated with `unittest.skip`, see #173). + if sys.version_info < (3, 11) and self.instance._outcome is not None: + subtest_errors = [ + x + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + if len(subtest_errors) == 0: + self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] + else: + self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] + + +def _addSubTest( + self: TestCaseFunction, + test_case: Any, + test: TestCase, + exc_info: tuple[type[BaseException], BaseException, TracebackType] | None, +) -> None: + msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] + call_info = make_call_info( + ExceptionInfo(exc_info, _ispytest=True) if exc_info else None, + start=0, + stop=0, + duration=0, + when="call", + ) + report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) + sub_report = SubTestReport._from_test_report(report) + sub_report.context = SubTestContext(msg, dict(test.params)) # type: ignore[attr-defined] + self.ihook.pytest_runtest_logreport(report=sub_report) + if check_interactive_exception(call_info, sub_report): + self.ihook.pytest_exception_interact( + node=self, call=call_info, report=sub_report + ) + + # For python < 3.11: add non-subtest skips once all subtest failures are processed by # `_addSubTest`. + if sys.version_info < (3, 11): + from unittest.case import _SubTest # type: ignore[attr-defined] + + non_subtest_skip = [ + (x, y) + for x, y in self.instance._outcome.skipped + if not isinstance(x, _SubTest) + ] + subtest_errors = [ + (x, y) + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + # Check if we have non-subtest skips: if there are also sub failures, non-subtest skips are not treated in + # `_addSubTest` and have to be added using `_originaladdSkip` after all subtest failures are processed. 
+ if len(non_subtest_skip) > 0 and len(subtest_errors) > 0: + # Make sure we have processed the last subtest failure + last_subset_error = subtest_errors[-1] + if exc_info is last_subset_error[-1]: + # Add non-subtest skips (as they could not be treated in `_addSkip`) + for testcase, reason in non_subtest_skip: + self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] + + +def pytest_configure(config: pytest.Config) -> None: + TestCaseFunction.addSubTest = _addSubTest # type: ignore[attr-defined] + TestCaseFunction.failfast = False # type: ignore[attr-defined] + # This condition is to prevent `TestCaseFunction._originaladdSkip` being assigned again in a subprocess from a + # parent python process where `addSkip` is already `_addSkip`. A such case is when running tests in + # `test_subtests.py` where `pytester.runpytest` is used. Without this guard condition, `_originaladdSkip` is + # assigned to `_addSkip` which is wrong as well as causing an infinite recursion in some cases. + if not hasattr(TestCaseFunction, "_originaladdSkip"): + TestCaseFunction._originaladdSkip = TestCaseFunction.addSkip # type: ignore[attr-defined] + TestCaseFunction.addSkip = _addSkip # type: ignore[method-assign] + + # Hack (#86): the terminal does not know about the "subtests" + # status, so it will by default turn the output to yellow. + # This forcibly adds the new 'subtests' status. + import _pytest.terminal + + new_types = tuple( + f"subtests {outcome}" for outcome in ("passed", "failed", "skipped") + ) + # We need to check if we are not re-adding because we run our own tests + # with pytester in-process mode, so this will be called multiple times. + if new_types[0] not in _pytest.terminal.KNOWN_TYPES: + _pytest.terminal.KNOWN_TYPES = _pytest.terminal.KNOWN_TYPES + new_types # type: ignore[assignment] + + _pytest.terminal._color_for_type.update( + { + f"subtests {outcome}": _pytest.terminal._color_for_type[outcome] + for outcome in ("passed", "failed", "skipped") + if outcome in _pytest.terminal._color_for_type + } + ) + + +def pytest_unconfigure() -> None: + if hasattr(TestCaseFunction, "addSubTest"): + del TestCaseFunction.addSubTest + if hasattr(TestCaseFunction, "failfast"): + del TestCaseFunction.failfast + if hasattr(TestCaseFunction, "_originaladdSkip"): + TestCaseFunction.addSkip = TestCaseFunction._originaladdSkip # type: ignore[method-assign] + del TestCaseFunction._originaladdSkip + + +@pytest.fixture +def subtests(request: SubRequest) -> Generator[SubTests, None, None]: + """Provides subtests functionality.""" + capmam = request.node.config.pluginmanager.get_plugin("capturemanager") + if capmam is not None: + suspend_capture_ctx = capmam.global_and_fixture_disabled + else: + suspend_capture_ctx = nullcontext + yield SubTests(request.node.ihook, suspend_capture_ctx, request) + + +@attr.s +class SubTests: + ihook: pluggy.HookRelay = attr.ib() + suspend_capture_ctx: Callable[[], ContextManager] = attr.ib() + request: SubRequest = attr.ib() + + @property + def item(self) -> pytest.Item: + return self.request.node + + def test( + self, + msg: str | None = None, + **kwargs: Any, + ) -> _SubTestContextManager: + """ + Context manager for subtests, capturing exceptions raised inside the subtest scope and handling + them through the pytest machinery. + + Usage: + + .. 
code-block:: python + + with subtests.test(msg="subtest"): + assert 1 == 1 + """ + return _SubTestContextManager( + self.ihook, + msg, + kwargs, + request=self.request, + suspend_capture_ctx=self.suspend_capture_ctx, + ) + + +@attr.s(auto_attribs=True) +class _SubTestContextManager: + """ + Context manager for subtests, capturing exceptions raised inside the subtest scope and handling + them through the pytest machinery. + + Note: initially this logic was implemented directly in SubTests.test() as a @contextmanager, however + it is not possible to control the output fully when exiting from it due to an exception when + in --exitfirst mode, so this was refactored into an explicit context manager class (#134). + """ + + ihook: pluggy.HookRelay + msg: str | None + kwargs: dict[str, Any] + suspend_capture_ctx: Callable[[], ContextManager] + request: SubRequest + + def __enter__(self) -> None: + __tracebackhide__ = True + + self._start = time.time() + self._precise_start = time.perf_counter() + self._exc_info = None + + self._exit_stack = ExitStack() + self._captured_output = self._exit_stack.enter_context( + capturing_output(self.request) + ) + self._captured_logs = self._exit_stack.enter_context( + capturing_logs(self.request) + ) + + def __exit__( + self, + exc_type: type[Exception] | None, + exc_val: Exception | None, + exc_tb: TracebackType | None, + ) -> bool: + __tracebackhide__ = True + try: + if exc_val is not None: + exc_info = ExceptionInfo.from_exception(exc_val) + else: + exc_info = None + finally: + self._exit_stack.close() + + precise_stop = time.perf_counter() + duration = precise_stop - self._precise_start + stop = time.time() + + call_info = make_call_info( + exc_info, start=self._start, stop=stop, duration=duration, when="call" + ) + report = self.ihook.pytest_runtest_makereport( + item=self.request.node, call=call_info + ) + sub_report = SubTestReport._from_test_report(report) + sub_report.context = SubTestContext(self.msg, self.kwargs.copy()) + + self._captured_output.update_report(sub_report) + self._captured_logs.update_report(sub_report) + + with self.suspend_capture_ctx(): + self.ihook.pytest_runtest_logreport(report=sub_report) + + if check_interactive_exception(call_info, sub_report): + self.ihook.pytest_exception_interact( + node=self.request.node, call=call_info, report=sub_report + ) + + if exc_val is not None: + if self.request.session.shouldfail: + return False + return True + + +def make_call_info( + exc_info: ExceptionInfo[BaseException] | None, + *, + start: float, + stop: float, + duration: float, + when: Literal["collect", "setup", "call", "teardown"], +) -> CallInfo: + return CallInfo( + None, + exc_info, + start=start, + stop=stop, + duration=duration, + when=when, + _ispytest=True, + ) + + +@contextmanager +def capturing_output(request: SubRequest) -> Iterator[Captured]: + option = request.config.getoption("capture", None) + + # capsys or capfd are active, subtest should not capture. 
+ capman = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture_active = getattr(capman, "_capture_fixture", None) + + if option == "sys" and not capture_fixture_active: + with ignore_pytest_private_warning(): + fixture = CaptureFixture(SysCapture, request) + elif option == "fd" and not capture_fixture_active: + with ignore_pytest_private_warning(): + fixture = CaptureFixture(FDCapture, request) + else: + fixture = None + + if fixture is not None: + fixture._start() + + captured = Captured() + try: + yield captured + finally: + if fixture is not None: + out, err = fixture.readouterr() + fixture.close() + captured.out = out + captured.err = err + + +@contextmanager +def capturing_logs( + request: SubRequest, +) -> Iterator[CapturedLogs | NullCapturedLogs]: + logging_plugin = request.config.pluginmanager.getplugin("logging-plugin") + if logging_plugin is None: + yield NullCapturedLogs() + else: + handler = LogCaptureHandler() + handler.setFormatter(logging_plugin.formatter) + + captured_logs = CapturedLogs(handler) + with catching_logs(handler): + yield captured_logs + + +@contextmanager +def ignore_pytest_private_warning() -> Generator[None, None, None]: + import warnings + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "A private pytest class or function was used.", + category=pytest.PytestDeprecationWarning, + ) + yield + + +@attr.s +class Captured: + out = attr.ib(default="", type=str) + err = attr.ib(default="", type=str) + + def update_report(self, report: pytest.TestReport) -> None: + if self.out: + report.sections.append(("Captured stdout call", self.out)) + if self.err: + report.sections.append(("Captured stderr call", self.err)) + + +class CapturedLogs: + def __init__(self, handler: LogCaptureHandler) -> None: + self._handler = handler + + def update_report(self, report: pytest.TestReport) -> None: + report.sections.append(("Captured log call", self._handler.stream.getvalue())) + + +class NullCapturedLogs: + def update_report(self, report: pytest.TestReport) -> None: + pass + + +def pytest_report_to_serializable(report: pytest.TestReport) -> dict[str, Any] | None: + if isinstance(report, SubTestReport): + return report._to_json() + return None + + +def pytest_report_from_serializable(data: dict[str, Any]) -> SubTestReport | None: + if data.get("_report_type") == "SubTestReport": + return SubTestReport._from_json(data) + return None + + +@pytest.hookimpl(tryfirst=True) +def pytest_report_teststatus( + report: pytest.TestReport, + config: pytest.Config, +) -> tuple[str, str, str | Mapping[str, bool]] | None: + if report.when != "call" or not isinstance(report, SubTestReport): + return None + + outcome = report.outcome + description = report.sub_test_description() + no_output = ("", "", "") + + if hasattr(report, "wasxfail"): + if config.option.no_subtests_reports and outcome != "skipped": + return no_output + elif outcome == "skipped": + category = "xfailed" + short = "y" # x letter is used for regular xfail, y for subtest xfail + status = "SUBXFAIL" + elif outcome == "passed": + category = "xpassed" + short = "Y" # X letter is used for regular xpass, Y for subtest xpass + status = "SUBXPASS" + else: + # This should not normally happen, unless some plugin is setting wasxfail without + # the correct outcome. Pytest expects the call outcome to be either skipped or passed in case of xfail. + # Let's pass this report to the next hook. 
+ return None + short = "" if config.option.no_subtests_shortletter else short + return f"subtests {category}", short, f"{description} {status}" + + if config.option.no_subtests_reports and outcome != "failed": + return no_output + elif report.passed: + short = "" if config.option.no_subtests_shortletter else "," + return f"subtests {outcome}", short, f"{description} SUBPASS" + elif report.skipped: + short = "" if config.option.no_subtests_shortletter else "-" + return outcome, short, f"{description} SUBSKIP" + elif outcome == "failed": + short = "" if config.option.no_subtests_shortletter else "u" + return outcome, short, f"{description} SUBFAIL" + + return None diff --git a/src/pytest/__init__.py b/src/pytest/__init__.py index 297b524bcc2..823269e3169 100644 --- a/src/pytest/__init__.py +++ b/src/pytest/__init__.py @@ -71,6 +71,7 @@ from _pytest.runner import CallInfo from _pytest.stash import Stash from _pytest.stash import StashKey +from _pytest.subtests import SubTests from _pytest.terminal import TerminalReporter from _pytest.terminal import TestShortLogReport from _pytest.tmpdir import TempPathFactory @@ -146,6 +147,7 @@ "Session", "Stash", "StashKey", + "SubTests", "TempPathFactory", "TempdirFactory", "TerminalReporter", diff --git a/testing/test_subtests.py b/testing/test_subtests.py new file mode 100644 index 00000000000..4bc48451a27 --- /dev/null +++ b/testing/test_subtests.py @@ -0,0 +1,839 @@ +from __future__ import annotations + +from pathlib import Path +import sys +from typing import Literal + +import pytest + + +IS_PY311 = sys.version_info[:2] >= (3, 11) + + +@pytest.mark.parametrize("mode", ["normal", "xdist"]) +class TestFixture: + """ + Tests for ``subtests`` fixture. + """ + + @pytest.fixture + def simple_script(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + assert i % 2 == 0 + """ + ) + + def test_simple_terminal_normal( + self, + simple_script: None, + pytester: pytest.Pytester, + mode: Literal["normal", "xdist"], + ) -> None: + if mode == "normal": + result = pytester.runpytest() + expected_lines = ["collected 1 item"] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1") + expected_lines = ["1 worker [1 item]"] + + expected_lines += [ + "* test_foo [[]custom[]] (i=1) *", + "* test_foo [[]custom[]] (i=3) *", + "* 2 failed, 1 passed, 3 subtests passed in *", + ] + result.stdout.fnmatch_lines(expected_lines) + + def test_simple_terminal_verbose( + self, + simple_script: None, + pytester: pytest.Pytester, + mode: Literal["normal", "xdist"], + ) -> None: + if mode == "normal": + result = pytester.runpytest("-v") + expected_lines = [ + "*collected 1 item", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=0) SUBPASS *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=1) SUBFAIL *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=2) SUBPASS *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=3) SUBFAIL *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=4) SUBPASS *100%*", + "test_simple_terminal_verbose.py::test_foo PASSED *100%*", + ] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1", "-v") + expected_lines = [ + "1 worker [1 item]", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* 
test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + ] + + expected_lines += [ + "* test_foo [[]custom[]] (i=1) *", + "* test_foo [[]custom[]] (i=3) *", + "* 2 failed, 1 passed, 3 subtests passed in *", + ] + result.stdout.fnmatch_lines(expected_lines) + + def test_skip( + self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] + ) -> None: + pytester.makepyfile( + """ + import pytest + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + if i % 2 == 0: + pytest.skip('even number') + """ + ) + if mode == "normal": + result = pytester.runpytest() + expected_lines = ["collected 1 item"] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1") + expected_lines = ["1 worker [1 item]"] + expected_lines += ["* 1 passed, 3 skipped, 2 subtests passed in *"] + result.stdout.fnmatch_lines(expected_lines) + + def test_xfail( + self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] + ) -> None: + pytester.makepyfile( + """ + import pytest + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + if i % 2 == 0: + pytest.xfail('even number') + """ + ) + if mode == "normal": + result = pytester.runpytest() + expected_lines = ["collected 1 item"] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1") + expected_lines = ["1 worker [1 item]"] + expected_lines += ["* 1 passed, 2 subtests passed, 3 subtests xfailed in *"] + result.stdout.fnmatch_lines(expected_lines) + + def test_typing_exported( + self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] + ) -> None: + pytester.makepyfile( + """ + from pytest import SubTests + + def test_typing_exported(subtests: SubTests) -> None: + assert isinstance(subtests, SubTests) + """ + ) + if mode == "normal": + result = pytester.runpytest() + expected_lines = ["collected 1 item"] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1") + expected_lines = ["1 worker [1 item]"] + expected_lines += ["* 1 passed *"] + result.stdout.fnmatch_lines(expected_lines) + + def test_no_subtests_reports( + self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] + ) -> None: + pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + pass + """ + ) + # Without `--no-subtests-reports`, subtests are reported normally. + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "*collected 1 item*", + "test_no_subtests_reports.py::test_foo * (i=0) SUBPASS*", + "*test_no_subtests_reports.py::test_foo PASSED*", + "* 1 passed, 5 subtests passed in*", + ] + ) + + # With `--no-subtests-reports`, passing subtests are no longer reported. + result = pytester.runpytest("-v", "--no-subtests-reports") + result.stdout.fnmatch_lines( + [ + "*collected 1 item*", + "*test_no_subtests_reports.py::test_foo PASSED*", + "* 1 passed in*", + ] + ) + result.stdout.no_fnmatch_line("*SUBPASS*") + + # Rewrite the test file so the tests fail. Even with the flag, failed subtests are still reported. 
+ pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + assert False + """ + ) + result = pytester.runpytest("-v", "--no-subtests-reports") + result.stdout.fnmatch_lines( + [ + "*collected 1 item*", + "test_no_subtests_reports.py::test_foo * (i=0) SUBFAIL*", + "*test_no_subtests_reports.py::test_foo PASSED*", + "* 5 failed, 1 passed in*", + ] + ) + + +class TestSubTest: + """ + Test Test.subTest functionality. + """ + + @pytest.fixture + def simple_script(self, pytester: pytest.Pytester) -> Path: + return pytester.makepyfile( + """ + from unittest import TestCase, main + + class T(TestCase): + + def test_foo(self): + for i in range(5): + with self.subTest(msg="custom", i=i): + self.assertEqual(i % 2, 0) + + if __name__ == '__main__': + main() + """ + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_simple_terminal_normal( + self, + simple_script: Path, + pytester: pytest.Pytester, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + suffix = ".test_foo" if IS_PY311 else "" + if runner == "unittest": + result = pytester.run(sys.executable, simple_script) + result.stderr.fnmatch_lines( + [ + f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)", + "AssertionError: 1 != 0", + f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)", + "AssertionError: 1 != 0", + "Ran 1 test in *", + "FAILED (failures=2)", + ] + ) + else: + if runner == "pytest-normal": + result = pytester.runpytest(simple_script) + expected_lines = ["collected 1 item"] + else: + assert runner == "pytest-xdist" + pytest.importorskip("xdist") + result = pytester.runpytest(simple_script, "-n1") + expected_lines = ["1 worker [1 item]"] + result.stdout.fnmatch_lines( + expected_lines + + [ + "* T.test_foo [[]custom[]] (i=1) *", + "E * AssertionError: 1 != 0", + "* T.test_foo [[]custom[]] (i=3) *", + "E * AssertionError: 1 != 0", + "* 2 failed, 1 passed, 3 subtests passed in *", + ] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_simple_terminal_verbose( + self, + simple_script: Path, + pytester: pytest.Pytester, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + suffix = ".test_foo" if IS_PY311 else "" + if runner == "unittest": + result = pytester.run(sys.executable, simple_script, "-v") + result.stderr.fnmatch_lines( + [ + f"test_foo (__main__.T{suffix}) ... 
", + f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)", + "AssertionError: 1 != 0", + f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)", + "AssertionError: 1 != 0", + "Ran 1 test in *", + "FAILED (failures=2)", + ] + ) + else: + if runner == "pytest-normal": + result = pytester.runpytest(simple_script, "-v") + expected_lines = [ + "*collected 1 item", + "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=1) SUBFAIL *100%*", + "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=3) SUBFAIL *100%*", + "test_simple_terminal_verbose.py::T::test_foo PASSED *100%*", + ] + else: + assert runner == "pytest-xdist" + pytest.importorskip("xdist") + result = pytester.runpytest(simple_script, "-n1", "-v") + expected_lines = [ + "1 worker [1 item]", + "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", + "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", + "*gw0*100%* PASSED test_simple_terminal_verbose.py::T::test_foo*", + ] + result.stdout.fnmatch_lines( + expected_lines + + [ + "* T.test_foo [[]custom[]] (i=1) *", + "E * AssertionError: 1 != 0", + "* T.test_foo [[]custom[]] (i=3) *", + "E * AssertionError: 1 != 0", + "* 2 failed, 1 passed, 3 subtests passed in *", + ] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_skip( + self, + pytester: pytest.Pytester, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + p = pytester.makepyfile( + """ + from unittest import TestCase, main + + class T(TestCase): + + def test_foo(self): + for i in range(5): + with self.subTest(msg="custom", i=i): + if i % 2 == 0: + self.skipTest('even number') + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (skipped=3)"]) + else: + pytest.xfail("Not producing the expected results (#5)") + result = pytester.runpytest(p) # type:ignore[unreachable] + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 skipped, 1 passed in *"] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + @pytest.mark.xfail(reason="Not producing the expected results (#5)") + def test_xfail( + self, + pytester: pytest.Pytester, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + p = pytester.makepyfile( + """ + import pytest + from unittest import expectedFailure, TestCase, main + + class T(TestCase): + @expectedFailure + def test_foo(self): + for i in range(5): + with self.subTest(msg="custom", i=i): + if i % 2 == 0: + raise pytest.xfail('even number') + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (expected failures=3)"]) + else: + result = pytester.runpytest(p) + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 xfailed, 1 passed in *"] + ) + + @pytest.mark.parametrize("runner", ["pytest-normal"]) + def test_only_original_skip_is_called( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + runner: Literal["pytest-normal"], + ) -> None: + """Regression test for #173.""" + monkeypatch.setenv("COLUMNS", "200") + p = pytester.makepyfile( + """ + import unittest + from unittest import TestCase, main + + @unittest.skip("skip this test") + class T(unittest.TestCase): + def test_foo(self): + assert 1 == 2 + + if __name__ == '__main__': + main() + """ + ) + result = pytester.runpytest(p, "-v", "-rsf") + 
result.stdout.fnmatch_lines( + ["SKIPPED [1] test_only_original_skip_is_called.py:6: skip this test"] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_skip_with_failure( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + monkeypatch.setenv("COLUMNS", "200") + p = pytester.makepyfile( + """ + import pytest + from unittest import expectedFailure, TestCase, main + + class T(TestCase): + def test_foo(self): + for i in range(10): + with self.subTest("custom message", i=i): + if i < 4: + self.skipTest(f"skip subtest i={i}") + assert i < 4 + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + if sys.version_info < (3, 11): + result.stderr.re_match_lines( + [ + r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=4\).*", + r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=9\).*", + r"Ran 1 test in .*", + r"FAILED \(failures=6, skipped=4\)", + ] + ) + else: + result.stderr.re_match_lines( + [ + r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=4\).*", + r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=9\).*", + r"Ran 1 test in .*", + r"FAILED \(failures=6, skipped=4\)", + ] + ) + elif runner == "pytest-normal": + result = pytester.runpytest(p, "-v", "-rsf") + result.stdout.re_match_lines( + [ + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIP \(skip subtest i=0\) .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIP \(skip subtest i=3\) .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=9\) SUBFAIL .*", + "test_skip_with_failure.py::T::test_foo PASSED .*", + r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=0", + r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=3", + r"[custom message] (i=4) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 4 < 4", + r"[custom message] (i=9) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 9 < 4", + r".* 6 failed, 1 passed, 4 skipped in .*", + ] + ) + else: + pytest.xfail("Not producing the expected results (#5)") + result = pytester.runpytest(p) # type:ignore[unreachable] + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 skipped, 1 passed in *"] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_skip_with_failure_and_non_subskip( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + monkeypatch.setenv("COLUMNS", "200") + p = pytester.makepyfile( + """ + import pytest + from unittest import expectedFailure, TestCase, main + + class T(TestCase): + def test_foo(self): + for i in range(10): + with self.subTest("custom message", i=i): + if i < 4: + self.skipTest(f"skip subtest i={i}") + assert i < 4 + self.skipTest(f"skip the test") + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + if sys.version_info < (3, 11): + result.stderr.re_match_lines( + [ + r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=4\).*", + r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=9\).*", + r"Ran 1 test in .*", + r"FAILED \(failures=6, skipped=5\)", + ] + ) 
+ else: + result.stderr.re_match_lines( + [ + r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=4\).*", + r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=9\).*", + r"Ran 1 test in .*", + r"FAILED \(failures=6, skipped=5\)", + ] + ) + elif runner == "pytest-normal": + result = pytester.runpytest(p, "-v", "-rsf") + # The `(i=0)` is not correct but it's given by pytest `TerminalReporter` without `--no-fold-skipped` + result.stdout.re_match_lines( + [ + r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", + r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\)", + r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip subtest i=3", + r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip the test", + r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", + r".* 6 failed, 5 skipped in .*", + ] + ) + # Check with `--no-fold-skipped` (which gives the correct information). + if sys.version_info >= (3, 10) and pytest.version_tuple[:2] >= (8, 3): + result = pytester.runpytest(p, "-v", "--no-fold-skipped", "-rsf") + result.stdout.re_match_lines( + [ + r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", + r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\).*", + r"\[custom message\] \(i=3\) SUBSKIP test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip subtest i=3", + r"SKIPPED test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip the test", + r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", + r".* 6 failed, 5 skipped in .*", + ] + ) + else: + pytest.xfail("Not producing the expected results (#5)") + result = pytester.runpytest(p) # type:ignore[unreachable] + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 skipped, 1 passed in *"] + ) + + +class TestCapture: + def create_file(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import sys + def test(subtests): + print() + print('start test') + + with subtests.test(i='A'): + print("hello stdout A") + print("hello stderr A", file=sys.stderr) + assert 0 + + with subtests.test(i='B'): + print("hello stdout B") + print("hello stderr B", file=sys.stderr) + assert 0 + + print('end test') + assert 0 + """ + ) + + def test_capturing(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "*__ test (i='A') __*", + "*Captured stdout call*", + "hello stdout A", + "*Captured stderr call*", + "hello stderr A", + "*__ test (i='B') __*", + "*Captured stdout call*", + "hello stdout B", + "*Captured stderr call*", + "hello stderr B", + "*__ test __*", + "*Captured stdout call*", + "start test", + "end test", + ] + ) + + def test_no_capture(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest("-s") + result.stdout.fnmatch_lines( + [ + "start test", + "hello stdout A", + "uhello stdout B", + "uend test", + "*__ test (i='A') __*", + "*__ test (i='B') __*", + "*__ test __*", + ] + ) + result.stderr.fnmatch_lines(["hello stderr A", "hello stderr B"]) + + @pytest.mark.parametrize("fixture", ["capsys", "capfd"]) + def test_capture_with_fixture( + self, pytester: pytest.Pytester, fixture: Literal["capsys", "capfd"] + ) -> None: + pytester.makepyfile( 
+ rf""" + import sys + + def test(subtests, {fixture}): + print('start test') + + with subtests.test(i='A'): + print("hello stdout A") + print("hello stderr A", file=sys.stderr) + + out, err = {fixture}.readouterr() + assert out == 'start test\nhello stdout A\n' + assert err == 'hello stderr A\n' + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "*1 passed*", + ] + ) + + +class TestLogging: + def create_file(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import logging + + def test_foo(subtests): + logging.info("before") + + with subtests.test("sub1"): + print("sub1 stdout") + logging.info("sub1 logging") + + with subtests.test("sub2"): + print("sub2 stdout") + logging.info("sub2 logging") + assert False + """ + ) + + def test_capturing(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest("--log-level=INFO") + result.stdout.fnmatch_lines( + [ + "*___ test_foo [[]sub2[]] __*", + "*-- Captured stdout call --*", + "sub2 stdout", + "*-- Captured log call ---*", + "INFO root:test_capturing.py:12 sub2 logging", + "*== short test summary info ==*", + ] + ) + + def test_caplog(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import logging + + def test(subtests, caplog): + caplog.set_level(logging.INFO) + logging.info("start test") + + with subtests.test("sub1"): + logging.info("inside %s", "subtest1") + + assert len(caplog.records) == 2 + assert caplog.records[0].getMessage() == "start test" + assert caplog.records[1].getMessage() == "inside subtest1" + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "*1 passed*", + ] + ) + + def test_no_logging(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import logging + + def test(subtests): + logging.info("start log line") + + with subtests.test("sub passing"): + logging.info("inside %s", "passing log line") + + with subtests.test("sub failing"): + logging.info("inside %s", "failing log line") + assert False + + logging.info("end log line") + """ + ) + result = pytester.runpytest("-p no:logging") + result.stdout.fnmatch_lines( + [ + "*1 passed*", + ] + ) + result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*") + + +class TestDebugging: + """Check --pdb support for subtests fixture and TestCase.subTest.""" + + class _FakePdb: + """ + Fake debugger class implementation that tracks which methods were called on it. 
+ """ + + quitting: bool = False + calls: list[str] = [] + + def __init__(self, *_: object, **__: object) -> None: + self.calls.append("init") + + def reset(self) -> None: + self.calls.append("reset") + + def interaction(self, *_: object) -> None: + self.calls.append("interaction") + + @pytest.fixture(autouse=True) + def cleanup_calls(self) -> None: + self._FakePdb.calls.clear() + + def test_pdb_fixture( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + pytester.makepyfile( + """ + def test(subtests): + with subtests.test(): + assert 0 + """ + ) + self.runpytest_and_check_pdb(pytester, monkeypatch) + + def test_pdb_unittest( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + pytester.makepyfile( + """ + from unittest import TestCase + class Test(TestCase): + def test(self): + with self.subTest(): + assert 0 + """ + ) + self.runpytest_and_check_pdb(pytester, monkeypatch) + + def runpytest_and_check_pdb( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + # Install the fake pdb implementation in _pytest.subtests so we can reference + # it in the command line (any module would do). + import _pytest.subtests + + monkeypatch.setattr( + _pytest.subtests, "_CustomPdb", self._FakePdb, raising=False + ) + result = pytester.runpytest("--pdb", "--pdbcls=_pytest.subtests:_CustomPdb") + + # Ensure pytest entered in debugging mode when encountering the failing + # assert. + result.stdout.fnmatch_lines("*entering PDB*") + assert self._FakePdb.calls == ["init", "reset", "interaction"] + + +def test_exitfirst(pytester: pytest.Pytester) -> None: + """ + Validate that when passing --exitfirst the test exits after the first failed subtest. + """ + pytester.makepyfile( + """ + def test_foo(subtests): + with subtests.test("sub1"): + assert False + + with subtests.test("sub2"): + assert False + """ + ) + result = pytester.runpytest("--exitfirst") + assert result.parseoutcomes()["failed"] == 2 + result.stdout.fnmatch_lines( + [ + "*[[]sub1[]] SUBFAIL test_exitfirst.py::test_foo - assert False*", + "FAILED test_exitfirst.py::test_foo - assert False", + "* stopping after 2 failures*", + ], + consecutive=True, + ) + result.stdout.no_fnmatch_line("*sub2*") # sub2 not executed. 
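The `subtests` fixture added in this first patch is exercised extensively by `testing/test_subtests.py` above. As a quick orientation before the follow-up patches, here is a minimal usage sketch (the file name `test_example.py` is illustrative and not part of the patch):

    # test_example.py -- minimal sketch of the `subtests` fixture introduced above.
    # Each failing `with subtests.test(...)` block is reported as an individual
    # subtest failure (SUBFAIL) while the remaining iterations and the enclosing
    # test keep running.
    def test_even_numbers(subtests):
        for i in range(5):
            with subtests.test(msg="custom", i=i):
                assert i % 2 == 0  # fails for i=1 and i=3

Run with plain `pytest`, this reports `2 failed, 1 passed, 3 subtests passed`, matching the expectations asserted in `TestFixture.test_simple_terminal_normal` above.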
From e856b0b9eb6a4073397add5978c86b9cc5fc780e Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 20 Sep 2025 08:48:09 -0300 Subject: [PATCH 2/6] subtests: remove direct pytest import --- src/_pytest/subtests.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index d4be44f2d4e..3a6502d6718 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -21,14 +21,19 @@ from _pytest.capture import CaptureFixture from _pytest.capture import FDCapture from _pytest.capture import SysCapture +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config import Parser +from _pytest.fixtures import fixture from _pytest.fixtures import SubRequest from _pytest.logging import catching_logs from _pytest.logging import LogCaptureHandler +from _pytest.nodes import Item from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception from _pytest.unittest import TestCaseFunction -import pytest +from _pytest.warning_types import PytestDeprecationWarning if TYPE_CHECKING: @@ -36,7 +41,7 @@ from typing import Literal -def pytest_addoption(parser: pytest.Parser) -> None: +def pytest_addoption(parser: Parser) -> None: group = parser.getgroup("subtests") group.addoption( "--no-subtests-shortletter", @@ -174,7 +179,7 @@ def _addSubTest( self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] -def pytest_configure(config: pytest.Config) -> None: +def pytest_configure(config: Config) -> None: TestCaseFunction.addSubTest = _addSubTest # type: ignore[attr-defined] TestCaseFunction.failfast = False # type: ignore[attr-defined] # This condition is to prevent `TestCaseFunction._originaladdSkip` being assigned again in a subprocess from a @@ -217,7 +222,7 @@ def pytest_unconfigure() -> None: del TestCaseFunction._originaladdSkip -@pytest.fixture +@fixture def subtests(request: SubRequest) -> Generator[SubTests, None, None]: """Provides subtests functionality.""" capmam = request.node.config.pluginmanager.get_plugin("capturemanager") @@ -235,7 +240,7 @@ class SubTests: request: SubRequest = attr.ib() @property - def item(self) -> pytest.Item: + def item(self) -> Item: return self.request.node def test( @@ -414,7 +419,7 @@ def ignore_pytest_private_warning() -> Generator[None, None, None]: warnings.filterwarnings( "ignore", "A private pytest class or function was used.", - category=pytest.PytestDeprecationWarning, + category=PytestDeprecationWarning, ) yield @@ -424,7 +429,7 @@ class Captured: out = attr.ib(default="", type=str) err = attr.ib(default="", type=str) - def update_report(self, report: pytest.TestReport) -> None: + def update_report(self, report: TestReport) -> None: if self.out: report.sections.append(("Captured stdout call", self.out)) if self.err: @@ -435,16 +440,16 @@ class CapturedLogs: def __init__(self, handler: LogCaptureHandler) -> None: self._handler = handler - def update_report(self, report: pytest.TestReport) -> None: + def update_report(self, report: TestReport) -> None: report.sections.append(("Captured log call", self._handler.stream.getvalue())) class NullCapturedLogs: - def update_report(self, report: pytest.TestReport) -> None: + def update_report(self, report: TestReport) -> None: pass -def pytest_report_to_serializable(report: pytest.TestReport) -> dict[str, Any] | None: +def pytest_report_to_serializable(report: TestReport) -> dict[str, Any] | None: if 
isinstance(report, SubTestReport): return report._to_json() return None @@ -456,10 +461,10 @@ def pytest_report_from_serializable(data: dict[str, Any]) -> SubTestReport | Non return None -@pytest.hookimpl(tryfirst=True) +@hookimpl(tryfirst=True) def pytest_report_teststatus( - report: pytest.TestReport, - config: pytest.Config, + report: TestReport, + config: Config, ) -> tuple[str, str, str | Mapping[str, bool]] | None: if report.when != "call" or not isinstance(report, SubTestReport): return None From 596a4dcfe3cac15c693fc08f56886347cb3d1578 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Mon, 22 Sep 2025 20:04:51 -0300 Subject: [PATCH 3/6] Force using xdist plugin and fix linting --- src/_pytest/subtests.py | 19 ++++++------- testing/test_subtests.py | 61 ++++++++++++++++++++-------------------- 2 files changed, 40 insertions(+), 40 deletions(-) diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index 3a6502d6718..8cc9b0448f9 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -4,13 +4,13 @@ from collections.abc import Generator from collections.abc import Iterator from collections.abc import Mapping +from contextlib import AbstractContextManager from contextlib import contextmanager from contextlib import ExitStack from contextlib import nullcontext import sys import time from typing import Any -from typing import ContextManager from typing import TYPE_CHECKING from unittest import TestCase @@ -23,12 +23,11 @@ from _pytest.capture import SysCapture from _pytest.config import Config from _pytest.config import hookimpl -from _pytest.config import Parser +from _pytest.config.argparsing import Parser from _pytest.fixtures import fixture from _pytest.fixtures import SubRequest from _pytest.logging import catching_logs from _pytest.logging import LogCaptureHandler -from _pytest.nodes import Item from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception @@ -85,7 +84,7 @@ def sub_test_description(self) -> str: parts.append(f"({params_desc})") return " ".join(parts) or "()" - def _to_json(self) -> dict: + def _to_json(self) -> dict[str, Any]: data = super()._to_json() del data["context"] data["_report_type"] = "SubTestReport" @@ -236,11 +235,11 @@ def subtests(request: SubRequest) -> Generator[SubTests, None, None]: @attr.s class SubTests: ihook: pluggy.HookRelay = attr.ib() - suspend_capture_ctx: Callable[[], ContextManager] = attr.ib() + suspend_capture_ctx: Callable[[], AbstractContextManager[None]] = attr.ib() request: SubRequest = attr.ib() @property - def item(self) -> Item: + def item(self) -> Any: return self.request.node def test( @@ -282,7 +281,7 @@ class _SubTestContextManager: ihook: pluggy.HookRelay msg: str | None kwargs: dict[str, Any] - suspend_capture_ctx: Callable[[], ContextManager] + suspend_capture_ctx: Callable[[], AbstractContextManager[None]] request: SubRequest def __enter__(self) -> None: @@ -302,8 +301,8 @@ def __enter__(self) -> None: def __exit__( self, - exc_type: type[Exception] | None, - exc_val: Exception | None, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> bool: __tracebackhide__ = True @@ -352,7 +351,7 @@ def make_call_info( stop: float, duration: float, when: Literal["collect", "setup", "call", "teardown"], -) -> CallInfo: +) -> CallInfo[Any]: return CallInfo( None, exc_info, diff --git a/testing/test_subtests.py b/testing/test_subtests.py index 4bc48451a27..e729ec1ba6d 100644 --- 
a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -12,9 +12,7 @@ @pytest.mark.parametrize("mode", ["normal", "xdist"]) class TestFixture: - """ - Tests for ``subtests`` fixture. - """ + """Tests for ``subtests`` fixture.""" @pytest.fixture def simple_script(self, pytester: pytest.Pytester) -> None: @@ -39,7 +37,7 @@ def test_simple_terminal_normal( else: assert mode == "xdist" pytest.importorskip("xdist") - result = pytester.runpytest("-n1") + result = pytester.runpytest("-n1", "-pxdist.plugin") expected_lines = ["1 worker [1 item]"] expected_lines += [ @@ -69,7 +67,7 @@ def test_simple_terminal_verbose( else: assert mode == "xdist" pytest.importorskip("xdist") - result = pytester.runpytest("-n1", "-v") + result = pytester.runpytest("-n1", "-v", "-pxdist.plugin") expected_lines = [ "1 worker [1 item]", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", @@ -106,7 +104,7 @@ def test_foo(subtests): else: assert mode == "xdist" pytest.importorskip("xdist") - result = pytester.runpytest("-n1") + result = pytester.runpytest("-n1", "-pxdist.plugin") expected_lines = ["1 worker [1 item]"] expected_lines += ["* 1 passed, 3 skipped, 2 subtests passed in *"] result.stdout.fnmatch_lines(expected_lines) @@ -130,7 +128,7 @@ def test_foo(subtests): else: assert mode == "xdist" pytest.importorskip("xdist") - result = pytester.runpytest("-n1") + result = pytester.runpytest("-n1", "-pxdist.plugin") expected_lines = ["1 worker [1 item]"] expected_lines += ["* 1 passed, 2 subtests passed, 3 subtests xfailed in *"] result.stdout.fnmatch_lines(expected_lines) @@ -152,7 +150,7 @@ def test_typing_exported(subtests: SubTests) -> None: else: assert mode == "xdist" pytest.importorskip("xdist") - result = pytester.runpytest("-n1") + result = pytester.runpytest("-n1", "-pxdist.plugin") expected_lines = ["1 worker [1 item]"] expected_lines += ["* 1 passed *"] result.stdout.fnmatch_lines(expected_lines) @@ -215,9 +213,7 @@ def test_foo(subtests): class TestSubTest: - """ - Test Test.subTest functionality. 
- """ + """Test.subTest functionality.""" @pytest.fixture def simple_script(self, pytester: pytest.Pytester) -> Path: @@ -264,11 +260,11 @@ def test_simple_terminal_normal( else: assert runner == "pytest-xdist" pytest.importorskip("xdist") - result = pytester.runpytest(simple_script, "-n1") + result = pytester.runpytest(simple_script, "-n1", "-pxdist.plugin") expected_lines = ["1 worker [1 item]"] result.stdout.fnmatch_lines( - expected_lines - + [ + [ + *expected_lines, "* T.test_foo [[]custom[]] (i=1) *", "E * AssertionError: 1 != 0", "* T.test_foo [[]custom[]] (i=3) *", @@ -310,7 +306,9 @@ def test_simple_terminal_verbose( else: assert runner == "pytest-xdist" pytest.importorskip("xdist") - result = pytester.runpytest(simple_script, "-n1", "-v") + result = pytester.runpytest( + simple_script, "-n1", "-v", "-pxdist.plugin" + ) expected_lines = [ "1 worker [1 item]", "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", @@ -318,8 +316,8 @@ def test_simple_terminal_verbose( "*gw0*100%* PASSED test_simple_terminal_verbose.py::T::test_foo*", ] result.stdout.fnmatch_lines( - expected_lines - + [ + [ + *expected_lines, "* T.test_foo [[]custom[]] (i=1) *", "E * AssertionError: 1 != 0", "* T.test_foo [[]custom[]] (i=3) *", @@ -470,15 +468,19 @@ def test_foo(self): result = pytester.runpytest(p, "-v", "-rsf") result.stdout.re_match_lines( [ - r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIP \(skip subtest i=0\) .*", - r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIP \(skip subtest i=3\) .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIP" + r" \(skip subtest i=0\) .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIP" + r" \(skip subtest i=3\) .*", r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=9\) SUBFAIL .*", "test_skip_with_failure.py::T::test_foo PASSED .*", r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=0", r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=3", - r"[custom message] (i=4) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 4 < 4", - r"[custom message] (i=9) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 9 < 4", + r"[custom message] (i=4) SUBFAIL test_skip_with_failure.py::T::test_foo" + r" - AssertionError: assert 4 < 4", + r"[custom message] (i=9) SUBFAIL test_skip_with_failure.py::T::test_foo" + r" - AssertionError: assert 9 < 4", r".* 6 failed, 1 passed, 4 skipped in .*", ] ) @@ -542,8 +544,10 @@ def test_foo(self): [ r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\)", - r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip subtest i=3", - r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip the test", + r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5:" + r" skip subtest i=3", + r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5:" + r" skip the test", r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", r".* 6 failed, 5 skipped in .*", ] @@ -555,7 +559,8 @@ def test_foo(self): [ 
r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\).*", - r"\[custom message\] \(i=3\) SUBSKIP test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip subtest i=3", + r"\[custom message\] \(i=3\) SUBSKIP test_skip_with_failure_and_non_subskip.py::T::test_foo" + r" - Skipped: skip subtest i=3", r"SKIPPED test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip the test", r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", r".* 6 failed, 5 skipped in .*", @@ -748,9 +753,7 @@ class TestDebugging: """Check --pdb support for subtests fixture and TestCase.subTest.""" class _FakePdb: - """ - Fake debugger class implementation that tracks which methods were called on it. - """ + """Fake debugger class implementation that tracks which methods were called on it.""" quitting: bool = False calls: list[str] = [] @@ -813,9 +816,7 @@ def runpytest_and_check_pdb( def test_exitfirst(pytester: pytest.Pytester) -> None: - """ - Validate that when passing --exitfirst the test exits after the first failed subtest. - """ + """Validate that when passing --exitfirst the test exits after the first failed subtest.""" pytester.makepyfile( """ def test_foo(subtests): From 8f588512643e7337f1fbbcca31811fc32c64101f Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Mon, 22 Sep 2025 20:07:39 -0300 Subject: [PATCH 4/6] Replace attr by dataclass --- src/_pytest/subtests.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index 8cc9b0448f9..5d5816e168f 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -8,13 +8,13 @@ from contextlib import contextmanager from contextlib import ExitStack from contextlib import nullcontext +import dataclasses import sys import time from typing import Any from typing import TYPE_CHECKING from unittest import TestCase -import attr import pluggy from _pytest._code import ExceptionInfo @@ -58,15 +58,15 @@ def pytest_addoption(parser: Parser) -> None: ) -@attr.s +@dataclasses.dataclass class SubTestContext: - msg: str | None = attr.ib() - kwargs: dict[str, Any] = attr.ib() + msg: str | None + kwargs: dict[str, Any] -@attr.s(init=False) +@dataclasses.dataclass(init=False) class SubTestReport(TestReport): # type: ignore[misc] - context: SubTestContext = attr.ib() + context: SubTestContext @property def head_line(self) -> str: @@ -88,7 +88,7 @@ def _to_json(self) -> dict[str, Any]: data = super()._to_json() del data["context"] data["_report_type"] = "SubTestReport" - data["_subtest.context"] = attr.asdict(self.context) + data["_subtest.context"] = dataclasses.asdict(self.context) return data @classmethod @@ -232,11 +232,11 @@ def subtests(request: SubRequest) -> Generator[SubTests, None, None]: yield SubTests(request.node.ihook, suspend_capture_ctx, request) -@attr.s +@dataclasses.dataclass class SubTests: - ihook: pluggy.HookRelay = attr.ib() - suspend_capture_ctx: Callable[[], AbstractContextManager[None]] = attr.ib() - request: SubRequest = attr.ib() + ihook: pluggy.HookRelay + suspend_capture_ctx: Callable[[], AbstractContextManager[None]] + request: SubRequest @property def item(self) -> Any: @@ -267,7 +267,7 @@ def test( ) -@attr.s(auto_attribs=True) +@dataclasses.dataclass class _SubTestContextManager: """ Context manager for subtests, capturing exceptions raised inside the 
subtest scope and handling @@ -423,10 +423,10 @@ def ignore_pytest_private_warning() -> Generator[None, None, None]: yield -@attr.s +@dataclasses.dataclass() class Captured: - out = attr.ib(default="", type=str) - err = attr.ib(default="", type=str) + out: str = "" + err: str = "" def update_report(self, report: TestReport) -> None: if self.out: From 40b6cb2eaf6aa4a2a5d94e256fe23310983725c4 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Fri, 26 Sep 2025 09:48:51 -0300 Subject: [PATCH 5/6] Add docs --- changelog/1367.feature.rst | 21 ++++++++ doc/en/how-to/index.rst | 1 + doc/en/how-to/parametrize.rst | 6 +++ doc/en/how-to/subtests.rst | 88 ++++++++++++++++++++++++++++++++++ doc/en/how-to/unittest.rst | 13 ++--- doc/en/reference/fixtures.rst | 3 ++ doc/en/reference/reference.rst | 13 +++++ src/_pytest/deprecated.py | 1 + src/_pytest/subtests.py | 44 ++++++++++++----- testing/test_subtests.py | 8 ++-- 10 files changed, 173 insertions(+), 25 deletions(-) create mode 100644 changelog/1367.feature.rst create mode 100644 doc/en/how-to/subtests.rst diff --git a/changelog/1367.feature.rst b/changelog/1367.feature.rst new file mode 100644 index 00000000000..83aa65254c8 --- /dev/null +++ b/changelog/1367.feature.rst @@ -0,0 +1,21 @@ +**Support for subtests** has been added. + +:ref:`subtests ` are an alternative to parametrization, useful in situations where test setup is expensive or the parametrization values are not all known at collection time. + +**Example** + +.. code-block:: python + + def test(subtests): + for i in range(5): + with subtests.test(msg="custom message", i=i): + assert i % 2 == 0 + + +Each assert failure or error is caught by the context manager and reported individually. + +In addition, :meth:`unittest.TestCase.subTest` is now also supported. + +.. note:: + + This feature is experimental and will likely evolve in future releases. By that we mean that we might change how subtests are reported on failure, but the functionality and how to use it are stable. diff --git a/doc/en/how-to/index.rst b/doc/en/how-to/index.rst index 225f289651e..9796f1f8090 100644 --- a/doc/en/how-to/index.rst +++ b/doc/en/how-to/index.rst @@ -16,6 +16,7 @@ Core pytest functionality fixtures mark parametrize + subtests tmp_path monkeypatch doctest diff --git a/doc/en/how-to/parametrize.rst b/doc/en/how-to/parametrize.rst index fe186146434..5c39358d32a 100644 --- a/doc/en/how-to/parametrize.rst +++ b/doc/en/how-to/parametrize.rst @@ -20,6 +20,11 @@ pytest enables test parametrization at several levels: * `pytest_generate_tests`_ allows one to define custom parametrization schemes or extensions. + +.. note:: + + See :ref:`subtests` for an alternative to parametrization. + .. _parametrizemark: .. _`@pytest.mark.parametrize`: @@ -194,6 +199,7 @@ To get all combinations of multiple parametrized arguments you can stack This will run the test with the arguments set to ``x=0/y=2``, ``x=1/y=2``, ``x=0/y=3``, and ``x=1/y=3`` exhausting parameters in the order of the decorators. + .. _`pytest_generate_tests`: Basic ``pytest_generate_tests`` example diff --git a/doc/en/how-to/subtests.rst b/doc/en/how-to/subtests.rst new file mode 100644 index 00000000000..ad3af0ea531 --- /dev/null +++ b/doc/en/how-to/subtests.rst @@ -0,0 +1,88 @@ +.. _subtests: + +How to use subtests +=================== + +.. versionadded:: 9.0 + +.. note:: + + This feature is experimental. Its behavior, particularly how failures are reported, may evolve in future releases. However, the core functionality and usage are considered stable. 
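To make the changelog's "expensive setup / values not known at collection time" use case concrete, here is a minimal sketch; the ``dataset`` fixture and its keys are hypothetical, only the ``subtests.test(msg=..., **kwargs)`` call comes from this patch:

.. code-block:: python

    import pytest


    @pytest.fixture(scope="module")
    def dataset():
        # stand-in for an expensive resource that should be built only once
        return {"a": 1, "b": 2, "c": 3}


    def test_values_are_positive(subtests, dataset):
        # the keys are only known at run time, so parametrize cannot enumerate them
        for key, value in dataset.items():
            with subtests.test(msg="value check", key=key):
                assert value > 0

Each failing iteration would be reported as its own subtest failure while the remaining iterations still run.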
+ +pytest allows for grouping assertions within a normal test, known as *subtests*. + +Subtests are an alternative to parametrization, particularly useful when test setup is expensive or when the exact parametrization values are not known at collection time. + + +.. code-block:: python + + # content of test_subtest.py + + + def test(subtests): + for i in range(5): + with subtests.test(msg="custom message", i=i): + assert i % 2 == 0 + +Each assertion failure or error is caught by the context manager and reported individually: + +.. code-block:: pytest + + $ pytest -q test_subtest.py + + +Note that it is possible to use ``subtests`` multiple times in the same test, or even mix and match with normal assertions +outside the ``subtests.test`` block: + +.. code-block:: python + + def test(subtests): + for i in range(5): + with subtests.test(msg="stage 1", i=i): + assert i % 2 == 0 + + assert func() == 10 + + for i in range(10, 20): + with subtests.test(msg="stage 2", i=i): + assert i % 2 == 0 + +.. note:: + + See :ref:`parametrize` for an alternative to subtests. + + +Typing +------ + +:class:`pytest.SubTests` is exported so it can be used in type annotations: + +.. code-block:: python + + def test(subtests: pytest.SubTests) -> None: ... + +.. _parametrize_vs_subtests: + +Parametrization vs Subtests +--------------------------- + +While :ref:`traditional pytest parametrization ` and ``subtests`` are similar, they have important differences and use cases. + + +Parametrization +~~~~~~~~~~~~~~~ + +* Happens at collection time. +* Generates individual tests. +* Parametrized tests can be referenced from the command line. +* Plays well with plugins that handle test execution, such as ``--last-failed``. +* Ideal for decision table testing. + +Subtests +~~~~~~~~ + +* Happen during test execution. +* Are not known at collection time. +* Can be generated dynamically. +* Cannot be referenced individually from the command line. +* Plugins that handle test execution cannot target individual subtests. diff --git a/doc/en/how-to/unittest.rst b/doc/en/how-to/unittest.rst index ba98b366d04..a8c56c266bd 100644 --- a/doc/en/how-to/unittest.rst +++ b/doc/en/how-to/unittest.rst @@ -22,17 +22,14 @@ their ``test`` methods in ``test_*.py`` or ``*_test.py`` files. Almost all ``unittest`` features are supported: -* ``@unittest.skip`` style decorators; -* ``setUp/tearDown``; -* ``setUpClass/tearDownClass``; -* ``setUpModule/tearDownModule``; +* :func:`unittest.skip`/:func:`unittest.skipIf` style decorators +* :meth:`unittest.TestCase.setUp`/:meth:`unittest.TestCase.tearDown` +* :meth:`unittest.TestCase.setUpClass`/:meth:`unittest.TestCase.tearDownClass` +* :func:`unittest.setUpModule`/:func:`unittest.tearDownModule` +* :meth:`unittest.TestCase.subTest` (since version ``9.0``) -.. _`pytest-subtests`: https://github.com/pytest-dev/pytest-subtests .. _`load_tests protocol`: https://docs.python.org/3/library/unittest.html#load-tests-protocol -Additionally, :ref:`subtests ` are supported by the -`pytest-subtests`_ plugin. - Up to this point pytest does not have support for the following features: * `load_tests protocol`_; diff --git a/doc/en/reference/fixtures.rst b/doc/en/reference/fixtures.rst index 566304d3330..02e235ceb9e 100644 --- a/doc/en/reference/fixtures.rst +++ b/doc/en/reference/fixtures.rst @@ -52,6 +52,9 @@ Built-in fixtures :fixture:`pytestconfig` Access to configuration values, pluginmanager and plugin hooks. + :fixture:`subtests` + Enable declaring subtests inside test functions. 
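Since the unittest.rst hunk above now lists :meth:`unittest.TestCase.subTest` as supported, here is a minimal sketch of a ``TestCase`` exercising it under pytest; the class and method names are illustrative:

.. code-block:: python

    import unittest


    class TestParity(unittest.TestCase):
        def test_even(self):
            for i in range(5):
                with self.subTest("parity", i=i):
                    self.assertEqual(i % 2, 0)

With this patch applied, the odd values of ``i`` are reported individually (as SUBFAIL entries in the terminal output) rather than being folded into a single test failure.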
+ :fixture:`record_property` Add extra properties to the test. diff --git a/doc/en/reference/reference.rst b/doc/en/reference/reference.rst index 7ec1b110baf..8d605841ba0 100644 --- a/doc/en/reference/reference.rst +++ b/doc/en/reference/reference.rst @@ -572,6 +572,19 @@ The ``request`` fixture is a special fixture providing information of the reques :members: +.. fixture:: subtests + +subtests +~~~~~~~~ + +The ``subtests`` fixture enables declaring subtests inside test functions. + +**Tutorial**: :ref:`subtests` + +.. autoclass:: pytest.SubTests() + :members: + + .. fixture:: testdir testdir diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index a605c24e58f..778d3614715 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -24,6 +24,7 @@ "pytest_catchlog", "pytest_capturelog", "pytest_faulthandler", + "pytest_subtests", } diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index 5d5816e168f..6700df297c6 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -232,15 +232,24 @@ def subtests(request: SubRequest) -> Generator[SubTests, None, None]: yield SubTests(request.node.ihook, suspend_capture_ctx, request) -@dataclasses.dataclass +# Note: cannot use a dataclass here because Sphinx insists on showing up the __init__ method in the documentation, +# even if we explicitly use :exclude-members: __init__. class SubTests: - ihook: pluggy.HookRelay - suspend_capture_ctx: Callable[[], AbstractContextManager[None]] - request: SubRequest + """Subtests fixture, enables declaring subtests inside test functions via the :meth:`test` method.""" + + def __init__( + self, + ihook: pluggy.HookRelay, + suspend_capture_ctx: Callable[[], AbstractContextManager[None]], + request: SubRequest, + ) -> None: + self._ihook = ihook + self._suspend_capture_ctx = suspend_capture_ctx + self._request = request @property def item(self) -> Any: - return self.request.node + return self._request.node def test( self, @@ -248,22 +257,31 @@ def test( **kwargs: Any, ) -> _SubTestContextManager: """ - Context manager for subtests, capturing exceptions raised inside the subtest scope and handling - them through the pytest machinery. + Context manager for subtests, capturing exceptions raised inside the subtest scope and + reporting assertion failures and errors individually. - Usage: + Usage + ----- .. code-block:: python - with subtests.test(msg="subtest"): - assert 1 == 1 + def test(subtests): + for i in range(5): + with subtests.test(msg="custom message", i=i): + assert i % 2 == 0 + + :param msg: + If given, the message will be shown in the test report in case of subtest failure. + + :param kwargs: + Arbitrary values that are also added to the subtest report. 
""" return _SubTestContextManager( - self.ihook, + self._ihook, msg, kwargs, - request=self.request, - suspend_capture_ctx=self.suspend_capture_ctx, + request=self._request, + suspend_capture_ctx=self._suspend_capture_ctx, ) diff --git a/testing/test_subtests.py b/testing/test_subtests.py index e729ec1ba6d..fa32c076813 100644 --- a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -352,14 +352,14 @@ def test_foo(self): result = pytester.runpython(p) result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (skipped=3)"]) else: - pytest.xfail("Not producing the expected results (#5)") + pytest.xfail("Not producing the expected results (#13756)") result = pytester.runpytest(p) # type:ignore[unreachable] result.stdout.fnmatch_lines( ["collected 1 item", "* 3 skipped, 1 passed in *"] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) - @pytest.mark.xfail(reason="Not producing the expected results (#5)") + @pytest.mark.xfail(reason="Not producing the expected results (#13756)") def test_xfail( self, pytester: pytest.Pytester, @@ -485,7 +485,7 @@ def test_foo(self): ] ) else: - pytest.xfail("Not producing the expected results (#5)") + pytest.xfail("Not producing the expected results (#13756)") result = pytester.runpytest(p) # type:ignore[unreachable] result.stdout.fnmatch_lines( ["collected 1 item", "* 3 skipped, 1 passed in *"] @@ -567,7 +567,7 @@ def test_foo(self): ] ) else: - pytest.xfail("Not producing the expected results (#5)") + pytest.xfail("Not producing the expected results (#13756)") result = pytester.runpytest(p) # type:ignore[unreachable] result.stdout.fnmatch_lines( ["collected 1 item", "* 3 skipped, 1 passed in *"] From b569c93096d131b96ab23d497810ddc8ae48af03 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Fri, 26 Sep 2025 10:34:30 -0300 Subject: [PATCH 6/6] Cleanup internal hacks --- src/_pytest/reports.py | 1 - src/_pytest/subtests.py | 157 +++------------------------------------ src/_pytest/terminal.py | 5 ++ src/_pytest/unittest.py | 109 +++++++++++++++++++++++++-- testing/test_subtests.py | 2 +- 5 files changed, 120 insertions(+), 154 deletions(-) diff --git a/src/_pytest/reports.py b/src/_pytest/reports.py index fb0607bfb95..8deed3be79e 100644 --- a/src/_pytest/reports.py +++ b/src/_pytest/reports.py @@ -251,7 +251,6 @@ def _report_unserialization_failure( raise RuntimeError(stream.getvalue()) -@final class TestReport(BaseReport): """Basic test report object (also used for setup and teardown calls if they fail). 
diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index 6700df297c6..c5a3bedfa90 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -9,11 +9,9 @@ from contextlib import ExitStack from contextlib import nullcontext import dataclasses -import sys import time from typing import Any from typing import TYPE_CHECKING -from unittest import TestCase import pluggy @@ -31,8 +29,6 @@ from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception -from _pytest.unittest import TestCaseFunction -from _pytest.warning_types import PytestDeprecationWarning if TYPE_CHECKING: @@ -60,12 +56,14 @@ def pytest_addoption(parser: Parser) -> None: @dataclasses.dataclass class SubTestContext: + """The values passed to SubTests.test() that are included in the test report.""" + msg: str | None kwargs: dict[str, Any] @dataclasses.dataclass(init=False) -class SubTestReport(TestReport): # type: ignore[misc] +class SubTestReport(TestReport): context: SubTestContext @property @@ -105,122 +103,6 @@ def _from_test_report(cls, test_report: TestReport) -> SubTestReport: return super()._from_json(test_report._to_json()) -def _addSkip(self: TestCaseFunction, testcase: TestCase, reason: str) -> None: - from unittest.case import _SubTest # type: ignore[attr-defined] - - if isinstance(testcase, _SubTest): - self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] - if self._excinfo is not None: - exc_info = self._excinfo[-1] - self.addSubTest(testcase.test_case, testcase, exc_info) # type: ignore[attr-defined] - else: - # For python < 3.11: the non-subtest skips have to be added by `_originaladdSkip` only after all subtest - # failures are processed by `_addSubTest`. (`self.instance._outcome` has no attribute `skipped/errors` anymore.) - # For python < 3.11, we also need to check if `self.instance._outcome` is `None` (this happens if the test - # class/method is decorated with `unittest.skip`, see #173). - if sys.version_info < (3, 11) and self.instance._outcome is not None: - subtest_errors = [ - x - for x, y in self.instance._outcome.errors - if isinstance(x, _SubTest) and y is not None - ] - if len(subtest_errors) == 0: - self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] - else: - self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] - - -def _addSubTest( - self: TestCaseFunction, - test_case: Any, - test: TestCase, - exc_info: tuple[type[BaseException], BaseException, TracebackType] | None, -) -> None: - msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] - call_info = make_call_info( - ExceptionInfo(exc_info, _ispytest=True) if exc_info else None, - start=0, - stop=0, - duration=0, - when="call", - ) - report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) - sub_report = SubTestReport._from_test_report(report) - sub_report.context = SubTestContext(msg, dict(test.params)) # type: ignore[attr-defined] - self.ihook.pytest_runtest_logreport(report=sub_report) - if check_interactive_exception(call_info, sub_report): - self.ihook.pytest_exception_interact( - node=self, call=call_info, report=sub_report - ) - - # For python < 3.11: add non-subtest skips once all subtest failures are processed by # `_addSubTest`. 
- if sys.version_info < (3, 11): - from unittest.case import _SubTest # type: ignore[attr-defined] - - non_subtest_skip = [ - (x, y) - for x, y in self.instance._outcome.skipped - if not isinstance(x, _SubTest) - ] - subtest_errors = [ - (x, y) - for x, y in self.instance._outcome.errors - if isinstance(x, _SubTest) and y is not None - ] - # Check if we have non-subtest skips: if there are also sub failures, non-subtest skips are not treated in - # `_addSubTest` and have to be added using `_originaladdSkip` after all subtest failures are processed. - if len(non_subtest_skip) > 0 and len(subtest_errors) > 0: - # Make sure we have processed the last subtest failure - last_subset_error = subtest_errors[-1] - if exc_info is last_subset_error[-1]: - # Add non-subtest skips (as they could not be treated in `_addSkip`) - for testcase, reason in non_subtest_skip: - self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] - - -def pytest_configure(config: Config) -> None: - TestCaseFunction.addSubTest = _addSubTest # type: ignore[attr-defined] - TestCaseFunction.failfast = False # type: ignore[attr-defined] - # This condition is to prevent `TestCaseFunction._originaladdSkip` being assigned again in a subprocess from a - # parent python process where `addSkip` is already `_addSkip`. A such case is when running tests in - # `test_subtests.py` where `pytester.runpytest` is used. Without this guard condition, `_originaladdSkip` is - # assigned to `_addSkip` which is wrong as well as causing an infinite recursion in some cases. - if not hasattr(TestCaseFunction, "_originaladdSkip"): - TestCaseFunction._originaladdSkip = TestCaseFunction.addSkip # type: ignore[attr-defined] - TestCaseFunction.addSkip = _addSkip # type: ignore[method-assign] - - # Hack (#86): the terminal does not know about the "subtests" - # status, so it will by default turn the output to yellow. - # This forcibly adds the new 'subtests' status. - import _pytest.terminal - - new_types = tuple( - f"subtests {outcome}" for outcome in ("passed", "failed", "skipped") - ) - # We need to check if we are not re-adding because we run our own tests - # with pytester in-process mode, so this will be called multiple times. 
- if new_types[0] not in _pytest.terminal.KNOWN_TYPES: - _pytest.terminal.KNOWN_TYPES = _pytest.terminal.KNOWN_TYPES + new_types # type: ignore[assignment] - - _pytest.terminal._color_for_type.update( - { - f"subtests {outcome}": _pytest.terminal._color_for_type[outcome] - for outcome in ("passed", "failed", "skipped") - if outcome in _pytest.terminal._color_for_type - } - ) - - -def pytest_unconfigure() -> None: - if hasattr(TestCaseFunction, "addSubTest"): - del TestCaseFunction.addSubTest - if hasattr(TestCaseFunction, "failfast"): - del TestCaseFunction.failfast - if hasattr(TestCaseFunction, "_originaladdSkip"): - TestCaseFunction.addSkip = TestCaseFunction._originaladdSkip # type: ignore[method-assign] - del TestCaseFunction._originaladdSkip - - @fixture def subtests(request: SubRequest) -> Generator[SubTests, None, None]: """Provides subtests functionality.""" @@ -247,10 +129,6 @@ def __init__( self._suspend_capture_ctx = suspend_capture_ctx self._request = request - @property - def item(self) -> Any: - return self._request.node - def test( self, msg: str | None = None, @@ -293,7 +171,7 @@ class _SubTestContextManager: Note: initially this logic was implemented directly in SubTests.test() as a @contextmanager, however it is not possible to control the output fully when exiting from it due to an exception when - in --exitfirst mode, so this was refactored into an explicit context manager class (#134). + in --exitfirst mode, so this was refactored into an explicit context manager class (pytest-dev/pytest-subtests#134). """ ihook: pluggy.HookRelay @@ -390,11 +268,9 @@ def capturing_output(request: SubRequest) -> Iterator[Captured]: capture_fixture_active = getattr(capman, "_capture_fixture", None) if option == "sys" and not capture_fixture_active: - with ignore_pytest_private_warning(): - fixture = CaptureFixture(SysCapture, request) + fixture = CaptureFixture(SysCapture, request, _ispytest=True) elif option == "fd" and not capture_fixture_active: - with ignore_pytest_private_warning(): - fixture = CaptureFixture(FDCapture, request) + fixture = CaptureFixture(FDCapture, request, _ispytest=True) else: fixture = None @@ -428,20 +304,7 @@ def capturing_logs( yield captured_logs -@contextmanager -def ignore_pytest_private_warning() -> Generator[None, None, None]: - import warnings - - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - "A private pytest class or function was used.", - category=PytestDeprecationWarning, - ) - yield - - -@dataclasses.dataclass() +@dataclasses.dataclass class Captured: out: str = "" err: str = "" @@ -453,12 +316,12 @@ def update_report(self, report: TestReport) -> None: report.sections.append(("Captured stderr call", self.err)) +@dataclasses.dataclass class CapturedLogs: - def __init__(self, handler: LogCaptureHandler) -> None: - self._handler = handler + handler: LogCaptureHandler def update_report(self, report: TestReport) -> None: - report.sections.append(("Captured log call", self._handler.stream.getvalue())) + report.sections.append(("Captured log call", self.handler.stream.getvalue())) class NullCapturedLogs: diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py index a95f79ba6b6..a96ba8e19ba 100644 --- a/src/_pytest/terminal.py +++ b/src/_pytest/terminal.py @@ -69,6 +69,9 @@ "xpassed", "warnings", "error", + "subtests passed", + "subtests failed", + "subtests skipped", ) _REPORTCHARS_DEFAULT = "fE" @@ -1559,6 +1562,8 @@ def _folded_skips( "error": "red", "warnings": "yellow", "passed": "green", + "subtests passed": 
"green", + "subtests failed": "red", } _color_for_type_default = "yellow" diff --git a/src/_pytest/unittest.py b/src/_pytest/unittest.py index 282f7b25680..1a45fbe9fd5 100644 --- a/src/_pytest/unittest.py +++ b/src/_pytest/unittest.py @@ -13,9 +13,13 @@ import sys import traceback import types +from typing import Any from typing import TYPE_CHECKING +from unittest import TestCase import _pytest._code +from _pytest._code import ExceptionInfo +from _pytest.compat import assert_never from _pytest.compat import is_async_function from _pytest.config import hookimpl from _pytest.fixtures import FixtureRequest @@ -30,12 +34,17 @@ from _pytest.python import Function from _pytest.python import Module from _pytest.runner import CallInfo +from _pytest.runner import check_interactive_exception +from _pytest.subtests import make_call_info +from _pytest.subtests import SubTestContext +from _pytest.subtests import SubTestReport if sys.version_info[:2] < (3, 11): from exceptiongroup import ExceptionGroup if TYPE_CHECKING: + from types import TracebackType import unittest import twisted.trial.unittest @@ -200,6 +209,7 @@ def unittest_setup_method_fixture( class TestCaseFunction(Function): nofuncargs = True + failfast = False _excinfo: list[_pytest._code.ExceptionInfo[BaseException]] | None = None def _getinstance(self): @@ -277,11 +287,42 @@ def addFailure( ) -> None: self._addexcinfo(rawexcinfo) - def addSkip(self, testcase: unittest.TestCase, reason: str) -> None: - try: - raise skip.Exception(reason, _use_item_location=True) - except skip.Exception: - self._addexcinfo(sys.exc_info()) + def addSkip( + self, testcase: unittest.TestCase, reason: str, *, handle_subtests: bool = True + ) -> None: + from unittest.case import _SubTest # type: ignore[attr-defined] + + def add_skip() -> None: + try: + raise skip.Exception(reason, _use_item_location=True) + except skip.Exception: + self._addexcinfo(sys.exc_info()) + + if not handle_subtests: + add_skip() + return + + if isinstance(testcase, _SubTest): + add_skip() + if self._excinfo is not None: + exc_info = self._excinfo[-1] + self.addSubTest(testcase.test_case, testcase, exc_info) + else: + # For python < 3.11: the non-subtest skips have to be added by `add_skip` only after all subtest + # failures are processed by `_addSubTest`: `self.instance._outcome` has no attribute + # `skipped/errors` anymore. + # We also need to check if `self.instance._outcome` is `None` (this happens if the test + # class/method is decorated with `unittest.skip`, see pytest-dev/pytest-subtests#173). 
+ if sys.version_info < (3, 11) and self.instance._outcome is not None: + subtest_errors = [ + x + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + if len(subtest_errors) == 0: + add_skip() + else: + add_skip() def addExpectedFailure( self, @@ -361,6 +402,64 @@ def _traceback_filter( ntraceback = traceback return ntraceback + def addSubTest( + self, + test_case: Any, + test: TestCase, + exc_info: ExceptionInfo[BaseException] + | tuple[type[BaseException], BaseException, TracebackType] + | None, + ) -> None: + exception_info: ExceptionInfo[BaseException] | None + match exc_info: + case tuple(): + exception_info = ExceptionInfo(exc_info, _ispytest=True) + case ExceptionInfo() | None: + exception_info = exc_info + case unreachable: + assert_never(unreachable) + + call_info = make_call_info( + exception_info, + start=0, + stop=0, + duration=0, + when="call", + ) + msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] + report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) + sub_report = SubTestReport._from_test_report(report) + sub_report.context = SubTestContext(msg, dict(test.params)) # type: ignore[attr-defined] + self.ihook.pytest_runtest_logreport(report=sub_report) + if check_interactive_exception(call_info, sub_report): + self.ihook.pytest_exception_interact( + node=self, call=call_info, report=sub_report + ) + + # For python < 3.11: add non-subtest skips once all subtest failures are processed by # `_addSubTest`. + if sys.version_info < (3, 11): + from unittest.case import _SubTest # type: ignore[attr-defined] + + non_subtest_skip = [ + (x, y) + for x, y in self.instance._outcome.skipped + if not isinstance(x, _SubTest) + ] + subtest_errors = [ + (x, y) + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + # Check if we have non-subtest skips: if there are also sub failures, non-subtest skips are not treated in + # `_addSubTest` and have to be added using `add_skip` after all subtest failures are processed. + if len(non_subtest_skip) > 0 and len(subtest_errors) > 0: + # Make sure we have processed the last subtest failure + last_subset_error = subtest_errors[-1] + if exc_info is last_subset_error[-1]: + # Add non-subtest skips (as they could not be treated in `_addSkip`) + for testcase, reason in non_subtest_skip: + self.addSkip(testcase, reason, handle_subtests=False) + @hookimpl(tryfirst=True) def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: diff --git a/testing/test_subtests.py b/testing/test_subtests.py index fa32c076813..b4e591a20ee 100644 --- a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -398,7 +398,7 @@ def test_only_original_skip_is_called( monkeypatch: pytest.MonkeyPatch, runner: Literal["pytest-normal"], ) -> None: - """Regression test for #173.""" + """Regression test for pytest-dev/pytest-subtests#173.""" monkeypatch.setenv("COLUMNS", "200") p = pytester.makepyfile( """