From 3678b760356de19a0808d19a0cb7707635bb9f3b Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 20 Sep 2025 08:42:53 -0300 Subject: [PATCH 01/18] Add pytest-subtests files changes In addition, enable the plugin in `pytest/__init__.py` and `config/__init__.py`. --- src/_pytest/config/__init__.py | 1 + src/_pytest/subtests.py | 502 ++++++++++++++++++++ src/pytest/__init__.py | 2 + testing/test_subtests.py | 839 +++++++++++++++++++++++++++++++++ 4 files changed, 1344 insertions(+) create mode 100644 src/_pytest/subtests.py create mode 100644 testing/test_subtests.py diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index 2af60fa9c3c..23c85f70d64 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -278,6 +278,7 @@ def directory_arg(path: str, optname: str) -> str: "logging", "reports", "faulthandler", + "subtests", ) builtin_plugins = { diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py new file mode 100644 index 00000000000..d4be44f2d4e --- /dev/null +++ b/src/_pytest/subtests.py @@ -0,0 +1,502 @@ +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterator +from collections.abc import Mapping +from contextlib import contextmanager +from contextlib import ExitStack +from contextlib import nullcontext +import sys +import time +from typing import Any +from typing import ContextManager +from typing import TYPE_CHECKING +from unittest import TestCase + +import attr +import pluggy + +from _pytest._code import ExceptionInfo +from _pytest.capture import CaptureFixture +from _pytest.capture import FDCapture +from _pytest.capture import SysCapture +from _pytest.fixtures import SubRequest +from _pytest.logging import catching_logs +from _pytest.logging import LogCaptureHandler +from _pytest.reports import TestReport +from _pytest.runner import CallInfo +from _pytest.runner import check_interactive_exception +from _pytest.unittest import TestCaseFunction +import pytest + + +if TYPE_CHECKING: + from types import TracebackType + from typing import Literal + + +def pytest_addoption(parser: pytest.Parser) -> None: + group = parser.getgroup("subtests") + group.addoption( + "--no-subtests-shortletter", + action="store_true", + dest="no_subtests_shortletter", + default=False, + help="Disables subtest output 'dots' in non-verbose mode (EXPERIMENTAL)", + ) + group.addoption( + "--no-subtests-reports", + action="store_true", + dest="no_subtests_reports", + default=False, + help="Disables subtest output unless it's a failed subtest (EXPERIMENTAL)", + ) + + +@attr.s +class SubTestContext: + msg: str | None = attr.ib() + kwargs: dict[str, Any] = attr.ib() + + +@attr.s(init=False) +class SubTestReport(TestReport): # type: ignore[misc] + context: SubTestContext = attr.ib() + + @property + def head_line(self) -> str: + _, _, domain = self.location + return f"{domain} {self.sub_test_description()}" + + def sub_test_description(self) -> str: + parts = [] + if isinstance(self.context.msg, str): + parts.append(f"[{self.context.msg}]") + if self.context.kwargs: + params_desc = ", ".join( + f"{k}={v!r}" for (k, v) in sorted(self.context.kwargs.items()) + ) + parts.append(f"({params_desc})") + return " ".join(parts) or "()" + + def _to_json(self) -> dict: + data = super()._to_json() + del data["context"] + data["_report_type"] = "SubTestReport" + data["_subtest.context"] = attr.asdict(self.context) + return data + + @classmethod + def _from_json(cls, 
reportdict: dict[str, Any]) -> SubTestReport: + report = super()._from_json(reportdict) + context_data = reportdict["_subtest.context"] + report.context = SubTestContext( + msg=context_data["msg"], kwargs=context_data["kwargs"] + ) + return report + + @classmethod + def _from_test_report(cls, test_report: TestReport) -> SubTestReport: + return super()._from_json(test_report._to_json()) + + +def _addSkip(self: TestCaseFunction, testcase: TestCase, reason: str) -> None: + from unittest.case import _SubTest # type: ignore[attr-defined] + + if isinstance(testcase, _SubTest): + self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] + if self._excinfo is not None: + exc_info = self._excinfo[-1] + self.addSubTest(testcase.test_case, testcase, exc_info) # type: ignore[attr-defined] + else: + # For python < 3.11: the non-subtest skips have to be added by `_originaladdSkip` only after all subtest + # failures are processed by `_addSubTest`. (`self.instance._outcome` has no attribute `skipped/errors` anymore.) + # For python < 3.11, we also need to check if `self.instance._outcome` is `None` (this happens if the test + # class/method is decorated with `unittest.skip`, see #173). + if sys.version_info < (3, 11) and self.instance._outcome is not None: + subtest_errors = [ + x + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + if len(subtest_errors) == 0: + self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] + else: + self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] + + +def _addSubTest( + self: TestCaseFunction, + test_case: Any, + test: TestCase, + exc_info: tuple[type[BaseException], BaseException, TracebackType] | None, +) -> None: + msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] + call_info = make_call_info( + ExceptionInfo(exc_info, _ispytest=True) if exc_info else None, + start=0, + stop=0, + duration=0, + when="call", + ) + report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) + sub_report = SubTestReport._from_test_report(report) + sub_report.context = SubTestContext(msg, dict(test.params)) # type: ignore[attr-defined] + self.ihook.pytest_runtest_logreport(report=sub_report) + if check_interactive_exception(call_info, sub_report): + self.ihook.pytest_exception_interact( + node=self, call=call_info, report=sub_report + ) + + # For python < 3.11: add non-subtest skips once all subtest failures are processed by # `_addSubTest`. + if sys.version_info < (3, 11): + from unittest.case import _SubTest # type: ignore[attr-defined] + + non_subtest_skip = [ + (x, y) + for x, y in self.instance._outcome.skipped + if not isinstance(x, _SubTest) + ] + subtest_errors = [ + (x, y) + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + # Check if we have non-subtest skips: if there are also sub failures, non-subtest skips are not treated in + # `_addSubTest` and have to be added using `_originaladdSkip` after all subtest failures are processed. 
+ if len(non_subtest_skip) > 0 and len(subtest_errors) > 0: + # Make sure we have processed the last subtest failure + last_subset_error = subtest_errors[-1] + if exc_info is last_subset_error[-1]: + # Add non-subtest skips (as they could not be treated in `_addSkip`) + for testcase, reason in non_subtest_skip: + self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] + + +def pytest_configure(config: pytest.Config) -> None: + TestCaseFunction.addSubTest = _addSubTest # type: ignore[attr-defined] + TestCaseFunction.failfast = False # type: ignore[attr-defined] + # This condition is to prevent `TestCaseFunction._originaladdSkip` being assigned again in a subprocess from a + # parent python process where `addSkip` is already `_addSkip`. A such case is when running tests in + # `test_subtests.py` where `pytester.runpytest` is used. Without this guard condition, `_originaladdSkip` is + # assigned to `_addSkip` which is wrong as well as causing an infinite recursion in some cases. + if not hasattr(TestCaseFunction, "_originaladdSkip"): + TestCaseFunction._originaladdSkip = TestCaseFunction.addSkip # type: ignore[attr-defined] + TestCaseFunction.addSkip = _addSkip # type: ignore[method-assign] + + # Hack (#86): the terminal does not know about the "subtests" + # status, so it will by default turn the output to yellow. + # This forcibly adds the new 'subtests' status. + import _pytest.terminal + + new_types = tuple( + f"subtests {outcome}" for outcome in ("passed", "failed", "skipped") + ) + # We need to check if we are not re-adding because we run our own tests + # with pytester in-process mode, so this will be called multiple times. + if new_types[0] not in _pytest.terminal.KNOWN_TYPES: + _pytest.terminal.KNOWN_TYPES = _pytest.terminal.KNOWN_TYPES + new_types # type: ignore[assignment] + + _pytest.terminal._color_for_type.update( + { + f"subtests {outcome}": _pytest.terminal._color_for_type[outcome] + for outcome in ("passed", "failed", "skipped") + if outcome in _pytest.terminal._color_for_type + } + ) + + +def pytest_unconfigure() -> None: + if hasattr(TestCaseFunction, "addSubTest"): + del TestCaseFunction.addSubTest + if hasattr(TestCaseFunction, "failfast"): + del TestCaseFunction.failfast + if hasattr(TestCaseFunction, "_originaladdSkip"): + TestCaseFunction.addSkip = TestCaseFunction._originaladdSkip # type: ignore[method-assign] + del TestCaseFunction._originaladdSkip + + +@pytest.fixture +def subtests(request: SubRequest) -> Generator[SubTests, None, None]: + """Provides subtests functionality.""" + capmam = request.node.config.pluginmanager.get_plugin("capturemanager") + if capmam is not None: + suspend_capture_ctx = capmam.global_and_fixture_disabled + else: + suspend_capture_ctx = nullcontext + yield SubTests(request.node.ihook, suspend_capture_ctx, request) + + +@attr.s +class SubTests: + ihook: pluggy.HookRelay = attr.ib() + suspend_capture_ctx: Callable[[], ContextManager] = attr.ib() + request: SubRequest = attr.ib() + + @property + def item(self) -> pytest.Item: + return self.request.node + + def test( + self, + msg: str | None = None, + **kwargs: Any, + ) -> _SubTestContextManager: + """ + Context manager for subtests, capturing exceptions raised inside the subtest scope and handling + them through the pytest machinery. + + Usage: + + .. 
code-block:: python + + with subtests.test(msg="subtest"): + assert 1 == 1 + """ + return _SubTestContextManager( + self.ihook, + msg, + kwargs, + request=self.request, + suspend_capture_ctx=self.suspend_capture_ctx, + ) + + +@attr.s(auto_attribs=True) +class _SubTestContextManager: + """ + Context manager for subtests, capturing exceptions raised inside the subtest scope and handling + them through the pytest machinery. + + Note: initially this logic was implemented directly in SubTests.test() as a @contextmanager, however + it is not possible to control the output fully when exiting from it due to an exception when + in --exitfirst mode, so this was refactored into an explicit context manager class (#134). + """ + + ihook: pluggy.HookRelay + msg: str | None + kwargs: dict[str, Any] + suspend_capture_ctx: Callable[[], ContextManager] + request: SubRequest + + def __enter__(self) -> None: + __tracebackhide__ = True + + self._start = time.time() + self._precise_start = time.perf_counter() + self._exc_info = None + + self._exit_stack = ExitStack() + self._captured_output = self._exit_stack.enter_context( + capturing_output(self.request) + ) + self._captured_logs = self._exit_stack.enter_context( + capturing_logs(self.request) + ) + + def __exit__( + self, + exc_type: type[Exception] | None, + exc_val: Exception | None, + exc_tb: TracebackType | None, + ) -> bool: + __tracebackhide__ = True + try: + if exc_val is not None: + exc_info = ExceptionInfo.from_exception(exc_val) + else: + exc_info = None + finally: + self._exit_stack.close() + + precise_stop = time.perf_counter() + duration = precise_stop - self._precise_start + stop = time.time() + + call_info = make_call_info( + exc_info, start=self._start, stop=stop, duration=duration, when="call" + ) + report = self.ihook.pytest_runtest_makereport( + item=self.request.node, call=call_info + ) + sub_report = SubTestReport._from_test_report(report) + sub_report.context = SubTestContext(self.msg, self.kwargs.copy()) + + self._captured_output.update_report(sub_report) + self._captured_logs.update_report(sub_report) + + with self.suspend_capture_ctx(): + self.ihook.pytest_runtest_logreport(report=sub_report) + + if check_interactive_exception(call_info, sub_report): + self.ihook.pytest_exception_interact( + node=self.request.node, call=call_info, report=sub_report + ) + + if exc_val is not None: + if self.request.session.shouldfail: + return False + return True + + +def make_call_info( + exc_info: ExceptionInfo[BaseException] | None, + *, + start: float, + stop: float, + duration: float, + when: Literal["collect", "setup", "call", "teardown"], +) -> CallInfo: + return CallInfo( + None, + exc_info, + start=start, + stop=stop, + duration=duration, + when=when, + _ispytest=True, + ) + + +@contextmanager +def capturing_output(request: SubRequest) -> Iterator[Captured]: + option = request.config.getoption("capture", None) + + # capsys or capfd are active, subtest should not capture. 
+ capman = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture_active = getattr(capman, "_capture_fixture", None) + + if option == "sys" and not capture_fixture_active: + with ignore_pytest_private_warning(): + fixture = CaptureFixture(SysCapture, request) + elif option == "fd" and not capture_fixture_active: + with ignore_pytest_private_warning(): + fixture = CaptureFixture(FDCapture, request) + else: + fixture = None + + if fixture is not None: + fixture._start() + + captured = Captured() + try: + yield captured + finally: + if fixture is not None: + out, err = fixture.readouterr() + fixture.close() + captured.out = out + captured.err = err + + +@contextmanager +def capturing_logs( + request: SubRequest, +) -> Iterator[CapturedLogs | NullCapturedLogs]: + logging_plugin = request.config.pluginmanager.getplugin("logging-plugin") + if logging_plugin is None: + yield NullCapturedLogs() + else: + handler = LogCaptureHandler() + handler.setFormatter(logging_plugin.formatter) + + captured_logs = CapturedLogs(handler) + with catching_logs(handler): + yield captured_logs + + +@contextmanager +def ignore_pytest_private_warning() -> Generator[None, None, None]: + import warnings + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "A private pytest class or function was used.", + category=pytest.PytestDeprecationWarning, + ) + yield + + +@attr.s +class Captured: + out = attr.ib(default="", type=str) + err = attr.ib(default="", type=str) + + def update_report(self, report: pytest.TestReport) -> None: + if self.out: + report.sections.append(("Captured stdout call", self.out)) + if self.err: + report.sections.append(("Captured stderr call", self.err)) + + +class CapturedLogs: + def __init__(self, handler: LogCaptureHandler) -> None: + self._handler = handler + + def update_report(self, report: pytest.TestReport) -> None: + report.sections.append(("Captured log call", self._handler.stream.getvalue())) + + +class NullCapturedLogs: + def update_report(self, report: pytest.TestReport) -> None: + pass + + +def pytest_report_to_serializable(report: pytest.TestReport) -> dict[str, Any] | None: + if isinstance(report, SubTestReport): + return report._to_json() + return None + + +def pytest_report_from_serializable(data: dict[str, Any]) -> SubTestReport | None: + if data.get("_report_type") == "SubTestReport": + return SubTestReport._from_json(data) + return None + + +@pytest.hookimpl(tryfirst=True) +def pytest_report_teststatus( + report: pytest.TestReport, + config: pytest.Config, +) -> tuple[str, str, str | Mapping[str, bool]] | None: + if report.when != "call" or not isinstance(report, SubTestReport): + return None + + outcome = report.outcome + description = report.sub_test_description() + no_output = ("", "", "") + + if hasattr(report, "wasxfail"): + if config.option.no_subtests_reports and outcome != "skipped": + return no_output + elif outcome == "skipped": + category = "xfailed" + short = "y" # x letter is used for regular xfail, y for subtest xfail + status = "SUBXFAIL" + elif outcome == "passed": + category = "xpassed" + short = "Y" # X letter is used for regular xpass, Y for subtest xpass + status = "SUBXPASS" + else: + # This should not normally happen, unless some plugin is setting wasxfail without + # the correct outcome. Pytest expects the call outcome to be either skipped or passed in case of xfail. + # Let's pass this report to the next hook. 
+ return None + short = "" if config.option.no_subtests_shortletter else short + return f"subtests {category}", short, f"{description} {status}" + + if config.option.no_subtests_reports and outcome != "failed": + return no_output + elif report.passed: + short = "" if config.option.no_subtests_shortletter else "," + return f"subtests {outcome}", short, f"{description} SUBPASS" + elif report.skipped: + short = "" if config.option.no_subtests_shortletter else "-" + return outcome, short, f"{description} SUBSKIP" + elif outcome == "failed": + short = "" if config.option.no_subtests_shortletter else "u" + return outcome, short, f"{description} SUBFAIL" + + return None diff --git a/src/pytest/__init__.py b/src/pytest/__init__.py index 31d56deede4..610693f1682 100644 --- a/src/pytest/__init__.py +++ b/src/pytest/__init__.py @@ -71,6 +71,7 @@ from _pytest.runner import CallInfo from _pytest.stash import Stash from _pytest.stash import StashKey +from _pytest.subtests import SubTests from _pytest.terminal import TerminalReporter from _pytest.terminal import TestShortLogReport from _pytest.tmpdir import TempPathFactory @@ -148,6 +149,7 @@ "Session", "Stash", "StashKey", + "SubTests", "TempPathFactory", "TempdirFactory", "TerminalReporter", diff --git a/testing/test_subtests.py b/testing/test_subtests.py new file mode 100644 index 00000000000..4bc48451a27 --- /dev/null +++ b/testing/test_subtests.py @@ -0,0 +1,839 @@ +from __future__ import annotations + +from pathlib import Path +import sys +from typing import Literal + +import pytest + + +IS_PY311 = sys.version_info[:2] >= (3, 11) + + +@pytest.mark.parametrize("mode", ["normal", "xdist"]) +class TestFixture: + """ + Tests for ``subtests`` fixture. + """ + + @pytest.fixture + def simple_script(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + assert i % 2 == 0 + """ + ) + + def test_simple_terminal_normal( + self, + simple_script: None, + pytester: pytest.Pytester, + mode: Literal["normal", "xdist"], + ) -> None: + if mode == "normal": + result = pytester.runpytest() + expected_lines = ["collected 1 item"] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1") + expected_lines = ["1 worker [1 item]"] + + expected_lines += [ + "* test_foo [[]custom[]] (i=1) *", + "* test_foo [[]custom[]] (i=3) *", + "* 2 failed, 1 passed, 3 subtests passed in *", + ] + result.stdout.fnmatch_lines(expected_lines) + + def test_simple_terminal_verbose( + self, + simple_script: None, + pytester: pytest.Pytester, + mode: Literal["normal", "xdist"], + ) -> None: + if mode == "normal": + result = pytester.runpytest("-v") + expected_lines = [ + "*collected 1 item", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=0) SUBPASS *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=1) SUBFAIL *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=2) SUBPASS *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=3) SUBFAIL *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=4) SUBPASS *100%*", + "test_simple_terminal_verbose.py::test_foo PASSED *100%*", + ] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1", "-v") + expected_lines = [ + "1 worker [1 item]", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* 
test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + ] + + expected_lines += [ + "* test_foo [[]custom[]] (i=1) *", + "* test_foo [[]custom[]] (i=3) *", + "* 2 failed, 1 passed, 3 subtests passed in *", + ] + result.stdout.fnmatch_lines(expected_lines) + + def test_skip( + self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] + ) -> None: + pytester.makepyfile( + """ + import pytest + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + if i % 2 == 0: + pytest.skip('even number') + """ + ) + if mode == "normal": + result = pytester.runpytest() + expected_lines = ["collected 1 item"] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1") + expected_lines = ["1 worker [1 item]"] + expected_lines += ["* 1 passed, 3 skipped, 2 subtests passed in *"] + result.stdout.fnmatch_lines(expected_lines) + + def test_xfail( + self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] + ) -> None: + pytester.makepyfile( + """ + import pytest + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + if i % 2 == 0: + pytest.xfail('even number') + """ + ) + if mode == "normal": + result = pytester.runpytest() + expected_lines = ["collected 1 item"] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1") + expected_lines = ["1 worker [1 item]"] + expected_lines += ["* 1 passed, 2 subtests passed, 3 subtests xfailed in *"] + result.stdout.fnmatch_lines(expected_lines) + + def test_typing_exported( + self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] + ) -> None: + pytester.makepyfile( + """ + from pytest import SubTests + + def test_typing_exported(subtests: SubTests) -> None: + assert isinstance(subtests, SubTests) + """ + ) + if mode == "normal": + result = pytester.runpytest() + expected_lines = ["collected 1 item"] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1") + expected_lines = ["1 worker [1 item]"] + expected_lines += ["* 1 passed *"] + result.stdout.fnmatch_lines(expected_lines) + + def test_no_subtests_reports( + self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] + ) -> None: + pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + pass + """ + ) + # Without `--no-subtests-reports`, subtests are reported normally. + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "*collected 1 item*", + "test_no_subtests_reports.py::test_foo * (i=0) SUBPASS*", + "*test_no_subtests_reports.py::test_foo PASSED*", + "* 1 passed, 5 subtests passed in*", + ] + ) + + # With `--no-subtests-reports`, passing subtests are no longer reported. + result = pytester.runpytest("-v", "--no-subtests-reports") + result.stdout.fnmatch_lines( + [ + "*collected 1 item*", + "*test_no_subtests_reports.py::test_foo PASSED*", + "* 1 passed in*", + ] + ) + result.stdout.no_fnmatch_line("*SUBPASS*") + + # Rewrite the test file so the tests fail. Even with the flag, failed subtests are still reported. 
+ pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + assert False + """ + ) + result = pytester.runpytest("-v", "--no-subtests-reports") + result.stdout.fnmatch_lines( + [ + "*collected 1 item*", + "test_no_subtests_reports.py::test_foo * (i=0) SUBFAIL*", + "*test_no_subtests_reports.py::test_foo PASSED*", + "* 5 failed, 1 passed in*", + ] + ) + + +class TestSubTest: + """ + Test Test.subTest functionality. + """ + + @pytest.fixture + def simple_script(self, pytester: pytest.Pytester) -> Path: + return pytester.makepyfile( + """ + from unittest import TestCase, main + + class T(TestCase): + + def test_foo(self): + for i in range(5): + with self.subTest(msg="custom", i=i): + self.assertEqual(i % 2, 0) + + if __name__ == '__main__': + main() + """ + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_simple_terminal_normal( + self, + simple_script: Path, + pytester: pytest.Pytester, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + suffix = ".test_foo" if IS_PY311 else "" + if runner == "unittest": + result = pytester.run(sys.executable, simple_script) + result.stderr.fnmatch_lines( + [ + f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)", + "AssertionError: 1 != 0", + f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)", + "AssertionError: 1 != 0", + "Ran 1 test in *", + "FAILED (failures=2)", + ] + ) + else: + if runner == "pytest-normal": + result = pytester.runpytest(simple_script) + expected_lines = ["collected 1 item"] + else: + assert runner == "pytest-xdist" + pytest.importorskip("xdist") + result = pytester.runpytest(simple_script, "-n1") + expected_lines = ["1 worker [1 item]"] + result.stdout.fnmatch_lines( + expected_lines + + [ + "* T.test_foo [[]custom[]] (i=1) *", + "E * AssertionError: 1 != 0", + "* T.test_foo [[]custom[]] (i=3) *", + "E * AssertionError: 1 != 0", + "* 2 failed, 1 passed, 3 subtests passed in *", + ] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_simple_terminal_verbose( + self, + simple_script: Path, + pytester: pytest.Pytester, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + suffix = ".test_foo" if IS_PY311 else "" + if runner == "unittest": + result = pytester.run(sys.executable, simple_script, "-v") + result.stderr.fnmatch_lines( + [ + f"test_foo (__main__.T{suffix}) ... 
", + f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)", + "AssertionError: 1 != 0", + f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)", + "AssertionError: 1 != 0", + "Ran 1 test in *", + "FAILED (failures=2)", + ] + ) + else: + if runner == "pytest-normal": + result = pytester.runpytest(simple_script, "-v") + expected_lines = [ + "*collected 1 item", + "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=1) SUBFAIL *100%*", + "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=3) SUBFAIL *100%*", + "test_simple_terminal_verbose.py::T::test_foo PASSED *100%*", + ] + else: + assert runner == "pytest-xdist" + pytest.importorskip("xdist") + result = pytester.runpytest(simple_script, "-n1", "-v") + expected_lines = [ + "1 worker [1 item]", + "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", + "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", + "*gw0*100%* PASSED test_simple_terminal_verbose.py::T::test_foo*", + ] + result.stdout.fnmatch_lines( + expected_lines + + [ + "* T.test_foo [[]custom[]] (i=1) *", + "E * AssertionError: 1 != 0", + "* T.test_foo [[]custom[]] (i=3) *", + "E * AssertionError: 1 != 0", + "* 2 failed, 1 passed, 3 subtests passed in *", + ] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_skip( + self, + pytester: pytest.Pytester, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + p = pytester.makepyfile( + """ + from unittest import TestCase, main + + class T(TestCase): + + def test_foo(self): + for i in range(5): + with self.subTest(msg="custom", i=i): + if i % 2 == 0: + self.skipTest('even number') + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (skipped=3)"]) + else: + pytest.xfail("Not producing the expected results (#5)") + result = pytester.runpytest(p) # type:ignore[unreachable] + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 skipped, 1 passed in *"] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + @pytest.mark.xfail(reason="Not producing the expected results (#5)") + def test_xfail( + self, + pytester: pytest.Pytester, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + p = pytester.makepyfile( + """ + import pytest + from unittest import expectedFailure, TestCase, main + + class T(TestCase): + @expectedFailure + def test_foo(self): + for i in range(5): + with self.subTest(msg="custom", i=i): + if i % 2 == 0: + raise pytest.xfail('even number') + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (expected failures=3)"]) + else: + result = pytester.runpytest(p) + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 xfailed, 1 passed in *"] + ) + + @pytest.mark.parametrize("runner", ["pytest-normal"]) + def test_only_original_skip_is_called( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + runner: Literal["pytest-normal"], + ) -> None: + """Regression test for #173.""" + monkeypatch.setenv("COLUMNS", "200") + p = pytester.makepyfile( + """ + import unittest + from unittest import TestCase, main + + @unittest.skip("skip this test") + class T(unittest.TestCase): + def test_foo(self): + assert 1 == 2 + + if __name__ == '__main__': + main() + """ + ) + result = pytester.runpytest(p, "-v", "-rsf") + 
result.stdout.fnmatch_lines( + ["SKIPPED [1] test_only_original_skip_is_called.py:6: skip this test"] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_skip_with_failure( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + monkeypatch.setenv("COLUMNS", "200") + p = pytester.makepyfile( + """ + import pytest + from unittest import expectedFailure, TestCase, main + + class T(TestCase): + def test_foo(self): + for i in range(10): + with self.subTest("custom message", i=i): + if i < 4: + self.skipTest(f"skip subtest i={i}") + assert i < 4 + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + if sys.version_info < (3, 11): + result.stderr.re_match_lines( + [ + r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=4\).*", + r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=9\).*", + r"Ran 1 test in .*", + r"FAILED \(failures=6, skipped=4\)", + ] + ) + else: + result.stderr.re_match_lines( + [ + r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=4\).*", + r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=9\).*", + r"Ran 1 test in .*", + r"FAILED \(failures=6, skipped=4\)", + ] + ) + elif runner == "pytest-normal": + result = pytester.runpytest(p, "-v", "-rsf") + result.stdout.re_match_lines( + [ + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIP \(skip subtest i=0\) .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIP \(skip subtest i=3\) .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=9\) SUBFAIL .*", + "test_skip_with_failure.py::T::test_foo PASSED .*", + r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=0", + r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=3", + r"[custom message] (i=4) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 4 < 4", + r"[custom message] (i=9) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 9 < 4", + r".* 6 failed, 1 passed, 4 skipped in .*", + ] + ) + else: + pytest.xfail("Not producing the expected results (#5)") + result = pytester.runpytest(p) # type:ignore[unreachable] + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 skipped, 1 passed in *"] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_skip_with_failure_and_non_subskip( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + monkeypatch.setenv("COLUMNS", "200") + p = pytester.makepyfile( + """ + import pytest + from unittest import expectedFailure, TestCase, main + + class T(TestCase): + def test_foo(self): + for i in range(10): + with self.subTest("custom message", i=i): + if i < 4: + self.skipTest(f"skip subtest i={i}") + assert i < 4 + self.skipTest(f"skip the test") + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + if sys.version_info < (3, 11): + result.stderr.re_match_lines( + [ + r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=4\).*", + r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=9\).*", + r"Ran 1 test in .*", + r"FAILED \(failures=6, skipped=5\)", + ] + ) 
+ else: + result.stderr.re_match_lines( + [ + r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=4\).*", + r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=9\).*", + r"Ran 1 test in .*", + r"FAILED \(failures=6, skipped=5\)", + ] + ) + elif runner == "pytest-normal": + result = pytester.runpytest(p, "-v", "-rsf") + # The `(i=0)` is not correct but it's given by pytest `TerminalReporter` without `--no-fold-skipped` + result.stdout.re_match_lines( + [ + r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", + r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\)", + r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip subtest i=3", + r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip the test", + r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", + r".* 6 failed, 5 skipped in .*", + ] + ) + # Check with `--no-fold-skipped` (which gives the correct information). + if sys.version_info >= (3, 10) and pytest.version_tuple[:2] >= (8, 3): + result = pytester.runpytest(p, "-v", "--no-fold-skipped", "-rsf") + result.stdout.re_match_lines( + [ + r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", + r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\).*", + r"\[custom message\] \(i=3\) SUBSKIP test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip subtest i=3", + r"SKIPPED test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip the test", + r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", + r".* 6 failed, 5 skipped in .*", + ] + ) + else: + pytest.xfail("Not producing the expected results (#5)") + result = pytester.runpytest(p) # type:ignore[unreachable] + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 skipped, 1 passed in *"] + ) + + +class TestCapture: + def create_file(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import sys + def test(subtests): + print() + print('start test') + + with subtests.test(i='A'): + print("hello stdout A") + print("hello stderr A", file=sys.stderr) + assert 0 + + with subtests.test(i='B'): + print("hello stdout B") + print("hello stderr B", file=sys.stderr) + assert 0 + + print('end test') + assert 0 + """ + ) + + def test_capturing(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "*__ test (i='A') __*", + "*Captured stdout call*", + "hello stdout A", + "*Captured stderr call*", + "hello stderr A", + "*__ test (i='B') __*", + "*Captured stdout call*", + "hello stdout B", + "*Captured stderr call*", + "hello stderr B", + "*__ test __*", + "*Captured stdout call*", + "start test", + "end test", + ] + ) + + def test_no_capture(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest("-s") + result.stdout.fnmatch_lines( + [ + "start test", + "hello stdout A", + "uhello stdout B", + "uend test", + "*__ test (i='A') __*", + "*__ test (i='B') __*", + "*__ test __*", + ] + ) + result.stderr.fnmatch_lines(["hello stderr A", "hello stderr B"]) + + @pytest.mark.parametrize("fixture", ["capsys", "capfd"]) + def test_capture_with_fixture( + self, pytester: pytest.Pytester, fixture: Literal["capsys", "capfd"] + ) -> None: + pytester.makepyfile( 
+ rf""" + import sys + + def test(subtests, {fixture}): + print('start test') + + with subtests.test(i='A'): + print("hello stdout A") + print("hello stderr A", file=sys.stderr) + + out, err = {fixture}.readouterr() + assert out == 'start test\nhello stdout A\n' + assert err == 'hello stderr A\n' + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "*1 passed*", + ] + ) + + +class TestLogging: + def create_file(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import logging + + def test_foo(subtests): + logging.info("before") + + with subtests.test("sub1"): + print("sub1 stdout") + logging.info("sub1 logging") + + with subtests.test("sub2"): + print("sub2 stdout") + logging.info("sub2 logging") + assert False + """ + ) + + def test_capturing(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest("--log-level=INFO") + result.stdout.fnmatch_lines( + [ + "*___ test_foo [[]sub2[]] __*", + "*-- Captured stdout call --*", + "sub2 stdout", + "*-- Captured log call ---*", + "INFO root:test_capturing.py:12 sub2 logging", + "*== short test summary info ==*", + ] + ) + + def test_caplog(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import logging + + def test(subtests, caplog): + caplog.set_level(logging.INFO) + logging.info("start test") + + with subtests.test("sub1"): + logging.info("inside %s", "subtest1") + + assert len(caplog.records) == 2 + assert caplog.records[0].getMessage() == "start test" + assert caplog.records[1].getMessage() == "inside subtest1" + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "*1 passed*", + ] + ) + + def test_no_logging(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import logging + + def test(subtests): + logging.info("start log line") + + with subtests.test("sub passing"): + logging.info("inside %s", "passing log line") + + with subtests.test("sub failing"): + logging.info("inside %s", "failing log line") + assert False + + logging.info("end log line") + """ + ) + result = pytester.runpytest("-p no:logging") + result.stdout.fnmatch_lines( + [ + "*1 passed*", + ] + ) + result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*") + + +class TestDebugging: + """Check --pdb support for subtests fixture and TestCase.subTest.""" + + class _FakePdb: + """ + Fake debugger class implementation that tracks which methods were called on it. 
+ """ + + quitting: bool = False + calls: list[str] = [] + + def __init__(self, *_: object, **__: object) -> None: + self.calls.append("init") + + def reset(self) -> None: + self.calls.append("reset") + + def interaction(self, *_: object) -> None: + self.calls.append("interaction") + + @pytest.fixture(autouse=True) + def cleanup_calls(self) -> None: + self._FakePdb.calls.clear() + + def test_pdb_fixture( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + pytester.makepyfile( + """ + def test(subtests): + with subtests.test(): + assert 0 + """ + ) + self.runpytest_and_check_pdb(pytester, monkeypatch) + + def test_pdb_unittest( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + pytester.makepyfile( + """ + from unittest import TestCase + class Test(TestCase): + def test(self): + with self.subTest(): + assert 0 + """ + ) + self.runpytest_and_check_pdb(pytester, monkeypatch) + + def runpytest_and_check_pdb( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + # Install the fake pdb implementation in _pytest.subtests so we can reference + # it in the command line (any module would do). + import _pytest.subtests + + monkeypatch.setattr( + _pytest.subtests, "_CustomPdb", self._FakePdb, raising=False + ) + result = pytester.runpytest("--pdb", "--pdbcls=_pytest.subtests:_CustomPdb") + + # Ensure pytest entered in debugging mode when encountering the failing + # assert. + result.stdout.fnmatch_lines("*entering PDB*") + assert self._FakePdb.calls == ["init", "reset", "interaction"] + + +def test_exitfirst(pytester: pytest.Pytester) -> None: + """ + Validate that when passing --exitfirst the test exits after the first failed subtest. + """ + pytester.makepyfile( + """ + def test_foo(subtests): + with subtests.test("sub1"): + assert False + + with subtests.test("sub2"): + assert False + """ + ) + result = pytester.runpytest("--exitfirst") + assert result.parseoutcomes()["failed"] == 2 + result.stdout.fnmatch_lines( + [ + "*[[]sub1[]] SUBFAIL test_exitfirst.py::test_foo - assert False*", + "FAILED test_exitfirst.py::test_foo - assert False", + "* stopping after 2 failures*", + ], + consecutive=True, + ) + result.stdout.no_fnmatch_line("*sub2*") # sub2 not executed. 
From 6c71ef1fc8f563169e197fdfeed398cfcb75702b Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 20 Sep 2025 08:48:09 -0300 Subject: [PATCH 02/18] subtests: remove direct pytest import --- src/_pytest/subtests.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index d4be44f2d4e..3a6502d6718 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -21,14 +21,19 @@ from _pytest.capture import CaptureFixture from _pytest.capture import FDCapture from _pytest.capture import SysCapture +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config import Parser +from _pytest.fixtures import fixture from _pytest.fixtures import SubRequest from _pytest.logging import catching_logs from _pytest.logging import LogCaptureHandler +from _pytest.nodes import Item from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception from _pytest.unittest import TestCaseFunction -import pytest +from _pytest.warning_types import PytestDeprecationWarning if TYPE_CHECKING: @@ -36,7 +41,7 @@ from typing import Literal -def pytest_addoption(parser: pytest.Parser) -> None: +def pytest_addoption(parser: Parser) -> None: group = parser.getgroup("subtests") group.addoption( "--no-subtests-shortletter", @@ -174,7 +179,7 @@ def _addSubTest( self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] -def pytest_configure(config: pytest.Config) -> None: +def pytest_configure(config: Config) -> None: TestCaseFunction.addSubTest = _addSubTest # type: ignore[attr-defined] TestCaseFunction.failfast = False # type: ignore[attr-defined] # This condition is to prevent `TestCaseFunction._originaladdSkip` being assigned again in a subprocess from a @@ -217,7 +222,7 @@ def pytest_unconfigure() -> None: del TestCaseFunction._originaladdSkip -@pytest.fixture +@fixture def subtests(request: SubRequest) -> Generator[SubTests, None, None]: """Provides subtests functionality.""" capmam = request.node.config.pluginmanager.get_plugin("capturemanager") @@ -235,7 +240,7 @@ class SubTests: request: SubRequest = attr.ib() @property - def item(self) -> pytest.Item: + def item(self) -> Item: return self.request.node def test( @@ -414,7 +419,7 @@ def ignore_pytest_private_warning() -> Generator[None, None, None]: warnings.filterwarnings( "ignore", "A private pytest class or function was used.", - category=pytest.PytestDeprecationWarning, + category=PytestDeprecationWarning, ) yield @@ -424,7 +429,7 @@ class Captured: out = attr.ib(default="", type=str) err = attr.ib(default="", type=str) - def update_report(self, report: pytest.TestReport) -> None: + def update_report(self, report: TestReport) -> None: if self.out: report.sections.append(("Captured stdout call", self.out)) if self.err: @@ -435,16 +440,16 @@ class CapturedLogs: def __init__(self, handler: LogCaptureHandler) -> None: self._handler = handler - def update_report(self, report: pytest.TestReport) -> None: + def update_report(self, report: TestReport) -> None: report.sections.append(("Captured log call", self._handler.stream.getvalue())) class NullCapturedLogs: - def update_report(self, report: pytest.TestReport) -> None: + def update_report(self, report: TestReport) -> None: pass -def pytest_report_to_serializable(report: pytest.TestReport) -> dict[str, Any] | None: +def pytest_report_to_serializable(report: TestReport) -> dict[str, Any] | None: if 
isinstance(report, SubTestReport): return report._to_json() return None @@ -456,10 +461,10 @@ def pytest_report_from_serializable(data: dict[str, Any]) -> SubTestReport | Non return None -@pytest.hookimpl(tryfirst=True) +@hookimpl(tryfirst=True) def pytest_report_teststatus( - report: pytest.TestReport, - config: pytest.Config, + report: TestReport, + config: Config, ) -> tuple[str, str, str | Mapping[str, bool]] | None: if report.when != "call" or not isinstance(report, SubTestReport): return None From bbfcafee3d1fb92a06a42764c5d5a3262d956073 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Mon, 22 Sep 2025 20:04:51 -0300 Subject: [PATCH 03/18] Force using xdist plugin and fix linting --- src/_pytest/subtests.py | 19 ++++++------- testing/test_subtests.py | 61 ++++++++++++++++++++-------------------- 2 files changed, 40 insertions(+), 40 deletions(-) diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index 3a6502d6718..8cc9b0448f9 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -4,13 +4,13 @@ from collections.abc import Generator from collections.abc import Iterator from collections.abc import Mapping +from contextlib import AbstractContextManager from contextlib import contextmanager from contextlib import ExitStack from contextlib import nullcontext import sys import time from typing import Any -from typing import ContextManager from typing import TYPE_CHECKING from unittest import TestCase @@ -23,12 +23,11 @@ from _pytest.capture import SysCapture from _pytest.config import Config from _pytest.config import hookimpl -from _pytest.config import Parser +from _pytest.config.argparsing import Parser from _pytest.fixtures import fixture from _pytest.fixtures import SubRequest from _pytest.logging import catching_logs from _pytest.logging import LogCaptureHandler -from _pytest.nodes import Item from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception @@ -85,7 +84,7 @@ def sub_test_description(self) -> str: parts.append(f"({params_desc})") return " ".join(parts) or "()" - def _to_json(self) -> dict: + def _to_json(self) -> dict[str, Any]: data = super()._to_json() del data["context"] data["_report_type"] = "SubTestReport" @@ -236,11 +235,11 @@ def subtests(request: SubRequest) -> Generator[SubTests, None, None]: @attr.s class SubTests: ihook: pluggy.HookRelay = attr.ib() - suspend_capture_ctx: Callable[[], ContextManager] = attr.ib() + suspend_capture_ctx: Callable[[], AbstractContextManager[None]] = attr.ib() request: SubRequest = attr.ib() @property - def item(self) -> Item: + def item(self) -> Any: return self.request.node def test( @@ -282,7 +281,7 @@ class _SubTestContextManager: ihook: pluggy.HookRelay msg: str | None kwargs: dict[str, Any] - suspend_capture_ctx: Callable[[], ContextManager] + suspend_capture_ctx: Callable[[], AbstractContextManager[None]] request: SubRequest def __enter__(self) -> None: @@ -302,8 +301,8 @@ def __enter__(self) -> None: def __exit__( self, - exc_type: type[Exception] | None, - exc_val: Exception | None, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> bool: __tracebackhide__ = True @@ -352,7 +351,7 @@ def make_call_info( stop: float, duration: float, when: Literal["collect", "setup", "call", "teardown"], -) -> CallInfo: +) -> CallInfo[Any]: return CallInfo( None, exc_info, diff --git a/testing/test_subtests.py b/testing/test_subtests.py index 4bc48451a27..e729ec1ba6d 100644 --- 
a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -12,9 +12,7 @@ @pytest.mark.parametrize("mode", ["normal", "xdist"]) class TestFixture: - """ - Tests for ``subtests`` fixture. - """ + """Tests for ``subtests`` fixture.""" @pytest.fixture def simple_script(self, pytester: pytest.Pytester) -> None: @@ -39,7 +37,7 @@ def test_simple_terminal_normal( else: assert mode == "xdist" pytest.importorskip("xdist") - result = pytester.runpytest("-n1") + result = pytester.runpytest("-n1", "-pxdist.plugin") expected_lines = ["1 worker [1 item]"] expected_lines += [ @@ -69,7 +67,7 @@ def test_simple_terminal_verbose( else: assert mode == "xdist" pytest.importorskip("xdist") - result = pytester.runpytest("-n1", "-v") + result = pytester.runpytest("-n1", "-v", "-pxdist.plugin") expected_lines = [ "1 worker [1 item]", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", @@ -106,7 +104,7 @@ def test_foo(subtests): else: assert mode == "xdist" pytest.importorskip("xdist") - result = pytester.runpytest("-n1") + result = pytester.runpytest("-n1", "-pxdist.plugin") expected_lines = ["1 worker [1 item]"] expected_lines += ["* 1 passed, 3 skipped, 2 subtests passed in *"] result.stdout.fnmatch_lines(expected_lines) @@ -130,7 +128,7 @@ def test_foo(subtests): else: assert mode == "xdist" pytest.importorskip("xdist") - result = pytester.runpytest("-n1") + result = pytester.runpytest("-n1", "-pxdist.plugin") expected_lines = ["1 worker [1 item]"] expected_lines += ["* 1 passed, 2 subtests passed, 3 subtests xfailed in *"] result.stdout.fnmatch_lines(expected_lines) @@ -152,7 +150,7 @@ def test_typing_exported(subtests: SubTests) -> None: else: assert mode == "xdist" pytest.importorskip("xdist") - result = pytester.runpytest("-n1") + result = pytester.runpytest("-n1", "-pxdist.plugin") expected_lines = ["1 worker [1 item]"] expected_lines += ["* 1 passed *"] result.stdout.fnmatch_lines(expected_lines) @@ -215,9 +213,7 @@ def test_foo(subtests): class TestSubTest: - """ - Test Test.subTest functionality. 
- """ + """Test.subTest functionality.""" @pytest.fixture def simple_script(self, pytester: pytest.Pytester) -> Path: @@ -264,11 +260,11 @@ def test_simple_terminal_normal( else: assert runner == "pytest-xdist" pytest.importorskip("xdist") - result = pytester.runpytest(simple_script, "-n1") + result = pytester.runpytest(simple_script, "-n1", "-pxdist.plugin") expected_lines = ["1 worker [1 item]"] result.stdout.fnmatch_lines( - expected_lines - + [ + [ + *expected_lines, "* T.test_foo [[]custom[]] (i=1) *", "E * AssertionError: 1 != 0", "* T.test_foo [[]custom[]] (i=3) *", @@ -310,7 +306,9 @@ def test_simple_terminal_verbose( else: assert runner == "pytest-xdist" pytest.importorskip("xdist") - result = pytester.runpytest(simple_script, "-n1", "-v") + result = pytester.runpytest( + simple_script, "-n1", "-v", "-pxdist.plugin" + ) expected_lines = [ "1 worker [1 item]", "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", @@ -318,8 +316,8 @@ def test_simple_terminal_verbose( "*gw0*100%* PASSED test_simple_terminal_verbose.py::T::test_foo*", ] result.stdout.fnmatch_lines( - expected_lines - + [ + [ + *expected_lines, "* T.test_foo [[]custom[]] (i=1) *", "E * AssertionError: 1 != 0", "* T.test_foo [[]custom[]] (i=3) *", @@ -470,15 +468,19 @@ def test_foo(self): result = pytester.runpytest(p, "-v", "-rsf") result.stdout.re_match_lines( [ - r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIP \(skip subtest i=0\) .*", - r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIP \(skip subtest i=3\) .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIP" + r" \(skip subtest i=0\) .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIP" + r" \(skip subtest i=3\) .*", r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=9\) SUBFAIL .*", "test_skip_with_failure.py::T::test_foo PASSED .*", r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=0", r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=3", - r"[custom message] (i=4) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 4 < 4", - r"[custom message] (i=9) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 9 < 4", + r"[custom message] (i=4) SUBFAIL test_skip_with_failure.py::T::test_foo" + r" - AssertionError: assert 4 < 4", + r"[custom message] (i=9) SUBFAIL test_skip_with_failure.py::T::test_foo" + r" - AssertionError: assert 9 < 4", r".* 6 failed, 1 passed, 4 skipped in .*", ] ) @@ -542,8 +544,10 @@ def test_foo(self): [ r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\)", - r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip subtest i=3", - r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip the test", + r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5:" + r" skip subtest i=3", + r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5:" + r" skip the test", r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", r".* 6 failed, 5 skipped in .*", ] @@ -555,7 +559,8 @@ def test_foo(self): [ 
r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\).*", - r"\[custom message\] \(i=3\) SUBSKIP test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip subtest i=3", + r"\[custom message\] \(i=3\) SUBSKIP test_skip_with_failure_and_non_subskip.py::T::test_foo" + r" - Skipped: skip subtest i=3", r"SKIPPED test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip the test", r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", r".* 6 failed, 5 skipped in .*", @@ -748,9 +753,7 @@ class TestDebugging: """Check --pdb support for subtests fixture and TestCase.subTest.""" class _FakePdb: - """ - Fake debugger class implementation that tracks which methods were called on it. - """ + """Fake debugger class implementation that tracks which methods were called on it.""" quitting: bool = False calls: list[str] = [] @@ -813,9 +816,7 @@ def runpytest_and_check_pdb( def test_exitfirst(pytester: pytest.Pytester) -> None: - """ - Validate that when passing --exitfirst the test exits after the first failed subtest. - """ + """Validate that when passing --exitfirst the test exits after the first failed subtest.""" pytester.makepyfile( """ def test_foo(subtests): From 1cca16cd18e7a594903b09ca2fc67e19e04a2823 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Mon, 22 Sep 2025 20:07:39 -0300 Subject: [PATCH 04/18] Replace attr by dataclass --- src/_pytest/subtests.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index 8cc9b0448f9..5d5816e168f 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -8,13 +8,13 @@ from contextlib import contextmanager from contextlib import ExitStack from contextlib import nullcontext +import dataclasses import sys import time from typing import Any from typing import TYPE_CHECKING from unittest import TestCase -import attr import pluggy from _pytest._code import ExceptionInfo @@ -58,15 +58,15 @@ def pytest_addoption(parser: Parser) -> None: ) -@attr.s +@dataclasses.dataclass class SubTestContext: - msg: str | None = attr.ib() - kwargs: dict[str, Any] = attr.ib() + msg: str | None + kwargs: dict[str, Any] -@attr.s(init=False) +@dataclasses.dataclass(init=False) class SubTestReport(TestReport): # type: ignore[misc] - context: SubTestContext = attr.ib() + context: SubTestContext @property def head_line(self) -> str: @@ -88,7 +88,7 @@ def _to_json(self) -> dict[str, Any]: data = super()._to_json() del data["context"] data["_report_type"] = "SubTestReport" - data["_subtest.context"] = attr.asdict(self.context) + data["_subtest.context"] = dataclasses.asdict(self.context) return data @classmethod @@ -232,11 +232,11 @@ def subtests(request: SubRequest) -> Generator[SubTests, None, None]: yield SubTests(request.node.ihook, suspend_capture_ctx, request) -@attr.s +@dataclasses.dataclass class SubTests: - ihook: pluggy.HookRelay = attr.ib() - suspend_capture_ctx: Callable[[], AbstractContextManager[None]] = attr.ib() - request: SubRequest = attr.ib() + ihook: pluggy.HookRelay + suspend_capture_ctx: Callable[[], AbstractContextManager[None]] + request: SubRequest @property def item(self) -> Any: @@ -267,7 +267,7 @@ def test( ) -@attr.s(auto_attribs=True) +@dataclasses.dataclass class _SubTestContextManager: """ Context manager for subtests, capturing exceptions raised inside the 
subtest scope and handling @@ -423,10 +423,10 @@ def ignore_pytest_private_warning() -> Generator[None, None, None]: yield -@attr.s +@dataclasses.dataclass() class Captured: - out = attr.ib(default="", type=str) - err = attr.ib(default="", type=str) + out: str = "" + err: str = "" def update_report(self, report: TestReport) -> None: if self.out: From 098c0400ded92dcbfeb4d1627d1158234a963c1e Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Fri, 26 Sep 2025 09:48:51 -0300 Subject: [PATCH 05/18] Add docs --- changelog/1367.feature.rst | 21 ++++++++ doc/en/how-to/index.rst | 1 + doc/en/how-to/parametrize.rst | 6 +++ doc/en/how-to/subtests.rst | 88 ++++++++++++++++++++++++++++++++++ doc/en/how-to/unittest.rst | 13 ++--- doc/en/reference/fixtures.rst | 3 ++ doc/en/reference/reference.rst | 13 +++++ src/_pytest/deprecated.py | 1 + src/_pytest/subtests.py | 44 ++++++++++++----- testing/test_subtests.py | 8 ++-- 10 files changed, 173 insertions(+), 25 deletions(-) create mode 100644 changelog/1367.feature.rst create mode 100644 doc/en/how-to/subtests.rst diff --git a/changelog/1367.feature.rst b/changelog/1367.feature.rst new file mode 100644 index 00000000000..83aa65254c8 --- /dev/null +++ b/changelog/1367.feature.rst @@ -0,0 +1,21 @@ +**Support for subtests** has been added. + +:ref:`subtests ` are an alternative to parametrization, useful in situations where test setup is expensive or the parametrization values are not all known at collection time. + +**Example** + +.. code-block:: python + + def test(subtests): + for i in range(5): + with subtests.test(msg="custom message", i=i): + assert i % 2 == 0 + + +Each assert failure or error is caught by the context manager and reported individually. + +In addition, :meth:`unittest.TestCase.subTest` is now also supported. + +.. note:: + + This feature is experimental and will likely evolve in future releases. By that we mean that we might change how subtests are reported on failure, but the functionality and how to use it are stable. diff --git a/doc/en/how-to/index.rst b/doc/en/how-to/index.rst index 225f289651e..9796f1f8090 100644 --- a/doc/en/how-to/index.rst +++ b/doc/en/how-to/index.rst @@ -16,6 +16,7 @@ Core pytest functionality fixtures mark parametrize + subtests tmp_path monkeypatch doctest diff --git a/doc/en/how-to/parametrize.rst b/doc/en/how-to/parametrize.rst index fe186146434..5c39358d32a 100644 --- a/doc/en/how-to/parametrize.rst +++ b/doc/en/how-to/parametrize.rst @@ -20,6 +20,11 @@ pytest enables test parametrization at several levels: * `pytest_generate_tests`_ allows one to define custom parametrization schemes or extensions. + +.. note:: + + See :ref:`subtests` for an alternative to parametrization. + .. _parametrizemark: .. _`@pytest.mark.parametrize`: @@ -194,6 +199,7 @@ To get all combinations of multiple parametrized arguments you can stack This will run the test with the arguments set to ``x=0/y=2``, ``x=1/y=2``, ``x=0/y=3``, and ``x=1/y=3`` exhausting parameters in the order of the decorators. + .. _`pytest_generate_tests`: Basic ``pytest_generate_tests`` example diff --git a/doc/en/how-to/subtests.rst b/doc/en/how-to/subtests.rst new file mode 100644 index 00000000000..ad3af0ea531 --- /dev/null +++ b/doc/en/how-to/subtests.rst @@ -0,0 +1,88 @@ +.. _subtests: + +How to use subtests +=================== + +.. versionadded:: 9.0 + +.. note:: + + This feature is experimental. Its behavior, particularly how failures are reported, may evolve in future releases. However, the core functionality and usage are considered stable. 
+ +pytest allows for grouping assertions within a normal test, known as *subtests*. + +Subtests are an alternative to parametrization, particularly useful when test setup is expensive or when the exact parametrization values are not known at collection time. + + +.. code-block:: python + + # content of test_subtest.py + + + def test(subtests): + for i in range(5): + with subtests.test(msg="custom message", i=i): + assert i % 2 == 0 + +Each assertion failure or error is caught by the context manager and reported individually: + +.. code-block:: pytest + + $ pytest -q test_subtest.py + + +Note that it is possible to use ``subtests`` multiple times in the same test, or even mix and match with normal assertions +outside the ``subtests.test`` block: + +.. code-block:: python + + def test(subtests): + for i in range(5): + with subtests.test(msg="stage 1", i=i): + assert i % 2 == 0 + + assert func() == 10 + + for i in range(10, 20): + with subtests.test(msg="stage 2", i=i): + assert i % 2 == 0 + +.. note:: + + See :ref:`parametrize` for an alternative to subtests. + + +Typing +------ + +:class:`pytest.SubTests` is exported so it can be used in type annotations: + +.. code-block:: python + + def test(subtests: pytest.SubTests) -> None: ... + +.. _parametrize_vs_subtests: + +Parametrization vs Subtests +--------------------------- + +While :ref:`traditional pytest parametrization ` and ``subtests`` are similar, they have important differences and use cases. + + +Parametrization +~~~~~~~~~~~~~~~ + +* Happens at collection time. +* Generates individual tests. +* Parametrized tests can be referenced from the command line. +* Plays well with plugins that handle test execution, such as ``--last-failed``. +* Ideal for decision table testing. + +Subtests +~~~~~~~~ + +* Happen during test execution. +* Are not known at collection time. +* Can be generated dynamically. +* Cannot be referenced individually from the command line. +* Plugins that handle test execution cannot target individual subtests. diff --git a/doc/en/how-to/unittest.rst b/doc/en/how-to/unittest.rst index ba98b366d04..a8c56c266bd 100644 --- a/doc/en/how-to/unittest.rst +++ b/doc/en/how-to/unittest.rst @@ -22,17 +22,14 @@ their ``test`` methods in ``test_*.py`` or ``*_test.py`` files. Almost all ``unittest`` features are supported: -* ``@unittest.skip`` style decorators; -* ``setUp/tearDown``; -* ``setUpClass/tearDownClass``; -* ``setUpModule/tearDownModule``; +* :func:`unittest.skip`/:func:`unittest.skipIf` style decorators +* :meth:`unittest.TestCase.setUp`/:meth:`unittest.TestCase.tearDown` +* :meth:`unittest.TestCase.setUpClass`/:meth:`unittest.TestCase.tearDownClass` +* :func:`unittest.setUpModule`/:func:`unittest.tearDownModule` +* :meth:`unittest.TestCase.subTest` (since version ``9.0``) -.. _`pytest-subtests`: https://github.com/pytest-dev/pytest-subtests .. _`load_tests protocol`: https://docs.python.org/3/library/unittest.html#load-tests-protocol -Additionally, :ref:`subtests ` are supported by the -`pytest-subtests`_ plugin. - Up to this point pytest does not have support for the following features: * `load_tests protocol`_; diff --git a/doc/en/reference/fixtures.rst b/doc/en/reference/fixtures.rst index 566304d3330..02e235ceb9e 100644 --- a/doc/en/reference/fixtures.rst +++ b/doc/en/reference/fixtures.rst @@ -52,6 +52,9 @@ Built-in fixtures :fixture:`pytestconfig` Access to configuration values, pluginmanager and plugin hooks. + :fixture:`subtests` + Enable declaring subtests inside test functions. 
+ :fixture:`record_property` Add extra properties to the test. diff --git a/doc/en/reference/reference.rst b/doc/en/reference/reference.rst index 3dfa11901ea..e62e6377820 100644 --- a/doc/en/reference/reference.rst +++ b/doc/en/reference/reference.rst @@ -572,6 +572,19 @@ The ``request`` fixture is a special fixture providing information of the reques :members: +.. fixture:: subtests + +subtests +~~~~~~~~ + +The ``subtests`` fixture enables declaring subtests inside test functions. + +**Tutorial**: :ref:`subtests` + +.. autoclass:: pytest.SubTests() + :members: + + .. fixture:: testdir testdir diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 60540552401..cb5d2e93e93 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -25,6 +25,7 @@ "pytest_catchlog", "pytest_capturelog", "pytest_faulthandler", + "pytest_subtests", } diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index 5d5816e168f..6700df297c6 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -232,15 +232,24 @@ def subtests(request: SubRequest) -> Generator[SubTests, None, None]: yield SubTests(request.node.ihook, suspend_capture_ctx, request) -@dataclasses.dataclass +# Note: cannot use a dataclass here because Sphinx insists on showing up the __init__ method in the documentation, +# even if we explicitly use :exclude-members: __init__. class SubTests: - ihook: pluggy.HookRelay - suspend_capture_ctx: Callable[[], AbstractContextManager[None]] - request: SubRequest + """Subtests fixture, enables declaring subtests inside test functions via the :meth:`test` method.""" + + def __init__( + self, + ihook: pluggy.HookRelay, + suspend_capture_ctx: Callable[[], AbstractContextManager[None]], + request: SubRequest, + ) -> None: + self._ihook = ihook + self._suspend_capture_ctx = suspend_capture_ctx + self._request = request @property def item(self) -> Any: - return self.request.node + return self._request.node def test( self, @@ -248,22 +257,31 @@ def test( **kwargs: Any, ) -> _SubTestContextManager: """ - Context manager for subtests, capturing exceptions raised inside the subtest scope and handling - them through the pytest machinery. + Context manager for subtests, capturing exceptions raised inside the subtest scope and + reporting assertion failures and errors individually. - Usage: + Usage + ----- .. code-block:: python - with subtests.test(msg="subtest"): - assert 1 == 1 + def test(subtests): + for i in range(5): + with subtests.test(msg="custom message", i=i): + assert i % 2 == 0 + + :param msg: + If given, the message will be shown in the test report in case of subtest failure. + + :param kwargs: + Arbitrary values that are also added to the subtest report. 
""" return _SubTestContextManager( - self.ihook, + self._ihook, msg, kwargs, - request=self.request, - suspend_capture_ctx=self.suspend_capture_ctx, + request=self._request, + suspend_capture_ctx=self._suspend_capture_ctx, ) diff --git a/testing/test_subtests.py b/testing/test_subtests.py index e729ec1ba6d..fa32c076813 100644 --- a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -352,14 +352,14 @@ def test_foo(self): result = pytester.runpython(p) result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (skipped=3)"]) else: - pytest.xfail("Not producing the expected results (#5)") + pytest.xfail("Not producing the expected results (#13756)") result = pytester.runpytest(p) # type:ignore[unreachable] result.stdout.fnmatch_lines( ["collected 1 item", "* 3 skipped, 1 passed in *"] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) - @pytest.mark.xfail(reason="Not producing the expected results (#5)") + @pytest.mark.xfail(reason="Not producing the expected results (#13756)") def test_xfail( self, pytester: pytest.Pytester, @@ -485,7 +485,7 @@ def test_foo(self): ] ) else: - pytest.xfail("Not producing the expected results (#5)") + pytest.xfail("Not producing the expected results (#13756)") result = pytester.runpytest(p) # type:ignore[unreachable] result.stdout.fnmatch_lines( ["collected 1 item", "* 3 skipped, 1 passed in *"] @@ -567,7 +567,7 @@ def test_foo(self): ] ) else: - pytest.xfail("Not producing the expected results (#5)") + pytest.xfail("Not producing the expected results (#13756)") result = pytester.runpytest(p) # type:ignore[unreachable] result.stdout.fnmatch_lines( ["collected 1 item", "* 3 skipped, 1 passed in *"] From 09eaf6ea60d2a2f29c3527a1ca296aa0748af293 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Fri, 26 Sep 2025 10:34:30 -0300 Subject: [PATCH 06/18] Cleanup internal hacks --- src/_pytest/reports.py | 1 - src/_pytest/subtests.py | 157 +++------------------------------------ src/_pytest/terminal.py | 5 ++ src/_pytest/unittest.py | 109 +++++++++++++++++++++++++-- testing/test_subtests.py | 2 +- 5 files changed, 120 insertions(+), 154 deletions(-) diff --git a/src/_pytest/reports.py b/src/_pytest/reports.py index fb0607bfb95..8deed3be79e 100644 --- a/src/_pytest/reports.py +++ b/src/_pytest/reports.py @@ -251,7 +251,6 @@ def _report_unserialization_failure( raise RuntimeError(stream.getvalue()) -@final class TestReport(BaseReport): """Basic test report object (also used for setup and teardown calls if they fail). 
diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index 6700df297c6..c5a3bedfa90 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -9,11 +9,9 @@ from contextlib import ExitStack from contextlib import nullcontext import dataclasses -import sys import time from typing import Any from typing import TYPE_CHECKING -from unittest import TestCase import pluggy @@ -31,8 +29,6 @@ from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception -from _pytest.unittest import TestCaseFunction -from _pytest.warning_types import PytestDeprecationWarning if TYPE_CHECKING: @@ -60,12 +56,14 @@ def pytest_addoption(parser: Parser) -> None: @dataclasses.dataclass class SubTestContext: + """The values passed to SubTests.test() that are included in the test report.""" + msg: str | None kwargs: dict[str, Any] @dataclasses.dataclass(init=False) -class SubTestReport(TestReport): # type: ignore[misc] +class SubTestReport(TestReport): context: SubTestContext @property @@ -105,122 +103,6 @@ def _from_test_report(cls, test_report: TestReport) -> SubTestReport: return super()._from_json(test_report._to_json()) -def _addSkip(self: TestCaseFunction, testcase: TestCase, reason: str) -> None: - from unittest.case import _SubTest # type: ignore[attr-defined] - - if isinstance(testcase, _SubTest): - self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] - if self._excinfo is not None: - exc_info = self._excinfo[-1] - self.addSubTest(testcase.test_case, testcase, exc_info) # type: ignore[attr-defined] - else: - # For python < 3.11: the non-subtest skips have to be added by `_originaladdSkip` only after all subtest - # failures are processed by `_addSubTest`. (`self.instance._outcome` has no attribute `skipped/errors` anymore.) - # For python < 3.11, we also need to check if `self.instance._outcome` is `None` (this happens if the test - # class/method is decorated with `unittest.skip`, see #173). - if sys.version_info < (3, 11) and self.instance._outcome is not None: - subtest_errors = [ - x - for x, y in self.instance._outcome.errors - if isinstance(x, _SubTest) and y is not None - ] - if len(subtest_errors) == 0: - self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] - else: - self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] - - -def _addSubTest( - self: TestCaseFunction, - test_case: Any, - test: TestCase, - exc_info: tuple[type[BaseException], BaseException, TracebackType] | None, -) -> None: - msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] - call_info = make_call_info( - ExceptionInfo(exc_info, _ispytest=True) if exc_info else None, - start=0, - stop=0, - duration=0, - when="call", - ) - report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) - sub_report = SubTestReport._from_test_report(report) - sub_report.context = SubTestContext(msg, dict(test.params)) # type: ignore[attr-defined] - self.ihook.pytest_runtest_logreport(report=sub_report) - if check_interactive_exception(call_info, sub_report): - self.ihook.pytest_exception_interact( - node=self, call=call_info, report=sub_report - ) - - # For python < 3.11: add non-subtest skips once all subtest failures are processed by # `_addSubTest`. 
- if sys.version_info < (3, 11): - from unittest.case import _SubTest # type: ignore[attr-defined] - - non_subtest_skip = [ - (x, y) - for x, y in self.instance._outcome.skipped - if not isinstance(x, _SubTest) - ] - subtest_errors = [ - (x, y) - for x, y in self.instance._outcome.errors - if isinstance(x, _SubTest) and y is not None - ] - # Check if we have non-subtest skips: if there are also sub failures, non-subtest skips are not treated in - # `_addSubTest` and have to be added using `_originaladdSkip` after all subtest failures are processed. - if len(non_subtest_skip) > 0 and len(subtest_errors) > 0: - # Make sure we have processed the last subtest failure - last_subset_error = subtest_errors[-1] - if exc_info is last_subset_error[-1]: - # Add non-subtest skips (as they could not be treated in `_addSkip`) - for testcase, reason in non_subtest_skip: - self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] - - -def pytest_configure(config: Config) -> None: - TestCaseFunction.addSubTest = _addSubTest # type: ignore[attr-defined] - TestCaseFunction.failfast = False # type: ignore[attr-defined] - # This condition is to prevent `TestCaseFunction._originaladdSkip` being assigned again in a subprocess from a - # parent python process where `addSkip` is already `_addSkip`. A such case is when running tests in - # `test_subtests.py` where `pytester.runpytest` is used. Without this guard condition, `_originaladdSkip` is - # assigned to `_addSkip` which is wrong as well as causing an infinite recursion in some cases. - if not hasattr(TestCaseFunction, "_originaladdSkip"): - TestCaseFunction._originaladdSkip = TestCaseFunction.addSkip # type: ignore[attr-defined] - TestCaseFunction.addSkip = _addSkip # type: ignore[method-assign] - - # Hack (#86): the terminal does not know about the "subtests" - # status, so it will by default turn the output to yellow. - # This forcibly adds the new 'subtests' status. - import _pytest.terminal - - new_types = tuple( - f"subtests {outcome}" for outcome in ("passed", "failed", "skipped") - ) - # We need to check if we are not re-adding because we run our own tests - # with pytester in-process mode, so this will be called multiple times. 
- if new_types[0] not in _pytest.terminal.KNOWN_TYPES: - _pytest.terminal.KNOWN_TYPES = _pytest.terminal.KNOWN_TYPES + new_types # type: ignore[assignment] - - _pytest.terminal._color_for_type.update( - { - f"subtests {outcome}": _pytest.terminal._color_for_type[outcome] - for outcome in ("passed", "failed", "skipped") - if outcome in _pytest.terminal._color_for_type - } - ) - - -def pytest_unconfigure() -> None: - if hasattr(TestCaseFunction, "addSubTest"): - del TestCaseFunction.addSubTest - if hasattr(TestCaseFunction, "failfast"): - del TestCaseFunction.failfast - if hasattr(TestCaseFunction, "_originaladdSkip"): - TestCaseFunction.addSkip = TestCaseFunction._originaladdSkip # type: ignore[method-assign] - del TestCaseFunction._originaladdSkip - - @fixture def subtests(request: SubRequest) -> Generator[SubTests, None, None]: """Provides subtests functionality.""" @@ -247,10 +129,6 @@ def __init__( self._suspend_capture_ctx = suspend_capture_ctx self._request = request - @property - def item(self) -> Any: - return self._request.node - def test( self, msg: str | None = None, @@ -293,7 +171,7 @@ class _SubTestContextManager: Note: initially this logic was implemented directly in SubTests.test() as a @contextmanager, however it is not possible to control the output fully when exiting from it due to an exception when - in --exitfirst mode, so this was refactored into an explicit context manager class (#134). + in --exitfirst mode, so this was refactored into an explicit context manager class (pytest-dev/pytest-subtests#134). """ ihook: pluggy.HookRelay @@ -390,11 +268,9 @@ def capturing_output(request: SubRequest) -> Iterator[Captured]: capture_fixture_active = getattr(capman, "_capture_fixture", None) if option == "sys" and not capture_fixture_active: - with ignore_pytest_private_warning(): - fixture = CaptureFixture(SysCapture, request) + fixture = CaptureFixture(SysCapture, request, _ispytest=True) elif option == "fd" and not capture_fixture_active: - with ignore_pytest_private_warning(): - fixture = CaptureFixture(FDCapture, request) + fixture = CaptureFixture(FDCapture, request, _ispytest=True) else: fixture = None @@ -428,20 +304,7 @@ def capturing_logs( yield captured_logs -@contextmanager -def ignore_pytest_private_warning() -> Generator[None, None, None]: - import warnings - - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - "A private pytest class or function was used.", - category=PytestDeprecationWarning, - ) - yield - - -@dataclasses.dataclass() +@dataclasses.dataclass class Captured: out: str = "" err: str = "" @@ -453,12 +316,12 @@ def update_report(self, report: TestReport) -> None: report.sections.append(("Captured stderr call", self.err)) +@dataclasses.dataclass class CapturedLogs: - def __init__(self, handler: LogCaptureHandler) -> None: - self._handler = handler + handler: LogCaptureHandler def update_report(self, report: TestReport) -> None: - report.sections.append(("Captured log call", self._handler.stream.getvalue())) + report.sections.append(("Captured log call", self.handler.stream.getvalue())) class NullCapturedLogs: diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py index ed62c9e345e..929b0d51aaa 100644 --- a/src/_pytest/terminal.py +++ b/src/_pytest/terminal.py @@ -69,6 +69,9 @@ "xpassed", "warnings", "error", + "subtests passed", + "subtests failed", + "subtests skipped", ) _REPORTCHARS_DEFAULT = "fE" @@ -1579,6 +1582,8 @@ def _folded_skips( "error": "red", "warnings": "yellow", "passed": "green", + "subtests passed": 
"green", + "subtests failed": "red", } _color_for_type_default = "yellow" diff --git a/src/_pytest/unittest.py b/src/_pytest/unittest.py index 282f7b25680..1a45fbe9fd5 100644 --- a/src/_pytest/unittest.py +++ b/src/_pytest/unittest.py @@ -13,9 +13,13 @@ import sys import traceback import types +from typing import Any from typing import TYPE_CHECKING +from unittest import TestCase import _pytest._code +from _pytest._code import ExceptionInfo +from _pytest.compat import assert_never from _pytest.compat import is_async_function from _pytest.config import hookimpl from _pytest.fixtures import FixtureRequest @@ -30,12 +34,17 @@ from _pytest.python import Function from _pytest.python import Module from _pytest.runner import CallInfo +from _pytest.runner import check_interactive_exception +from _pytest.subtests import make_call_info +from _pytest.subtests import SubTestContext +from _pytest.subtests import SubTestReport if sys.version_info[:2] < (3, 11): from exceptiongroup import ExceptionGroup if TYPE_CHECKING: + from types import TracebackType import unittest import twisted.trial.unittest @@ -200,6 +209,7 @@ def unittest_setup_method_fixture( class TestCaseFunction(Function): nofuncargs = True + failfast = False _excinfo: list[_pytest._code.ExceptionInfo[BaseException]] | None = None def _getinstance(self): @@ -277,11 +287,42 @@ def addFailure( ) -> None: self._addexcinfo(rawexcinfo) - def addSkip(self, testcase: unittest.TestCase, reason: str) -> None: - try: - raise skip.Exception(reason, _use_item_location=True) - except skip.Exception: - self._addexcinfo(sys.exc_info()) + def addSkip( + self, testcase: unittest.TestCase, reason: str, *, handle_subtests: bool = True + ) -> None: + from unittest.case import _SubTest # type: ignore[attr-defined] + + def add_skip() -> None: + try: + raise skip.Exception(reason, _use_item_location=True) + except skip.Exception: + self._addexcinfo(sys.exc_info()) + + if not handle_subtests: + add_skip() + return + + if isinstance(testcase, _SubTest): + add_skip() + if self._excinfo is not None: + exc_info = self._excinfo[-1] + self.addSubTest(testcase.test_case, testcase, exc_info) + else: + # For python < 3.11: the non-subtest skips have to be added by `add_skip` only after all subtest + # failures are processed by `_addSubTest`: `self.instance._outcome` has no attribute + # `skipped/errors` anymore. + # We also need to check if `self.instance._outcome` is `None` (this happens if the test + # class/method is decorated with `unittest.skip`, see pytest-dev/pytest-subtests#173). 
+ if sys.version_info < (3, 11) and self.instance._outcome is not None: + subtest_errors = [ + x + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + if len(subtest_errors) == 0: + add_skip() + else: + add_skip() def addExpectedFailure( self, @@ -361,6 +402,64 @@ def _traceback_filter( ntraceback = traceback return ntraceback + def addSubTest( + self, + test_case: Any, + test: TestCase, + exc_info: ExceptionInfo[BaseException] + | tuple[type[BaseException], BaseException, TracebackType] + | None, + ) -> None: + exception_info: ExceptionInfo[BaseException] | None + match exc_info: + case tuple(): + exception_info = ExceptionInfo(exc_info, _ispytest=True) + case ExceptionInfo() | None: + exception_info = exc_info + case unreachable: + assert_never(unreachable) + + call_info = make_call_info( + exception_info, + start=0, + stop=0, + duration=0, + when="call", + ) + msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] + report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) + sub_report = SubTestReport._from_test_report(report) + sub_report.context = SubTestContext(msg, dict(test.params)) # type: ignore[attr-defined] + self.ihook.pytest_runtest_logreport(report=sub_report) + if check_interactive_exception(call_info, sub_report): + self.ihook.pytest_exception_interact( + node=self, call=call_info, report=sub_report + ) + + # For python < 3.11: add non-subtest skips once all subtest failures are processed by # `_addSubTest`. + if sys.version_info < (3, 11): + from unittest.case import _SubTest # type: ignore[attr-defined] + + non_subtest_skip = [ + (x, y) + for x, y in self.instance._outcome.skipped + if not isinstance(x, _SubTest) + ] + subtest_errors = [ + (x, y) + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + # Check if we have non-subtest skips: if there are also sub failures, non-subtest skips are not treated in + # `_addSubTest` and have to be added using `add_skip` after all subtest failures are processed. 
+ if len(non_subtest_skip) > 0 and len(subtest_errors) > 0: + # Make sure we have processed the last subtest failure + last_subset_error = subtest_errors[-1] + if exc_info is last_subset_error[-1]: + # Add non-subtest skips (as they could not be treated in `_addSkip`) + for testcase, reason in non_subtest_skip: + self.addSkip(testcase, reason, handle_subtests=False) + @hookimpl(tryfirst=True) def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: diff --git a/testing/test_subtests.py b/testing/test_subtests.py index fa32c076813..b4e591a20ee 100644 --- a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -398,7 +398,7 @@ def test_only_original_skip_is_called( monkeypatch: pytest.MonkeyPatch, runner: Literal["pytest-normal"], ) -> None: - """Regression test for #173.""" + """Regression test for pytest-dev/pytest-subtests#173.""" monkeypatch.setenv("COLUMNS", "200") p = pytester.makepyfile( """ From c67b985c45c338699ea1eb57ae72bee818252fad Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 11 Oct 2025 18:22:58 -0300 Subject: [PATCH 07/18] Code review --- changelog/1367.feature.rst | 2 +- doc/en/how-to/subtests.rst | 6 +-- doc/en/reference/reference.rst | 2 +- src/_pytest/subtests.py | 68 +++++++++++++++++++--------------- src/_pytest/unittest.py | 8 ++-- src/pytest/__init__.py | 4 +- testing/test_subtests.py | 35 ++++++++++++++--- 7 files changed, 80 insertions(+), 45 deletions(-) diff --git a/changelog/1367.feature.rst b/changelog/1367.feature.rst index 83aa65254c8..7b19f19430b 100644 --- a/changelog/1367.feature.rst +++ b/changelog/1367.feature.rst @@ -1,6 +1,6 @@ **Support for subtests** has been added. -:ref:`subtests ` are an alternative to parametrization, useful in situations where test setup is expensive or the parametrization values are not all known at collection time. +:ref:`subtests ` are an alternative to parametrization, useful in situations where the parametrization values are not all known at collection time. **Example** diff --git a/doc/en/how-to/subtests.rst b/doc/en/how-to/subtests.rst index ad3af0ea531..43b7951c6cf 100644 --- a/doc/en/how-to/subtests.rst +++ b/doc/en/how-to/subtests.rst @@ -11,7 +11,7 @@ How to use subtests pytest allows for grouping assertions within a normal test, known as *subtests*. -Subtests are an alternative to parametrization, particularly useful when test setup is expensive or when the exact parametrization values are not known at collection time. +Subtests are an alternative to parametrization, particularly useful when the exact parametrization values are not known at collection time. .. code-block:: python @@ -55,11 +55,11 @@ outside the ``subtests.test`` block: Typing ------ -:class:`pytest.SubTests` is exported so it can be used in type annotations: +:class:`pytest.Subtests` is exported so it can be used in type annotations: .. code-block:: python - def test(subtests: pytest.SubTests) -> None: ... + def test(subtests: pytest.Subtests) -> None: ... .. _parametrize_vs_subtests: diff --git a/doc/en/reference/reference.rst b/doc/en/reference/reference.rst index e62e6377820..ab41fd1554c 100644 --- a/doc/en/reference/reference.rst +++ b/doc/en/reference/reference.rst @@ -581,7 +581,7 @@ The ``subtests`` fixture enables declaring subtests inside test functions. **Tutorial**: :ref:`subtests` -.. autoclass:: pytest.SubTests() +.. 
autoclass:: pytest.Subtests() :members: diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index c5a3bedfa90..a0ae9c1e379 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -1,7 +1,8 @@ +"""Builtin plugin that adds subtests support.""" + from __future__ import annotations from collections.abc import Callable -from collections.abc import Generator from collections.abc import Iterator from collections.abc import Mapping from contextlib import AbstractContextManager @@ -10,7 +11,9 @@ from contextlib import nullcontext import dataclasses import time +from types import TracebackType from typing import Any +from typing import Literal from typing import TYPE_CHECKING import pluggy @@ -22,6 +25,7 @@ from _pytest.config import Config from _pytest.config import hookimpl from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest from _pytest.fixtures import fixture from _pytest.fixtures import SubRequest from _pytest.logging import catching_logs @@ -32,8 +36,7 @@ if TYPE_CHECKING: - from types import TracebackType - from typing import Literal + from typing_extensions import Self def pytest_addoption(parser: Parser) -> None: @@ -54,24 +57,31 @@ def pytest_addoption(parser: Parser) -> None: ) -@dataclasses.dataclass -class SubTestContext: - """The values passed to SubTests.test() that are included in the test report.""" +@dataclasses.dataclass(frozen=True, slots=True, kw_only=True) +class SubtestContext: + """The values passed to Subtests.test() that are included in the test report.""" msg: str | None - kwargs: dict[str, Any] + kwargs: Mapping[str, Any] + + def _to_json(self) -> dict[str, Any]: + return dataclasses.asdict(self) + + @classmethod + def _from_json(cls, d: dict[str, Any]) -> Self: + return cls(msg=d["msg"], kwargs=d["kwargs"]) @dataclasses.dataclass(init=False) -class SubTestReport(TestReport): - context: SubTestContext +class SubtestReport(TestReport): + context: SubtestContext @property def head_line(self) -> str: _, _, domain = self.location - return f"{domain} {self.sub_test_description()}" + return f"{domain} {self._sub_test_description()}" - def sub_test_description(self) -> str: + def _sub_test_description(self) -> str: parts = [] if isinstance(self.context.msg, str): parts.append(f"[{self.context.msg}]") @@ -86,37 +96,34 @@ def _to_json(self) -> dict[str, Any]: data = super()._to_json() del data["context"] data["_report_type"] = "SubTestReport" - data["_subtest.context"] = dataclasses.asdict(self.context) + data["_subtest.context"] = self.context._to_json() return data @classmethod - def _from_json(cls, reportdict: dict[str, Any]) -> SubTestReport: + def _from_json(cls, reportdict: dict[str, Any]) -> SubtestReport: report = super()._from_json(reportdict) - context_data = reportdict["_subtest.context"] - report.context = SubTestContext( - msg=context_data["msg"], kwargs=context_data["kwargs"] - ) + report.context = SubtestContext._from_json(reportdict["_subtest.context"]) return report @classmethod - def _from_test_report(cls, test_report: TestReport) -> SubTestReport: + def _from_test_report(cls, test_report: TestReport) -> SubtestReport: return super()._from_json(test_report._to_json()) @fixture -def subtests(request: SubRequest) -> Generator[SubTests, None, None]: +def subtests(request: SubRequest) -> Subtests: """Provides subtests functionality.""" capmam = request.node.config.pluginmanager.get_plugin("capturemanager") if capmam is not None: suspend_capture_ctx = capmam.global_and_fixture_disabled else: 
suspend_capture_ctx = nullcontext - yield SubTests(request.node.ihook, suspend_capture_ctx, request) + return Subtests(request.node.ihook, suspend_capture_ctx, request, _ispytest=True) # Note: cannot use a dataclass here because Sphinx insists on showing up the __init__ method in the documentation, # even if we explicitly use :exclude-members: __init__. -class SubTests: +class Subtests: """Subtests fixture, enables declaring subtests inside test functions via the :meth:`test` method.""" def __init__( @@ -124,7 +131,10 @@ def __init__( ihook: pluggy.HookRelay, suspend_capture_ctx: Callable[[], AbstractContextManager[None]], request: SubRequest, + *, + _ispytest: bool = False, ) -> None: + check_ispytest(_ispytest) self._ihook = ihook self._suspend_capture_ctx = suspend_capture_ctx self._request = request @@ -169,7 +179,7 @@ class _SubTestContextManager: Context manager for subtests, capturing exceptions raised inside the subtest scope and handling them through the pytest machinery. - Note: initially this logic was implemented directly in SubTests.test() as a @contextmanager, however + Note: initially this logic was implemented directly in Subtests.test() as a @contextmanager, however it is not possible to control the output fully when exiting from it due to an exception when in --exitfirst mode, so this was refactored into an explicit context manager class (pytest-dev/pytest-subtests#134). """ @@ -220,8 +230,8 @@ def __exit__( report = self.ihook.pytest_runtest_makereport( item=self.request.node, call=call_info ) - sub_report = SubTestReport._from_test_report(report) - sub_report.context = SubTestContext(self.msg, self.kwargs.copy()) + sub_report = SubtestReport._from_test_report(report) + sub_report.context = SubtestContext(msg=self.msg, kwargs=self.kwargs.copy()) self._captured_output.update_report(sub_report) self._captured_logs.update_report(sub_report) @@ -330,14 +340,14 @@ def update_report(self, report: TestReport) -> None: def pytest_report_to_serializable(report: TestReport) -> dict[str, Any] | None: - if isinstance(report, SubTestReport): + if isinstance(report, SubtestReport): return report._to_json() return None -def pytest_report_from_serializable(data: dict[str, Any]) -> SubTestReport | None: +def pytest_report_from_serializable(data: dict[str, Any]) -> SubtestReport | None: if data.get("_report_type") == "SubTestReport": - return SubTestReport._from_json(data) + return SubtestReport._from_json(data) return None @@ -346,11 +356,11 @@ def pytest_report_teststatus( report: TestReport, config: Config, ) -> tuple[str, str, str | Mapping[str, bool]] | None: - if report.when != "call" or not isinstance(report, SubTestReport): + if report.when != "call" or not isinstance(report, SubtestReport): return None outcome = report.outcome - description = report.sub_test_description() + description = report._sub_test_description() no_output = ("", "", "") if hasattr(report, "wasxfail"): diff --git a/src/_pytest/unittest.py b/src/_pytest/unittest.py index 1a45fbe9fd5..69f64e9fa79 100644 --- a/src/_pytest/unittest.py +++ b/src/_pytest/unittest.py @@ -36,8 +36,8 @@ from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception from _pytest.subtests import make_call_info -from _pytest.subtests import SubTestContext -from _pytest.subtests import SubTestReport +from _pytest.subtests import SubtestContext +from _pytest.subtests import SubtestReport if sys.version_info[:2] < (3, 11): @@ -428,8 +428,8 @@ def addSubTest( ) msg = test._message if isinstance(test._message, 
str) else None # type: ignore[attr-defined] report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) - sub_report = SubTestReport._from_test_report(report) - sub_report.context = SubTestContext(msg, dict(test.params)) # type: ignore[attr-defined] + sub_report = SubtestReport._from_test_report(report) + sub_report.context = SubtestContext(msg=msg, kwargs=dict(test.params)) # type: ignore[attr-defined] self.ihook.pytest_runtest_logreport(report=sub_report) if check_interactive_exception(call_info, sub_report): self.ihook.pytest_exception_interact( diff --git a/src/pytest/__init__.py b/src/pytest/__init__.py index 610693f1682..6c09a2461d4 100644 --- a/src/pytest/__init__.py +++ b/src/pytest/__init__.py @@ -71,7 +71,7 @@ from _pytest.runner import CallInfo from _pytest.stash import Stash from _pytest.stash import StashKey -from _pytest.subtests import SubTests +from _pytest.subtests import Subtests from _pytest.terminal import TerminalReporter from _pytest.terminal import TestShortLogReport from _pytest.tmpdir import TempPathFactory @@ -149,7 +149,7 @@ "Session", "Stash", "StashKey", - "SubTests", + "Subtests", "TempPathFactory", "TempdirFactory", "TerminalReporter", diff --git a/testing/test_subtests.py b/testing/test_subtests.py index b4e591a20ee..8acc3422a95 100644 --- a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -138,10 +138,10 @@ def test_typing_exported( ) -> None: pytester.makepyfile( """ - from pytest import SubTests + from pytest import Subtests - def test_typing_exported(subtests: SubTests) -> None: - assert isinstance(subtests, SubTests) + def test_typing_exported(subtests: Subtests) -> None: + assert isinstance(subtests, Subtests) """ ) if mode == "normal": @@ -212,8 +212,33 @@ def test_foo(subtests): ) -class TestSubTest: - """Test.subTest functionality.""" +def test_subtests_and_parametrization(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + @pytest.mark.parametrize("x", [0, 1]) + def test_foo(subtests, x): + for i in range(3): + with subtests.test(msg="custom", i=i): + assert i % 2 == 0 + assert x == 0 + """ + ) + result = pytester.runpytest("-v", "--no-subtests-reports") + result.stdout.fnmatch_lines( + [ + "test_subtests_and_parametrization.py::test_foo[[]0[]] [[]custom[]] (i=1) SUBFAIL*[[] 50%[]]", + "test_subtests_and_parametrization.py::test_foo[[]0[]] PASSED *[[] 50%[]]", + "test_subtests_and_parametrization.py::test_foo[[]1[]] [[]custom[]] (i=1) SUBFAIL *[[]100%[]]", + "test_subtests_and_parametrization.py::test_foo[[]1[]] FAILED *[[]100%[]]", + "* 3 failed, 1 passed in *", + ] + ) + + +class TestUnittestSubTest: + """Test unittest.TestCase.subTest functionality.""" @pytest.fixture def simple_script(self, pytester: pytest.Pytester) -> Path: From 3639c713942543fac3dc5724163c0389719264a5 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 18 Oct 2025 09:58:45 -0300 Subject: [PATCH 08/18] Code review --- changelog/1367.feature.rst | 15 ++++++---- doc/en/how-to/subtests.rst | 16 ++++++----- src/_pytest/subtests.py | 59 ++++++++++++++++---------------------- src/_pytest/unittest.py | 11 ++++--- testing/test_subtests.py | 2 +- 5 files changed, 52 insertions(+), 51 deletions(-) diff --git a/changelog/1367.feature.rst b/changelog/1367.feature.rst index 7b19f19430b..72eadbc55ca 100644 --- a/changelog/1367.feature.rst +++ b/changelog/1367.feature.rst @@ -6,13 +6,18 @@ .. 
code-block:: python - def test(subtests): - for i in range(5): - with subtests.test(msg="custom message", i=i): - assert i % 2 == 0 + def contains_docstring(p: Path) -> bool: + """Return True if the given Python file contains a top-level docstring.""" + ... -Each assert failure or error is caught by the context manager and reported individually. + def test_py_files_contain_docstring(subtests: pytest.Subtests) -> None: + for path in Path.cwd().glob("*.py"): + with subtests.test(path=str(path)): + assert contains_docstring(path) + + +Each assert failure or error is caught by the context manager and reported individually, giving a clear picture of all files that are missing a docstring. In addition, :meth:`unittest.TestCase.subTest` is now also supported. diff --git a/doc/en/how-to/subtests.rst b/doc/en/how-to/subtests.rst index 43b7951c6cf..e03aae0c75e 100644 --- a/doc/en/how-to/subtests.rst +++ b/doc/en/how-to/subtests.rst @@ -16,13 +16,15 @@ Subtests are an alternative to parametrization, particularly useful when the exa .. code-block:: python - # content of test_subtest.py + def contains_docstring(p: Path) -> bool: + """Return True if the given Python file contains a top-level docstring.""" + ... - def test(subtests): - for i in range(5): - with subtests.test(msg="custom message", i=i): - assert i % 2 == 0 + def test_py_files_contain_docstring(subtests: pytest.Subtests) -> None: + for path in Path.cwd().glob("*.py"): + with subtests.test(path=str(path)): + assert contains_docstring(path) Each assertion failure or error is caught by the context manager and reported individually: @@ -38,13 +40,13 @@ outside the ``subtests.test`` block: def test(subtests): for i in range(5): - with subtests.test(msg="stage 1", i=i): + with subtests.test("stage 1", i=i): assert i % 2 == 0 assert func() == 10 for i in range(10, 20): - with subtests.test(msg="stage 2", i=i): + with subtests.test("stage 2", i=i): assert i % 2 == 0 .. 
note:: diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index a0ae9c1e379..76c0fbd8bc6 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -13,12 +13,12 @@ import time from types import TracebackType from typing import Any -from typing import Literal from typing import TYPE_CHECKING import pluggy from _pytest._code import ExceptionInfo +from _pytest._io.saferepr import saferepr from _pytest.capture import CaptureFixture from _pytest.capture import FDCapture from _pytest.capture import SysCapture @@ -83,11 +83,11 @@ def head_line(self) -> str: def _sub_test_description(self) -> str: parts = [] - if isinstance(self.context.msg, str): + if self.context.msg is not None: parts.append(f"[{self.context.msg}]") if self.context.kwargs: params_desc = ", ".join( - f"{k}={v!r}" for (k, v) in sorted(self.context.kwargs.items()) + f"{k}={saferepr(v)}" for (k, v) in self.context.kwargs.items() ) parts.append(f"({params_desc})") return " ".join(parts) or "()" @@ -106,8 +106,12 @@ def _from_json(cls, reportdict: dict[str, Any]) -> SubtestReport: return report @classmethod - def _from_test_report(cls, test_report: TestReport) -> SubtestReport: - return super()._from_json(test_report._to_json()) + def _from_test_report( + cls, test_report: TestReport, context: SubtestContext + ) -> Self: + result = super()._from_json(test_report._to_json()) + result.context = context + return result @fixture @@ -121,8 +125,6 @@ def subtests(request: SubRequest) -> Subtests: return Subtests(request.node.ihook, suspend_capture_ctx, request, _ispytest=True) -# Note: cannot use a dataclass here because Sphinx insists on showing up the __init__ method in the documentation, -# even if we explicitly use :exclude-members: __init__. class Subtests: """Subtests fixture, enables declaring subtests inside test functions via the :meth:`test` method.""" @@ -178,12 +180,13 @@ class _SubTestContextManager: """ Context manager for subtests, capturing exceptions raised inside the subtest scope and handling them through the pytest machinery. - - Note: initially this logic was implemented directly in Subtests.test() as a @contextmanager, however - it is not possible to control the output fully when exiting from it due to an exception when - in --exitfirst mode, so this was refactored into an explicit context manager class (pytest-dev/pytest-subtests#134). """ + # Note: initially the logic for this context manager was implemented directly + # in Subtests.test() as a @contextmanager, however, it is not possible to control the output fully when + # exiting from it due to an exception when in `--exitfirst` mode, so this was refactored into an + # explicit context manager class (pytest-dev/pytest-subtests#134). 
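+    # Flow: ``__enter__`` starts timing and output/log capture for the subtest block;
+    # ``__exit__`` turns any exception raised inside the block into a ``CallInfo``,
+    # builds a ``SubtestReport`` (via ``pytest_runtest_makereport``) carrying this
+    # subtest's context and captured output, and hands it to pytest's reporting
+    # machinery, so a failing subtest is reported on its own without aborting the
+    # enclosing test.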
+ ihook: pluggy.HookRelay msg: str | None kwargs: dict[str, Any] @@ -224,14 +227,21 @@ def __exit__( duration = precise_stop - self._precise_start stop = time.time() - call_info = make_call_info( - exc_info, start=self._start, stop=stop, duration=duration, when="call" + call_info = CallInfo[None]( + None, + exc_info, + start=self._start, + stop=stop, + duration=duration, + when="call", + _ispytest=True, ) report = self.ihook.pytest_runtest_makereport( item=self.request.node, call=call_info ) - sub_report = SubtestReport._from_test_report(report) - sub_report.context = SubtestContext(msg=self.msg, kwargs=self.kwargs.copy()) + sub_report = SubtestReport._from_test_report( + report, SubtestContext(msg=self.msg, kwargs=self.kwargs.copy()) + ) self._captured_output.update_report(sub_report) self._captured_logs.update_report(sub_report) @@ -250,25 +260,6 @@ def __exit__( return True -def make_call_info( - exc_info: ExceptionInfo[BaseException] | None, - *, - start: float, - stop: float, - duration: float, - when: Literal["collect", "setup", "call", "teardown"], -) -> CallInfo[Any]: - return CallInfo( - None, - exc_info, - start=start, - stop=stop, - duration=duration, - when=when, - _ispytest=True, - ) - - @contextmanager def capturing_output(request: SubRequest) -> Iterator[Captured]: option = request.config.getoption("capture", None) diff --git a/src/_pytest/unittest.py b/src/_pytest/unittest.py index 69f64e9fa79..035f9533cf5 100644 --- a/src/_pytest/unittest.py +++ b/src/_pytest/unittest.py @@ -35,7 +35,6 @@ from _pytest.python import Module from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception -from _pytest.subtests import make_call_info from _pytest.subtests import SubtestContext from _pytest.subtests import SubtestReport @@ -419,17 +418,21 @@ def addSubTest( case unreachable: assert_never(unreachable) - call_info = make_call_info( + call_info = CallInfo[None]( + None, exception_info, start=0, stop=0, duration=0, when="call", + _ispytest=True, ) msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) - sub_report = SubtestReport._from_test_report(report) - sub_report.context = SubtestContext(msg=msg, kwargs=dict(test.params)) # type: ignore[attr-defined] + sub_report = SubtestReport._from_test_report( + report, + SubtestContext(msg=msg, kwargs=dict(test.params)), # type: ignore[attr-defined] + ) self.ihook.pytest_runtest_logreport(report=sub_report) if check_interactive_exception(call_info, sub_report): self.ihook.pytest_exception_interact( diff --git a/testing/test_subtests.py b/testing/test_subtests.py index 8acc3422a95..9370a540720 100644 --- a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -220,7 +220,7 @@ def test_subtests_and_parametrization(pytester: pytest.Pytester) -> None: @pytest.mark.parametrize("x", [0, 1]) def test_foo(subtests, x): for i in range(3): - with subtests.test(msg="custom", i=i): + with subtests.test("custom", i=i): assert i % 2 == 0 assert x == 0 """ From 8fee2ef421a8bedaa08479c6da2ba5856bf61127 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 18 Oct 2025 10:12:14 -0300 Subject: [PATCH 09/18] Docs --- doc/en/how-to/subtests.rst | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/doc/en/how-to/subtests.rst b/doc/en/how-to/subtests.rst index e03aae0c75e..f07b1ad9970 100644 --- a/doc/en/how-to/subtests.rst +++ b/doc/en/how-to/subtests.rst @@ -16,15 +16,13 @@ Subtests 
are an alternative to parametrization, particularly useful when the exa .. code-block:: python - def contains_docstring(p: Path) -> bool: - """Return True if the given Python file contains a top-level docstring.""" - ... + # content of test_subtest.py - def test_py_files_contain_docstring(subtests: pytest.Subtests) -> None: - for path in Path.cwd().glob("*.py"): - with subtests.test(path=str(path)): - assert contains_docstring(path) + def test(subtests): + for i in range(5): + with subtests.test(msg="custom message", i=i): + assert i % 2 == 0 Each assertion failure or error is caught by the context manager and reported individually: @@ -33,6 +31,12 @@ Each assertion failure or error is caught by the context manager and reported in $ pytest -q test_subtest.py +In the output above: + +* Each subtest is reported with the ``,`` character. +* Subtests are reported first and the "top-level" test is reported at the end on its own. +* Subtest failures are reported as ``SUBFAIL``. + Note that it is possible to use ``subtests`` multiple times in the same test, or even mix and match with normal assertions outside the ``subtests.test`` block: @@ -88,3 +92,4 @@ Subtests * Can be generated dynamically. * Cannot be referenced individually from the command line. * Plugins that handle test execution cannot target individual subtests. +* An assertion failure inside a subtest does not interrupt the test, letting users see all failures in the same report. From d8a22d5d0a07ba888d99deac26d60b8147b97146 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 18 Oct 2025 11:00:57 -0300 Subject: [PATCH 10/18] Make top-level tests fail when there are failing subtests --- src/_pytest/subtests.py | 127 +++++++++++++++++++++++----------- testing/test_subtests.py | 146 +++++++++++++++++++++++++++++---------- 2 files changed, 193 insertions(+), 80 deletions(-) diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index 76c0fbd8bc6..dbd35485c9d 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -2,6 +2,7 @@ from __future__ import annotations +from collections import defaultdict from collections.abc import Callable from collections.abc import Iterator from collections.abc import Mapping @@ -13,6 +14,7 @@ import time from types import TracebackType from typing import Any +from typing import ClassVar from typing import TYPE_CHECKING import pluggy @@ -172,6 +174,9 @@ def test(subtests): kwargs, request=self._request, suspend_capture_ctx=self._suspend_capture_ctx, + reporter=self._request.config.pluginmanager.getplugin( + SubtestsReporterPlugin.NAME + ), ) @@ -192,6 +197,7 @@ class _SubTestContextManager: kwargs: dict[str, Any] suspend_capture_ctx: Callable[[], AbstractContextManager[None]] request: SubRequest + reporter: SubtestsReporterPlugin def __enter__(self) -> None: __tracebackhide__ = True @@ -243,6 +249,9 @@ def __exit__( report, SubtestContext(msg=self.msg, kwargs=self.kwargs.copy()) ) + if sub_report.failed: + self.reporter.contains_failed_subtests[self.request.node.nodeid] += 1 + self._captured_output.update_report(sub_report) self._captured_logs.update_report(sub_report) @@ -322,7 +331,9 @@ class CapturedLogs: handler: LogCaptureHandler def update_report(self, report: TestReport) -> None: - report.sections.append(("Captured log call", self.handler.stream.getvalue())) + captured_log = self.handler.stream.getvalue() + if captured_log: + report.sections.append(("Captured log call", captured_log)) class NullCapturedLogs: @@ -342,47 +353,79 @@ def pytest_report_from_serializable(data: 
dict[str, Any]) -> SubtestReport | Non return None -@hookimpl(tryfirst=True) -def pytest_report_teststatus( - report: TestReport, - config: Config, -) -> tuple[str, str, str | Mapping[str, bool]] | None: - if report.when != "call" or not isinstance(report, SubtestReport): - return None +def pytest_configure(config: Config) -> None: + config.pluginmanager.register(SubtestsReporterPlugin(), SubtestsReporterPlugin.NAME) - outcome = report.outcome - description = report._sub_test_description() - no_output = ("", "", "") - - if hasattr(report, "wasxfail"): - if config.option.no_subtests_reports and outcome != "skipped": - return no_output - elif outcome == "skipped": - category = "xfailed" - short = "y" # x letter is used for regular xfail, y for subtest xfail - status = "SUBXFAIL" - elif outcome == "passed": - category = "xpassed" - short = "Y" # X letter is used for regular xpass, Y for subtest xpass - status = "SUBXPASS" - else: - # This should not normally happen, unless some plugin is setting wasxfail without - # the correct outcome. Pytest expects the call outcome to be either skipped or passed in case of xfail. - # Let's pass this report to the next hook. + +@dataclasses.dataclass() +class SubtestsReporterPlugin: + NAME: ClassVar[str] = "subtests-reporter" + + # Tracks node-ids -> number of failed subtests. + contains_failed_subtests: defaultdict[str, int] = dataclasses.field( + default_factory=lambda: defaultdict(lambda: 0) + ) + + def __hash__(self) -> int: + return id(self) + + @hookimpl(tryfirst=True) + def pytest_report_teststatus( + self, + report: TestReport, + config: Config, + ) -> tuple[str, str, str | Mapping[str, bool]] | None: + if report.when != "call": return None - short = "" if config.option.no_subtests_shortletter else short - return f"subtests {category}", short, f"{description} {status}" - - if config.option.no_subtests_reports and outcome != "failed": - return no_output - elif report.passed: - short = "" if config.option.no_subtests_shortletter else "," - return f"subtests {outcome}", short, f"{description} SUBPASS" - elif report.skipped: - short = "" if config.option.no_subtests_shortletter else "-" - return outcome, short, f"{description} SUBSKIP" - elif outcome == "failed": - short = "" if config.option.no_subtests_shortletter else "u" - return outcome, short, f"{description} SUBFAIL" - return None + if isinstance(report, SubtestReport): + outcome = report.outcome + description = report._sub_test_description() + no_output = ("", "", "") + + if hasattr(report, "wasxfail"): + if config.option.no_subtests_reports and outcome != "skipped": + return no_output + elif outcome == "skipped": + category = "xfailed" + short = ( + "y" # x letter is used for regular xfail, y for subtest xfail + ) + status = "SUBXFAIL" + elif outcome == "passed": + category = "xpassed" + short = ( + "Y" # X letter is used for regular xpass, Y for subtest xpass + ) + status = "SUBXPASS" + else: + # This should not normally happen, unless some plugin is setting wasxfail without + # the correct outcome. Pytest expects the call outcome to be either skipped or + # passed in case of xfail. + # Let's pass this report to the next hook. 
+ return None + short = "" if config.option.no_subtests_shortletter else short + return f"subtests {category}", short, f"{description} {status}" + + if config.option.no_subtests_reports and outcome != "failed": + return no_output + elif report.passed: + short = "" if config.option.no_subtests_shortletter else "," + return f"subtests {outcome}", short, f"{description} SUBPASSED" + elif report.skipped: + short = "" if config.option.no_subtests_shortletter else "-" + return outcome, short, f"{description} SUBSKIPPED" + elif outcome == "failed": + short = "" if config.option.no_subtests_shortletter else "u" + return outcome, short, f"{description} SUBFAILED" + else: + # Top-level test, fail it it contains failed subtests and it has passed. + if ( + report.passed + and (count := self.contains_failed_subtests.get(report.nodeid, 0)) > 0 + ): + report.outcome = "failed" + suffix = "s" if count > 1 else "" + report.longrepr = f"Contains {count} failed subtest{suffix}" + + return None diff --git a/testing/test_subtests.py b/testing/test_subtests.py index 9370a540720..2904cfaaa64 100644 --- a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -43,7 +43,8 @@ def test_simple_terminal_normal( expected_lines += [ "* test_foo [[]custom[]] (i=1) *", "* test_foo [[]custom[]] (i=3) *", - "* 2 failed, 1 passed, 3 subtests passed in *", + "Contains 2 failed subtests", + "* 3 failed, 3 subtests passed in *", ] result.stdout.fnmatch_lines(expected_lines) @@ -57,12 +58,12 @@ def test_simple_terminal_verbose( result = pytester.runpytest("-v") expected_lines = [ "*collected 1 item", - "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=0) SUBPASS *100%*", - "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=1) SUBFAIL *100%*", - "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=2) SUBPASS *100%*", - "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=3) SUBFAIL *100%*", - "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=4) SUBPASS *100%*", - "test_simple_terminal_verbose.py::test_foo PASSED *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=0) SUBPASSED *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=1) SUBFAILED *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=2) SUBPASSED *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=3) SUBFAILED *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=4) SUBPASSED *100%*", + "test_simple_terminal_verbose.py::test_foo FAILED *100%*", ] else: assert mode == "xdist" @@ -81,7 +82,7 @@ def test_simple_terminal_verbose( expected_lines += [ "* test_foo [[]custom[]] (i=1) *", "* test_foo [[]custom[]] (i=3) *", - "* 2 failed, 1 passed, 3 subtests passed in *", + "* 3 failed, 3 subtests passed in *", ] result.stdout.fnmatch_lines(expected_lines) @@ -173,7 +174,7 @@ def test_foo(subtests): result.stdout.fnmatch_lines( [ "*collected 1 item*", - "test_no_subtests_reports.py::test_foo * (i=0) SUBPASS*", + "test_no_subtests_reports.py::test_foo * (i=0) SUBPASSED*", "*test_no_subtests_reports.py::test_foo PASSED*", "* 1 passed, 5 subtests passed in*", ] @@ -188,7 +189,7 @@ def test_foo(subtests): "* 1 passed in*", ] ) - result.stdout.no_fnmatch_line("*SUBPASS*") + result.stdout.no_fnmatch_line("*SUBPASSED*") # Rewrite the test file so the tests fail. Even with the flag, failed subtests are still reported. 
pytester.makepyfile( @@ -205,9 +206,9 @@ def test_foo(subtests): result.stdout.fnmatch_lines( [ "*collected 1 item*", - "test_no_subtests_reports.py::test_foo * (i=0) SUBFAIL*", - "*test_no_subtests_reports.py::test_foo PASSED*", - "* 5 failed, 1 passed in*", + "test_no_subtests_reports.py::test_foo * (i=0) SUBFAILED*", + "*test_no_subtests_reports.py::test_foo FAILED*", + "* 6 failed in*", ] ) @@ -228,11 +229,80 @@ def test_foo(subtests, x): result = pytester.runpytest("-v", "--no-subtests-reports") result.stdout.fnmatch_lines( [ - "test_subtests_and_parametrization.py::test_foo[[]0[]] [[]custom[]] (i=1) SUBFAIL*[[] 50%[]]", - "test_subtests_and_parametrization.py::test_foo[[]0[]] PASSED *[[] 50%[]]", - "test_subtests_and_parametrization.py::test_foo[[]1[]] [[]custom[]] (i=1) SUBFAIL *[[]100%[]]", + "test_subtests_and_parametrization.py::test_foo[[]0[]] [[]custom[]] (i=1) SUBFAILED*[[] 50%[]]", + "test_subtests_and_parametrization.py::test_foo[[]0[]] FAILED *[[] 50%[]]", + "test_subtests_and_parametrization.py::test_foo[[]1[]] [[]custom[]] (i=1) SUBFAILED *[[]100%[]]", "test_subtests_and_parametrization.py::test_foo[[]1[]] FAILED *[[]100%[]]", - "* 3 failed, 1 passed in *", + "Contains 1 failed subtest", + "* 4 failed in *", + ] + ) + + +def test_subtests_fail_top_level_test(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(3): + with subtests.test("custom", i=i): + assert i % 2 == 0 + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "* 2 failed, 2 subtests passed in *", + ] + ) + + +def test_subtests_do_not_overwrite_top_level_failure(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(3): + with subtests.test("custom", i=i): + assert i % 2 == 0 + assert False, "top-level failure" + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "*AssertionError: top-level failure", + "* 2 failed, 2 subtests passed in *", + ] + ) + + +@pytest.mark.parametrize("flag", ["--last-failed", "--stepwise"]) +def test_subtests_last_failed_step_wise(pytester: pytest.Pytester, flag: str) -> None: + """Check that --last-failed and --step-wise correctly rerun tests with failed subtests.""" + pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(3): + with subtests.test("custom", i=i): + assert i % 2 == 0 + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "* 2 failed, 2 subtests passed in *", + ] + ) + + result = pytester.runpytest("-v", flag) + result.stdout.fnmatch_lines( + [ + "* 2 failed, 2 subtests passed in *", ] ) @@ -324,8 +394,8 @@ def test_simple_terminal_verbose( result = pytester.runpytest(simple_script, "-v") expected_lines = [ "*collected 1 item", - "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=1) SUBFAIL *100%*", - "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=3) SUBFAIL *100%*", + "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=1) SUBFAILED *100%*", + "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=3) SUBFAILED *100%*", "test_simple_terminal_verbose.py::T::test_foo PASSED *100%*", ] else: @@ -336,8 +406,8 @@ def test_simple_terminal_verbose( ) expected_lines = [ "1 worker [1 item]", - "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", - "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", + "*gw0*100%* SUBFAILED 
test_simple_terminal_verbose.py::T::test_foo*", + "*gw0*100%* SUBFAILED test_simple_terminal_verbose.py::T::test_foo*", "*gw0*100%* PASSED test_simple_terminal_verbose.py::T::test_foo*", ] result.stdout.fnmatch_lines( @@ -493,18 +563,18 @@ def test_foo(self): result = pytester.runpytest(p, "-v", "-rsf") result.stdout.re_match_lines( [ - r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIP" + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIPPED" r" \(skip subtest i=0\) .*", - r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIP" + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIPPED" r" \(skip subtest i=3\) .*", - r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", - r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=9\) SUBFAIL .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=4\) SUBFAILED .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=9\) SUBFAILED .*", "test_skip_with_failure.py::T::test_foo PASSED .*", - r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=0", - r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=3", - r"[custom message] (i=4) SUBFAIL test_skip_with_failure.py::T::test_foo" + r"[custom message] (i=0) SUBSKIPPED [1] test_skip_with_failure.py:5: skip subtest i=0", + r"[custom message] (i=0) SUBSKIPPED [1] test_skip_with_failure.py:5: skip subtest i=3", + r"[custom message] (i=4) SUBFAILED test_skip_with_failure.py::T::test_foo" r" - AssertionError: assert 4 < 4", - r"[custom message] (i=9) SUBFAIL test_skip_with_failure.py::T::test_foo" + r"[custom message] (i=9) SUBFAILED test_skip_with_failure.py::T::test_foo" r" - AssertionError: assert 9 < 4", r".* 6 failed, 1 passed, 4 skipped in .*", ] @@ -567,13 +637,13 @@ def test_foo(self): # The `(i=0)` is not correct but it's given by pytest `TerminalReporter` without `--no-fold-skipped` result.stdout.re_match_lines( [ - r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", + r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAILED .*", r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\)", - r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5:" + r"\[custom message\] \(i=0\) SUBSKIPPED \[1\] test_skip_with_failure_and_non_subskip.py:5:" r" skip subtest i=3", - r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5:" + r"\[custom message\] \(i=0\) SUBSKIPPED \[1\] test_skip_with_failure_and_non_subskip.py:5:" r" skip the test", - r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", + r"\[custom message\] \(i=4\) SUBFAILED test_skip_with_failure_and_non_subskip.py::T::test_foo", r".* 6 failed, 5 skipped in .*", ] ) @@ -582,12 +652,12 @@ def test_foo(self): result = pytester.runpytest(p, "-v", "--no-fold-skipped", "-rsf") result.stdout.re_match_lines( [ - r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", + r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAILED .*", # noqa: E501 r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\).*", - r"\[custom message\] \(i=3\) SUBSKIP test_skip_with_failure_and_non_subskip.py::T::test_foo" + r"\[custom message\] \(i=3\) 
SUBSKIPPED test_skip_with_failure_and_non_subskip.py::T::test_foo" r" - Skipped: skip subtest i=3", r"SKIPPED test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip the test", - r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", + r"\[custom message\] \(i=4\) SUBFAILED test_skip_with_failure_and_non_subskip.py::T::test_foo", r".* 6 failed, 5 skipped in .*", ] ) @@ -768,7 +838,7 @@ def test(subtests): result = pytester.runpytest("-p no:logging") result.stdout.fnmatch_lines( [ - "*1 passed*", + "*2 failed, 1 subtests passed in*", ] ) result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*") @@ -856,7 +926,7 @@ def test_foo(subtests): assert result.parseoutcomes()["failed"] == 2 result.stdout.fnmatch_lines( [ - "*[[]sub1[]] SUBFAIL test_exitfirst.py::test_foo - assert False*", + "*[[]sub1[]] SUBFAILED test_exitfirst.py::test_foo - assert False*", "FAILED test_exitfirst.py::test_foo - assert False", "* stopping after 2 failures*", ], From 61200570a1b877ab2a7c257856068577408a7640 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Thu, 23 Oct 2025 17:25:11 -0300 Subject: [PATCH 11/18] Minor code review --- doc/en/how-to/subtests.rst | 2 +- src/_pytest/subtests.py | 12 ++++++------ testing/test_subtests.py | 6 ++---- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/doc/en/how-to/subtests.rst b/doc/en/how-to/subtests.rst index f07b1ad9970..398f68db7ba 100644 --- a/doc/en/how-to/subtests.rst +++ b/doc/en/how-to/subtests.rst @@ -35,7 +35,7 @@ In the output above: * Each subtest is reported with the ``,`` character. * Subtests are reported first and the "top-level" test is reported at the end on its own. -* Subtest failures are reported as ``SUBFAIL``. +* Subtest failures are reported as ``SUBFAILED``. Note that it is possible to use ``subtests`` multiple times in the same test, or even mix and match with normal assertions outside the ``subtests.test`` block: diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index dbd35485c9d..a242c180fb5 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -159,7 +159,7 @@ def test( def test(subtests): for i in range(5): - with subtests.test(msg="custom message", i=i): + with subtests.test("custom message", i=i): assert i % 2 == 0 :param msg: @@ -273,13 +273,13 @@ def __exit__( def capturing_output(request: SubRequest) -> Iterator[Captured]: option = request.config.getoption("capture", None) - # capsys or capfd are active, subtest should not capture. capman = request.config.pluginmanager.getplugin("capturemanager") - capture_fixture_active = getattr(capman, "_capture_fixture", None) - - if option == "sys" and not capture_fixture_active: + if getattr(capman, "_capture_fixture", None): + # capsys or capfd are active, subtest should not capture. 
+ fixture = None + elif option == "sys": fixture = CaptureFixture(SysCapture, request, _ispytest=True) - elif option == "fd" and not capture_fixture_active: + elif option == "fd": fixture = CaptureFixture(FDCapture, request, _ispytest=True) else: fixture = None diff --git a/testing/test_subtests.py b/testing/test_subtests.py index 2904cfaaa64..dfe6001830e 100644 --- a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -563,10 +563,8 @@ def test_foo(self): result = pytester.runpytest(p, "-v", "-rsf") result.stdout.re_match_lines( [ - r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIPPED" - r" \(skip subtest i=0\) .*", - r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIPPED" - r" \(skip subtest i=3\) .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIPPED \(skip subtest i=0\) .*", # noqa: E501 + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIPPED \(skip subtest i=3\) .*", # noqa: E501 r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=4\) SUBFAILED .*", r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=9\) SUBFAILED .*", "test_skip_with_failure.py::T::test_foo PASSED .*", From aa8a2ad001fd28e61333f32be24d3e6a4873ee9c Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Thu, 23 Oct 2025 17:34:50 -0300 Subject: [PATCH 12/18] Replace separate plugin by config.stash --- src/_pytest/subtests.py | 135 ++++++++++++++++++---------------------- 1 file changed, 60 insertions(+), 75 deletions(-) diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index a242c180fb5..4a86d691f14 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -14,7 +14,6 @@ import time from types import TracebackType from typing import Any -from typing import ClassVar from typing import TYPE_CHECKING import pluggy @@ -35,6 +34,7 @@ from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception +from _pytest.stash import StashKey if TYPE_CHECKING: @@ -174,9 +174,7 @@ def test(subtests): kwargs, request=self._request, suspend_capture_ctx=self._suspend_capture_ctx, - reporter=self._request.config.pluginmanager.getplugin( - SubtestsReporterPlugin.NAME - ), + config=self._request.config, ) @@ -197,7 +195,7 @@ class _SubTestContextManager: kwargs: dict[str, Any] suspend_capture_ctx: Callable[[], AbstractContextManager[None]] request: SubRequest - reporter: SubtestsReporterPlugin + config: Config def __enter__(self) -> None: __tracebackhide__ = True @@ -250,7 +248,8 @@ def __exit__( ) if sub_report.failed: - self.reporter.contains_failed_subtests[self.request.node.nodeid] += 1 + failed_subtests = self.config.stash[failed_subtests_key] + failed_subtests[self.request.node.nodeid] += 1 self._captured_output.update_report(sub_report) self._captured_logs.update_report(sub_report) @@ -353,79 +352,65 @@ def pytest_report_from_serializable(data: dict[str, Any]) -> SubtestReport | Non return None -def pytest_configure(config: Config) -> None: - config.pluginmanager.register(SubtestsReporterPlugin(), SubtestsReporterPlugin.NAME) +# Dict of nodeid -> number of failed subtests. +# Used to fail top-level tests that passed but contain failed subtests. 
+failed_subtests_key = StashKey[defaultdict[str, int]]() -@dataclasses.dataclass() -class SubtestsReporterPlugin: - NAME: ClassVar[str] = "subtests-reporter" +def pytest_configure(config: Config) -> None: + config.stash[failed_subtests_key] = defaultdict(lambda: 0) - # Tracks node-ids -> number of failed subtests. - contains_failed_subtests: defaultdict[str, int] = dataclasses.field( - default_factory=lambda: defaultdict(lambda: 0) - ) - def __hash__(self) -> int: - return id(self) +@hookimpl(tryfirst=True) +def pytest_report_teststatus( + report: TestReport, + config: Config, +) -> tuple[str, str, str | Mapping[str, bool]] | None: + if report.when != "call": + return None - @hookimpl(tryfirst=True) - def pytest_report_teststatus( - self, - report: TestReport, - config: Config, - ) -> tuple[str, str, str | Mapping[str, bool]] | None: - if report.when != "call": - return None - - if isinstance(report, SubtestReport): - outcome = report.outcome - description = report._sub_test_description() - no_output = ("", "", "") - - if hasattr(report, "wasxfail"): - if config.option.no_subtests_reports and outcome != "skipped": - return no_output - elif outcome == "skipped": - category = "xfailed" - short = ( - "y" # x letter is used for regular xfail, y for subtest xfail - ) - status = "SUBXFAIL" - elif outcome == "passed": - category = "xpassed" - short = ( - "Y" # X letter is used for regular xpass, Y for subtest xpass - ) - status = "SUBXPASS" - else: - # This should not normally happen, unless some plugin is setting wasxfail without - # the correct outcome. Pytest expects the call outcome to be either skipped or - # passed in case of xfail. - # Let's pass this report to the next hook. - return None - short = "" if config.option.no_subtests_shortletter else short - return f"subtests {category}", short, f"{description} {status}" - - if config.option.no_subtests_reports and outcome != "failed": + if isinstance(report, SubtestReport): + outcome = report.outcome + description = report._sub_test_description() + no_output = ("", "", "") + + if hasattr(report, "wasxfail"): + if config.option.no_subtests_reports and outcome != "skipped": return no_output - elif report.passed: - short = "" if config.option.no_subtests_shortletter else "," - return f"subtests {outcome}", short, f"{description} SUBPASSED" - elif report.skipped: - short = "" if config.option.no_subtests_shortletter else "-" - return outcome, short, f"{description} SUBSKIPPED" - elif outcome == "failed": - short = "" if config.option.no_subtests_shortletter else "u" - return outcome, short, f"{description} SUBFAILED" - else: - # Top-level test, fail it it contains failed subtests and it has passed. - if ( - report.passed - and (count := self.contains_failed_subtests.get(report.nodeid, 0)) > 0 - ): - report.outcome = "failed" - suffix = "s" if count > 1 else "" - report.longrepr = f"Contains {count} failed subtest{suffix}" + elif outcome == "skipped": + category = "xfailed" + short = "y" # x letter is used for regular xfail, y for subtest xfail + status = "SUBXFAIL" + elif outcome == "passed": + category = "xpassed" + short = "Y" # X letter is used for regular xpass, Y for subtest xpass + status = "SUBXPASS" + else: + # This should not normally happen, unless some plugin is setting wasxfail without + # the correct outcome. Pytest expects the call outcome to be either skipped or + # passed in case of xfail. + # Let's pass this report to the next hook. 
+ return None + short = "" if config.option.no_subtests_shortletter else short + return f"subtests {category}", short, f"{description} {status}" + + if config.option.no_subtests_reports and outcome != "failed": + return no_output + elif report.passed: + short = "" if config.option.no_subtests_shortletter else "," + return f"subtests {outcome}", short, f"{description} SUBPASSED" + elif report.skipped: + short = "" if config.option.no_subtests_shortletter else "-" + return outcome, short, f"{description} SUBSKIPPED" + elif outcome == "failed": + short = "" if config.option.no_subtests_shortletter else "u" + return outcome, short, f"{description} SUBFAILED" + else: + failed_subtests_count = config.stash[failed_subtests_key][report.nodeid] + # Top-level test, fail it it contains failed subtests and it has passed. + if report.passed and failed_subtests_count > 0: + report.outcome = "failed" + suffix = "s" if failed_subtests_count > 1 else "" + report.longrepr = f"Contains {failed_subtests_count} failed subtest{suffix}" - return None + return None From aa1bf099c0c7802c334384b2e00e1b4cd27e8418 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Thu, 23 Oct 2025 17:39:37 -0300 Subject: [PATCH 13/18] Mention pytest-subtests in the docs --- changelog/1367.feature.rst | 2 ++ doc/en/how-to/subtests.rst | 9 ++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/changelog/1367.feature.rst b/changelog/1367.feature.rst index 72eadbc55ca..b88480338b5 100644 --- a/changelog/1367.feature.rst +++ b/changelog/1367.feature.rst @@ -21,6 +21,8 @@ Each assert failure or error is caught by the context manager and reported indiv In addition, :meth:`unittest.TestCase.subTest` is now also supported. +This feature was originally implemented as a separate plugin in `pytest-subtests `__, but since then has been merged into the core. + .. note:: This feature is experimental and will likely evolve in future releases. By that we mean that we might change how subtests are reported on failure, but the functionality and how to use it are stable. diff --git a/doc/en/how-to/subtests.rst b/doc/en/how-to/subtests.rst index 398f68db7ba..fbcdb9a9b0a 100644 --- a/doc/en/how-to/subtests.rst +++ b/doc/en/how-to/subtests.rst @@ -33,9 +33,9 @@ Each assertion failure or error is caught by the context manager and reported in In the output above: +* Subtest failures are reported as ``SUBFAILED``. * Each subtest is reported with the ``,`` character. * Subtests are reported first and the "top-level" test is reported at the end on its own. -* Subtest failures are reported as ``SUBFAILED``. Note that it is possible to use ``subtests`` multiple times in the same test, or even mix and match with normal assertions outside the ``subtests.test`` block: @@ -93,3 +93,10 @@ Subtests * Cannot be referenced individually from the command line. * Plugins that handle test execution cannot target individual subtests. * An assertion failure inside a subtest does not interrupt the test, letting users see all failures in the same report. + + +.. note:: + + This feature was originally implemented as a separate plugin in `pytest-subtests `__, but since ``9.0`` has been merged into the core. + + The core implementation should be compatible to the plugin implementation, except it does not contain custom command-line options to control subtest output. 
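To make the preceding how-to concrete, here is a minimal usage sketch of the ``subtests`` fixture; the even/odd assertion is illustrative, while the ``subtests.test("custom message", i=i)`` pattern matches the fixture docstring shown earlier in this series:

# Illustrative sketch only: exercises the subtests fixture described above.
def test_even_numbers(subtests):
    for i in range(5):
        with subtests.test("custom message", i=i):
            # Odd values fail; each failure is reported individually as
            # SUBFAILED and, with this series, also fails the top-level test.
            assert i % 2 == 0

Run verbosely, this reports SUBFAILED entries for ``i=1`` and ``i=3`` plus a FAILED top-level line, matching the expectations asserted in ``testing/test_subtests.py`` above.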
From f8a7c668db53828ece4dfb81f877baa55ae0cd59 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Thu, 23 Oct 2025 17:42:08 -0300 Subject: [PATCH 14/18] More code review --- src/_pytest/subtests.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index 4a86d691f14..58aecbb072d 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -219,13 +219,12 @@ def __exit__( exc_tb: TracebackType | None, ) -> bool: __tracebackhide__ = True - try: - if exc_val is not None: - exc_info = ExceptionInfo.from_exception(exc_val) - else: - exc_info = None - finally: - self._exit_stack.close() + if exc_val is not None: + exc_info = ExceptionInfo.from_exception(exc_val) + else: + exc_info = None + + self._exit_stack.close() precise_stop = time.perf_counter() duration = precise_stop - self._precise_start @@ -244,7 +243,7 @@ def __exit__( item=self.request.node, call=call_info ) sub_report = SubtestReport._from_test_report( - report, SubtestContext(msg=self.msg, kwargs=self.kwargs.copy()) + report, SubtestContext(msg=self.msg, kwargs=self.kwargs) ) if sub_report.failed: From 810fa895428b7fdaacf84bcb9ea0058fe67286a7 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Thu, 23 Oct 2025 17:51:57 -0300 Subject: [PATCH 15/18] Remove update_report --- src/_pytest/subtests.py | 48 ++++++++++++++++++++--------------------- src/_pytest/unittest.py | 4 +++- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index 58aecbb072d..c76ceb19fa5 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -108,11 +108,25 @@ def _from_json(cls, reportdict: dict[str, Any]) -> SubtestReport: return report @classmethod - def _from_test_report( - cls, test_report: TestReport, context: SubtestContext + def _new( + cls, + test_report: TestReport, + context: SubtestContext, + captured_output: Captured | None, + captured_logs: CapturedLogs | None, ) -> Self: result = super()._from_json(test_report._to_json()) result.context = context + + if captured_output: + if captured_output.out: + result.sections.append(("Captured stdout call", captured_output.out)) + if captured_output.err: + result.sections.append(("Captured stderr call", captured_output.err)) + + if captured_logs and (log := captured_logs.handler.stream.getvalue()): + result.sections.append(("Captured log call", log)) + return result @@ -242,17 +256,17 @@ def __exit__( report = self.ihook.pytest_runtest_makereport( item=self.request.node, call=call_info ) - sub_report = SubtestReport._from_test_report( - report, SubtestContext(msg=self.msg, kwargs=self.kwargs) + sub_report = SubtestReport._new( + report, + SubtestContext(msg=self.msg, kwargs=self.kwargs), + captured_output=self._captured_output, + captured_logs=self._captured_logs, ) if sub_report.failed: failed_subtests = self.config.stash[failed_subtests_key] failed_subtests[self.request.node.nodeid] += 1 - self._captured_output.update_report(sub_report) - self._captured_logs.update_report(sub_report) - with self.suspend_capture_ctx(): self.ihook.pytest_runtest_logreport(report=sub_report) @@ -299,10 +313,10 @@ def capturing_output(request: SubRequest) -> Iterator[Captured]: @contextmanager def capturing_logs( request: SubRequest, -) -> Iterator[CapturedLogs | NullCapturedLogs]: +) -> Iterator[CapturedLogs | None]: logging_plugin = request.config.pluginmanager.getplugin("logging-plugin") if logging_plugin is None: - yield NullCapturedLogs() + yield None else: 
handler = LogCaptureHandler() handler.setFormatter(logging_plugin.formatter) @@ -317,27 +331,11 @@ class Captured: out: str = "" err: str = "" - def update_report(self, report: TestReport) -> None: - if self.out: - report.sections.append(("Captured stdout call", self.out)) - if self.err: - report.sections.append(("Captured stderr call", self.err)) - @dataclasses.dataclass class CapturedLogs: handler: LogCaptureHandler - def update_report(self, report: TestReport) -> None: - captured_log = self.handler.stream.getvalue() - if captured_log: - report.sections.append(("Captured log call", captured_log)) - - -class NullCapturedLogs: - def update_report(self, report: TestReport) -> None: - pass - def pytest_report_to_serializable(report: TestReport) -> dict[str, Any] | None: if isinstance(report, SubtestReport): diff --git a/src/_pytest/unittest.py b/src/_pytest/unittest.py index 035f9533cf5..64321050853 100644 --- a/src/_pytest/unittest.py +++ b/src/_pytest/unittest.py @@ -429,9 +429,11 @@ def addSubTest( ) msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) - sub_report = SubtestReport._from_test_report( + sub_report = SubtestReport._new( report, SubtestContext(msg=msg, kwargs=dict(test.params)), # type: ignore[attr-defined] + captured_output=None, + captured_logs=None, ) self.ihook.pytest_runtest_logreport(report=sub_report) if check_interactive_exception(call_info, sub_report): From 8f1a0384fd92761074b52d0e056d2cd84259fc1e Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Thu, 23 Oct 2025 18:10:32 -0300 Subject: [PATCH 16/18] Do not suppress pytest.exit() or keyboard interrupt when working with pdb --- src/_pytest/runner.py | 17 +++++++++++++---- src/_pytest/subtests.py | 3 +++ testing/test_subtests.py | 20 ++++++++++++++++++++ 3 files changed, 36 insertions(+), 4 deletions(-) diff --git a/src/_pytest/runner.py b/src/_pytest/runner.py index ec08025d897..9c20ff9e638 100644 --- a/src/_pytest/runner.py +++ b/src/_pytest/runner.py @@ -16,6 +16,7 @@ from typing import TYPE_CHECKING from typing import TypeVar +from .config import Config from .reports import BaseReport from .reports import CollectErrorRepr from .reports import CollectReport @@ -239,11 +240,11 @@ def call_and_report( runtest_hook = ihook.pytest_runtest_teardown else: assert False, f"Unhandled runtest hook case: {when}" - reraise: tuple[type[BaseException], ...] = (Exit,) - if not item.config.getoption("usepdb", False): - reraise += (KeyboardInterrupt,) + call = CallInfo.from_call( - lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise + lambda: runtest_hook(item=item, **kwds), + when=when, + reraise=get_reraise_exceptions(item.config), ) report: TestReport = ihook.pytest_runtest_makereport(item=item, call=call) if log: @@ -253,6 +254,14 @@ def call_and_report( return report +def get_reraise_exceptions(config: Config) -> tuple[type[BaseException], ...]: + """Return exception types that should not be suppressed in general.""" + reraise: tuple[type[BaseException], ...] 
= (Exit,) + if not config.getoption("usepdb", False): + reraise += (KeyboardInterrupt,) + return reraise + + def check_interactive_exception(call: CallInfo[object], report: BaseReport) -> bool: """Check whether the call raised an exception that should be reported as interactive.""" diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index c76ceb19fa5..c930c630df9 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -34,6 +34,7 @@ from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception +from _pytest.runner import get_reraise_exceptions from _pytest.stash import StashKey @@ -276,6 +277,8 @@ def __exit__( ) if exc_val is not None: + if isinstance(exc_val, get_reraise_exceptions(self.config)): + return False if self.request.session.shouldfail: return False return True diff --git a/testing/test_subtests.py b/testing/test_subtests.py index dfe6001830e..8f92c00c123 100644 --- a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -931,3 +931,23 @@ def test_foo(subtests): consecutive=True, ) result.stdout.no_fnmatch_line("*sub2*") # sub2 not executed. + + +def test_do_not_swallow_pytest_exit(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + def test(subtests): + with subtests.test(): + pytest.exit() + + def test2(): pass + """ + ) + result = pytester.runpytest_subprocess() + result.stdout.fnmatch_lines( + [ + "* _pytest.outcomes.Exit *", + "* 1 failed in *", + ] + ) From 5c0611c26a63dece8fb668860d74b31b7b127cd4 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Thu, 23 Oct 2025 18:19:45 -0300 Subject: [PATCH 17/18] Forward log_level from plugin --- src/_pytest/subtests.py | 7 +++++-- testing/test_subtests.py | 28 ++++++++++++++++++++++++++-- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py index c930c630df9..c6ed5a7db0c 100644 --- a/src/_pytest/subtests.py +++ b/src/_pytest/subtests.py @@ -31,6 +31,7 @@ from _pytest.fixtures import SubRequest from _pytest.logging import catching_logs from _pytest.logging import LogCaptureHandler +from _pytest.logging import LoggingPlugin from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception @@ -317,7 +318,9 @@ def capturing_output(request: SubRequest) -> Iterator[Captured]: def capturing_logs( request: SubRequest, ) -> Iterator[CapturedLogs | None]: - logging_plugin = request.config.pluginmanager.getplugin("logging-plugin") + logging_plugin: LoggingPlugin | None = request.config.pluginmanager.getplugin( + "logging-plugin" + ) if logging_plugin is None: yield None else: @@ -325,7 +328,7 @@ def capturing_logs( handler.setFormatter(logging_plugin.formatter) captured_logs = CapturedLogs(handler) - with catching_logs(handler): + with catching_logs(handler, level=logging_plugin.log_level): yield captured_logs diff --git a/testing/test_subtests.py b/testing/test_subtests.py index 8f92c00c123..44f8b5f532a 100644 --- a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -769,15 +769,17 @@ def test_foo(subtests): with subtests.test("sub1"): print("sub1 stdout") logging.info("sub1 logging") + logging.debug("sub1 logging debug") with subtests.test("sub2"): print("sub2 stdout") logging.info("sub2 logging") + logging.debug("sub2 logging debug") assert False """ ) - def test_capturing(self, pytester: pytest.Pytester) -> None: + def test_capturing_info(self, pytester: pytest.Pytester) -> None: 
self.create_file(pytester) result = pytester.runpytest("--log-level=INFO") result.stdout.fnmatch_lines( @@ -786,7 +788,29 @@ def test_capturing(self, pytester: pytest.Pytester) -> None: "*-- Captured stdout call --*", "sub2 stdout", "*-- Captured log call ---*", - "INFO root:test_capturing.py:12 sub2 logging", + "INFO * before", + "INFO * sub1 logging", + "INFO * sub2 logging", + "*== short test summary info ==*", + ] + ) + result.stdout.no_fnmatch_line("sub1 logging debug") + result.stdout.no_fnmatch_line("sub2 logging debug") + + def test_capturing_debug(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest("--log-level=DEBUG") + result.stdout.fnmatch_lines( + [ + "*___ test_foo [[]sub2[]] __*", + "*-- Captured stdout call --*", + "sub2 stdout", + "*-- Captured log call ---*", + "INFO * before", + "INFO * sub1 logging", + "DEBUG * sub1 logging debug", + "INFO * sub2 logging", + "DEBUG * sub2 logging debug", "*== short test summary info ==*", ] ) From 73a54029dc2e09dca8122ec17a02ee181409f85e Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Thu, 23 Oct 2025 18:25:18 -0300 Subject: [PATCH 18/18] Add test case for nested subtests --- testing/test_subtests.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/testing/test_subtests.py b/testing/test_subtests.py index 44f8b5f532a..595c28874ec 100644 --- a/testing/test_subtests.py +++ b/testing/test_subtests.py @@ -975,3 +975,29 @@ def test2(): pass "* 1 failed in *", ] ) + + +def test_nested(pytester: pytest.Pytester) -> None: + """ + Currently we do nothing special with nested subtests. + + This test only sediments how they work now, we might reconsider adding some kind of nesting support in the future. + """ + pytester.makepyfile( + """ + import pytest + def test(subtests): + with subtests.test("a"): + with subtests.test("b"): + assert False, "b failed" + assert False, "a failed" + """ + ) + result = pytester.runpytest_subprocess() + result.stdout.fnmatch_lines( + [ + "[b] SUBFAILED test_nested.py::test - AssertionError: b failed", + "[a] SUBFAILED test_nested.py::test - AssertionError: a failed", + "* 3 failed in *", + ] + )
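
Taken together, the bookkeeping that the later patches converge on reduces to the ``config.stash`` pattern sketched below. This is a condensed restatement of what the diffs above add (the key and hook names are taken from the patches); the increment and report-flipping steps are summarized as comments rather than repeated in full:

# Condensed sketch of the failed-subtest bookkeeping added by this series.
from collections import defaultdict

import pytest
from _pytest.stash import StashKey

# Maps test nodeid -> number of failed subtests seen for that test.
failed_subtests_key = StashKey[defaultdict[str, int]]()


def pytest_configure(config: pytest.Config) -> None:
    config.stash[failed_subtests_key] = defaultdict(lambda: 0)


# When a subtest report fails (inside the subtest context manager's __exit__):
#     config.stash[failed_subtests_key][request.node.nodeid] += 1
#
# In pytest_report_teststatus, a passed top-level "call" report whose counter
# is non-zero is turned into a failure with a "Contains N failed subtest(s)"
# longrepr; that is what lines such as "Contains 2 failed subtests" in
# testing/test_subtests.py assert.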