diff --git a/changelog/1367.feature.rst b/changelog/1367.feature.rst
new file mode 100644
index 00000000000..b88480338b5
--- /dev/null
+++ b/changelog/1367.feature.rst
@@ -0,0 +1,28 @@
+**Support for subtests** has been added.
+
+:ref:`subtests <subtests>` are an alternative to parametrization, useful in situations where the parametrization values are not all known at collection time.
+
+**Example**
+
+.. code-block:: python
+
+    def contains_docstring(p: Path) -> bool:
+        """Return True if the given Python file contains a top-level docstring."""
+        ...
+
+
+    def test_py_files_contain_docstring(subtests: pytest.Subtests) -> None:
+        for path in Path.cwd().glob("*.py"):
+            with subtests.test(path=str(path)):
+                assert contains_docstring(path)
+
+
+Each assertion failure or error is caught by the context manager and reported individually, giving a clear picture of all files that are missing a docstring.
+
+In addition, :meth:`unittest.TestCase.subTest` is now also supported.
+
+This feature was originally implemented as a separate plugin in `pytest-subtests <https://github.com/pytest-dev/pytest-subtests>`__, but has since been merged into the core.
+
+.. note::
+
+    This feature is experimental and will likely evolve in future releases: we might change how subtests are reported on failure, but the functionality and how to use it are stable.
diff --git a/doc/en/how-to/index.rst b/doc/en/how-to/index.rst
index 225f289651e..9796f1f8090 100644
--- a/doc/en/how-to/index.rst
+++ b/doc/en/how-to/index.rst
@@ -16,6 +16,7 @@ Core pytest functionality
    fixtures
    mark
    parametrize
+   subtests
    tmp_path
    monkeypatch
    doctest
diff --git a/doc/en/how-to/parametrize.rst b/doc/en/how-to/parametrize.rst
index fe186146434..5c39358d32a 100644
--- a/doc/en/how-to/parametrize.rst
+++ b/doc/en/how-to/parametrize.rst
@@ -20,6 +20,11 @@ pytest enables test parametrization at several levels:
 
 * `pytest_generate_tests`_ allows one to define custom parametrization
   schemes or extensions.
+
+.. note::
+
+    See :ref:`subtests` for an alternative to parametrization.
+
 .. _parametrizemark:
 .. _`@pytest.mark.parametrize`:
@@ -194,6 +199,7 @@ To get all combinations of multiple parametrized arguments you can stack
 This will run the test with the arguments set to ``x=0/y=2``, ``x=1/y=2``,
 ``x=0/y=3``, and ``x=1/y=3`` exhausting parameters in the order of the decorators.
 
+
 .. _`pytest_generate_tests`:
 
 Basic ``pytest_generate_tests`` example
diff --git a/doc/en/how-to/subtests.rst b/doc/en/how-to/subtests.rst
new file mode 100644
index 00000000000..fbcdb9a9b0a
--- /dev/null
+++ b/doc/en/how-to/subtests.rst
@@ -0,0 +1,102 @@
+.. _subtests:
+
+How to use subtests
+===================
+
+.. versionadded:: 9.0
+
+.. note::
+
+    This feature is experimental. Its behavior, particularly how failures are reported, may evolve in future releases. However, the core functionality and usage are considered stable.
+
+pytest allows for grouping assertions within a normal test, known as *subtests*.
+
+Subtests are an alternative to parametrization, particularly useful when the exact parametrization values are not known at collection time.
+
+
+.. code-block:: python
+
+    # content of test_subtest.py
+
+
+    def test(subtests):
+        for i in range(5):
+            with subtests.test(msg="custom message", i=i):
+                assert i % 2 == 0
+
+Each assertion failure or error is caught by the context manager and reported individually:
+
+.. code-block:: pytest
+
+    $ pytest -q test_subtest.py
+
+
+In the output above:
+
+* Subtest failures are reported as ``SUBFAILED``.
+* Each passed subtest is reported with the ``,`` character.
+* Subtests are reported first, and the "top-level" test is reported at the end on its own.
+
+Note that it is possible to use ``subtests`` multiple times in the same test, or even to mix subtests with normal assertions
+outside the ``subtests.test`` blocks:
+
+.. code-block:: python
+
+    def test(subtests):
+        for i in range(5):
+            with subtests.test("stage 1", i=i):
+                assert i % 2 == 0
+
+        assert func() == 10
+
+        for i in range(10, 20):
+            with subtests.test("stage 2", i=i):
+                assert i % 2 == 0
+
+.. note::
+
+    See :ref:`parametrize` for an alternative to subtests.
+
+
+Typing
+------
+
+:class:`pytest.Subtests` is exported so it can be used in type annotations:
+
+.. code-block:: python
+
+    def test(subtests: pytest.Subtests) -> None: ...
+
+.. _parametrize_vs_subtests:
+
+Parametrization vs Subtests
+---------------------------
+
+While :ref:`traditional pytest parametrization <parametrize>` and ``subtests`` are similar, they have important differences and distinct use cases.
+
+
+Parametrization
+~~~~~~~~~~~~~~~
+
+* Happens at collection time.
+* Generates individual tests.
+* Parametrized tests can be referenced from the command line.
+* Plays well with features that select and rerun tests, such as ``--last-failed``.
+* Ideal for decision table testing.
+
+Subtests
+~~~~~~~~
+
+* Happen during test execution.
+* Are not known at collection time.
+* Can be generated dynamically.
+* Cannot be referenced individually from the command line.
+* Plugins that handle test execution cannot target individual subtests.
+* An assertion failure inside a subtest does not interrupt the test, letting users see all failures in the same report.
+
+
+.. note::
+
+    This feature was originally implemented as a separate plugin in `pytest-subtests <https://github.com/pytest-dev/pytest-subtests>`__, but was merged into the core in ``9.0``.
+
+    The core implementation should be compatible with the plugin implementation, except it does not contain custom command-line options to control subtest output.
diff --git a/doc/en/how-to/unittest.rst b/doc/en/how-to/unittest.rst
index ba98b366d04..a8c56c266bd 100644
--- a/doc/en/how-to/unittest.rst
+++ b/doc/en/how-to/unittest.rst
@@ -22,17 +22,14 @@ their ``test`` methods in ``test_*.py`` or ``*_test.py`` files.
 
 Almost all ``unittest`` features are supported:
 
-* ``@unittest.skip`` style decorators;
-* ``setUp/tearDown``;
-* ``setUpClass/tearDownClass``;
-* ``setUpModule/tearDownModule``;
+* :func:`unittest.skip`/:func:`unittest.skipIf` style decorators
+* :meth:`unittest.TestCase.setUp`/:meth:`unittest.TestCase.tearDown`
+* :meth:`unittest.TestCase.setUpClass`/:meth:`unittest.TestCase.tearDownClass`
+* :func:`unittest.setUpModule`/:func:`unittest.tearDownModule`
+* :meth:`unittest.TestCase.subTest` (since version ``9.0``)
 
-.. _`pytest-subtests`: https://github.com/pytest-dev/pytest-subtests
 .. _`load_tests protocol`: https://docs.python.org/3/library/unittest.html#load-tests-protocol
 
-Additionally, :ref:`subtests ` are supported by the
-`pytest-subtests`_ plugin.
-
 Up to this point pytest does not have support for the following features:
 
 * `load_tests protocol`_;
diff --git a/doc/en/reference/fixtures.rst b/doc/en/reference/fixtures.rst
index 566304d3330..02e235ceb9e 100644
--- a/doc/en/reference/fixtures.rst
+++ b/doc/en/reference/fixtures.rst
@@ -52,6 +52,9 @@ Built-in fixtures
 :fixture:`pytestconfig`
        Access to configuration values, pluginmanager and plugin hooks.
 
+:fixture:`subtests`
+       Enable declaring subtests inside test functions.
+
 :fixture:`record_property`
        Add extra properties to the test.
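As an illustrative sketch of the trade-off described in the documentation above (not part of the patch itself): parametrization produces separate test items at collection time, while subtests group run-time checks inside a single item. It assumes a pytest version that ships the built-in ``subtests`` fixture; ``load_cases`` is a hypothetical helper standing in for values that are only known at run time.

    # Illustrative sketch only -- not part of this diff.
    import pytest


    def load_cases() -> list[int]:
        # Hypothetical helper: stands in for values that are only available at
        # run time, which @pytest.mark.parametrize cannot see during collection.
        return [0, 1, 2, 3]


    @pytest.mark.parametrize("i", [0, 1, 2, 3])  # collected as four separate items
    def test_with_parametrize(i: int) -> None:
        assert i % 2 == 0  # each odd value fails as its own test


    def test_with_subtests(subtests: pytest.Subtests) -> None:
        # Collected as a single item; each failing subtest is reported separately
        # at run time, and the enclosing test is then marked as failed.
        for i in load_cases():
            with subtests.test(i=i):
                assert i % 2 == 0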
diff --git a/doc/en/reference/reference.rst b/doc/en/reference/reference.rst index 3dfa11901ea..ab41fd1554c 100644 --- a/doc/en/reference/reference.rst +++ b/doc/en/reference/reference.rst @@ -572,6 +572,19 @@ The ``request`` fixture is a special fixture providing information of the reques :members: +.. fixture:: subtests + +subtests +~~~~~~~~ + +The ``subtests`` fixture enables declaring subtests inside test functions. + +**Tutorial**: :ref:`subtests` + +.. autoclass:: pytest.Subtests() + :members: + + .. fixture:: testdir testdir diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index 2af60fa9c3c..23c85f70d64 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -278,6 +278,7 @@ def directory_arg(path: str, optname: str) -> str: "logging", "reports", "faulthandler", + "subtests", ) builtin_plugins = { diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 60540552401..cb5d2e93e93 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -25,6 +25,7 @@ "pytest_catchlog", "pytest_capturelog", "pytest_faulthandler", + "pytest_subtests", } diff --git a/src/_pytest/reports.py b/src/_pytest/reports.py index fb0607bfb95..8deed3be79e 100644 --- a/src/_pytest/reports.py +++ b/src/_pytest/reports.py @@ -251,7 +251,6 @@ def _report_unserialization_failure( raise RuntimeError(stream.getvalue()) -@final class TestReport(BaseReport): """Basic test report object (also used for setup and teardown calls if they fail). diff --git a/src/_pytest/runner.py b/src/_pytest/runner.py index ec08025d897..9c20ff9e638 100644 --- a/src/_pytest/runner.py +++ b/src/_pytest/runner.py @@ -16,6 +16,7 @@ from typing import TYPE_CHECKING from typing import TypeVar +from .config import Config from .reports import BaseReport from .reports import CollectErrorRepr from .reports import CollectReport @@ -239,11 +240,11 @@ def call_and_report( runtest_hook = ihook.pytest_runtest_teardown else: assert False, f"Unhandled runtest hook case: {when}" - reraise: tuple[type[BaseException], ...] = (Exit,) - if not item.config.getoption("usepdb", False): - reraise += (KeyboardInterrupt,) + call = CallInfo.from_call( - lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise + lambda: runtest_hook(item=item, **kwds), + when=when, + reraise=get_reraise_exceptions(item.config), ) report: TestReport = ihook.pytest_runtest_makereport(item=item, call=call) if log: @@ -253,6 +254,14 @@ def call_and_report( return report +def get_reraise_exceptions(config: Config) -> tuple[type[BaseException], ...]: + """Return exception types that should not be suppressed in general.""" + reraise: tuple[type[BaseException], ...] 
= (Exit,) + if not config.getoption("usepdb", False): + reraise += (KeyboardInterrupt,) + return reraise + + def check_interactive_exception(call: CallInfo[object], report: BaseReport) -> bool: """Check whether the call raised an exception that should be reported as interactive.""" diff --git a/src/_pytest/subtests.py b/src/_pytest/subtests.py new file mode 100644 index 00000000000..c6ed5a7db0c --- /dev/null +++ b/src/_pytest/subtests.py @@ -0,0 +1,419 @@ +"""Builtin plugin that adds subtests support.""" + +from __future__ import annotations + +from collections import defaultdict +from collections.abc import Callable +from collections.abc import Iterator +from collections.abc import Mapping +from contextlib import AbstractContextManager +from contextlib import contextmanager +from contextlib import ExitStack +from contextlib import nullcontext +import dataclasses +import time +from types import TracebackType +from typing import Any +from typing import TYPE_CHECKING + +import pluggy + +from _pytest._code import ExceptionInfo +from _pytest._io.saferepr import saferepr +from _pytest.capture import CaptureFixture +from _pytest.capture import FDCapture +from _pytest.capture import SysCapture +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import SubRequest +from _pytest.logging import catching_logs +from _pytest.logging import LogCaptureHandler +from _pytest.logging import LoggingPlugin +from _pytest.reports import TestReport +from _pytest.runner import CallInfo +from _pytest.runner import check_interactive_exception +from _pytest.runner import get_reraise_exceptions +from _pytest.stash import StashKey + + +if TYPE_CHECKING: + from typing_extensions import Self + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("subtests") + group.addoption( + "--no-subtests-shortletter", + action="store_true", + dest="no_subtests_shortletter", + default=False, + help="Disables subtest output 'dots' in non-verbose mode (EXPERIMENTAL)", + ) + group.addoption( + "--no-subtests-reports", + action="store_true", + dest="no_subtests_reports", + default=False, + help="Disables subtest output unless it's a failed subtest (EXPERIMENTAL)", + ) + + +@dataclasses.dataclass(frozen=True, slots=True, kw_only=True) +class SubtestContext: + """The values passed to Subtests.test() that are included in the test report.""" + + msg: str | None + kwargs: Mapping[str, Any] + + def _to_json(self) -> dict[str, Any]: + return dataclasses.asdict(self) + + @classmethod + def _from_json(cls, d: dict[str, Any]) -> Self: + return cls(msg=d["msg"], kwargs=d["kwargs"]) + + +@dataclasses.dataclass(init=False) +class SubtestReport(TestReport): + context: SubtestContext + + @property + def head_line(self) -> str: + _, _, domain = self.location + return f"{domain} {self._sub_test_description()}" + + def _sub_test_description(self) -> str: + parts = [] + if self.context.msg is not None: + parts.append(f"[{self.context.msg}]") + if self.context.kwargs: + params_desc = ", ".join( + f"{k}={saferepr(v)}" for (k, v) in self.context.kwargs.items() + ) + parts.append(f"({params_desc})") + return " ".join(parts) or "()" + + def _to_json(self) -> dict[str, Any]: + data = super()._to_json() + del data["context"] + data["_report_type"] = "SubTestReport" + data["_subtest.context"] = self.context._to_json() + return data + + @classmethod + def _from_json(cls, 
reportdict: dict[str, Any]) -> SubtestReport: + report = super()._from_json(reportdict) + report.context = SubtestContext._from_json(reportdict["_subtest.context"]) + return report + + @classmethod + def _new( + cls, + test_report: TestReport, + context: SubtestContext, + captured_output: Captured | None, + captured_logs: CapturedLogs | None, + ) -> Self: + result = super()._from_json(test_report._to_json()) + result.context = context + + if captured_output: + if captured_output.out: + result.sections.append(("Captured stdout call", captured_output.out)) + if captured_output.err: + result.sections.append(("Captured stderr call", captured_output.err)) + + if captured_logs and (log := captured_logs.handler.stream.getvalue()): + result.sections.append(("Captured log call", log)) + + return result + + +@fixture +def subtests(request: SubRequest) -> Subtests: + """Provides subtests functionality.""" + capmam = request.node.config.pluginmanager.get_plugin("capturemanager") + if capmam is not None: + suspend_capture_ctx = capmam.global_and_fixture_disabled + else: + suspend_capture_ctx = nullcontext + return Subtests(request.node.ihook, suspend_capture_ctx, request, _ispytest=True) + + +class Subtests: + """Subtests fixture, enables declaring subtests inside test functions via the :meth:`test` method.""" + + def __init__( + self, + ihook: pluggy.HookRelay, + suspend_capture_ctx: Callable[[], AbstractContextManager[None]], + request: SubRequest, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._ihook = ihook + self._suspend_capture_ctx = suspend_capture_ctx + self._request = request + + def test( + self, + msg: str | None = None, + **kwargs: Any, + ) -> _SubTestContextManager: + """ + Context manager for subtests, capturing exceptions raised inside the subtest scope and + reporting assertion failures and errors individually. + + Usage + ----- + + .. code-block:: python + + def test(subtests): + for i in range(5): + with subtests.test("custom message", i=i): + assert i % 2 == 0 + + :param msg: + If given, the message will be shown in the test report in case of subtest failure. + + :param kwargs: + Arbitrary values that are also added to the subtest report. + """ + return _SubTestContextManager( + self._ihook, + msg, + kwargs, + request=self._request, + suspend_capture_ctx=self._suspend_capture_ctx, + config=self._request.config, + ) + + +@dataclasses.dataclass +class _SubTestContextManager: + """ + Context manager for subtests, capturing exceptions raised inside the subtest scope and handling + them through the pytest machinery. + """ + + # Note: initially the logic for this context manager was implemented directly + # in Subtests.test() as a @contextmanager, however, it is not possible to control the output fully when + # exiting from it due to an exception when in `--exitfirst` mode, so this was refactored into an + # explicit context manager class (pytest-dev/pytest-subtests#134). 
+ + ihook: pluggy.HookRelay + msg: str | None + kwargs: dict[str, Any] + suspend_capture_ctx: Callable[[], AbstractContextManager[None]] + request: SubRequest + config: Config + + def __enter__(self) -> None: + __tracebackhide__ = True + + self._start = time.time() + self._precise_start = time.perf_counter() + self._exc_info = None + + self._exit_stack = ExitStack() + self._captured_output = self._exit_stack.enter_context( + capturing_output(self.request) + ) + self._captured_logs = self._exit_stack.enter_context( + capturing_logs(self.request) + ) + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool: + __tracebackhide__ = True + if exc_val is not None: + exc_info = ExceptionInfo.from_exception(exc_val) + else: + exc_info = None + + self._exit_stack.close() + + precise_stop = time.perf_counter() + duration = precise_stop - self._precise_start + stop = time.time() + + call_info = CallInfo[None]( + None, + exc_info, + start=self._start, + stop=stop, + duration=duration, + when="call", + _ispytest=True, + ) + report = self.ihook.pytest_runtest_makereport( + item=self.request.node, call=call_info + ) + sub_report = SubtestReport._new( + report, + SubtestContext(msg=self.msg, kwargs=self.kwargs), + captured_output=self._captured_output, + captured_logs=self._captured_logs, + ) + + if sub_report.failed: + failed_subtests = self.config.stash[failed_subtests_key] + failed_subtests[self.request.node.nodeid] += 1 + + with self.suspend_capture_ctx(): + self.ihook.pytest_runtest_logreport(report=sub_report) + + if check_interactive_exception(call_info, sub_report): + self.ihook.pytest_exception_interact( + node=self.request.node, call=call_info, report=sub_report + ) + + if exc_val is not None: + if isinstance(exc_val, get_reraise_exceptions(self.config)): + return False + if self.request.session.shouldfail: + return False + return True + + +@contextmanager +def capturing_output(request: SubRequest) -> Iterator[Captured]: + option = request.config.getoption("capture", None) + + capman = request.config.pluginmanager.getplugin("capturemanager") + if getattr(capman, "_capture_fixture", None): + # capsys or capfd are active, subtest should not capture. 
+ fixture = None + elif option == "sys": + fixture = CaptureFixture(SysCapture, request, _ispytest=True) + elif option == "fd": + fixture = CaptureFixture(FDCapture, request, _ispytest=True) + else: + fixture = None + + if fixture is not None: + fixture._start() + + captured = Captured() + try: + yield captured + finally: + if fixture is not None: + out, err = fixture.readouterr() + fixture.close() + captured.out = out + captured.err = err + + +@contextmanager +def capturing_logs( + request: SubRequest, +) -> Iterator[CapturedLogs | None]: + logging_plugin: LoggingPlugin | None = request.config.pluginmanager.getplugin( + "logging-plugin" + ) + if logging_plugin is None: + yield None + else: + handler = LogCaptureHandler() + handler.setFormatter(logging_plugin.formatter) + + captured_logs = CapturedLogs(handler) + with catching_logs(handler, level=logging_plugin.log_level): + yield captured_logs + + +@dataclasses.dataclass +class Captured: + out: str = "" + err: str = "" + + +@dataclasses.dataclass +class CapturedLogs: + handler: LogCaptureHandler + + +def pytest_report_to_serializable(report: TestReport) -> dict[str, Any] | None: + if isinstance(report, SubtestReport): + return report._to_json() + return None + + +def pytest_report_from_serializable(data: dict[str, Any]) -> SubtestReport | None: + if data.get("_report_type") == "SubTestReport": + return SubtestReport._from_json(data) + return None + + +# Dict of nodeid -> number of failed subtests. +# Used to fail top-level tests that passed but contain failed subtests. +failed_subtests_key = StashKey[defaultdict[str, int]]() + + +def pytest_configure(config: Config) -> None: + config.stash[failed_subtests_key] = defaultdict(lambda: 0) + + +@hookimpl(tryfirst=True) +def pytest_report_teststatus( + report: TestReport, + config: Config, +) -> tuple[str, str, str | Mapping[str, bool]] | None: + if report.when != "call": + return None + + if isinstance(report, SubtestReport): + outcome = report.outcome + description = report._sub_test_description() + no_output = ("", "", "") + + if hasattr(report, "wasxfail"): + if config.option.no_subtests_reports and outcome != "skipped": + return no_output + elif outcome == "skipped": + category = "xfailed" + short = "y" # x letter is used for regular xfail, y for subtest xfail + status = "SUBXFAIL" + elif outcome == "passed": + category = "xpassed" + short = "Y" # X letter is used for regular xpass, Y for subtest xpass + status = "SUBXPASS" + else: + # This should not normally happen, unless some plugin is setting wasxfail without + # the correct outcome. Pytest expects the call outcome to be either skipped or + # passed in case of xfail. + # Let's pass this report to the next hook. + return None + short = "" if config.option.no_subtests_shortletter else short + return f"subtests {category}", short, f"{description} {status}" + + if config.option.no_subtests_reports and outcome != "failed": + return no_output + elif report.passed: + short = "" if config.option.no_subtests_shortletter else "," + return f"subtests {outcome}", short, f"{description} SUBPASSED" + elif report.skipped: + short = "" if config.option.no_subtests_shortletter else "-" + return outcome, short, f"{description} SUBSKIPPED" + elif outcome == "failed": + short = "" if config.option.no_subtests_shortletter else "u" + return outcome, short, f"{description} SUBFAILED" + else: + failed_subtests_count = config.stash[failed_subtests_key][report.nodeid] + # Top-level test, fail it it contains failed subtests and it has passed. 
+ if report.passed and failed_subtests_count > 0: + report.outcome = "failed" + suffix = "s" if failed_subtests_count > 1 else "" + report.longrepr = f"Contains {failed_subtests_count} failed subtest{suffix}" + + return None diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py index ed62c9e345e..929b0d51aaa 100644 --- a/src/_pytest/terminal.py +++ b/src/_pytest/terminal.py @@ -69,6 +69,9 @@ "xpassed", "warnings", "error", + "subtests passed", + "subtests failed", + "subtests skipped", ) _REPORTCHARS_DEFAULT = "fE" @@ -1579,6 +1582,8 @@ def _folded_skips( "error": "red", "warnings": "yellow", "passed": "green", + "subtests passed": "green", + "subtests failed": "red", } _color_for_type_default = "yellow" diff --git a/src/_pytest/unittest.py b/src/_pytest/unittest.py index 282f7b25680..64321050853 100644 --- a/src/_pytest/unittest.py +++ b/src/_pytest/unittest.py @@ -13,9 +13,13 @@ import sys import traceback import types +from typing import Any from typing import TYPE_CHECKING +from unittest import TestCase import _pytest._code +from _pytest._code import ExceptionInfo +from _pytest.compat import assert_never from _pytest.compat import is_async_function from _pytest.config import hookimpl from _pytest.fixtures import FixtureRequest @@ -30,12 +34,16 @@ from _pytest.python import Function from _pytest.python import Module from _pytest.runner import CallInfo +from _pytest.runner import check_interactive_exception +from _pytest.subtests import SubtestContext +from _pytest.subtests import SubtestReport if sys.version_info[:2] < (3, 11): from exceptiongroup import ExceptionGroup if TYPE_CHECKING: + from types import TracebackType import unittest import twisted.trial.unittest @@ -200,6 +208,7 @@ def unittest_setup_method_fixture( class TestCaseFunction(Function): nofuncargs = True + failfast = False _excinfo: list[_pytest._code.ExceptionInfo[BaseException]] | None = None def _getinstance(self): @@ -277,11 +286,42 @@ def addFailure( ) -> None: self._addexcinfo(rawexcinfo) - def addSkip(self, testcase: unittest.TestCase, reason: str) -> None: - try: - raise skip.Exception(reason, _use_item_location=True) - except skip.Exception: - self._addexcinfo(sys.exc_info()) + def addSkip( + self, testcase: unittest.TestCase, reason: str, *, handle_subtests: bool = True + ) -> None: + from unittest.case import _SubTest # type: ignore[attr-defined] + + def add_skip() -> None: + try: + raise skip.Exception(reason, _use_item_location=True) + except skip.Exception: + self._addexcinfo(sys.exc_info()) + + if not handle_subtests: + add_skip() + return + + if isinstance(testcase, _SubTest): + add_skip() + if self._excinfo is not None: + exc_info = self._excinfo[-1] + self.addSubTest(testcase.test_case, testcase, exc_info) + else: + # For python < 3.11: the non-subtest skips have to be added by `add_skip` only after all subtest + # failures are processed by `_addSubTest`: `self.instance._outcome` has no attribute + # `skipped/errors` anymore. + # We also need to check if `self.instance._outcome` is `None` (this happens if the test + # class/method is decorated with `unittest.skip`, see pytest-dev/pytest-subtests#173). 
+ if sys.version_info < (3, 11) and self.instance._outcome is not None: + subtest_errors = [ + x + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + if len(subtest_errors) == 0: + add_skip() + else: + add_skip() def addExpectedFailure( self, @@ -361,6 +401,70 @@ def _traceback_filter( ntraceback = traceback return ntraceback + def addSubTest( + self, + test_case: Any, + test: TestCase, + exc_info: ExceptionInfo[BaseException] + | tuple[type[BaseException], BaseException, TracebackType] + | None, + ) -> None: + exception_info: ExceptionInfo[BaseException] | None + match exc_info: + case tuple(): + exception_info = ExceptionInfo(exc_info, _ispytest=True) + case ExceptionInfo() | None: + exception_info = exc_info + case unreachable: + assert_never(unreachable) + + call_info = CallInfo[None]( + None, + exception_info, + start=0, + stop=0, + duration=0, + when="call", + _ispytest=True, + ) + msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] + report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) + sub_report = SubtestReport._new( + report, + SubtestContext(msg=msg, kwargs=dict(test.params)), # type: ignore[attr-defined] + captured_output=None, + captured_logs=None, + ) + self.ihook.pytest_runtest_logreport(report=sub_report) + if check_interactive_exception(call_info, sub_report): + self.ihook.pytest_exception_interact( + node=self, call=call_info, report=sub_report + ) + + # For python < 3.11: add non-subtest skips once all subtest failures are processed by # `_addSubTest`. + if sys.version_info < (3, 11): + from unittest.case import _SubTest # type: ignore[attr-defined] + + non_subtest_skip = [ + (x, y) + for x, y in self.instance._outcome.skipped + if not isinstance(x, _SubTest) + ] + subtest_errors = [ + (x, y) + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + # Check if we have non-subtest skips: if there are also sub failures, non-subtest skips are not treated in + # `_addSubTest` and have to be added using `add_skip` after all subtest failures are processed. 
+ if len(non_subtest_skip) > 0 and len(subtest_errors) > 0: + # Make sure we have processed the last subtest failure + last_subset_error = subtest_errors[-1] + if exc_info is last_subset_error[-1]: + # Add non-subtest skips (as they could not be treated in `_addSkip`) + for testcase, reason in non_subtest_skip: + self.addSkip(testcase, reason, handle_subtests=False) + @hookimpl(tryfirst=True) def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: diff --git a/src/pytest/__init__.py b/src/pytest/__init__.py index 31d56deede4..6c09a2461d4 100644 --- a/src/pytest/__init__.py +++ b/src/pytest/__init__.py @@ -71,6 +71,7 @@ from _pytest.runner import CallInfo from _pytest.stash import Stash from _pytest.stash import StashKey +from _pytest.subtests import Subtests from _pytest.terminal import TerminalReporter from _pytest.terminal import TestShortLogReport from _pytest.tmpdir import TempPathFactory @@ -148,6 +149,7 @@ "Session", "Stash", "StashKey", + "Subtests", "TempPathFactory", "TempdirFactory", "TerminalReporter", diff --git a/testing/test_subtests.py b/testing/test_subtests.py new file mode 100644 index 00000000000..595c28874ec --- /dev/null +++ b/testing/test_subtests.py @@ -0,0 +1,1003 @@ +from __future__ import annotations + +from pathlib import Path +import sys +from typing import Literal + +import pytest + + +IS_PY311 = sys.version_info[:2] >= (3, 11) + + +@pytest.mark.parametrize("mode", ["normal", "xdist"]) +class TestFixture: + """Tests for ``subtests`` fixture.""" + + @pytest.fixture + def simple_script(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + assert i % 2 == 0 + """ + ) + + def test_simple_terminal_normal( + self, + simple_script: None, + pytester: pytest.Pytester, + mode: Literal["normal", "xdist"], + ) -> None: + if mode == "normal": + result = pytester.runpytest() + expected_lines = ["collected 1 item"] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1", "-pxdist.plugin") + expected_lines = ["1 worker [1 item]"] + + expected_lines += [ + "* test_foo [[]custom[]] (i=1) *", + "* test_foo [[]custom[]] (i=3) *", + "Contains 2 failed subtests", + "* 3 failed, 3 subtests passed in *", + ] + result.stdout.fnmatch_lines(expected_lines) + + def test_simple_terminal_verbose( + self, + simple_script: None, + pytester: pytest.Pytester, + mode: Literal["normal", "xdist"], + ) -> None: + if mode == "normal": + result = pytester.runpytest("-v") + expected_lines = [ + "*collected 1 item", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=0) SUBPASSED *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=1) SUBFAILED *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=2) SUBPASSED *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=3) SUBFAILED *100%*", + "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=4) SUBPASSED *100%*", + "test_simple_terminal_verbose.py::test_foo FAILED *100%*", + ] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1", "-v", "-pxdist.plugin") + expected_lines = [ + "1 worker [1 item]", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* 
test_simple_terminal_verbose.py::test_foo*", + "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", + ] + + expected_lines += [ + "* test_foo [[]custom[]] (i=1) *", + "* test_foo [[]custom[]] (i=3) *", + "* 3 failed, 3 subtests passed in *", + ] + result.stdout.fnmatch_lines(expected_lines) + + def test_skip( + self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] + ) -> None: + pytester.makepyfile( + """ + import pytest + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + if i % 2 == 0: + pytest.skip('even number') + """ + ) + if mode == "normal": + result = pytester.runpytest() + expected_lines = ["collected 1 item"] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1", "-pxdist.plugin") + expected_lines = ["1 worker [1 item]"] + expected_lines += ["* 1 passed, 3 skipped, 2 subtests passed in *"] + result.stdout.fnmatch_lines(expected_lines) + + def test_xfail( + self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] + ) -> None: + pytester.makepyfile( + """ + import pytest + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + if i % 2 == 0: + pytest.xfail('even number') + """ + ) + if mode == "normal": + result = pytester.runpytest() + expected_lines = ["collected 1 item"] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1", "-pxdist.plugin") + expected_lines = ["1 worker [1 item]"] + expected_lines += ["* 1 passed, 2 subtests passed, 3 subtests xfailed in *"] + result.stdout.fnmatch_lines(expected_lines) + + def test_typing_exported( + self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] + ) -> None: + pytester.makepyfile( + """ + from pytest import Subtests + + def test_typing_exported(subtests: Subtests) -> None: + assert isinstance(subtests, Subtests) + """ + ) + if mode == "normal": + result = pytester.runpytest() + expected_lines = ["collected 1 item"] + else: + assert mode == "xdist" + pytest.importorskip("xdist") + result = pytester.runpytest("-n1", "-pxdist.plugin") + expected_lines = ["1 worker [1 item]"] + expected_lines += ["* 1 passed *"] + result.stdout.fnmatch_lines(expected_lines) + + def test_no_subtests_reports( + self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] + ) -> None: + pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + pass + """ + ) + # Without `--no-subtests-reports`, subtests are reported normally. + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "*collected 1 item*", + "test_no_subtests_reports.py::test_foo * (i=0) SUBPASSED*", + "*test_no_subtests_reports.py::test_foo PASSED*", + "* 1 passed, 5 subtests passed in*", + ] + ) + + # With `--no-subtests-reports`, passing subtests are no longer reported. + result = pytester.runpytest("-v", "--no-subtests-reports") + result.stdout.fnmatch_lines( + [ + "*collected 1 item*", + "*test_no_subtests_reports.py::test_foo PASSED*", + "* 1 passed in*", + ] + ) + result.stdout.no_fnmatch_line("*SUBPASSED*") + + # Rewrite the test file so the tests fail. Even with the flag, failed subtests are still reported. 
+ pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(5): + with subtests.test(msg="custom", i=i): + assert False + """ + ) + result = pytester.runpytest("-v", "--no-subtests-reports") + result.stdout.fnmatch_lines( + [ + "*collected 1 item*", + "test_no_subtests_reports.py::test_foo * (i=0) SUBFAILED*", + "*test_no_subtests_reports.py::test_foo FAILED*", + "* 6 failed in*", + ] + ) + + +def test_subtests_and_parametrization(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + @pytest.mark.parametrize("x", [0, 1]) + def test_foo(subtests, x): + for i in range(3): + with subtests.test("custom", i=i): + assert i % 2 == 0 + assert x == 0 + """ + ) + result = pytester.runpytest("-v", "--no-subtests-reports") + result.stdout.fnmatch_lines( + [ + "test_subtests_and_parametrization.py::test_foo[[]0[]] [[]custom[]] (i=1) SUBFAILED*[[] 50%[]]", + "test_subtests_and_parametrization.py::test_foo[[]0[]] FAILED *[[] 50%[]]", + "test_subtests_and_parametrization.py::test_foo[[]1[]] [[]custom[]] (i=1) SUBFAILED *[[]100%[]]", + "test_subtests_and_parametrization.py::test_foo[[]1[]] FAILED *[[]100%[]]", + "Contains 1 failed subtest", + "* 4 failed in *", + ] + ) + + +def test_subtests_fail_top_level_test(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(3): + with subtests.test("custom", i=i): + assert i % 2 == 0 + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "* 2 failed, 2 subtests passed in *", + ] + ) + + +def test_subtests_do_not_overwrite_top_level_failure(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(3): + with subtests.test("custom", i=i): + assert i % 2 == 0 + assert False, "top-level failure" + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "*AssertionError: top-level failure", + "* 2 failed, 2 subtests passed in *", + ] + ) + + +@pytest.mark.parametrize("flag", ["--last-failed", "--stepwise"]) +def test_subtests_last_failed_step_wise(pytester: pytest.Pytester, flag: str) -> None: + """Check that --last-failed and --step-wise correctly rerun tests with failed subtests.""" + pytester.makepyfile( + """ + import pytest + + def test_foo(subtests): + for i in range(3): + with subtests.test("custom", i=i): + assert i % 2 == 0 + """ + ) + result = pytester.runpytest("-v") + result.stdout.fnmatch_lines( + [ + "* 2 failed, 2 subtests passed in *", + ] + ) + + result = pytester.runpytest("-v", flag) + result.stdout.fnmatch_lines( + [ + "* 2 failed, 2 subtests passed in *", + ] + ) + + +class TestUnittestSubTest: + """Test unittest.TestCase.subTest functionality.""" + + @pytest.fixture + def simple_script(self, pytester: pytest.Pytester) -> Path: + return pytester.makepyfile( + """ + from unittest import TestCase, main + + class T(TestCase): + + def test_foo(self): + for i in range(5): + with self.subTest(msg="custom", i=i): + self.assertEqual(i % 2, 0) + + if __name__ == '__main__': + main() + """ + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_simple_terminal_normal( + self, + simple_script: Path, + pytester: pytest.Pytester, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + suffix = ".test_foo" if IS_PY311 else "" + if runner == "unittest": + result = pytester.run(sys.executable, simple_script) + result.stderr.fnmatch_lines( + [ + 
f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)", + "AssertionError: 1 != 0", + f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)", + "AssertionError: 1 != 0", + "Ran 1 test in *", + "FAILED (failures=2)", + ] + ) + else: + if runner == "pytest-normal": + result = pytester.runpytest(simple_script) + expected_lines = ["collected 1 item"] + else: + assert runner == "pytest-xdist" + pytest.importorskip("xdist") + result = pytester.runpytest(simple_script, "-n1", "-pxdist.plugin") + expected_lines = ["1 worker [1 item]"] + result.stdout.fnmatch_lines( + [ + *expected_lines, + "* T.test_foo [[]custom[]] (i=1) *", + "E * AssertionError: 1 != 0", + "* T.test_foo [[]custom[]] (i=3) *", + "E * AssertionError: 1 != 0", + "* 2 failed, 1 passed, 3 subtests passed in *", + ] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_simple_terminal_verbose( + self, + simple_script: Path, + pytester: pytest.Pytester, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + suffix = ".test_foo" if IS_PY311 else "" + if runner == "unittest": + result = pytester.run(sys.executable, simple_script, "-v") + result.stderr.fnmatch_lines( + [ + f"test_foo (__main__.T{suffix}) ... ", + f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)", + "AssertionError: 1 != 0", + f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)", + "AssertionError: 1 != 0", + "Ran 1 test in *", + "FAILED (failures=2)", + ] + ) + else: + if runner == "pytest-normal": + result = pytester.runpytest(simple_script, "-v") + expected_lines = [ + "*collected 1 item", + "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=1) SUBFAILED *100%*", + "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=3) SUBFAILED *100%*", + "test_simple_terminal_verbose.py::T::test_foo PASSED *100%*", + ] + else: + assert runner == "pytest-xdist" + pytest.importorskip("xdist") + result = pytester.runpytest( + simple_script, "-n1", "-v", "-pxdist.plugin" + ) + expected_lines = [ + "1 worker [1 item]", + "*gw0*100%* SUBFAILED test_simple_terminal_verbose.py::T::test_foo*", + "*gw0*100%* SUBFAILED test_simple_terminal_verbose.py::T::test_foo*", + "*gw0*100%* PASSED test_simple_terminal_verbose.py::T::test_foo*", + ] + result.stdout.fnmatch_lines( + [ + *expected_lines, + "* T.test_foo [[]custom[]] (i=1) *", + "E * AssertionError: 1 != 0", + "* T.test_foo [[]custom[]] (i=3) *", + "E * AssertionError: 1 != 0", + "* 2 failed, 1 passed, 3 subtests passed in *", + ] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_skip( + self, + pytester: pytest.Pytester, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + p = pytester.makepyfile( + """ + from unittest import TestCase, main + + class T(TestCase): + + def test_foo(self): + for i in range(5): + with self.subTest(msg="custom", i=i): + if i % 2 == 0: + self.skipTest('even number') + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (skipped=3)"]) + else: + pytest.xfail("Not producing the expected results (#13756)") + result = pytester.runpytest(p) # type:ignore[unreachable] + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 skipped, 1 passed in *"] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + @pytest.mark.xfail(reason="Not producing the expected results (#13756)") + def test_xfail( + self, + 
pytester: pytest.Pytester, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + p = pytester.makepyfile( + """ + import pytest + from unittest import expectedFailure, TestCase, main + + class T(TestCase): + @expectedFailure + def test_foo(self): + for i in range(5): + with self.subTest(msg="custom", i=i): + if i % 2 == 0: + raise pytest.xfail('even number') + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (expected failures=3)"]) + else: + result = pytester.runpytest(p) + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 xfailed, 1 passed in *"] + ) + + @pytest.mark.parametrize("runner", ["pytest-normal"]) + def test_only_original_skip_is_called( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + runner: Literal["pytest-normal"], + ) -> None: + """Regression test for pytest-dev/pytest-subtests#173.""" + monkeypatch.setenv("COLUMNS", "200") + p = pytester.makepyfile( + """ + import unittest + from unittest import TestCase, main + + @unittest.skip("skip this test") + class T(unittest.TestCase): + def test_foo(self): + assert 1 == 2 + + if __name__ == '__main__': + main() + """ + ) + result = pytester.runpytest(p, "-v", "-rsf") + result.stdout.fnmatch_lines( + ["SKIPPED [1] test_only_original_skip_is_called.py:6: skip this test"] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_skip_with_failure( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + monkeypatch.setenv("COLUMNS", "200") + p = pytester.makepyfile( + """ + import pytest + from unittest import expectedFailure, TestCase, main + + class T(TestCase): + def test_foo(self): + for i in range(10): + with self.subTest("custom message", i=i): + if i < 4: + self.skipTest(f"skip subtest i={i}") + assert i < 4 + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + if sys.version_info < (3, 11): + result.stderr.re_match_lines( + [ + r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=4\).*", + r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=9\).*", + r"Ran 1 test in .*", + r"FAILED \(failures=6, skipped=4\)", + ] + ) + else: + result.stderr.re_match_lines( + [ + r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=4\).*", + r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=9\).*", + r"Ran 1 test in .*", + r"FAILED \(failures=6, skipped=4\)", + ] + ) + elif runner == "pytest-normal": + result = pytester.runpytest(p, "-v", "-rsf") + result.stdout.re_match_lines( + [ + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIPPED \(skip subtest i=0\) .*", # noqa: E501 + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIPPED \(skip subtest i=3\) .*", # noqa: E501 + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=4\) SUBFAILED .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=9\) SUBFAILED .*", + "test_skip_with_failure.py::T::test_foo PASSED .*", + r"[custom message] (i=0) SUBSKIPPED [1] test_skip_with_failure.py:5: skip subtest i=0", + r"[custom message] (i=0) SUBSKIPPED [1] test_skip_with_failure.py:5: skip subtest i=3", + r"[custom message] (i=4) SUBFAILED test_skip_with_failure.py::T::test_foo" + r" - AssertionError: assert 4 < 4", + r"[custom message] 
(i=9) SUBFAILED test_skip_with_failure.py::T::test_foo" + r" - AssertionError: assert 9 < 4", + r".* 6 failed, 1 passed, 4 skipped in .*", + ] + ) + else: + pytest.xfail("Not producing the expected results (#13756)") + result = pytester.runpytest(p) # type:ignore[unreachable] + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 skipped, 1 passed in *"] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_skip_with_failure_and_non_subskip( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + monkeypatch.setenv("COLUMNS", "200") + p = pytester.makepyfile( + """ + import pytest + from unittest import expectedFailure, TestCase, main + + class T(TestCase): + def test_foo(self): + for i in range(10): + with self.subTest("custom message", i=i): + if i < 4: + self.skipTest(f"skip subtest i={i}") + assert i < 4 + self.skipTest(f"skip the test") + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + if sys.version_info < (3, 11): + result.stderr.re_match_lines( + [ + r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=4\).*", + r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=9\).*", + r"Ran 1 test in .*", + r"FAILED \(failures=6, skipped=5\)", + ] + ) + else: + result.stderr.re_match_lines( + [ + r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=4\).*", + r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=9\).*", + r"Ran 1 test in .*", + r"FAILED \(failures=6, skipped=5\)", + ] + ) + elif runner == "pytest-normal": + result = pytester.runpytest(p, "-v", "-rsf") + # The `(i=0)` is not correct but it's given by pytest `TerminalReporter` without `--no-fold-skipped` + result.stdout.re_match_lines( + [ + r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAILED .*", + r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\)", + r"\[custom message\] \(i=0\) SUBSKIPPED \[1\] test_skip_with_failure_and_non_subskip.py:5:" + r" skip subtest i=3", + r"\[custom message\] \(i=0\) SUBSKIPPED \[1\] test_skip_with_failure_and_non_subskip.py:5:" + r" skip the test", + r"\[custom message\] \(i=4\) SUBFAILED test_skip_with_failure_and_non_subskip.py::T::test_foo", + r".* 6 failed, 5 skipped in .*", + ] + ) + # Check with `--no-fold-skipped` (which gives the correct information). 
+ if sys.version_info >= (3, 10) and pytest.version_tuple[:2] >= (8, 3): + result = pytester.runpytest(p, "-v", "--no-fold-skipped", "-rsf") + result.stdout.re_match_lines( + [ + r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAILED .*", # noqa: E501 + r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\).*", + r"\[custom message\] \(i=3\) SUBSKIPPED test_skip_with_failure_and_non_subskip.py::T::test_foo" + r" - Skipped: skip subtest i=3", + r"SKIPPED test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip the test", + r"\[custom message\] \(i=4\) SUBFAILED test_skip_with_failure_and_non_subskip.py::T::test_foo", + r".* 6 failed, 5 skipped in .*", + ] + ) + else: + pytest.xfail("Not producing the expected results (#13756)") + result = pytester.runpytest(p) # type:ignore[unreachable] + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 skipped, 1 passed in *"] + ) + + +class TestCapture: + def create_file(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import sys + def test(subtests): + print() + print('start test') + + with subtests.test(i='A'): + print("hello stdout A") + print("hello stderr A", file=sys.stderr) + assert 0 + + with subtests.test(i='B'): + print("hello stdout B") + print("hello stderr B", file=sys.stderr) + assert 0 + + print('end test') + assert 0 + """ + ) + + def test_capturing(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "*__ test (i='A') __*", + "*Captured stdout call*", + "hello stdout A", + "*Captured stderr call*", + "hello stderr A", + "*__ test (i='B') __*", + "*Captured stdout call*", + "hello stdout B", + "*Captured stderr call*", + "hello stderr B", + "*__ test __*", + "*Captured stdout call*", + "start test", + "end test", + ] + ) + + def test_no_capture(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest("-s") + result.stdout.fnmatch_lines( + [ + "start test", + "hello stdout A", + "uhello stdout B", + "uend test", + "*__ test (i='A') __*", + "*__ test (i='B') __*", + "*__ test __*", + ] + ) + result.stderr.fnmatch_lines(["hello stderr A", "hello stderr B"]) + + @pytest.mark.parametrize("fixture", ["capsys", "capfd"]) + def test_capture_with_fixture( + self, pytester: pytest.Pytester, fixture: Literal["capsys", "capfd"] + ) -> None: + pytester.makepyfile( + rf""" + import sys + + def test(subtests, {fixture}): + print('start test') + + with subtests.test(i='A'): + print("hello stdout A") + print("hello stderr A", file=sys.stderr) + + out, err = {fixture}.readouterr() + assert out == 'start test\nhello stdout A\n' + assert err == 'hello stderr A\n' + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "*1 passed*", + ] + ) + + +class TestLogging: + def create_file(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import logging + + def test_foo(subtests): + logging.info("before") + + with subtests.test("sub1"): + print("sub1 stdout") + logging.info("sub1 logging") + logging.debug("sub1 logging debug") + + with subtests.test("sub2"): + print("sub2 stdout") + logging.info("sub2 logging") + logging.debug("sub2 logging debug") + assert False + """ + ) + + def test_capturing_info(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest("--log-level=INFO") + result.stdout.fnmatch_lines( + [ + "*___ test_foo [[]sub2[]] 
__*", + "*-- Captured stdout call --*", + "sub2 stdout", + "*-- Captured log call ---*", + "INFO * before", + "INFO * sub1 logging", + "INFO * sub2 logging", + "*== short test summary info ==*", + ] + ) + result.stdout.no_fnmatch_line("sub1 logging debug") + result.stdout.no_fnmatch_line("sub2 logging debug") + + def test_capturing_debug(self, pytester: pytest.Pytester) -> None: + self.create_file(pytester) + result = pytester.runpytest("--log-level=DEBUG") + result.stdout.fnmatch_lines( + [ + "*___ test_foo [[]sub2[]] __*", + "*-- Captured stdout call --*", + "sub2 stdout", + "*-- Captured log call ---*", + "INFO * before", + "INFO * sub1 logging", + "DEBUG * sub1 logging debug", + "INFO * sub2 logging", + "DEBUG * sub2 logging debug", + "*== short test summary info ==*", + ] + ) + + def test_caplog(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import logging + + def test(subtests, caplog): + caplog.set_level(logging.INFO) + logging.info("start test") + + with subtests.test("sub1"): + logging.info("inside %s", "subtest1") + + assert len(caplog.records) == 2 + assert caplog.records[0].getMessage() == "start test" + assert caplog.records[1].getMessage() == "inside subtest1" + """ + ) + result = pytester.runpytest() + result.stdout.fnmatch_lines( + [ + "*1 passed*", + ] + ) + + def test_no_logging(self, pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import logging + + def test(subtests): + logging.info("start log line") + + with subtests.test("sub passing"): + logging.info("inside %s", "passing log line") + + with subtests.test("sub failing"): + logging.info("inside %s", "failing log line") + assert False + + logging.info("end log line") + """ + ) + result = pytester.runpytest("-p no:logging") + result.stdout.fnmatch_lines( + [ + "*2 failed, 1 subtests passed in*", + ] + ) + result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*") + + +class TestDebugging: + """Check --pdb support for subtests fixture and TestCase.subTest.""" + + class _FakePdb: + """Fake debugger class implementation that tracks which methods were called on it.""" + + quitting: bool = False + calls: list[str] = [] + + def __init__(self, *_: object, **__: object) -> None: + self.calls.append("init") + + def reset(self) -> None: + self.calls.append("reset") + + def interaction(self, *_: object) -> None: + self.calls.append("interaction") + + @pytest.fixture(autouse=True) + def cleanup_calls(self) -> None: + self._FakePdb.calls.clear() + + def test_pdb_fixture( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + pytester.makepyfile( + """ + def test(subtests): + with subtests.test(): + assert 0 + """ + ) + self.runpytest_and_check_pdb(pytester, monkeypatch) + + def test_pdb_unittest( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + pytester.makepyfile( + """ + from unittest import TestCase + class Test(TestCase): + def test(self): + with self.subTest(): + assert 0 + """ + ) + self.runpytest_and_check_pdb(pytester, monkeypatch) + + def runpytest_and_check_pdb( + self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch + ) -> None: + # Install the fake pdb implementation in _pytest.subtests so we can reference + # it in the command line (any module would do). 
+ import _pytest.subtests + + monkeypatch.setattr( + _pytest.subtests, "_CustomPdb", self._FakePdb, raising=False + ) + result = pytester.runpytest("--pdb", "--pdbcls=_pytest.subtests:_CustomPdb") + + # Ensure pytest entered in debugging mode when encountering the failing + # assert. + result.stdout.fnmatch_lines("*entering PDB*") + assert self._FakePdb.calls == ["init", "reset", "interaction"] + + +def test_exitfirst(pytester: pytest.Pytester) -> None: + """Validate that when passing --exitfirst the test exits after the first failed subtest.""" + pytester.makepyfile( + """ + def test_foo(subtests): + with subtests.test("sub1"): + assert False + + with subtests.test("sub2"): + assert False + """ + ) + result = pytester.runpytest("--exitfirst") + assert result.parseoutcomes()["failed"] == 2 + result.stdout.fnmatch_lines( + [ + "*[[]sub1[]] SUBFAILED test_exitfirst.py::test_foo - assert False*", + "FAILED test_exitfirst.py::test_foo - assert False", + "* stopping after 2 failures*", + ], + consecutive=True, + ) + result.stdout.no_fnmatch_line("*sub2*") # sub2 not executed. + + +def test_do_not_swallow_pytest_exit(pytester: pytest.Pytester) -> None: + pytester.makepyfile( + """ + import pytest + def test(subtests): + with subtests.test(): + pytest.exit() + + def test2(): pass + """ + ) + result = pytester.runpytest_subprocess() + result.stdout.fnmatch_lines( + [ + "* _pytest.outcomes.Exit *", + "* 1 failed in *", + ] + ) + + +def test_nested(pytester: pytest.Pytester) -> None: + """ + Currently we do nothing special with nested subtests. + + This test only sediments how they work now, we might reconsider adding some kind of nesting support in the future. + """ + pytester.makepyfile( + """ + import pytest + def test(subtests): + with subtests.test("a"): + with subtests.test("b"): + assert False, "b failed" + assert False, "a failed" + """ + ) + result = pytester.runpytest_subprocess() + result.stdout.fnmatch_lines( + [ + "[b] SUBFAILED test_nested.py::test - AssertionError: b failed", + "[a] SUBFAILED test_nested.py::test - AssertionError: a failed", + "* 3 failed in *", + ] + )
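As a quick reference for the nesting behaviour exercised by ``test_nested`` above, here is an illustrative standalone snippet (not part of the patch, assuming a pytest version with the built-in ``subtests`` fixture): the inner failure is caught by the inner context manager, the outer block then continues and its own failure is caught by the outer context manager, and the top-level test is finally marked failed because it contains failed subtests.

    # Illustrative sketch only -- not part of this diff.
    def test_nesting_demo(subtests):
        # Nested subtests are not treated specially: each context manager catches
        # the failures raised directly inside its own block.
        with subtests.test("outer"):
            with subtests.test("inner"):
                assert False, "inner failed"  # reported as [inner] SUBFAILED
            assert False, "outer failed"  # reported as [outer] SUBFAILED
        # Running this file reports two SUBFAILED entries plus the failed
        # top-level test, i.e. "3 failed" in the summary.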