pytest-subtests-0.14.2/.github/dependabot.yml
version: 2 updates: - package-ecosystem: github-actions directory: / schedule: interval: weekly time: "03:00" open-pull-requests-limit: 10
pytest-subtests-0.14.2/.github/workflows/deploy.yml
name: deploy on: workflow_dispatch: inputs: version: description: 'Release version' required: true default: '1.2.3' jobs: package: runs-on: ubuntu-latest env: SETUPTOOLS_SCM_PRETEND_VERSION: ${{ github.event.inputs.version }} steps: - uses: actions/checkout@v4 - name: Build and Check Package uses: hynek/build-and-inspect-python-package@v2.12 deploy: needs: package runs-on: ubuntu-latest environment: deploy permissions: id-token: write # For PyPI trusted publishers. contents: write # For tag. steps: - uses: actions/checkout@v4 - name: Download Package uses: actions/download-artifact@v4 with: name: Packages path: dist - name: Publish package to PyPI uses: pypa/gh-action-pypi-publish@v1.12.4 with: attestations: true - name: GitHub Release env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | gh release create v${{ github.event.inputs.version }} --target=${{ github.ref_name }} --title v${{ github.event.inputs.version }} gh pr merge ${{ github.ref_name }} --merge
pytest-subtests-0.14.2/.github/workflows/test.yml
name: test on: push: branches: - main - "test-me-*" pull_request: branches: - "*" # Cancel running jobs for the same workflow and branch.
concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: package: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Build and Check Package uses: hynek/build-and-inspect-python-package@v2.12 test: needs: [package] runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: ["ubuntu-latest", "windows-latest"] python: ["3.9", "3.10", "3.11", "3.12", "3.13"] tox_env: ["py"] include: - os: "ubuntu-latest" python: "3.9" tox_env: "pytest7-py" steps: - uses: actions/checkout@v4 - name: Download Package uses: actions/download-artifact@v4 with: name: Packages path: dist - name: Set up Python uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Install tox run: | python -m pip install --upgrade pip python -m pip install --upgrade tox - name: Test shell: bash run: | tox run -e ${{ matrix.tox_env }} --installpkg `find dist/*.tar.gz` pytest-subtests-0.14.2/.gitignore000066400000000000000000000023061502300060600167610ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ # PyCharm. .idea/ pytest-subtests-0.14.2/.pre-commit-config.yaml000066400000000000000000000016271502300060600212570ustar00rootroot00000000000000repos: - repo: https://github.com/psf/black rev: 25.1.0 hooks: - id: black args: [--safe, --quiet] - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml - id: debug-statements - repo: https://github.com/asottile/reorder-python-imports rev: v3.15.0 hooks: - id: reorder-python-imports - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.16.0 hooks: - id: mypy files: ^(src|tests) args: [] additional_dependencies: [attrs>=19.2.0, pytest>=7, typing-extensions] - repo: local hooks: - id: rst name: rst entry: rst-lint --encoding utf-8 files: ^(CHANGELOG.rst|RELEASING.rst|README.rst)$ language: python additional_dependencies: [pygments, restructuredtext_lint] pytest-subtests-0.14.2/CHANGELOG.rst000066400000000000000000000107741502300060600170220ustar00rootroot00000000000000CHANGELOG ========= 0.14.2 ------ *2025-06-13* * Print output "dots" for successful unittest subtests (`#164`_). * Improved reporting in case subtests raise `pytest.xfail` (`#194`_). .. _#164: https://github.com/pytest-dev/pytest-subtests/issues/164 .. 
_#194: https://github.com/pytest-dev/pytest-subtests/pull/194 0.14.1 ------ *2024-12-09* * Fix ``self.instance._outcome`` is ``None`` case in #173 (`#174`_). .. _#174: https://github.com/pytest-dev/pytest-subtests/pull/174 0.14.0 ------ *2024-12-07* * Add support for Python 3.13. * Dropped support for EOL Python 3.8. * Fixed output when using ``TestCase.skipTest`` (`#169`_). * Fixed ``pytest`` requirement to ``>=7.3`` (`#159`_). .. _#159: https://github.com/pytest-dev/pytest-subtests/issues/159 .. _#169: https://github.com/pytest-dev/pytest-subtests/pull/169 0.13.1 ------ *2024-07-16* * Fixed bug were an extra test would execute when ``-x/--exitfirst`` was used (`#139`_). .. _#139: https://github.com/pytest-dev/pytest-subtests/pull/139 0.13.0 ------ *2024-07-07* * Dropped support for EOL Python 3.7. * Added support for ``-x/--exitfirst`` (`#134`_). * Hide the traceback inside the ``SubTests.test()`` method (`#131`_). .. _#131: https://github.com/pytest-dev/pytest-subtests/pull/131 .. _#134: https://github.com/pytest-dev/pytest-subtests/pull/134 0.12.1 ------ *2024-03-07* * Fixed compatibility with upcoming pytest ``8.1.x``. (`#125`_). .. _#125: https://github.com/pytest-dev/pytest-subtests/issues/125 0.12.0 ------ *2024-03-06* * Python 3.12 is now officially supported (`#113`_). * Added typing support (`#115`_). * ``SubTests`` can be imported from ``pytest_subtests`` to type-annotate the ``subtests`` fixture. .. _#113: https://github.com/pytest-dev/pytest-subtests/pull/113 .. _#115: https://github.com/pytest-dev/pytest-subtests/pull/115 0.11.0 ------ *2023-05-15* * Logging is displayed for failing subtests (`#92`_) * Passing subtests no longer turn the pytest output to yellow (as if warnings have been issued) (`#86`_). Thanks to `Andrew-Brock`_ for providing the solution. * Now the ``msg`` contents of a subtest is displayed when running pytest with ``-v`` (`#6`_). .. _#6: https://github.com/pytest-dev/pytest-subtests/issues/6 .. _#86: https://github.com/pytest-dev/pytest-subtests/issues/86 .. _#92: https://github.com/pytest-dev/pytest-subtests/issues/87 .. _`Andrew-Brock`: https://github.com/Andrew-Brock 0.10.0 ------ *2022-02-15* * Added experimental support for suppressing subtest output dots in non-verbose mode with ``--no-subtests-shortletter`` -- this allows the native pytest column calculations to not be disrupted and minimizes unneeded output for large CI systems. 0.9.0 ----- *2022-10-28* * Python 3.11 is officially supported. * Dropped support for Python 3.6. 0.8.0 ----- *2022-05-26* * Now passing subtests are shown in the test run summary at the end (for example: ``10 failed, 1 passed, 10 subtests passed in 0.10s``) (`#70`_). .. _#70: https://github.com/pytest-dev/pytest-subtests/pull/70 0.7.0 ----- *2022-02-13* * Fixed support for pytest 7.0, and ``pytest>=7.0`` is now required. 0.6.0 ----- *2022-01-15* * ``pytest>=6.0`` is now required. * Added official support for Python 3.10. * Dropped support for Python 3.5. * Users no longer need to configure a warnings filter for the internal ``A private pytest class or function was used`` pytest warning (`#52`_). * **Experimental**: Use ``SUBPASS`` and ``,`` for passed subtests instead of general ``PASSED``, ``SUBFAIL`` and ``u`` for failed ones instead of ``FAILED`` (`#30`_). .. _#30: https://github.com/pytest-dev/pytest-subtests/pull/30 .. _#52: https://github.com/pytest-dev/pytest-subtests/pull/52 0.5.0 ----- *2021-05-29* * Added support for ``pytest.mark.xfail`` (`#40`_). .. 
_#40: https://github.com/pytest-dev/pytest-subtests/pull/40 0.4.0 ----- *2020-12-13* * Added support for ``--pdb`` (`#22`_). .. _#22: https://github.com/pytest-dev/pytest-subtests/issues/22 0.3.2 ----- *2020-08-01* * Fixed pytest 6.0 support. 0.3.1 ----- *2020-05-20* * Fixed pytest 5.4 support. 0.3.0 ----- *2020-01-22* * Dropped support for Python 3.4. * ``subtests`` now correctly captures and displays stdout/stderr (`#18`_). .. _#18: https://github.com/pytest-dev/pytest-subtests/issues/18 0.2.1 ----- *2019-04-04* * Fixed verbose output reporting on Linux (`#7`_). .. _#7: https://github.com/pytest-dev/pytest-subtests/issues/7 0.2.0 ----- *2019-04-03* * Subtests are correctly reported with ``pytest-xdist>=1.28``. 0.1.0 ----- *2019-04-01* * First release to PyPI. pytest-subtests-0.14.2/LICENSE000066400000000000000000000020721502300060600157760ustar00rootroot00000000000000 The MIT License (MIT) Copyright (c) 2019 Bruno Oliveira Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. pytest-subtests-0.14.2/README.rst000066400000000000000000000114731502300060600164650ustar00rootroot00000000000000=============== pytest-subtests =============== unittest ``subTest()`` support and ``subtests`` fixture. .. image:: https://img.shields.io/pypi/v/pytest-subtests.svg :target: https://pypi.org/project/pytest-subtests :alt: PyPI version .. image:: https://img.shields.io/conda/vn/conda-forge/pytest-subtests.svg :target: https://anaconda.org/conda-forge/pytest-subtests .. image:: https://img.shields.io/pypi/pyversions/pytest-subtests.svg :target: https://pypi.org/project/pytest-subtests :alt: Python versions .. image:: https://github.com/pytest-dev/pytest-subtests/workflows/test/badge.svg :target: https://github.com/pytest-dev/pytest-subtests/actions .. image:: https://img.shields.io/badge/code%20style-black-000000.svg :target: https://github.com/ambv/black ---- This `pytest`_ plugin was generated with `Cookiecutter`_ along with `@hackebrot`_'s `cookiecutter-pytest-plugin`_ template. Features -------- * Adds support for `TestCase.subTest `__. * New ``subtests`` fixture, providing similar functionality for pure pytest tests. Installation ------------ You can install ``pytest-subtests`` via `pip`_ from `PyPI`_:: $ pip install pytest-subtests Usage ----- unittest subTest() example ^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: python import unittest class T(unittest.TestCase): def test_foo(self): for i in range(5): with self.subTest("custom message", i=i): self.assertEqual(i % 2, 0) if __name__ == "__main__": unittest.main() **Output** .. 
code-block:: λ pytest .tmp\test-unit-subtest.py ======================== test session starts ======================== ... collected 1 item .tmp\test-unit-subtest.py FF. [100%] ============================= FAILURES ============================== _________________ T.test_foo [custom message] (i=1) _________________ self = def test_foo(self): for i in range(5): with self.subTest('custom message', i=i): > self.assertEqual(i % 2, 0) E AssertionError: 1 != 0 .tmp\test-unit-subtest.py:9: AssertionError _________________ T.test_foo [custom message] (i=3) _________________ self = def test_foo(self): for i in range(5): with self.subTest('custom message', i=i): > self.assertEqual(i % 2, 0) E AssertionError: 1 != 0 .tmp\test-unit-subtest.py:9: AssertionError ================ 2 failed, 1 passed in 0.07 seconds ================= ``subtests`` fixture example ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: python def test(subtests): for i in range(5): with subtests.test(msg="custom message", i=i): assert i % 2 == 0 **Output** .. code-block:: λ pytest .tmp\test-subtest.py ======================== test session starts ======================== ... collected 1 item .tmp\test-subtest.py .F.F.. [100%] ============================= FAILURES ============================== ____________________ test [custom message] (i=1) ____________________ def test(subtests): for i in range(5): with subtests.test(msg='custom message', i=i): > assert i % 2 == 0 E assert (1 % 2) == 0 .tmp\test-subtest.py:4: AssertionError ____________________ test [custom message] (i=3) ____________________ def test(subtests): for i in range(5): with subtests.test(msg='custom message', i=i): > assert i % 2 == 0 E assert (3 % 2) == 0 .tmp\test-subtest.py:4: AssertionError ================ 2 failed, 1 passed in 0.07 seconds ================= Contributing ------------ Contributions are very welcome. Tests can be run with `tox`_. License ------- Distributed under the terms of the `MIT`_ license, "pytest-subtests" is free and open source software. Issues ------ If you encounter any problems, please `file an issue`_ along with a detailed description. .. _`Cookiecutter`: https://github.com/audreyr/cookiecutter .. _`@hackebrot`: https://github.com/hackebrot .. _`MIT`: http://opensource.org/licenses/MIT .. _`cookiecutter-pytest-plugin`: https://github.com/pytest-dev/cookiecutter-pytest-plugin .. _`file an issue`: https://github.com/pytest-dev/pytest-subtests/issues .. _`pytest`: https://github.com/pytest-dev/pytest .. _`tox`: https://tox.readthedocs.io/en/latest/ .. _`pip`: https://pypi.org/project/pip/ .. _`PyPI`: https://pypi.org/project/pytest-subtests/
pytest-subtests-0.14.2/RELEASING.rst
========================= Releasing pytest-subtests ========================= This document describes the steps to make a new ``pytest-subtests`` release. Version ------- ``main`` should always be green and a potential release candidate. ``pytest-subtests`` follows semantic versioning, so given that the current version is ``X.Y.Z``, to find the next version number one needs to look at the ``CHANGELOG.rst`` file: - If there are any new features, then we must make a new **minor** release: the next release will be ``X.Y+1.0``. - Otherwise it is just a **bug fix** release: ``X.Y.Z+1``. Steps ----- To publish a new release ``X.Y.Z``, the steps are as follows: #. Create a new branch named ``release-X.Y.Z`` from the latest ``main``. #. Update the ``CHANGELOG.rst`` file with the new release information. #.
Commit and push the branch to ``upstream`` and open a PR. #. Once the PR is **green** and **approved**, start the ``deploy`` workflow: .. code-block:: console gh workflow run deploy.yml -R pytest-dev/pytest-subtests --ref release-VERSION --field version=VERSION The PR will be automatically merged.
pytest-subtests-0.14.2/pyproject.toml
[build-system] requires = [ "setuptools", "setuptools-scm[toml]>=6.2.3", ] build-backend = "setuptools.build_meta" [tool.mypy] disallow_untyped_defs = true warn_unreachable = true warn_unused_configs = true warn_unused_ignores = true [tool.setuptools_scm]
pytest-subtests-0.14.2/pytest.ini
[pytest] addopts = -ra testpaths = tests
pytest-subtests-0.14.2/setup.cfg
[metadata] name = pytest-subtests description = unittest subTest() support and subtests fixture long_description = file: README.rst long_description_content_type = text/x-rst url = https://github.com/pytest-dev/pytest-subtests author = Bruno Oliveira license = MIT license_file = LICENSE classifiers = Development Status :: 4 - Beta Framework :: Pytest Intended Audience :: Developers Topic :: Software Development :: Testing Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 Programming Language :: Python :: 3.13 Programming Language :: Python :: Implementation :: CPython Operating System :: OS Independent License :: OSI Approved :: MIT License Typing :: Typed keywords = test, unittest, pytest [options] install_requires = attrs>=19.2.0 pytest>=7.4 python_requires = >=3.9 packages = find: package_dir = = src setup_requires = setuptools setuptools-scm>=6.0 [options.packages.find] where = src [options.entry_points] pytest11 = subtests = pytest_subtests.plugin [options.package_data] pytest_subtests = py.typed
pytest-subtests-0.14.2/src/pytest_subtests/__init__.py
from .plugin import SubTests __all__ = ["SubTests"]
pytest-subtests-0.14.2/src/pytest_subtests/plugin.py
from __future__ import annotations import sys import time from contextlib import contextmanager from contextlib import ExitStack from contextlib import nullcontext from typing import Any from typing import Callable from typing import ContextManager from typing import Generator from typing import Iterator from typing import Mapping from typing import TYPE_CHECKING from unittest import TestCase import attr import pluggy import pytest from _pytest._code import ExceptionInfo from _pytest.capture import CaptureFixture from _pytest.capture import FDCapture from _pytest.capture import SysCapture from _pytest.fixtures import SubRequest from _pytest.logging import catching_logs from _pytest.logging import LogCaptureHandler from _pytest.outcomes import OutcomeException from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.runner import
check_interactive_exception from _pytest.unittest import TestCaseFunction if TYPE_CHECKING: from types import TracebackType from typing import Literal def pytest_addoption(parser: pytest.Parser) -> None: group = parser.getgroup("subtests") group.addoption( "--no-subtests-shortletter", action="store_true", dest="no_subtests_shortletter", default=False, help="Disables subtest output 'dots' in non-verbose mode (EXPERIMENTAL)", ) @attr.s class SubTestContext: msg: str | None = attr.ib() kwargs: dict[str, Any] = attr.ib() @attr.s(init=False) class SubTestReport(TestReport): # type: ignore[misc] context: SubTestContext = attr.ib() @property def head_line(self) -> str: _, _, domain = self.location return f"{domain} {self.sub_test_description()}" def sub_test_description(self) -> str: parts = [] if isinstance(self.context.msg, str): parts.append(f"[{self.context.msg}]") if self.context.kwargs: params_desc = ", ".join( f"{k}={v!r}" for (k, v) in sorted(self.context.kwargs.items()) ) parts.append(f"({params_desc})") return " ".join(parts) or "()" def _to_json(self) -> dict: data = super()._to_json() del data["context"] data["_report_type"] = "SubTestReport" data["_subtest.context"] = attr.asdict(self.context) return data @classmethod def _from_json(cls, reportdict: dict[str, Any]) -> SubTestReport: report = super()._from_json(reportdict) context_data = reportdict["_subtest.context"] report.context = SubTestContext( msg=context_data["msg"], kwargs=context_data["kwargs"] ) return report @classmethod def _from_test_report(cls, test_report: TestReport) -> SubTestReport: return super()._from_json(test_report._to_json()) def _addSkip(self: TestCaseFunction, testcase: TestCase, reason: str) -> None: from unittest.case import _SubTest # type: ignore[attr-defined] if isinstance(testcase, _SubTest): self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] if self._excinfo is not None: exc_info = self._excinfo[-1] self.addSubTest(testcase.test_case, testcase, exc_info) # type: ignore[attr-defined] else: # For python < 3.11: the non-subtest skips have to be added by `_originaladdSkip` only after all subtest # failures are processed by `_addSubTest`. (`self.instance._outcome` has no attribute `skipped/errors` anymore.) # For python < 3.11, we also need to check if `self.instance._outcome` is `None` (this happens if the test # class/method is decorated with `unittest.skip`, see #173). 
if sys.version_info < (3, 11) and self.instance._outcome is not None: subtest_errors = [ x for x, y in self.instance._outcome.errors if isinstance(x, _SubTest) and y is not None ] if len(subtest_errors) == 0: self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] else: self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] def _addSubTest( self: TestCaseFunction, test_case: Any, test: TestCase, exc_info: tuple[type[BaseException], BaseException, TracebackType] | None, ) -> None: msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] call_info = make_call_info( ExceptionInfo(exc_info, _ispytest=True) if exc_info else None, start=0, stop=0, duration=0, when="call", ) report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) sub_report = SubTestReport._from_test_report(report) sub_report.context = SubTestContext(msg, dict(test.params)) # type: ignore[attr-defined] self.ihook.pytest_runtest_logreport(report=sub_report) if check_interactive_exception(call_info, sub_report): self.ihook.pytest_exception_interact( node=self, call=call_info, report=sub_report ) # For python < 3.11: add non-subtest skips once all subtest failures are processed by # `_addSubTest`. if sys.version_info < (3, 11): from unittest.case import _SubTest # type: ignore[attr-defined] non_subtest_skip = [ (x, y) for x, y in self.instance._outcome.skipped if not isinstance(x, _SubTest) ] subtest_errors = [ (x, y) for x, y in self.instance._outcome.errors if isinstance(x, _SubTest) and y is not None ] # Check if we have non-subtest skips: if there are also sub failures, non-subtest skips are not treated in # `_addSubTest` and have to be added using `_originaladdSkip` after all subtest failures are processed. if len(non_subtest_skip) > 0 and len(subtest_errors) > 0: # Make sure we have processed the last subtest failure last_subset_error = subtest_errors[-1] if exc_info is last_subset_error[-1]: # Add non-subtest skips (as they could not be treated in `_addSkip`) for testcase, reason in non_subtest_skip: self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] def pytest_configure(config: pytest.Config) -> None: TestCaseFunction.addSubTest = _addSubTest # type: ignore[attr-defined] TestCaseFunction.failfast = False # type: ignore[attr-defined] # This condition is to prevent `TestCaseFunction._originaladdSkip` being assigned again in a subprocess from a # parent python process where `addSkip` is already `_addSkip`. A such case is when running tests in # `test_subtests.py` where `pytester.runpytest` is used. Without this guard condition, `_originaladdSkip` is # assigned to `_addSkip` which is wrong as well as causing an infinite recursion in some cases. if not hasattr(TestCaseFunction, "_originaladdSkip"): TestCaseFunction._originaladdSkip = TestCaseFunction.addSkip # type: ignore[attr-defined] TestCaseFunction.addSkip = _addSkip # type: ignore[method-assign] # Hack (#86): the terminal does not know about the "subtests" # status, so it will by default turn the output to yellow. # This forcibly adds the new 'subtests' status. import _pytest.terminal new_types = tuple( f"subtests {outcome}" for outcome in ("passed", "failed", "skipped") ) # We need to check if we are not re-adding because we run our own tests # with pytester in-process mode, so this will be called multiple times. 
if new_types[0] not in _pytest.terminal.KNOWN_TYPES: _pytest.terminal.KNOWN_TYPES = _pytest.terminal.KNOWN_TYPES + new_types # type: ignore[assignment] _pytest.terminal._color_for_type.update( { f"subtests {outcome}": _pytest.terminal._color_for_type[outcome] for outcome in ("passed", "failed", "skipped") if outcome in _pytest.terminal._color_for_type } ) def pytest_unconfigure() -> None: if hasattr(TestCaseFunction, "addSubTest"): del TestCaseFunction.addSubTest if hasattr(TestCaseFunction, "failfast"): del TestCaseFunction.failfast if hasattr(TestCaseFunction, "_originaladdSkip"): TestCaseFunction.addSkip = TestCaseFunction._originaladdSkip # type: ignore[method-assign] del TestCaseFunction._originaladdSkip @pytest.fixture def subtests(request: SubRequest) -> Generator[SubTests, None, None]: capmam = request.node.config.pluginmanager.get_plugin("capturemanager") if capmam is not None: suspend_capture_ctx = capmam.global_and_fixture_disabled else: suspend_capture_ctx = nullcontext yield SubTests(request.node.ihook, suspend_capture_ctx, request) @attr.s class SubTests: ihook: pluggy.HookRelay = attr.ib() suspend_capture_ctx: Callable[[], ContextManager] = attr.ib() request: SubRequest = attr.ib() @property def item(self) -> pytest.Item: return self.request.node def test( self, msg: str | None = None, **kwargs: Any, ) -> _SubTestContextManager: """ Context manager for subtests, capturing exceptions raised inside the subtest scope and handling them through the pytest machinery. Usage: .. code-block:: python with subtests.test(msg="subtest"): assert 1 == 1 """ return _SubTestContextManager( self.ihook, msg, kwargs, request=self.request, suspend_capture_ctx=self.suspend_capture_ctx, ) @attr.s(auto_attribs=True) class _SubTestContextManager: """ Context manager for subtests, capturing exceptions raised inside the subtest scope and handling them through the pytest machinery. Note: initially this logic was implemented directly in SubTests.test() as a @contextmanager, however it is not possible to control the output fully when exiting from it due to an exception when in --exitfirst mode, so this was refactored into an explicit context manager class (#134). 
""" ihook: pluggy.HookRelay msg: str | None kwargs: dict[str, Any] suspend_capture_ctx: Callable[[], ContextManager] request: SubRequest def __enter__(self) -> None: __tracebackhide__ = True self._start = time.time() self._precise_start = time.perf_counter() self._exc_info = None self._exit_stack = ExitStack() self._captured_output = self._exit_stack.enter_context( capturing_output(self.request) ) self._captured_logs = self._exit_stack.enter_context( capturing_logs(self.request) ) def __exit__( self, exc_type: type[Exception] | None, exc_val: Exception | None, exc_tb: TracebackType | None, ) -> bool: __tracebackhide__ = True try: if exc_val is not None: exc_info = ExceptionInfo.from_exception(exc_val) else: exc_info = None finally: self._exit_stack.close() precise_stop = time.perf_counter() duration = precise_stop - self._precise_start stop = time.time() call_info = make_call_info( exc_info, start=self._start, stop=stop, duration=duration, when="call" ) report = self.ihook.pytest_runtest_makereport( item=self.request.node, call=call_info ) sub_report = SubTestReport._from_test_report(report) sub_report.context = SubTestContext(self.msg, self.kwargs.copy()) self._captured_output.update_report(sub_report) self._captured_logs.update_report(sub_report) with self.suspend_capture_ctx(): self.ihook.pytest_runtest_logreport(report=sub_report) if check_interactive_exception(call_info, sub_report): self.ihook.pytest_exception_interact( node=self.request.node, call=call_info, report=sub_report ) if exc_val is not None: if self.request.session.shouldfail: return False return True def make_call_info( exc_info: ExceptionInfo[BaseException] | None, *, start: float, stop: float, duration: float, when: Literal["collect", "setup", "call", "teardown"], ) -> CallInfo: return CallInfo( None, exc_info, start=start, stop=stop, duration=duration, when=when, _ispytest=True, ) @contextmanager def capturing_output(request: SubRequest) -> Iterator[Captured]: option = request.config.getoption("capture", None) # capsys or capfd are active, subtest should not capture. 
capman = request.config.pluginmanager.getplugin("capturemanager") capture_fixture_active = getattr(capman, "_capture_fixture", None) if option == "sys" and not capture_fixture_active: with ignore_pytest_private_warning(): fixture = CaptureFixture(SysCapture, request) elif option == "fd" and not capture_fixture_active: with ignore_pytest_private_warning(): fixture = CaptureFixture(FDCapture, request) else: fixture = None if fixture is not None: fixture._start() captured = Captured() try: yield captured finally: if fixture is not None: out, err = fixture.readouterr() fixture.close() captured.out = out captured.err = err @contextmanager def capturing_logs( request: SubRequest, ) -> Iterator[CapturedLogs | NullCapturedLogs]: logging_plugin = request.config.pluginmanager.getplugin("logging-plugin") if logging_plugin is None: yield NullCapturedLogs() else: handler = LogCaptureHandler() handler.setFormatter(logging_plugin.formatter) captured_logs = CapturedLogs(handler) with catching_logs(handler): yield captured_logs @contextmanager def ignore_pytest_private_warning() -> Generator[None, None, None]: import warnings with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "A private pytest class or function was used.", category=pytest.PytestDeprecationWarning, ) yield @attr.s class Captured: out = attr.ib(default="", type=str) err = attr.ib(default="", type=str) def update_report(self, report: pytest.TestReport) -> None: if self.out: report.sections.append(("Captured stdout call", self.out)) if self.err: report.sections.append(("Captured stderr call", self.err)) class CapturedLogs: def __init__(self, handler: LogCaptureHandler) -> None: self._handler = handler def update_report(self, report: pytest.TestReport) -> None: report.sections.append(("Captured log call", self._handler.stream.getvalue())) class NullCapturedLogs: def update_report(self, report: pytest.TestReport) -> None: pass def pytest_report_to_serializable(report: pytest.TestReport) -> dict[str, Any] | None: if isinstance(report, SubTestReport): return report._to_json() return None def pytest_report_from_serializable(data: dict[str, Any]) -> SubTestReport | None: if data.get("_report_type") == "SubTestReport": return SubTestReport._from_json(data) return None @pytest.hookimpl(tryfirst=True) def pytest_report_teststatus( report: pytest.TestReport, config: pytest.Config, ) -> tuple[str, str, str | Mapping[str, bool]] | None: if report.when != "call" or not isinstance(report, SubTestReport): return None outcome = report.outcome description = report.sub_test_description() if hasattr(report, "wasxfail"): if outcome == "skipped": category = "xfailed" short = "y" # x letter is used for regular xfail, y for subtest xfail status = "SUBXFAIL" elif outcome == "passed": category = "xpassed" short = "Y" # X letter is used for regular xpass, Y for subtest xpass status = "SUBXPASS" else: # This should not normally happen, unless some plugin is setting wasxfail without # the correct outcome. Pytest expects the call outcome to be either skipped or passed in case of xfail. # Let's pass this report to the next hook. 
return None short = "" if config.option.no_subtests_shortletter else short return f"subtests {category}", short, f"{description} {status}" elif report.passed: short = "" if config.option.no_subtests_shortletter else "," return f"subtests {outcome}", short, f"{description} SUBPASS" elif report.skipped: short = "" if config.option.no_subtests_shortletter else "-" return outcome, short, f"{description} SUBSKIP" elif outcome == "failed": short = "" if config.option.no_subtests_shortletter else "u" return outcome, short, f"{description} SUBFAIL" return None pytest-subtests-0.14.2/src/pytest_subtests/py.typed000066400000000000000000000000001502300060600225300ustar00rootroot00000000000000pytest-subtests-0.14.2/tests/000077500000000000000000000000001502300060600161325ustar00rootroot00000000000000pytest-subtests-0.14.2/tests/conftest.py000066400000000000000000000000341502300060600203260ustar00rootroot00000000000000pytest_plugins = "pytester" pytest-subtests-0.14.2/tests/test_subtests.py000066400000000000000000000712271502300060600214300ustar00rootroot00000000000000from __future__ import annotations import sys from pathlib import Path from typing import Literal import pytest IS_PY311 = sys.version_info[:2] >= (3, 11) @pytest.mark.parametrize("mode", ["normal", "xdist"]) class TestFixture: """ Tests for ``subtests`` fixture. """ @pytest.fixture def simple_script(self, pytester: pytest.Pytester) -> None: pytester.makepyfile( """ def test_foo(subtests): for i in range(5): with subtests.test(msg="custom", i=i): assert i % 2 == 0 """ ) def test_simple_terminal_normal( self, simple_script: None, pytester: pytest.Pytester, mode: Literal["normal", "xdist"], ) -> None: if mode == "normal": result = pytester.runpytest() expected_lines = ["collected 1 item"] else: assert mode == "xdist" pytest.importorskip("xdist") result = pytester.runpytest("-n1") expected_lines = ["1 worker [1 item]"] expected_lines += [ "* test_foo [[]custom[]] (i=1) *", "* test_foo [[]custom[]] (i=3) *", "* 2 failed, 1 passed, 3 subtests passed in *", ] result.stdout.fnmatch_lines(expected_lines) def test_simple_terminal_verbose( self, simple_script: None, pytester: pytest.Pytester, mode: Literal["normal", "xdist"], ) -> None: if mode == "normal": result = pytester.runpytest("-v") expected_lines = [ "*collected 1 item", "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=0) SUBPASS *100%*", "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=1) SUBFAIL *100%*", "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=2) SUBPASS *100%*", "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=3) SUBFAIL *100%*", "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=4) SUBPASS *100%*", "test_simple_terminal_verbose.py::test_foo PASSED *100%*", ] else: assert mode == "xdist" pytest.importorskip("xdist") result = pytester.runpytest("-n1", "-v") expected_lines = [ "1 worker [1 item]", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", ] expected_lines += [ "* test_foo [[]custom[]] (i=1) *", "* test_foo [[]custom[]] (i=3) *", "* 2 failed, 1 passed, 3 subtests passed in *", ] result.stdout.fnmatch_lines(expected_lines) def test_skip( self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] ) -> None: pytester.makepyfile( """ 
import pytest def test_foo(subtests): for i in range(5): with subtests.test(msg="custom", i=i): if i % 2 == 0: pytest.skip('even number') """ ) if mode == "normal": result = pytester.runpytest() expected_lines = ["collected 1 item"] else: assert mode == "xdist" pytest.importorskip("xdist") result = pytester.runpytest("-n1") expected_lines = ["1 worker [1 item]"] expected_lines += ["* 1 passed, 3 skipped, 2 subtests passed in *"] result.stdout.fnmatch_lines(expected_lines) def test_xfail( self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] ) -> None: pytester.makepyfile( """ import pytest def test_foo(subtests): for i in range(5): with subtests.test(msg="custom", i=i): if i % 2 == 0: pytest.xfail('even number') """ ) if mode == "normal": result = pytester.runpytest() expected_lines = ["collected 1 item"] else: assert mode == "xdist" pytest.importorskip("xdist") result = pytester.runpytest("-n1") expected_lines = ["1 worker [1 item]"] expected_lines += ["* 1 passed, 2 subtests passed, 3 subtests xfailed in *"] result.stdout.fnmatch_lines(expected_lines) def test_typing_exported( self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] ) -> None: pytester.makepyfile( """ from pytest_subtests import SubTests def test_typing_exported(subtests: SubTests) -> None: assert isinstance(subtests, SubTests) """ ) if mode == "normal": result = pytester.runpytest() expected_lines = ["collected 1 item"] else: assert mode == "xdist" pytest.importorskip("xdist") result = pytester.runpytest("-n1") expected_lines = ["1 worker [1 item]"] expected_lines += ["* 1 passed *"] result.stdout.fnmatch_lines(expected_lines) class TestSubTest: """ Test Test.subTest functionality. """ @pytest.fixture def simple_script(self, pytester: pytest.Pytester) -> Path: return pytester.makepyfile( """ from unittest import TestCase, main class T(TestCase): def test_foo(self): for i in range(5): with self.subTest(msg="custom", i=i): self.assertEqual(i % 2, 0) if __name__ == '__main__': main() """ ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) def test_simple_terminal_normal( self, simple_script: Path, pytester: pytest.Pytester, runner: Literal["unittest", "pytest-normal", "pytest-xdist"], ) -> None: suffix = ".test_foo" if IS_PY311 else "" if runner == "unittest": result = pytester.run(sys.executable, simple_script) result.stderr.fnmatch_lines( [ f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)", "AssertionError: 1 != 0", f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)", "AssertionError: 1 != 0", "Ran 1 test in *", "FAILED (failures=2)", ] ) else: if runner == "pytest-normal": result = pytester.runpytest(simple_script) expected_lines = ["collected 1 item"] else: assert runner == "pytest-xdist" pytest.importorskip("xdist") result = pytester.runpytest(simple_script, "-n1") expected_lines = ["1 worker [1 item]"] result.stdout.fnmatch_lines( expected_lines + [ "* T.test_foo [[]custom[]] (i=1) *", "E * AssertionError: 1 != 0", "* T.test_foo [[]custom[]] (i=3) *", "E * AssertionError: 1 != 0", "* 2 failed, 1 passed, 3 subtests passed in *", ] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) def test_simple_terminal_verbose( self, simple_script: Path, pytester: pytest.Pytester, runner: Literal["unittest", "pytest-normal", "pytest-xdist"], ) -> None: suffix = ".test_foo" if IS_PY311 else "" if runner == "unittest": result = pytester.run(sys.executable, simple_script, "-v") result.stderr.fnmatch_lines( [ f"test_foo (__main__.T{suffix}) ... 
", f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)", "AssertionError: 1 != 0", f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)", "AssertionError: 1 != 0", "Ran 1 test in *", "FAILED (failures=2)", ] ) else: if runner == "pytest-normal": result = pytester.runpytest(simple_script, "-v") expected_lines = [ "*collected 1 item", "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=1) SUBFAIL *100%*", "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=3) SUBFAIL *100%*", "test_simple_terminal_verbose.py::T::test_foo PASSED *100%*", ] else: assert runner == "pytest-xdist" pytest.importorskip("xdist") result = pytester.runpytest(simple_script, "-n1", "-v") expected_lines = [ "1 worker [1 item]", "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", "*gw0*100%* PASSED test_simple_terminal_verbose.py::T::test_foo*", ] result.stdout.fnmatch_lines( expected_lines + [ "* T.test_foo [[]custom[]] (i=1) *", "E * AssertionError: 1 != 0", "* T.test_foo [[]custom[]] (i=3) *", "E * AssertionError: 1 != 0", "* 2 failed, 1 passed, 3 subtests passed in *", ] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) def test_skip( self, pytester: pytest.Pytester, runner: Literal["unittest", "pytest-normal", "pytest-xdist"], ) -> None: p = pytester.makepyfile( """ from unittest import TestCase, main class T(TestCase): def test_foo(self): for i in range(5): with self.subTest(msg="custom", i=i): if i % 2 == 0: self.skipTest('even number') if __name__ == '__main__': main() """ ) if runner == "unittest": result = pytester.runpython(p) result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (skipped=3)"]) else: pytest.xfail("Not producing the expected results (#5)") result = pytester.runpytest(p) # type:ignore[unreachable] result.stdout.fnmatch_lines( ["collected 1 item", "* 3 skipped, 1 passed in *"] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) @pytest.mark.xfail(reason="Not producing the expected results (#5)") def test_xfail( self, pytester: pytest.Pytester, runner: Literal["unittest", "pytest-normal", "pytest-xdist"], ) -> None: p = pytester.makepyfile( """ import pytest from unittest import expectedFailure, TestCase, main class T(TestCase): @expectedFailure def test_foo(self): for i in range(5): with self.subTest(msg="custom", i=i): if i % 2 == 0: raise pytest.xfail('even number') if __name__ == '__main__': main() """ ) if runner == "unittest": result = pytester.runpython(p) result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (expected failures=3)"]) else: result = pytester.runpytest(p) result.stdout.fnmatch_lines( ["collected 1 item", "* 3 xfailed, 1 passed in *"] ) @pytest.mark.parametrize("runner", ["pytest-normal"]) def test_only_original_skip_is_called( self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch, runner: Literal["pytest-normal"], ) -> None: """Regression test for #173.""" monkeypatch.setenv("COLUMNS", "200") p = pytester.makepyfile( """ import unittest from unittest import TestCase, main @unittest.skip("skip this test") class T(unittest.TestCase): def test_foo(self): assert 1 == 2 if __name__ == '__main__': main() """ ) result = pytester.runpytest(p, "-v", "-rsf") result.stdout.fnmatch_lines( ["SKIPPED [1] test_only_original_skip_is_called.py:6: skip this test"] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) def test_skip_with_failure( self, pytester: pytest.Pytester, monkeypatch: 
pytest.MonkeyPatch, runner: Literal["unittest", "pytest-normal", "pytest-xdist"], ) -> None: monkeypatch.setenv("COLUMNS", "200") p = pytester.makepyfile( """ import pytest from unittest import expectedFailure, TestCase, main class T(TestCase): def test_foo(self): for i in range(10): with self.subTest("custom message", i=i): if i < 4: self.skipTest(f"skip subtest i={i}") assert i < 4 if __name__ == '__main__': main() """ ) if runner == "unittest": result = pytester.runpython(p) if sys.version_info < (3, 11): result.stderr.re_match_lines( [ r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=4\).*", r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=9\).*", r"Ran 1 test in .*", r"FAILED \(failures=6, skipped=4\)", ] ) else: result.stderr.re_match_lines( [ r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=4\).*", r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=9\).*", r"Ran 1 test in .*", r"FAILED \(failures=6, skipped=4\)", ] ) elif runner == "pytest-normal": result = pytester.runpytest(p, "-v", "-rsf") result.stdout.re_match_lines( [ r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIP \(skip subtest i=0\) .*", r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIP \(skip subtest i=3\) .*", r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=9\) SUBFAIL .*", "test_skip_with_failure.py::T::test_foo PASSED .*", r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=0", r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=3", r"[custom message] (i=4) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 4 < 4", r"[custom message] (i=9) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 9 < 4", r".* 6 failed, 1 passed, 4 skipped in .*", ] ) else: pytest.xfail("Not producing the expected results (#5)") result = pytester.runpytest(p) # type:ignore[unreachable] result.stdout.fnmatch_lines( ["collected 1 item", "* 3 skipped, 1 passed in *"] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) def test_skip_with_failure_and_non_subskip( self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch, runner: Literal["unittest", "pytest-normal", "pytest-xdist"], ) -> None: monkeypatch.setenv("COLUMNS", "200") p = pytester.makepyfile( """ import pytest from unittest import expectedFailure, TestCase, main class T(TestCase): def test_foo(self): for i in range(10): with self.subTest("custom message", i=i): if i < 4: self.skipTest(f"skip subtest i={i}") assert i < 4 self.skipTest(f"skip the test") if __name__ == '__main__': main() """ ) if runner == "unittest": result = pytester.runpython(p) if sys.version_info < (3, 11): result.stderr.re_match_lines( [ r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=4\).*", r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=9\).*", r"Ran 1 test in .*", r"FAILED \(failures=6, skipped=5\)", ] ) else: result.stderr.re_match_lines( [ r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=4\).*", r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=9\).*", r"Ran 1 test in .*", r"FAILED \(failures=6, skipped=5\)", ] ) elif runner == "pytest-normal": result = pytester.runpytest(p, "-v", "-rsf") # The `(i=0)` is not correct but it's given by pytest `TerminalReporter` without `--no-fold-skipped` result.stdout.re_match_lines( [ 
r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\)", r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip subtest i=3", r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip the test", r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", r".* 6 failed, 5 skipped in .*", ] ) # Check with `--no-fold-skipped` (which gives the correct information). if sys.version_info >= (3, 10) and pytest.version_tuple[:2] >= (8, 3): result = pytester.runpytest(p, "-v", "--no-fold-skipped", "-rsf") result.stdout.re_match_lines( [ r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\).*", r"\[custom message\] \(i=3\) SUBSKIP test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip subtest i=3", r"SKIPPED test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip the test", r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", r".* 6 failed, 5 skipped in .*", ] ) else: pytest.xfail("Not producing the expected results (#5)") result = pytester.runpytest(p) # type:ignore[unreachable] result.stdout.fnmatch_lines( ["collected 1 item", "* 3 skipped, 1 passed in *"] ) class TestCapture: def create_file(self, pytester: pytest.Pytester) -> None: pytester.makepyfile( """ import sys def test(subtests): print() print('start test') with subtests.test(i='A'): print("hello stdout A") print("hello stderr A", file=sys.stderr) assert 0 with subtests.test(i='B'): print("hello stdout B") print("hello stderr B", file=sys.stderr) assert 0 print('end test') assert 0 """ ) def test_capturing(self, pytester: pytest.Pytester) -> None: self.create_file(pytester) result = pytester.runpytest() result.stdout.fnmatch_lines( [ "*__ test (i='A') __*", "*Captured stdout call*", "hello stdout A", "*Captured stderr call*", "hello stderr A", "*__ test (i='B') __*", "*Captured stdout call*", "hello stdout B", "*Captured stderr call*", "hello stderr B", "*__ test __*", "*Captured stdout call*", "start test", "end test", ] ) def test_no_capture(self, pytester: pytest.Pytester) -> None: self.create_file(pytester) result = pytester.runpytest("-s") result.stdout.fnmatch_lines( [ "start test", "hello stdout A", "uhello stdout B", "uend test", "*__ test (i='A') __*", "*__ test (i='B') __*", "*__ test __*", ] ) result.stderr.fnmatch_lines(["hello stderr A", "hello stderr B"]) @pytest.mark.parametrize("fixture", ["capsys", "capfd"]) def test_capture_with_fixture( self, pytester: pytest.Pytester, fixture: Literal["capsys", "capfd"] ) -> None: pytester.makepyfile( rf""" import sys def test(subtests, {fixture}): print('start test') with subtests.test(i='A'): print("hello stdout A") print("hello stderr A", file=sys.stderr) out, err = {fixture}.readouterr() assert out == 'start test\nhello stdout A\n' assert err == 'hello stderr A\n' """ ) result = pytester.runpytest() result.stdout.fnmatch_lines( [ "*1 passed*", ] ) class TestLogging: def create_file(self, pytester: pytest.Pytester) -> None: pytester.makepyfile( """ import logging def test_foo(subtests): logging.info("before") with subtests.test("sub1"): print("sub1 stdout") logging.info("sub1 logging") with subtests.test("sub2"): print("sub2 stdout") logging.info("sub2 logging") 
assert False """ ) def test_capturing(self, pytester: pytest.Pytester) -> None: self.create_file(pytester) result = pytester.runpytest("--log-level=INFO") result.stdout.fnmatch_lines( [ "*___ test_foo [[]sub2[]] __*", "*-- Captured stdout call --*", "sub2 stdout", "*-- Captured log call ---*", "INFO root:test_capturing.py:12 sub2 logging", "*== short test summary info ==*", ] ) def test_caplog(self, pytester: pytest.Pytester) -> None: pytester.makepyfile( """ import logging def test(subtests, caplog): caplog.set_level(logging.INFO) logging.info("start test") with subtests.test("sub1"): logging.info("inside %s", "subtest1") assert len(caplog.records) == 2 assert caplog.records[0].getMessage() == "start test" assert caplog.records[1].getMessage() == "inside subtest1" """ ) result = pytester.runpytest() result.stdout.fnmatch_lines( [ "*1 passed*", ] ) def test_no_logging(self, pytester: pytest.Pytester) -> None: pytester.makepyfile( """ import logging def test(subtests): logging.info("start log line") with subtests.test("sub passing"): logging.info("inside %s", "passing log line") with subtests.test("sub failing"): logging.info("inside %s", "failing log line") assert False logging.info("end log line") """ ) result = pytester.runpytest("-p no:logging") result.stdout.fnmatch_lines( [ "*1 passed*", ] ) result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*") class TestDebugging: """Check --pdb support for subtests fixture and TestCase.subTest.""" class _FakePdb: """ Fake debugger class implementation that tracks which methods were called on it. """ quitting: bool = False calls: list[str] = [] def __init__(self, *_: object, **__: object) -> None: self.calls.append("init") def reset(self) -> None: self.calls.append("reset") def interaction(self, *_: object) -> None: self.calls.append("interaction") @pytest.fixture(autouse=True) def cleanup_calls(self) -> None: self._FakePdb.calls.clear() def test_pdb_fixture( self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch ) -> None: pytester.makepyfile( """ def test(subtests): with subtests.test(): assert 0 """ ) self.runpytest_and_check_pdb(pytester, monkeypatch) def test_pdb_unittest( self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch ) -> None: pytester.makepyfile( """ from unittest import TestCase class Test(TestCase): def test(self): with self.subTest(): assert 0 """ ) self.runpytest_and_check_pdb(pytester, monkeypatch) def runpytest_and_check_pdb( self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch ) -> None: # Install the fake pdb implementation in pytest_subtests so we can reference # it in the command line (any module would do). import pytest_subtests monkeypatch.setattr(pytest_subtests, "_CustomPdb", self._FakePdb, raising=False) result = pytester.runpytest("--pdb", "--pdbcls=pytest_subtests:_CustomPdb") # Ensure pytest entered in debugging mode when encountering the failing # assert. result.stdout.fnmatch_lines("*entering PDB*") assert self._FakePdb.calls == ["init", "reset", "interaction"] def test_exitfirst(pytester: pytest.Pytester) -> None: """ Validate that when passing --exitfirst the test exits after the first failed subtest. 
""" pytester.makepyfile( """ def test_foo(subtests): with subtests.test("sub1"): assert False with subtests.test("sub2"): assert False """ ) result = pytester.runpytest("--exitfirst") assert result.parseoutcomes()["failed"] == 2 result.stdout.fnmatch_lines( [ "*[[]sub1[]] SUBFAIL test_exitfirst.py::test_foo - assert False*", "FAILED test_exitfirst.py::test_foo - assert False", "* stopping after 2 failures*", ], consecutive=True, ) result.stdout.no_fnmatch_line("*sub2*") # sub2 not executed. pytest-subtests-0.14.2/tox.ini000066400000000000000000000002401502300060600162770ustar00rootroot00000000000000[tox] envlist = py39,py310,py311,py312,py313,pytest7 [testenv] deps = pytest-xdist>=3.3.0 pytest7: pytest ~=7.4 commands = pytest {posargs:tests}