pax_global_header00006660000000000000000000000064146456102640014522gustar00rootroot0000000000000052 comment=3671b40691440fcb01e96e346220ac4fe62d3580 pytest-subtests-0.13.1/000077500000000000000000000000001464561026400150065ustar00rootroot00000000000000pytest-subtests-0.13.1/.github/000077500000000000000000000000001464561026400163465ustar00rootroot00000000000000pytest-subtests-0.13.1/.github/workflows/000077500000000000000000000000001464561026400204035ustar00rootroot00000000000000pytest-subtests-0.13.1/.github/workflows/deploy.yml000066400000000000000000000021141464561026400224200ustar00rootroot00000000000000name: deploy on: workflow_dispatch: inputs: version: description: 'Release version' required: true default: '1.2.3' jobs: package: runs-on: ubuntu-latest env: SETUPTOOLS_SCM_PRETEND_VERSION: ${{ github.event.inputs.version }} steps: - uses: actions/checkout@v3 - name: Build and Check Package uses: hynek/build-and-inspect-python-package@v1.5 deploy: needs: package runs-on: ubuntu-latest environment: deploy permissions: id-token: write # For PyPI trusted publishers. contents: write # For tag. steps: - uses: actions/checkout@v3 - name: Download Package uses: actions/download-artifact@v3 with: name: Packages path: dist - name: Publish package to PyPI uses: pypa/gh-action-pypi-publish@v1.8.5 - name: GitHub Release env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | gh release create v${{ github.event.inputs.version }} --target=${{ github.ref_name }} gh pr merge ${{ github.ref_name }} --merge pytest-subtests-0.13.1/.github/workflows/test.yml000066400000000000000000000022371464561026400221110ustar00rootroot00000000000000name: test on: push: branches: - main - "test-me-*" pull_request: branches: - "*" # Cancel running jobs for the same workflow and branch. concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: package: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Build and Check Package uses: hynek/build-and-inspect-python-package@v1.5 test: needs: [package] runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: ["ubuntu-latest", "windows-latest"] python: ["3.8", "3.9", "3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v3 - name: Download Package uses: actions/download-artifact@v3 with: name: Packages path: dist - name: Set up Python uses: actions/setup-python@v4 with: python-version: ${{ matrix.python }} - name: Install tox run: | python -m pip install --upgrade pip python -m pip install --upgrade tox - name: Test shell: bash run: | tox run -e py --installpkg `find dist/*.tar.gz` pytest-subtests-0.13.1/.gitignore000066400000000000000000000023061464561026400167770ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ # PyCharm. .idea/ pytest-subtests-0.13.1/.pre-commit-config.yaml000066400000000000000000000016271464561026400212750ustar00rootroot00000000000000repos: - repo: https://github.com/psf/black rev: 24.4.2 hooks: - id: black args: [--safe, --quiet] - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.6.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml - id: debug-statements - repo: https://github.com/asottile/reorder-python-imports rev: v3.13.0 hooks: - id: reorder-python-imports - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.10.1 hooks: - id: mypy files: ^(src|tests) args: [] additional_dependencies: [attrs>=19.2.0, pytest>=7, typing-extensions] - repo: local hooks: - id: rst name: rst entry: rst-lint --encoding utf-8 files: ^(CHANGELOG.rst|RELEASING.rst|README.rst)$ language: python additional_dependencies: [pygments, restructuredtext_lint] pytest-subtests-0.13.1/CHANGELOG.rst000066400000000000000000000076471464561026400170430ustar00rootroot00000000000000CHANGELOG ========= 0.13.1 (2024-07-16) ------------------- * Fixed a bug where an extra test would execute when ``-x/--exitfirst`` was used (`#139`_). .. _#139: https://github.com/pytest-dev/pytest-subtests/pull/139 0.13.0 (2024-07-07) ------------------- * Dropped support for EOL Python 3.7. * Added support for ``-x/--exitfirst`` (`#134`_). * Hide the traceback inside the ``SubTests.test()`` method (`#131`_). .. _#131: https://github.com/pytest-dev/pytest-subtests/pull/131 .. _#134: https://github.com/pytest-dev/pytest-subtests/pull/134 0.12.1 (2024-03-07) ------------------- * Fixed compatibility with upcoming pytest ``8.1.x`` (`#125`_). .. _#125: https://github.com/pytest-dev/pytest-subtests/issues/125 0.12.0 (2024-03-06) ------------------- * Python 3.12 is now officially supported (`#113`_). * Added typing support (`#115`_). * ``SubTests`` can be imported from ``pytest_subtests`` to type-annotate the ``subtests`` fixture. .. _#113: https://github.com/pytest-dev/pytest-subtests/pull/113 .. _#115: https://github.com/pytest-dev/pytest-subtests/pull/115 0.11.0 (2023-05-15) ------------------- * Logging is displayed for failing subtests (`#92`_). * Passing subtests no longer turn the pytest output to yellow (as if warnings have been issued) (`#86`_). Thanks to `Andrew-Brock`_ for providing the solution. * Now the ``msg`` contents of a subtest are displayed when running pytest with ``-v`` (`#6`_). .. _#6: https://github.com/pytest-dev/pytest-subtests/issues/6 .. _#86: https://github.com/pytest-dev/pytest-subtests/issues/86 .. _#92: https://github.com/pytest-dev/pytest-subtests/issues/87 ..
_`Andrew-Brock`: https://github.com/Andrew-Brock 0.10.0 (2022-02-15) ------------------- * Added experimental support for suppressing subtest output dots in non-verbose mode with ``--no-subtests-shortletter`` -- this allows the native pytest column calculations to not be disrupted and minimizes unneeded output for large CI systems. 0.9.0 (2022-10-28) ------------------ * Python 3.11 is officially supported. * Dropped support for Python 3.6. 0.8.0 (2022-05-26) ------------------ * Now passing subtests are shown in the test run summary at the end (for example: ``10 failed, 1 passed, 10 subtests passed in 0.10s``) (`#70`_). .. _#70: https://github.com/pytest-dev/pytest-subtests/pull/70 0.7.0 (2022-02-13) ------------------ * Fixed support for pytest 7.0, and ``pytest>=7.0`` is now required. 0.6.0 (2022-01-15) ------------------ * ``pytest>=6.0`` is now required. * Added official support for Python 3.10. * Dropped support for Python 3.5. * Users no longer need to configure a warnings filter for the internal ``A private pytest class or function was used`` pytest warning (`#52`_). * **Experimental**: Use ``SUBPASS`` and ``,`` for passed subtests instead of general ``PASSED``, ``SUBFAIL`` and ``u`` for failed ones instead of ``FAILED`` (`#30`_). .. _#30: https://github.com/pytest-dev/pytest-subtests/pull/30 .. _#52: https://github.com/pytest-dev/pytest-subtests/pull/52 0.5.0 (2021-05-29) ------------------ * Added support for ``pytest.mark.xfail`` (`#40`_). .. _#40: https://github.com/pytest-dev/pytest-subtests/pull/40 0.4.0 (2020-12-13) ------------------ * Added support for ``--pdb`` (`#22`_). .. _#22: https://github.com/pytest-dev/pytest-subtests/issues/22 0.3.2 (2020-08-01) ------------------ * Fixed pytest 6.0 support. 0.3.1 (2020-05-20) ------------------ * Fixed pytest 5.4 support. 0.3.0 (2020-01-22) ------------------ * Dropped support for Python 3.4. * ``subtests`` now correctly captures and displays stdout/stderr (`#18`_). .. _#18: https://github.com/pytest-dev/pytest-subtests/issues/18 0.2.1 (2019-04-04) ------------------ * Fixed verbose output reporting on Linux (`#7`_). .. _#7: https://github.com/pytest-dev/pytest-subtests/issues/7 0.2.0 (2019-04-03) ------------------ * Subtests are correctly reported with ``pytest-xdist>=1.28``. 0.1.0 (2019-04-01) ------------------ * First release to PyPI. pytest-subtests-0.13.1/LICENSE000066400000000000000000000020721464561026400160140ustar00rootroot00000000000000 The MIT License (MIT) Copyright (c) 2019 Bruno Oliveira Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
pytest-subtests-0.13.1/README.rst000066400000000000000000000115351464561026400165020ustar00rootroot00000000000000=============== pytest-subtests =============== unittest ``subTest()`` support and ``subtests`` fixture. .. image:: https://img.shields.io/pypi/v/pytest-subtests.svg :target: https://pypi.org/project/pytest-subtests :alt: PyPI version .. image:: https://img.shields.io/conda/vn/conda-forge/pytest-subtests.svg :target: https://anaconda.org/conda-forge/pytest-subtests .. image:: https://img.shields.io/pypi/pyversions/pytest-subtests.svg :target: https://pypi.org/project/pytest-subtests :alt: Python versions .. image:: https://github.com/pytest-dev/pytest-subtests/workflows/test/badge.svg :target: https://github.com/pytest-dev/pytest-subtests/actions .. image:: https://img.shields.io/badge/code%20style-black-000000.svg :target: https://github.com/ambv/black ---- This `pytest`_ plugin was generated with `Cookiecutter`_ along with `@hackebrot`_'s `cookiecutter-pytest-plugin`_ template. Features -------- * Adds support for `TestCase.subTest `__. * New ``subtests`` fixture, providing similar functionality for pure pytest tests. Installation ------------ You can install ``pytest-subtests`` via `pip`_ from `PyPI`_:: $ pip install pytest-subtests Usage ----- unittest subTest() example ^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: python import unittest class T(unittest.TestCase): def test_foo(self): for i in range(5): with self.subTest("custom message", i=i): self.assertEqual(i % 2, 0) if __name__ == "__main__": unittest.main() **Output** .. code-block:: λ pytest .tmp\test-unit-subtest.py ======================== test session starts ======================== ... collected 1 item .tmp\test-unit-subtest.py FF. [100%] ============================= FAILURES ============================== _________________ T.test_foo [custom message] (i=1) _________________ self = def test_foo(self): for i in range(5): with self.subTest('custom message', i=i): > self.assertEqual(i % 2, 0) E AssertionError: 1 != 0 .tmp\test-unit-subtest.py:9: AssertionError _________________ T.test_foo [custom message] (i=3) _________________ self = def test_foo(self): for i in range(5): with self.subTest('custom message', i=i): > self.assertEqual(i % 2, 0) E AssertionError: 1 != 0 .tmp\test-unit-subtest.py:9: AssertionError ================ 2 failed, 1 passed in 0.07 seconds ================= ``subtests`` fixture example ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: python def test(subtests): for i in range(5): with subtests.test(msg="custom message", i=i): assert i % 2 == 0 **Output** .. code-block:: λ pytest .tmp\test-subtest.py ======================== test session starts ======================== ... collected 1 item .tmp\test-subtest.py .F.F.. [100%] ============================= FAILURES ============================== ____________________ test [custom message] (i=1) ____________________ def test(subtests): for i in range(5): with subtests.test(msg='custom message', i=i): > assert i % 2 == 0 E assert (1 % 2) == 0 .tmp\test-subtest.py:4: AssertionError ____________________ test [custom message] (i=3) ____________________ def test(subtests): for i in range(5): with subtests.test(msg='custom message', i=i): > assert i % 2 == 0 E assert (3 % 2) == 0 .tmp\test-subtest.py:4: AssertionError ================ 2 failed, 1 passed in 0.07 seconds ================= Contributing ------------ Contributions are very welcome. Tests can be run with `tox`_: .. 
code-block:: tox -e py38 License ------- Distributed under the terms of the `MIT`_ license, "pytest-subtests" is free and open source software. Issues ------ If you encounter any problems, please `file an issue`_ along with a detailed description. .. _`Cookiecutter`: https://github.com/audreyr/cookiecutter .. _`@hackebrot`: https://github.com/hackebrot .. _`MIT`: http://opensource.org/licenses/MIT .. _`cookiecutter-pytest-plugin`: https://github.com/pytest-dev/cookiecutter-pytest-plugin .. _`file an issue`: https://github.com/pytest-dev/pytest-subtests/issues .. _`pytest`: https://github.com/pytest-dev/pytest .. _`tox`: https://tox.readthedocs.io/en/latest/ .. _`pip`: https://pypi.org/project/pip/ .. _`PyPI`: https://pypi.org/project/pytest-subtests/ pytest-subtests-0.13.1/RELEASING.rst000066400000000000000000000021551464561026400170540ustar00rootroot00000000000000========================= Releasing pytest-subtests ========================= This document describes the steps to make a new ``pytest-subtests`` release. Version ------- ``main`` should always be green and a potential release candidate. ``pytest-subtests`` follows semantic versioning, so given that the current version is ``X.Y.Z``, to find the next version number one needs to look at the ``CHANGELOG.rst`` file: - If there is any new feature, then we must make a new **minor** release: next release will be ``X.Y+1.0``. - Otherwise it is just a **bug fix** release: ``X.Y.Z+1``. Steps ----- To publish a new release ``X.Y.Z``, the steps are as follows: #. Create a new branch named ``release-X.Y.Z`` from the latest ``main``. #. Update the ``CHANGELOG.rst`` file with the new release information. #. Commit and push the branch to ``upstream`` and open a PR. #. Once the PR is **green** and **approved**, start the ``deploy`` workflow: .. code-block:: console gh workflow run deploy.yml -R pytest-dev/pytest-subtests --ref release-VERSION --field version=VERSION The PR will be automatically merged.
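To make the version rule in the Version section above concrete, here is a small illustrative helper (a sketch only; the ``next_version`` function is hypothetical and not part of this repository):

.. code-block:: python

    def next_version(current: str, has_new_feature: bool) -> str:
        # Split an "X.Y.Z" version string into integer components.
        major, minor, patch = (int(part) for part in current.split("."))
        if has_new_feature:
            # Any new feature means a new minor release: X.Y+1.0.
            return f"{major}.{minor + 1}.0"
        # Otherwise it is a bug-fix release: X.Y.Z+1.
        return f"{major}.{minor}.{patch + 1}"

    # Matches the release history recorded in CHANGELOG.rst.
    assert next_version("0.12.1", has_new_feature=True) == "0.13.0"
    assert next_version("0.13.0", has_new_feature=False) == "0.13.1"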
pytest-subtests-0.13.1/pyproject.toml000066400000000000000000000004061464561026400177220ustar00rootroot00000000000000[build-system] requires = [ "setuptools", "setuptools-scm[toml]>=6.2.3", ] build-backend = "setuptools.build_meta" [tool.mypy] disallow_untyped_defs = true warn_unreachable = true warn_unused_configs = true warn_unused_ignores = true [tool.setuptools_scm] pytest-subtests-0.13.1/pytest.ini000066400000000000000000000000511464561026400170330ustar00rootroot00000000000000[pytest] addopts = -ra testpaths = tests pytest-subtests-0.13.1/setup.cfg000066400000000000000000000024471464561026400166360ustar00rootroot00000000000000[metadata] name = pytest-subtests description = unittest subTest() support and subtests fixture long_description = file: README.rst long_description_content_type = text/x-rst url = https://github.com/pytest-dev/pytest-subtests author = Bruno Oliveira license = MIT license_file = LICENSE classifiers = Development Status :: 4 - Beta Framework :: Pytest Intended Audience :: Developers Topic :: Software Development :: Testing Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 Programming Language :: Python :: Implementation :: CPython Operating System :: OS Independent License :: OSI Approved :: MIT License Typing :: Typed keywords = test, unittest, pytest [options] install_requires = attrs>=19.2.0 pytest>=7.0 typing_extensions;python_version<"3.8" python_requires = >=3.7 packages = find: package_dir = = src setup_requires = setuptools setuptools-scm>=6.0 [options.packages.find] where = src [options.entry_points] pytest11 = subtests = pytest_subtests.plugin [options.package_data] pytest_subtests = py.typed pytest-subtests-0.13.1/src/000077500000000000000000000000001464561026400155755ustar00rootroot00000000000000pytest-subtests-0.13.1/src/pytest_subtests/000077500000000000000000000000001464561026400210615ustar00rootroot00000000000000pytest-subtests-0.13.1/src/pytest_subtests/__init__.py000066400000000000000000000000701464561026400231670ustar00rootroot00000000000000from .plugin import SubTests __all__ = ["SubTests"] pytest-subtests-0.13.1/src/pytest_subtests/plugin.py000066400000000000000000000316611464561026400227400ustar00rootroot00000000000000from __future__ import annotations import sys import time from contextlib import contextmanager from contextlib import ExitStack from contextlib import nullcontext from typing import Any from typing import Callable from typing import ContextManager from typing import Generator from typing import Iterator from typing import Mapping from typing import TYPE_CHECKING from unittest import TestCase import attr import pluggy import pytest from _pytest._code import ExceptionInfo from _pytest.capture import CaptureFixture from _pytest.capture import FDCapture from _pytest.capture import SysCapture from _pytest.fixtures import SubRequest from _pytest.logging import catching_logs from _pytest.logging import LogCaptureHandler from _pytest.outcomes import OutcomeException from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception from _pytest.unittest import TestCaseFunction if TYPE_CHECKING: from types import TracebackType if sys.version_info < (3, 8): from typing_extensions import Literal else: from typing import Literal def pytest_addoption(parser: pytest.Parser) 
-> None: group = parser.getgroup("subtests") group.addoption( "--no-subtests-shortletter", action="store_true", dest="no_subtests_shortletter", default=False, help="Disables subtest output 'dots' in non-verbose mode (EXPERIMENTAL)", ) @attr.s class SubTestContext: msg: str | None = attr.ib() kwargs: dict[str, Any] = attr.ib() @attr.s(init=False) class SubTestReport(TestReport): # type: ignore[misc] context: SubTestContext = attr.ib() @property def head_line(self) -> str: _, _, domain = self.location return f"{domain} {self.sub_test_description()}" def sub_test_description(self) -> str: parts = [] if isinstance(self.context.msg, str): parts.append(f"[{self.context.msg}]") if self.context.kwargs: params_desc = ", ".join( f"{k}={v!r}" for (k, v) in sorted(self.context.kwargs.items()) ) parts.append(f"({params_desc})") return " ".join(parts) or "()" def _to_json(self) -> dict: data = super()._to_json() del data["context"] data["_report_type"] = "SubTestReport" data["_subtest.context"] = attr.asdict(self.context) return data @classmethod def _from_json(cls, reportdict: dict[str, Any]) -> SubTestReport: report = super()._from_json(reportdict) context_data = reportdict["_subtest.context"] report.context = SubTestContext( msg=context_data["msg"], kwargs=context_data["kwargs"] ) return report @classmethod def _from_test_report(cls, test_report: TestReport) -> SubTestReport: return super()._from_json(test_report._to_json()) def _addSubTest( self: TestCaseFunction, test_case: Any, test: TestCase, exc_info: tuple[type[BaseException], BaseException, TracebackType] | None, ) -> None: if exc_info is not None: msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] call_info = make_call_info( ExceptionInfo(exc_info, _ispytest=True), start=0, stop=0, duration=0, when="call", ) report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) sub_report = SubTestReport._from_test_report(report) sub_report.context = SubTestContext(msg, dict(test.params)) # type: ignore[attr-defined] self.ihook.pytest_runtest_logreport(report=sub_report) if check_interactive_exception(call_info, sub_report): self.ihook.pytest_exception_interact( node=self, call=call_info, report=sub_report ) def pytest_configure(config: pytest.Config) -> None: TestCaseFunction.addSubTest = _addSubTest # type: ignore[attr-defined] TestCaseFunction.failfast = False # type: ignore[attr-defined] # Hack (#86): the terminal does not know about the "subtests" # status, so it will by default turn the output to yellow. # This forcibly adds the new 'subtests' status. import _pytest.terminal new_types = tuple( f"subtests {outcome}" for outcome in ("passed", "failed", "skipped") ) # We need to check if we are not re-adding because we run our own tests # with pytester in-process mode, so this will be called multiple times. 
if new_types[0] not in _pytest.terminal.KNOWN_TYPES: _pytest.terminal.KNOWN_TYPES = _pytest.terminal.KNOWN_TYPES + new_types # type: ignore[assignment] _pytest.terminal._color_for_type.update( { f"subtests {outcome}": _pytest.terminal._color_for_type[outcome] for outcome in ("passed", "failed", "skipped") if outcome in _pytest.terminal._color_for_type } ) def pytest_unconfigure() -> None: if hasattr(TestCaseFunction, "addSubTest"): del TestCaseFunction.addSubTest if hasattr(TestCaseFunction, "failfast"): del TestCaseFunction.failfast @pytest.fixture def subtests(request: SubRequest) -> Generator[SubTests, None, None]: capmam = request.node.config.pluginmanager.get_plugin("capturemanager") if capmam is not None: suspend_capture_ctx = capmam.global_and_fixture_disabled else: suspend_capture_ctx = nullcontext yield SubTests(request.node.ihook, suspend_capture_ctx, request) @attr.s class SubTests: ihook: pluggy.HookRelay = attr.ib() suspend_capture_ctx: Callable[[], ContextManager] = attr.ib() request: SubRequest = attr.ib() @property def item(self) -> pytest.Item: return self.request.node def test( self, msg: str | None = None, **kwargs: Any, ) -> _SubTestContextManager: """ Context manager for subtests, capturing exceptions raised inside the subtest scope and handling them through the pytest machinery. Usage: .. code-block:: python with subtests.test(msg="subtest"): assert 1 == 1 """ return _SubTestContextManager( self.ihook, msg, kwargs, request=self.request, suspend_capture_ctx=self.suspend_capture_ctx, ) @attr.s(auto_attribs=True) class _SubTestContextManager: """ Context manager for subtests, capturing exceptions raised inside the subtest scope and handling them through the pytest machinery. Note: initially this logic was implemented directly in SubTests.test() as a @contextmanager, however it is not possible to control the output fully when exiting from it due to an exception when in --exitfirst mode, so this was refactored into an explicit context manager class (#134). 
""" ihook: pluggy.HookRelay msg: str | None kwargs: dict[str, Any] suspend_capture_ctx: Callable[[], ContextManager] request: SubRequest def __enter__(self) -> None: __tracebackhide__ = True self._start = time.time() self._precise_start = time.perf_counter() self._exc_info = None self._exit_stack = ExitStack() self._captured_output = self._exit_stack.enter_context( capturing_output(self.request) ) self._captured_logs = self._exit_stack.enter_context( capturing_logs(self.request) ) def __exit__( self, exc_type: type[Exception] | None, exc_val: Exception | None, exc_tb: TracebackType | None, ) -> bool: __tracebackhide__ = True try: if exc_val is not None: exc_info = ExceptionInfo.from_exception(exc_val) else: exc_info = None finally: self._exit_stack.close() precise_stop = time.perf_counter() duration = precise_stop - self._precise_start stop = time.time() call_info = make_call_info( exc_info, start=self._start, stop=stop, duration=duration, when="call" ) report = self.ihook.pytest_runtest_makereport( item=self.request.node, call=call_info ) sub_report = SubTestReport._from_test_report(report) sub_report.context = SubTestContext(self.msg, self.kwargs.copy()) self._captured_output.update_report(sub_report) self._captured_logs.update_report(sub_report) with self.suspend_capture_ctx(): self.ihook.pytest_runtest_logreport(report=sub_report) if check_interactive_exception(call_info, sub_report): self.ihook.pytest_exception_interact( node=self.request.node, call=call_info, report=sub_report ) if exc_val is not None: if self.request.session.shouldfail: return False return True def make_call_info( exc_info: ExceptionInfo[BaseException] | None, *, start: float, stop: float, duration: float, when: Literal["collect", "setup", "call", "teardown"], ) -> CallInfo: return CallInfo( None, exc_info, start=start, stop=stop, duration=duration, when=when, _ispytest=True, ) @contextmanager def capturing_output(request: SubRequest) -> Iterator[Captured]: option = request.config.getoption("capture", None) # capsys or capfd are active, subtest should not capture. 
capman = request.config.pluginmanager.getplugin("capturemanager") capture_fixture_active = getattr(capman, "_capture_fixture", None) if option == "sys" and not capture_fixture_active: with ignore_pytest_private_warning(): fixture = CaptureFixture(SysCapture, request) elif option == "fd" and not capture_fixture_active: with ignore_pytest_private_warning(): fixture = CaptureFixture(FDCapture, request) else: fixture = None if fixture is not None: fixture._start() captured = Captured() try: yield captured finally: if fixture is not None: out, err = fixture.readouterr() fixture.close() captured.out = out captured.err = err @contextmanager def capturing_logs( request: SubRequest, ) -> Iterator[CapturedLogs | NullCapturedLogs]: logging_plugin = request.config.pluginmanager.getplugin("logging-plugin") if logging_plugin is None: yield NullCapturedLogs() else: handler = LogCaptureHandler() handler.setFormatter(logging_plugin.formatter) captured_logs = CapturedLogs(handler) with catching_logs(handler): yield captured_logs @contextmanager def ignore_pytest_private_warning() -> Generator[None, None, None]: import warnings with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "A private pytest class or function was used.", category=pytest.PytestDeprecationWarning, ) yield @attr.s class Captured: out = attr.ib(default="", type=str) err = attr.ib(default="", type=str) def update_report(self, report: pytest.TestReport) -> None: if self.out: report.sections.append(("Captured stdout call", self.out)) if self.err: report.sections.append(("Captured stderr call", self.err)) class CapturedLogs: def __init__(self, handler: LogCaptureHandler) -> None: self._handler = handler def update_report(self, report: pytest.TestReport) -> None: report.sections.append(("Captured log call", self._handler.stream.getvalue())) class NullCapturedLogs: def update_report(self, report: pytest.TestReport) -> None: pass def pytest_report_to_serializable(report: pytest.TestReport) -> dict[str, Any] | None: if isinstance(report, SubTestReport): return report._to_json() return None def pytest_report_from_serializable(data: dict[str, Any]) -> SubTestReport | None: if data.get("_report_type") == "SubTestReport": return SubTestReport._from_json(data) return None @pytest.hookimpl(tryfirst=True) def pytest_report_teststatus( report: pytest.TestReport, config: pytest.Config, ) -> tuple[str, str, str | Mapping[str, bool]] | None: if report.when != "call" or not isinstance(report, SubTestReport): return None if hasattr(report, "wasxfail"): return None outcome = report.outcome description = report.sub_test_description() if report.passed: short = "" if config.option.no_subtests_shortletter else "," return f"subtests {outcome}", short, f"{description} SUBPASS" elif report.skipped: short = "" if config.option.no_subtests_shortletter else "-" return outcome, short, f"{description} SUBSKIP" elif outcome == "failed": short = "" if config.option.no_subtests_shortletter else "u" return outcome, short, f"{description} SUBFAIL" return None pytest-subtests-0.13.1/src/pytest_subtests/py.typed000066400000000000000000000000001464561026400225460ustar00rootroot00000000000000pytest-subtests-0.13.1/tests/000077500000000000000000000000001464561026400161505ustar00rootroot00000000000000pytest-subtests-0.13.1/tests/conftest.py000066400000000000000000000000341464561026400203440ustar00rootroot00000000000000pytest_plugins = "pytester" 
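The ``pytest_plugins = "pytester"`` line in ``conftest.py`` above enables pytest's built-in ``pytester`` fixture, which the test suite below uses to run this plugin against throwaway test files in-process. The following is a minimal orientation sketch (hypothetical; this exact test is not part of the suite, and the test name and expected summary line are only illustrative):

.. code-block:: python

    import pytest


    def test_subtests_fixture_smoke(pytester: pytest.Pytester) -> None:
        # Write a temporary test module that uses the ``subtests`` fixture.
        pytester.makepyfile(
            """
            def test_foo(subtests):
                for i in range(3):
                    with subtests.test(msg="custom", i=i):
                        assert i % 2 == 0
            """
        )
        # Run it in-process: i=1 fails, i=0 and i=2 pass, and the test itself passes.
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["* 1 failed, 1 passed, 2 subtests passed in *"])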
pytest-subtests-0.13.1/tests/test_subtests.py000066400000000000000000000510421464561026400214370ustar00rootroot00000000000000from __future__ import annotations import sys from pathlib import Path import pytest IS_PY311 = sys.version_info[:2] >= (3, 11) if sys.version_info < (3, 8): from typing_extensions import Literal else: from typing import Literal @pytest.mark.parametrize("mode", ["normal", "xdist"]) class TestFixture: """ Tests for ``subtests`` fixture. """ @pytest.fixture def simple_script(self, pytester: pytest.Pytester) -> None: pytester.makepyfile( """ def test_foo(subtests): for i in range(5): with subtests.test(msg="custom", i=i): assert i % 2 == 0 """ ) def test_simple_terminal_normal( self, simple_script: None, pytester: pytest.Pytester, mode: Literal["normal", "xdist"], ) -> None: if mode == "normal": result = pytester.runpytest() expected_lines = ["collected 1 item"] else: assert mode == "xdist" pytest.importorskip("xdist") result = pytester.runpytest("-n1") expected_lines = ["1 worker [1 item]"] expected_lines += [ "* test_foo [[]custom[]] (i=1) *", "* test_foo [[]custom[]] (i=3) *", "* 2 failed, 1 passed, 3 subtests passed in *", ] result.stdout.fnmatch_lines(expected_lines) def test_simple_terminal_verbose( self, simple_script: None, pytester: pytest.Pytester, mode: Literal["normal", "xdist"], ) -> None: if mode == "normal": result = pytester.runpytest("-v") expected_lines = [ "*collected 1 item", "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=0) SUBPASS *100%*", "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=1) SUBFAIL *100%*", "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=2) SUBPASS *100%*", "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=3) SUBFAIL *100%*", "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=4) SUBPASS *100%*", "test_simple_terminal_verbose.py::test_foo PASSED *100%*", ] else: assert mode == "xdist" pytest.importorskip("xdist") result = pytester.runpytest("-n1", "-v") expected_lines = [ "1 worker [1 item]", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", ] expected_lines += [ "* test_foo [[]custom[]] (i=1) *", "* test_foo [[]custom[]] (i=3) *", "* 2 failed, 1 passed, 3 subtests passed in *", ] result.stdout.fnmatch_lines(expected_lines) def test_skip( self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] ) -> None: pytester.makepyfile( """ import pytest def test_foo(subtests): for i in range(5): with subtests.test(msg="custom", i=i): if i % 2 == 0: pytest.skip('even number') """ ) if mode == "normal": result = pytester.runpytest() expected_lines = ["collected 1 item"] else: assert mode == "xdist" pytest.importorskip("xdist") result = pytester.runpytest("-n1") expected_lines = ["1 worker [1 item]"] expected_lines += ["* 1 passed, 3 skipped, 2 subtests passed in *"] result.stdout.fnmatch_lines(expected_lines) def test_xfail( self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] ) -> None: pytester.makepyfile( """ import pytest def test_foo(subtests): for i in range(5): with subtests.test(msg="custom", i=i): if i % 2 == 0: pytest.xfail('even number') """ ) if mode == "normal": result = pytester.runpytest() expected_lines = ["collected 1 item"] else: assert mode == "xdist" 
pytest.importorskip("xdist") result = pytester.runpytest("-n1") expected_lines = ["1 worker [1 item]"] expected_lines += ["* 1 passed, 3 xfailed, 2 subtests passed in *"] result.stdout.fnmatch_lines(expected_lines) def test_typing_exported( self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] ) -> None: pytester.makepyfile( """ from pytest_subtests import SubTests def test_typing_exported(subtests: SubTests) -> None: assert isinstance(subtests, SubTests) """ ) if mode == "normal": result = pytester.runpytest() expected_lines = ["collected 1 item"] else: assert mode == "xdist" pytest.importorskip("xdist") result = pytester.runpytest("-n1") expected_lines = ["1 worker [1 item]"] expected_lines += ["* 1 passed *"] result.stdout.fnmatch_lines(expected_lines) class TestSubTest: """ Test Test.subTest functionality. """ @pytest.fixture def simple_script(self, pytester: pytest.Pytester) -> Path: return pytester.makepyfile( """ from unittest import TestCase, main class T(TestCase): def test_foo(self): for i in range(5): with self.subTest(msg="custom", i=i): self.assertEqual(i % 2, 0) if __name__ == '__main__': main() """ ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) def test_simple_terminal_normal( self, simple_script: Path, pytester: pytest.Pytester, runner: Literal["unittest", "pytest-normal", "pytest-xdist"], ) -> None: suffix = ".test_foo" if IS_PY311 else "" if runner == "unittest": result = pytester.run(sys.executable, simple_script) result.stderr.fnmatch_lines( [ f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)", "AssertionError: 1 != 0", f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)", "AssertionError: 1 != 0", "Ran 1 test in *", "FAILED (failures=2)", ] ) else: if runner == "pytest-normal": result = pytester.runpytest(simple_script) expected_lines = ["collected 1 item"] else: assert runner == "pytest-xdist" pytest.importorskip("xdist") result = pytester.runpytest(simple_script, "-n1") expected_lines = ["1 worker [1 item]"] result.stdout.fnmatch_lines( expected_lines + [ "* T.test_foo [[]custom[]] (i=1) *", "E * AssertionError: 1 != 0", "* T.test_foo [[]custom[]] (i=3) *", "E * AssertionError: 1 != 0", "* 2 failed, 1 passed in *", ] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) def test_simple_terminal_verbose( self, simple_script: Path, pytester: pytest.Pytester, runner: Literal["unittest", "pytest-normal", "pytest-xdist"], ) -> None: suffix = ".test_foo" if IS_PY311 else "" if runner == "unittest": result = pytester.run(sys.executable, simple_script, "-v") result.stderr.fnmatch_lines( [ f"test_foo (__main__.T{suffix}) ... 
", f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)", "AssertionError: 1 != 0", f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)", "AssertionError: 1 != 0", "Ran 1 test in *", "FAILED (failures=2)", ] ) else: if runner == "pytest-normal": result = pytester.runpytest(simple_script, "-v") expected_lines = [ "*collected 1 item", "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=1) SUBFAIL *100%*", "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=3) SUBFAIL *100%*", "test_simple_terminal_verbose.py::T::test_foo PASSED *100%*", ] else: assert runner == "pytest-xdist" pytest.importorskip("xdist") result = pytester.runpytest(simple_script, "-n1", "-v") expected_lines = [ "1 worker [1 item]", "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", "*gw0*100%* PASSED test_simple_terminal_verbose.py::T::test_foo*", ] result.stdout.fnmatch_lines( expected_lines + [ "* T.test_foo [[]custom[]] (i=1) *", "E * AssertionError: 1 != 0", "* T.test_foo [[]custom[]] (i=3) *", "E * AssertionError: 1 != 0", "* 2 failed, 1 passed in *", ] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) def test_skip( self, pytester: pytest.Pytester, runner: Literal["unittest", "pytest-normal", "pytest-xdist"], ) -> None: p = pytester.makepyfile( """ from unittest import TestCase, main class T(TestCase): def test_foo(self): for i in range(5): with self.subTest(msg="custom", i=i): if i % 2 == 0: self.skipTest('even number') if __name__ == '__main__': main() """ ) if runner == "unittest": result = pytester.runpython(p) result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (skipped=3)"]) else: pytest.xfail("Not producing the expected results (#5)") result = pytester.runpytest(p) # type:ignore[unreachable] result.stdout.fnmatch_lines( ["collected 1 item", "* 3 skipped, 1 passed in *"] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) @pytest.mark.xfail(reason="Not producing the expected results (#5)") def test_xfail( self, pytester: pytest.Pytester, runner: Literal["unittest", "pytest-normal", "pytest-xdist"], ) -> None: p = pytester.makepyfile( """ import pytest from unittest import expectedFailure, TestCase, main class T(TestCase): @expectedFailure def test_foo(self): for i in range(5): with self.subTest(msg="custom", i=i): if i % 2 == 0: raise pytest.xfail('even number') if __name__ == '__main__': main() """ ) if runner == "unittest": result = pytester.runpython(p) result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (expected failures=3)"]) else: result = pytester.runpytest(p) result.stdout.fnmatch_lines( ["collected 1 item", "* 3 xfailed, 1 passed in *"] ) class TestCapture: def create_file(self, pytester: pytest.Pytester) -> None: pytester.makepyfile( """ import sys def test(subtests): print() print('start test') with subtests.test(i='A'): print("hello stdout A") print("hello stderr A", file=sys.stderr) assert 0 with subtests.test(i='B'): print("hello stdout B") print("hello stderr B", file=sys.stderr) assert 0 print('end test') assert 0 """ ) def test_capturing(self, pytester: pytest.Pytester) -> None: self.create_file(pytester) result = pytester.runpytest() result.stdout.fnmatch_lines( [ "*__ test (i='A') __*", "*Captured stdout call*", "hello stdout A", "*Captured stderr call*", "hello stderr A", "*__ test (i='B') __*", "*Captured stdout call*", "hello stdout B", "*Captured stderr call*", "hello stderr B", "*__ test __*", "*Captured stdout call*", 
"start test", "end test", ] ) def test_no_capture(self, pytester: pytest.Pytester) -> None: self.create_file(pytester) result = pytester.runpytest("-s") result.stdout.fnmatch_lines( [ "start test", "hello stdout A", "uhello stdout B", "uend test", "*__ test (i='A') __*", "*__ test (i='B') __*", "*__ test __*", ] ) result.stderr.fnmatch_lines(["hello stderr A", "hello stderr B"]) @pytest.mark.parametrize("fixture", ["capsys", "capfd"]) def test_capture_with_fixture( self, pytester: pytest.Pytester, fixture: Literal["capsys", "capfd"] ) -> None: pytester.makepyfile( rf""" import sys def test(subtests, {fixture}): print('start test') with subtests.test(i='A'): print("hello stdout A") print("hello stderr A", file=sys.stderr) out, err = {fixture}.readouterr() assert out == 'start test\nhello stdout A\n' assert err == 'hello stderr A\n' """ ) result = pytester.runpytest() result.stdout.fnmatch_lines( [ "*1 passed*", ] ) class TestLogging: def create_file(self, pytester: pytest.Pytester) -> None: pytester.makepyfile( """ import logging def test_foo(subtests): logging.info("before") with subtests.test("sub1"): print("sub1 stdout") logging.info("sub1 logging") with subtests.test("sub2"): print("sub2 stdout") logging.info("sub2 logging") assert False """ ) def test_capturing(self, pytester: pytest.Pytester) -> None: self.create_file(pytester) result = pytester.runpytest("--log-level=INFO") result.stdout.fnmatch_lines( [ "*___ test_foo [[]sub2[]] __*", "*-- Captured stdout call --*", "sub2 stdout", "*-- Captured log call ---*", "INFO root:test_capturing.py:12 sub2 logging", "*== short test summary info ==*", ] ) def test_caplog(self, pytester: pytest.Pytester) -> None: pytester.makepyfile( """ import logging def test(subtests, caplog): caplog.set_level(logging.INFO) logging.info("start test") with subtests.test("sub1"): logging.info("inside %s", "subtest1") assert len(caplog.records) == 2 assert caplog.records[0].getMessage() == "start test" assert caplog.records[1].getMessage() == "inside subtest1" """ ) result = pytester.runpytest() result.stdout.fnmatch_lines( [ "*1 passed*", ] ) def test_no_logging(self, pytester: pytest.Pytester) -> None: pytester.makepyfile( """ import logging def test(subtests): logging.info("start log line") with subtests.test("sub passing"): logging.info("inside %s", "passing log line") with subtests.test("sub failing"): logging.info("inside %s", "failing log line") assert False logging.info("end log line") """ ) result = pytester.runpytest("-p no:logging") result.stdout.fnmatch_lines( [ "*1 passed*", ] ) result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*") class TestDebugging: """Check --pdb support for subtests fixture and TestCase.subTest.""" class _FakePdb: """ Fake debugger class implementation that tracks which methods were called on it. 
""" quitting: bool = False calls: list[str] = [] def __init__(self, *_: object, **__: object) -> None: self.calls.append("init") def reset(self) -> None: self.calls.append("reset") def interaction(self, *_: object) -> None: self.calls.append("interaction") @pytest.fixture(autouse=True) def cleanup_calls(self) -> None: self._FakePdb.calls.clear() def test_pdb_fixture( self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch ) -> None: pytester.makepyfile( """ def test(subtests): with subtests.test(): assert 0 """ ) self.runpytest_and_check_pdb(pytester, monkeypatch) def test_pdb_unittest( self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch ) -> None: pytester.makepyfile( """ from unittest import TestCase class Test(TestCase): def test(self): with self.subTest(): assert 0 """ ) self.runpytest_and_check_pdb(pytester, monkeypatch) def runpytest_and_check_pdb( self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch ) -> None: # Install the fake pdb implementation in pytest_subtests so we can reference # it in the command line (any module would do). import pytest_subtests monkeypatch.setattr(pytest_subtests, "_CustomPdb", self._FakePdb, raising=False) result = pytester.runpytest("--pdb", "--pdbcls=pytest_subtests:_CustomPdb") # Ensure pytest entered in debugging mode when encountering the failing # assert. result.stdout.fnmatch_lines("*entering PDB*") assert self._FakePdb.calls == ["init", "reset", "interaction"] def test_exitfirst(pytester: pytest.Pytester) -> None: """ Validate that when passing --exitfirst the test exits after the first failed subtest. """ pytester.makepyfile( """ def test_foo(subtests): with subtests.test("sub1"): assert False with subtests.test("sub2"): assert False """ ) result = pytester.runpytest("--exitfirst") assert result.parseoutcomes()["failed"] == 2 result.stdout.fnmatch_lines( [ "*[[]sub1[]] SUBFAIL test_exitfirst.py::test_foo - assert False*", "FAILED test_exitfirst.py::test_foo - assert False", "* stopping after 2 failures*", ], consecutive=True, ) result.stdout.no_fnmatch_line("*sub2*") # sub2 not executed. pytest-subtests-0.13.1/tox.ini000066400000000000000000000002731464561026400163230ustar00rootroot00000000000000[tox] envlist = py38,py39,py310,py311,py312 [testenv] passenv = USER USERNAME TRAVIS PYTEST_ADDOPTS deps = pytest-xdist>=3.3.0 commands = pytest {posargs:tests}