===== pytest-subtests-0.11.0/.github/workflows/deploy.yml =====

name: deploy

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Release version'
        required: true
        default: '1.2.3'

jobs:
  package:
    runs-on: ubuntu-latest
    env:
      SETUPTOOLS_SCM_PRETEND_VERSION: ${{ github.event.inputs.version }}
    steps:
      - uses: actions/checkout@v3

      - name: Build and Check Package
        uses: hynek/build-and-inspect-python-package@v1.5

  deploy:
    needs: package
    runs-on: ubuntu-latest
    environment: deploy
    permissions:
      id-token: write  # For PyPI trusted publishers.
      contents: write  # For tag.
    steps:
      - uses: actions/checkout@v3

      - name: Download Package
        uses: actions/download-artifact@v3
        with:
          name: Packages
          path: dist

      - name: Publish package to PyPI
        uses: pypa/gh-action-pypi-publish@v1.8.5

      - name: Push tag
        run: |
          git config user.name "pytest bot"
          git config user.email "pytestbot@gmail.com"
          git tag --annotate --message=v${{ github.event.inputs.version }} v${{ github.event.inputs.version }} ${{ github.sha }}
          git push origin v${{ github.event.inputs.version }}

===== pytest-subtests-0.11.0/.github/workflows/test.yml =====

name: test

on:
  push:
    branches:
      - main
      - "test-me-*"

  pull_request:
    branches:
      - "*"

# Cancel running jobs for the same workflow and branch.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  package:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Build and Check Package
        uses: hynek/build-and-inspect-python-package@v1.5

  test:
    needs: [package]
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        name: [
          "windows-py37",
          "windows-py38",
          "windows-py39",
          "windows-py310",
          "windows-py311",
          "ubuntu-py37",
          "ubuntu-py38",
          "ubuntu-py39",
          "ubuntu-py310",
          "ubuntu-py311",
        ]

        include:
          - name: "windows-py37"
            python: "3.7"
            os: windows-latest
            tox_env: "py37"
          - name: "windows-py38"
            python: "3.8"
            os: windows-latest
            tox_env: "py38"
          - name: "windows-py39"
            python: "3.9"
            os: windows-latest
            tox_env: "py39"
          - name: "windows-py310"
            python: "3.10"
            os: windows-latest
            tox_env: "py310"
          - name: "windows-py311"
            python: "3.11-dev"
            os: windows-latest
            tox_env: "py311"
          - name: "ubuntu-py37"
            python: "3.7"
            os: ubuntu-latest
            tox_env: "py37"
          - name: "ubuntu-py38"
            python: "3.8"
            os: ubuntu-latest
            tox_env: "py38"
          - name: "ubuntu-py39"
            python: "3.9"
            os: ubuntu-latest
            tox_env: "py39"
          - name: "ubuntu-py310"
            python: "3.10"
            os: ubuntu-latest
            tox_env: "py310"
          - name: "ubuntu-py311"
            python: "3.11-dev"
            os: ubuntu-latest
            tox_env: "py311"

    steps:
      - uses: actions/checkout@v3

      - name: Download Package
        uses: actions/download-artifact@v3
        with:
          name: Packages
          path: dist

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python }}

      - name: Install tox
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade tox

      - name: Test
        shell: bash
        run: |
          tox run -e ${{ matrix.tox_env }} --installpkg `find dist/*.tar.gz`

===== pytest-subtests-0.11.0/.gitignore =====

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

# PyCharm
.idea/

===== pytest-subtests-0.11.0/.pre-commit-config.yaml =====

repos:
-   repo: https://github.com/psf/black
    rev: 22.3.0
    hooks:
    -   id: black
        args: [--safe, --quiet]
-   repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.2.0
    hooks:
    -   id: trailing-whitespace
    -   id: end-of-file-fixer
    -   id: check-yaml
    -   id: debug-statements
-   repo: https://github.com/asottile/reorder_python_imports
    rev: v3.1.0
    hooks:
    -   id: reorder-python-imports
-   repo: local
    hooks:
    -   id: rst
        name: rst
        entry: rst-lint --encoding utf-8
        files: ^(CHANGELOG.rst|RELEASING.rst|README.rst)$
        language: python
        additional_dependencies: [pygments, restructuredtext_lint]

===== pytest-subtests-0.11.0/CHANGELOG.rst =====

CHANGELOG
=========

0.11.0 (2023-05-15)
-------------------

* Logging is displayed for failing subtests (`#92`_).

* Passing subtests no longer turn the pytest output to yellow (as if warnings have
  been issued) (`#86`_). Thanks to `Andrew-Brock`_ for providing the solution.

* Now the ``msg`` contents of a subtest is displayed when running pytest with
  ``-v`` (`#6`_).

.. _#6: https://github.com/pytest-dev/pytest-subtests/issues/6
.. _#86: https://github.com/pytest-dev/pytest-subtests/issues/86
.. _#92: https://github.com/pytest-dev/pytest-subtests/issues/87
.. _`Andrew-Brock`: https://github.com/Andrew-Brock

0.10.0 (2022-12-15)
-------------------

* Added experimental support for suppressing subtest output dots in non-verbose mode
  with ``--no-subtests-shortletter``. This keeps the native pytest column calculations
  from being disrupted and minimizes unneeded output for large CI systems.

0.9.0 (2022-10-28)
------------------

* Python 3.11 is officially supported.
* Dropped support for Python 3.6.

0.8.0 (2022-05-26)
------------------

* Passing subtests are now shown in the test run summary at the end (for example:
  ``10 failed, 1 passed, 10 subtests passed in 0.10s``) (`#70`_).

.. _#70: https://github.com/pytest-dev/pytest-subtests/pull/70

0.7.0 (2022-02-13)
------------------

* Fixed support for pytest 7.0, and ``pytest>=7.0`` is now required.

0.6.0 (2022-01-15)
------------------

* ``pytest>=6.0`` is now required.
* Added official support for Python 3.10.
* Dropped support for Python 3.5.
* Users no longer need to configure a warnings filter for the internal
  ``A private pytest class or function was used`` pytest warning (`#52`_).
* **Experimental**: Use ``SUBPASS`` and ``,`` for passed subtests instead of the
  general ``PASSED``, and ``SUBFAIL`` and ``u`` for failed ones instead of
  ``FAILED`` (`#30`_).

.. _#30: https://github.com/pytest-dev/pytest-subtests/pull/30
.. _#52: https://github.com/pytest-dev/pytest-subtests/pull/52

0.5.0 (2021-05-29)
------------------

* Added support for ``pytest.mark.xfail`` (`#40`_).

.. _#40: https://github.com/pytest-dev/pytest-subtests/pull/40

0.4.0 (2020-12-13)
------------------

* Added support for ``--pdb`` (`#22`_).

.. _#22: https://github.com/pytest-dev/pytest-subtests/issues/22

0.3.2 (2020-08-01)
------------------

* Fixed pytest 6.0 support.

0.3.1 (2020-05-20)
------------------

* Fixed pytest 5.4 support.

0.3.0 (2020-01-22)
------------------

* Dropped support for Python 3.4.
* ``subtests`` now correctly captures and displays stdout/stderr (`#18`_).

.. _#18: https://github.com/pytest-dev/pytest-subtests/issues/18

0.2.1 (2019-04-04)
------------------

* Fixed verbose output reporting on Linux (`#7`_).

.. _#7: https://github.com/pytest-dev/pytest-subtests/issues/7

0.2.0 (2019-04-03)
------------------

* Subtests are correctly reported with ``pytest-xdist>=1.28``.

0.1.0 (2019-04-01)
------------------

* First release to PyPI.

===== pytest-subtests-0.11.0/LICENSE =====

The MIT License (MIT)

Copyright (c) 2019 Bruno Oliveira

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

===== pytest-subtests-0.11.0/README.rst =====

===============
pytest-subtests
===============

unittest ``subTest()`` support and ``subtests`` fixture.

.. image:: https://img.shields.io/pypi/v/pytest-subtests.svg
    :target: https://pypi.org/project/pytest-subtests
    :alt: PyPI version

.. image:: https://img.shields.io/conda/vn/conda-forge/pytest-subtests.svg
    :target: https://anaconda.org/conda-forge/pytest-subtests

.. image:: https://img.shields.io/pypi/pyversions/pytest-subtests.svg
    :target: https://pypi.org/project/pytest-subtests
    :alt: Python versions

.. image:: https://github.com/pytest-dev/pytest-subtests/workflows/test/badge.svg
    :target: https://github.com/pytest-dev/pytest-subtests/actions

.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
    :target: https://github.com/ambv/black

----

This `pytest`_ plugin was generated with `Cookiecutter`_ along with `@hackebrot`_'s `cookiecutter-pytest-plugin`_ template.


Features
--------

* Adds support for `TestCase.subTest <https://docs.python.org/3/library/unittest.html#distinguishing-test-iterations-using-subtests>`__.

* New ``subtests`` fixture, providing similar functionality for pure pytest tests.


Installation
------------

You can install ``pytest-subtests`` via `pip`_ from `PyPI`_::

    $ pip install pytest-subtests


Usage
-----

unittest subTest() example
^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: python

    import unittest


    class T(unittest.TestCase):
        def test_foo(self):
            for i in range(5):
                with self.subTest("custom message", i=i):
                    self.assertEqual(i % 2, 0)


    if __name__ == "__main__":
        unittest.main()

**Output**

.. code-block::

    λ pytest .tmp\test-unit-subtest.py
    ======================== test session starts ========================
    ...
    collected 1 item

    .tmp\test-unit-subtest.py FF.                                  [100%]
    ============================= FAILURES ==============================
    _________________ T.test_foo [custom message] (i=1) _________________

    self = <test-unit-subtest.T testMethod=test_foo>

        def test_foo(self):
            for i in range(5):
                with self.subTest('custom message', i=i):
    >               self.assertEqual(i % 2, 0)
    E               AssertionError: 1 != 0

    .tmp\test-unit-subtest.py:9: AssertionError
    _________________ T.test_foo [custom message] (i=3) _________________

    self = <test-unit-subtest.T testMethod=test_foo>

        def test_foo(self):
            for i in range(5):
                with self.subTest('custom message', i=i):
    >               self.assertEqual(i % 2, 0)
    E               AssertionError: 1 != 0

    .tmp\test-unit-subtest.py:9: AssertionError
    ================ 2 failed, 1 passed in 0.07 seconds =================


``subtests`` fixture example
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: python

    def test(subtests):
        for i in range(5):
            with subtests.test(msg="custom message", i=i):
                assert i % 2 == 0

**Output**

.. code-block::

    λ pytest .tmp\test-subtest.py
    ======================== test session starts ========================
    ...
    collected 1 item

    .tmp\test-subtest.py .F.F..                                    [100%]

    ============================= FAILURES ==============================
    ____________________ test [custom message] (i=1) ____________________

    def test(subtests):
        for i in range(5):
            with subtests.test(msg='custom message', i=i):
    >           assert i % 2 == 0
    E           assert (1 % 2) == 0

    .tmp\test-subtest.py:4: AssertionError
    ____________________ test [custom message] (i=3) ____________________

    def test(subtests):
        for i in range(5):
            with subtests.test(msg='custom message', i=i):
    >           assert i % 2 == 0
    E           assert (3 % 2) == 0

    .tmp\test-subtest.py:4: AssertionError
    ================ 2 failed, 1 passed in 0.07 seconds =================


Contributing
------------

Contributions are very welcome. Tests can be run with `tox`_:

.. code-block::

    tox -e py37

License
-------

Distributed under the terms of the `MIT`_ license; "pytest-subtests" is free and open source software.


Issues
------

If you encounter any problems, please `file an issue`_ along with a detailed description.

.. _`Cookiecutter`: https://github.com/audreyr/cookiecutter
.. _`@hackebrot`: https://github.com/hackebrot
.. _`MIT`: http://opensource.org/licenses/MIT
.. _`BSD-3`: http://opensource.org/licenses/BSD-3-Clause
.. _`GNU GPL v3.0`: http://www.gnu.org/licenses/gpl-3.0.txt
.. _`Apache Software License 2.0`: http://www.apache.org/licenses/LICENSE-2.0
.. _`cookiecutter-pytest-plugin`: https://github.com/pytest-dev/cookiecutter-pytest-plugin
.. _`file an issue`: https://github.com/pytest-dev/pytest-subtests/issues
.. _`pytest`: https://github.com/pytest-dev/pytest
.. _`tox`: https://tox.readthedocs.io/en/latest/
.. _`pip`: https://pypi.org/project/pip/
.. _`PyPI`: https://pypi.org/project

===== pytest-subtests-0.11.0/RELEASING.rst =====

=========================
Releasing pytest-subtests
=========================

This document describes the steps to make a new ``pytest-subtests`` release.

Version
-------

``main`` should always be green and a potential release candidate.

``pytest-subtests`` follows semantic versioning, so given that the current version
is ``X.Y.Z``, to find the next version number one needs to look at the
``CHANGELOG.rst`` file:

- If there is any new feature, then we must make a new **minor** release: the next
  release will be ``X.Y+1.0``.

- Otherwise it is just a **bug fix** release: ``X.Y.Z+1``.

Steps
-----

To publish a new release ``X.Y.Z``, the steps are as follows:

#. Create a new branch named ``release-X.Y.Z`` from the latest ``main``.

#. Update the ``CHANGELOG.rst`` file with the new release information.

#. Commit and push the branch to ``upstream`` and open a PR.

#. Once the PR is **green** and **approved**, start the ``deploy`` workflow manually
   from the branch ``release-VERSION``, passing ``VERSION`` as parameter (for example
   via the GitHub CLI, as sketched below).

#. Merge the release PR to ``main``.
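For illustration only, here is one way the manual dispatch above might be started from
the command line with the GitHub CLI. This is a sketch, not part of the official
process: it assumes an authenticated ``gh`` session and uses the hypothetical version
``1.2.3`` as a placeholder::

    # Sketch: dispatch the deploy workflow from the release branch.
    # "version" is the workflow_dispatch input defined in deploy.yml;
    # 1.2.3 is a placeholder for the actual release version.
    gh workflow run deploy.yml --ref release-1.2.3 -f version=1.2.3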
===== pytest-subtests-0.11.0/pyproject.toml =====

[build-system]
requires = [
    "setuptools",
    "setuptools-scm[toml]>=6.2.3",
]
build-backend = "setuptools.build_meta"

[tool.setuptools_scm]

===== pytest-subtests-0.11.0/pytest.ini =====

[pytest]
addopts = -ra
testpaths = tests

===== pytest-subtests-0.11.0/setup.cfg =====

[metadata]
name = pytest-subtests
description = unittest subTest() support and subtests fixture
long_description = file: README.rst
long_description_content_type = text/x-rst
url = https://github.com/pytest-dev/pytest-subtests
author = Bruno Oliveira
license = MIT
license_file = LICENSE
classifiers =
    Development Status :: 4 - Beta
    Framework :: Pytest
    Intended Audience :: Developers
    Topic :: Software Development :: Testing
    Programming Language :: Python
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.7
    Programming Language :: Python :: 3.8
    Programming Language :: Python :: 3.9
    Programming Language :: Python :: 3.10
    Programming Language :: Python :: 3.11
    Programming Language :: Python :: Implementation :: CPython
    Operating System :: OS Independent
    License :: OSI Approved :: MIT License
keywords = test, unittest, pytest

[options]
py_modules = pytest_subtests
install_requires =
    pytest>=7.0
    attrs>=19.2.0
python_requires = >=3.7
package_dir =
    =src
setup_requires =
    setuptools
    setuptools-scm>=6.0

[options.entry_points]
pytest11 =
    subtests = pytest_subtests

===== pytest-subtests-0.11.0/src/pytest_subtests.py =====

import time
from contextlib import contextmanager
from contextlib import nullcontext

import attr
import pytest
from _pytest._code import ExceptionInfo
from _pytest.capture import CaptureFixture
from _pytest.capture import FDCapture
from _pytest.capture import SysCapture
from _pytest.logging import LogCaptureHandler, catching_logs
from _pytest.outcomes import OutcomeException
from _pytest.reports import TestReport
from _pytest.runner import CallInfo
from _pytest.runner import check_interactive_exception
from _pytest.unittest import TestCaseFunction


def pytest_addoption(parser):
    group = parser.getgroup("subtests")
    group.addoption(
        "--no-subtests-shortletter",
        action="store_true",
        dest="no_subtests_shortletter",
        default=False,
        help="Disables subtest output 'dots' in non-verbose mode (EXPERIMENTAL)",
    )


@attr.s
class SubTestContext:
    msg = attr.ib()
    kwargs = attr.ib()


@attr.s(init=False)
class SubTestReport(TestReport):
    context = attr.ib()

    @property
    def head_line(self):
        _, _, domain = self.location
        return f"{domain} {self.sub_test_description()}"

    def sub_test_description(self):
        parts = []
        if isinstance(self.context.msg, str):
            parts.append(f"[{self.context.msg}]")
        if self.context.kwargs:
            params_desc = ", ".join(
                f"{k}={v!r}" for (k, v) in sorted(self.context.kwargs.items())
            )
            parts.append(f"({params_desc})")
        return " ".join(parts) or "()"

    def _to_json(self):
        data = super()._to_json()
        del data["context"]
        data["_report_type"] = "SubTestReport"
        data["_subtest.context"] = attr.asdict(self.context)
        return data

    @classmethod
    def _from_json(cls, reportdict):
        report = super()._from_json(reportdict)
        context_data = reportdict["_subtest.context"]
        report.context = SubTestContext(
            msg=context_data["msg"], kwargs=context_data["kwargs"]
        )
        return report

    @classmethod
    def _from_test_report(cls, test_report):
        return super()._from_json(test_report._to_json())


def _addSubTest(self, test_case, test, exc_info):
    if exc_info is not None:
        msg = test._message if isinstance(test._message, str) else None
        call_info = make_call_info(
            ExceptionInfo(exc_info, _ispytest=True),
            start=0,
            stop=0,
            duration=0,
            when="call",
        )
        report = self.ihook.pytest_runtest_makereport(item=self, call=call_info)
        sub_report = SubTestReport._from_test_report(report)
        sub_report.context = SubTestContext(msg, dict(test.params))
        self.ihook.pytest_runtest_logreport(report=sub_report)
        if check_interactive_exception(call_info, sub_report):
            self.ihook.pytest_exception_interact(
                node=self, call=call_info, report=sub_report
            )


def pytest_configure(config):
    TestCaseFunction.addSubTest = _addSubTest
    TestCaseFunction.failfast = False

    # Hack (#86): the terminal does not know about the "subtests"
    # status, so it will by default turn the output to yellow.
    # This forcibly adds the new 'subtests' status.
    import _pytest.terminal

    new_types = tuple(
        f"subtests {outcome}" for outcome in ("passed", "failed", "skipped")
    )
    # We need to check if we are not re-adding because we run our own tests
    # with pytester in-process mode, so this will be called multiple times.
    if new_types[0] not in _pytest.terminal.KNOWN_TYPES:
        _pytest.terminal.KNOWN_TYPES = _pytest.terminal.KNOWN_TYPES + new_types

    _pytest.terminal._color_for_type.update(
        {
            f"subtests {outcome}": _pytest.terminal._color_for_type[outcome]
            for outcome in ("passed", "failed", "skipped")
            if outcome in _pytest.terminal._color_for_type
        }
    )


def pytest_unconfigure():
    if hasattr(TestCaseFunction, "addSubTest"):
        del TestCaseFunction.addSubTest
    if hasattr(TestCaseFunction, "failfast"):
        del TestCaseFunction.failfast


@pytest.fixture
def subtests(request):
    capmam = request.node.config.pluginmanager.get_plugin("capturemanager")
    if capmam is not None:
        suspend_capture_ctx = capmam.global_and_fixture_disabled
    else:
        suspend_capture_ctx = nullcontext
    yield SubTests(request.node.ihook, suspend_capture_ctx, request)


@attr.s
class SubTests:
    ihook = attr.ib()
    suspend_capture_ctx = attr.ib()
    request = attr.ib()

    @property
    def item(self):
        return self.request.node

    @contextmanager
    def _capturing_output(self):
        option = self.request.config.getoption("capture", None)

        # capsys or capfd are active, subtest should not capture
        capman = self.request.config.pluginmanager.getplugin("capturemanager")
        capture_fixture_active = getattr(capman, "_capture_fixture", None)

        if option == "sys" and not capture_fixture_active:
            with ignore_pytest_private_warning():
                fixture = CaptureFixture(SysCapture, self.request)
        elif option == "fd" and not capture_fixture_active:
            with ignore_pytest_private_warning():
                fixture = CaptureFixture(FDCapture, self.request)
        else:
            fixture = None

        if fixture is not None:
            fixture._start()

        captured = Captured()
        try:
            yield captured
        finally:
            if fixture is not None:
                out, err = fixture.readouterr()
                fixture.close()
                captured.out = out
                captured.err = err

    @contextmanager
    def _capturing_logs(self):
        logging_plugin = self.request.config.pluginmanager.getplugin("logging-plugin")
        if logging_plugin is None:
            yield NullCapturedLogs()
        else:
            handler = LogCaptureHandler()
            handler.setFormatter(logging_plugin.formatter)

            captured_logs = CapturedLogs(handler)
            with catching_logs(handler):
                yield captured_logs

    @contextmanager
    def test(self, msg=None, **kwargs):
        start = time.time()
        precise_start = time.perf_counter()
        exc_info = None

        with self._capturing_output() as captured_output, self._capturing_logs() as captured_logs:
            try:
                yield
            except (Exception, OutcomeException):
                exc_info = ExceptionInfo.from_current()

        precise_stop = time.perf_counter()
        duration = precise_stop - precise_start
        stop = time.time()

        call_info = make_call_info(
            exc_info, start=start, stop=stop, duration=duration, when="call"
        )
        report = self.ihook.pytest_runtest_makereport(item=self.item, call=call_info)
        sub_report = SubTestReport._from_test_report(report)
        sub_report.context = SubTestContext(msg, kwargs.copy())

        captured_output.update_report(sub_report)
        captured_logs.update_report(sub_report)

        with self.suspend_capture_ctx():
            self.ihook.pytest_runtest_logreport(report=sub_report)

        if check_interactive_exception(call_info, sub_report):
            self.ihook.pytest_exception_interact(
                node=self.item, call=call_info, report=sub_report
            )


def make_call_info(exc_info, *, start, stop, duration, when):
    return CallInfo(
        None,
        exc_info,
        start=start,
        stop=stop,
        duration=duration,
        when=when,
        _ispytest=True,
    )


@contextmanager
def ignore_pytest_private_warning():
    import warnings

    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            "A private pytest class or function was used.",
            category=pytest.PytestDeprecationWarning,
        )
        yield


@attr.s
class Captured:
    out = attr.ib(default="", type=str)
    err = attr.ib(default="", type=str)

    def update_report(self, report):
        if self.out:
            report.sections.append(("Captured stdout call", self.out))
        if self.err:
            report.sections.append(("Captured stderr call", self.err))


class CapturedLogs:
    def __init__(self, handler):
        self._handler = handler

    def update_report(self, report):
        report.sections.append(("Captured log call", self._handler.stream.getvalue()))


class NullCapturedLogs:
    def update_report(self, report):
        pass


def pytest_report_to_serializable(report):
    if isinstance(report, SubTestReport):
        return report._to_json()


def pytest_report_from_serializable(data):
    if data.get("_report_type") == "SubTestReport":
        return SubTestReport._from_json(data)


@pytest.hookimpl(tryfirst=True)
def pytest_report_teststatus(report, config):
    if report.when != "call" or not isinstance(report, SubTestReport):
        return

    if hasattr(report, "wasxfail"):
        return None

    outcome = report.outcome
    description = report.sub_test_description()
    if report.passed:
        short = "" if config.option.no_subtests_shortletter else ","
        return f"subtests {outcome}", short, f"{description} SUBPASS"
    elif report.skipped:
        short = "" if config.option.no_subtests_shortletter else "-"
        return outcome, short, f"{description} SUBSKIP"
    elif outcome == "failed":
        short = "" if config.option.no_subtests_shortletter else "u"
        return outcome, short, f"{description} SUBFAIL"

===== pytest-subtests-0.11.0/tests/conftest.py =====

pytest_plugins = "pytester"

===== pytest-subtests-0.11.0/tests/test_subtests.py =====

import sys

import pytest

IS_PY311 = sys.version_info[:2] >= (3, 11)


@pytest.mark.parametrize("mode", ["normal", "xdist"])
class TestFixture:
    """
    Tests for ``subtests`` fixture.
    """
    @pytest.fixture
    def simple_script(self, testdir):
        testdir.makepyfile(
            """
            def test_foo(subtests):
                for i in range(5):
                    with subtests.test(msg="custom", i=i):
                        assert i % 2 == 0
            """
        )

    def test_simple_terminal_normal(self, simple_script, testdir, mode):
        if mode == "normal":
            result = testdir.runpytest()
            expected_lines = ["collected 1 item"]
        else:
            pytest.importorskip("xdist")
            result = testdir.runpytest("-n1")
            expected_lines = ["1 worker [1 item]"]

        expected_lines += [
            "* test_foo [[]custom[]] (i=1) *",
            "* test_foo [[]custom[]] (i=3) *",
            "* 2 failed, 1 passed, 3 subtests passed in *",
        ]
        result.stdout.fnmatch_lines(expected_lines)

    def test_simple_terminal_verbose(self, simple_script, testdir, mode):
        if mode == "normal":
            result = testdir.runpytest("-v")
            expected_lines = [
                "*collected 1 item",
                "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=0) SUBPASS *100%*",
                "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=1) SUBFAIL *100%*",
                "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=2) SUBPASS *100%*",
                "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=3) SUBFAIL *100%*",
                "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=4) SUBPASS *100%*",
                "test_simple_terminal_verbose.py::test_foo PASSED *100%*",
            ]
        else:
            pytest.importorskip("xdist")
            result = testdir.runpytest("-n1", "-v")
            expected_lines = [
                "1 worker [1 item]",
                "*gw0*100%* test_simple_terminal_verbose.py::test_foo*",
                "*gw0*100%* test_simple_terminal_verbose.py::test_foo*",
                "*gw0*100%* test_simple_terminal_verbose.py::test_foo*",
                "*gw0*100%* test_simple_terminal_verbose.py::test_foo*",
                "*gw0*100%* test_simple_terminal_verbose.py::test_foo*",
                "*gw0*100%* test_simple_terminal_verbose.py::test_foo*",
            ]

        expected_lines += [
            "* test_foo [[]custom[]] (i=1) *",
            "* test_foo [[]custom[]] (i=3) *",
            "* 2 failed, 1 passed, 3 subtests passed in *",
        ]
        result.stdout.fnmatch_lines(expected_lines)

    def test_skip(self, testdir, mode):
        testdir.makepyfile(
            """
            import pytest

            def test_foo(subtests):
                for i in range(5):
                    with subtests.test(msg="custom", i=i):
                        if i % 2 == 0:
                            pytest.skip('even number')
            """
        )
        if mode == "normal":
            result = testdir.runpytest()
            expected_lines = ["collected 1 item"]
        else:
            pytest.importorskip("xdist")
            result = testdir.runpytest("-n1")
            expected_lines = ["1 worker [1 item]"]

        expected_lines += ["* 1 passed, 3 skipped, 2 subtests passed in *"]
        result.stdout.fnmatch_lines(expected_lines)

    def test_xfail(self, testdir, mode):
        testdir.makepyfile(
            """
            import pytest

            def test_foo(subtests):
                for i in range(5):
                    with subtests.test(msg="custom", i=i):
                        if i % 2 == 0:
                            pytest.xfail('even number')
            """
        )
        if mode == "normal":
            result = testdir.runpytest()
            expected_lines = ["collected 1 item"]
        else:
            pytest.importorskip("xdist")
            result = testdir.runpytest("-n1")
            expected_lines = ["1 worker [1 item]"]

        expected_lines += ["* 1 passed, 3 xfailed, 2 subtests passed in *"]
        result.stdout.fnmatch_lines(expected_lines)


class TestSubTest:
    """
    Test Test.subTest functionality.
    """

    @pytest.fixture
    def simple_script(self, testdir):
        return testdir.makepyfile(
            """
            from unittest import TestCase, main

            class T(TestCase):
                def test_foo(self):
                    for i in range(5):
                        with self.subTest(msg="custom", i=i):
                            self.assertEqual(i % 2, 0)

            if __name__ == '__main__':
                main()
            """
        )

    @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"])
    def test_simple_terminal_normal(self, simple_script, testdir, runner):
        suffix = ".test_foo" if IS_PY311 else ""
        if runner == "unittest":
            result = testdir.run(sys.executable, simple_script)
            result.stderr.fnmatch_lines(
                [
                    f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)",
                    "AssertionError: 1 != 0",
                    f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)",
                    "AssertionError: 1 != 0",
                    "Ran 1 test in *",
                    "FAILED (failures=2)",
                ]
            )
        else:
            if runner == "pytest-normal":
                result = testdir.runpytest(simple_script)
                expected_lines = ["collected 1 item"]
            else:
                pytest.importorskip("xdist")
                result = testdir.runpytest(simple_script, "-n1")
                expected_lines = ["1 worker [1 item]"]
            result.stdout.fnmatch_lines(
                expected_lines
                + [
                    "* T.test_foo [[]custom[]] (i=1) *",
                    "E * AssertionError: 1 != 0",
                    "* T.test_foo [[]custom[]] (i=3) *",
                    "E * AssertionError: 1 != 0",
                    "* 2 failed, 1 passed in *",
                ]
            )

    @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"])
    def test_simple_terminal_verbose(self, simple_script, testdir, runner):
        suffix = ".test_foo" if IS_PY311 else ""
        if runner == "unittest":
            result = testdir.run(sys.executable, simple_script, "-v")
            result.stderr.fnmatch_lines(
                [
                    f"test_foo (__main__.T{suffix}) ... ",
                    f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)",
                    "AssertionError: 1 != 0",
                    f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)",
                    "AssertionError: 1 != 0",
                    "Ran 1 test in *",
                    "FAILED (failures=2)",
                ]
            )
        else:
            if runner == "pytest-normal":
                result = testdir.runpytest(simple_script, "-v")
                expected_lines = [
                    "*collected 1 item",
                    "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=1) SUBFAIL *100%*",
                    "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=3) SUBFAIL *100%*",
                    "test_simple_terminal_verbose.py::T::test_foo PASSED *100%*",
                ]
            else:
                pytest.importorskip("xdist")
                result = testdir.runpytest(simple_script, "-n1", "-v")
                expected_lines = [
                    "1 worker [1 item]",
                    "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*",
                    "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*",
                    "*gw0*100%* PASSED test_simple_terminal_verbose.py::T::test_foo*",
                ]
            result.stdout.fnmatch_lines(
                expected_lines
                + [
                    "* T.test_foo [[]custom[]] (i=1) *",
                    "E * AssertionError: 1 != 0",
                    "* T.test_foo [[]custom[]] (i=3) *",
                    "E * AssertionError: 1 != 0",
                    "* 2 failed, 1 passed in *",
                ]
            )

    @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"])
    def test_skip(self, testdir, runner):
        p = testdir.makepyfile(
            """
            from unittest import TestCase, main

            class T(TestCase):
                def test_foo(self):
                    for i in range(5):
                        with self.subTest(msg="custom", i=i):
                            if i % 2 == 0:
                                self.skipTest('even number')

            if __name__ == '__main__':
                main()
            """
        )
        if runner == "unittest":
            result = testdir.runpython(p)
            result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (skipped=3)"])
        else:
            pytest.xfail("Not producing the expected results (#5)")
            result = testdir.runpytest(p)
            result.stdout.fnmatch_lines(
                ["collected 1 item", "* 3 skipped, 1 passed in *"]
            )

    @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"])
    @pytest.mark.xfail(reason="Not producing the expected results (#5)")
    def test_xfail(self, testdir, runner):
        p = testdir.makepyfile(
            """
            import pytest
            from unittest import expectedFailure, TestCase, main

            class T(TestCase):
                @expectedFailure
                def test_foo(self):
                    for i in range(5):
                        with self.subTest(msg="custom", i=i):
                            if i % 2 == 0:
                                raise pytest.xfail('even number')

            if __name__ == '__main__':
                main()
            """
        )
        if runner == "unittest":
            result = testdir.runpython(p)
            result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (expected failures=3)"])
        else:
            result = testdir.runpytest(p)
            result.stdout.fnmatch_lines(
                ["collected 1 item", "* 3 xfailed, 1 passed in *"]
            )


class TestCapture:
    def create_file(self, testdir):
        testdir.makepyfile(
            """
            import sys

            def test(subtests):
                print()
                print('start test')

                with subtests.test(i='A'):
                    print("hello stdout A")
                    print("hello stderr A", file=sys.stderr)
                    assert 0

                with subtests.test(i='B'):
                    print("hello stdout B")
                    print("hello stderr B", file=sys.stderr)
                    assert 0

                print('end test')
                assert 0
            """
        )

    def test_capturing(self, testdir):
        self.create_file(testdir)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*__ test (i='A') __*",
                "*Captured stdout call*",
                "hello stdout A",
                "*Captured stderr call*",
                "hello stderr A",
                "*__ test (i='B') __*",
                "*Captured stdout call*",
                "hello stdout B",
                "*Captured stderr call*",
                "hello stderr B",
                "*__ test __*",
                "*Captured stdout call*",
                "start test",
                "end test",
            ]
        )

    def test_no_capture(self, testdir):
        self.create_file(testdir)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines(
            [
                "start test",
                "hello stdout A",
                "uhello stdout B",
                "uend test",
                "*__ test (i='A') __*",
                "*__ test (i='B') __*",
                "*__ test __*",
            ]
        )
        result.stderr.fnmatch_lines(["hello stderr A", "hello stderr B"])

    @pytest.mark.parametrize("fixture", ["capsys", "capfd"])
    def test_capture_with_fixture(self, testdir, fixture):
        testdir.makepyfile(
            r"""
            import sys

            def test(subtests, {fixture}):
                print('start test')

                with subtests.test(i='A'):
                    print("hello stdout A")
                    print("hello stderr A", file=sys.stderr)

                out, err = {fixture}.readouterr()
                assert out == 'start test\nhello stdout A\n'
                assert err == 'hello stderr A\n'
            """.format(
                fixture=fixture
            )
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*1 passed*",
            ]
        )


class TestLogging:
    def create_file(self, testdir):
        testdir.makepyfile(
            """
            import logging

            def test_foo(subtests):
                logging.info("before")

                with subtests.test("sub1"):
                    print("sub1 stdout")
                    logging.info("sub1 logging")

                with subtests.test("sub2"):
                    print("sub2 stdout")
                    logging.info("sub2 logging")
                    assert False
            """
        )

    def test_capturing(self, testdir):
        self.create_file(testdir)
        result = testdir.runpytest("--log-level=INFO")
        result.stdout.fnmatch_lines(
            [
                "*___ test_foo [[]sub2[]] __*",
                "*-- Captured stdout call --*",
                "sub2 stdout",
                "*-- Captured log call ---*",
                "INFO root:test_capturing.py:12 sub2 logging",
                "*== short test summary info ==*",
            ]
        )

    def test_caplog(self, testdir):
        testdir.makepyfile(
            """
            import logging

            def test(subtests, caplog):
                caplog.set_level(logging.INFO)
                logging.info("start test")

                with subtests.test("sub1"):
                    logging.info("inside %s", "subtest1")

                assert len(caplog.records) == 2
                assert caplog.records[0].getMessage() == "start test"
                assert caplog.records[1].getMessage() == "inside subtest1"
            """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*1 passed*",
            ]
        )

    def test_no_logging(self, testdir):
        testdir.makepyfile(
            """
            import logging

            def test(subtests):
                logging.info("start log line")

                with subtests.test("sub passing"):
                    logging.info("inside %s", "passing log line")

                with subtests.test("sub failing"):
                    logging.info("inside %s", "failing log line")
                    assert False

                logging.info("end log line")
            """
        )
        result = testdir.runpytest("-p no:logging")
        result.stdout.fnmatch_lines(
            [
                "*1 passed*",
            ]
        )
        result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*")


class TestDebugging:
    """Check --pdb support for subtests fixture and TestCase.subTest."""

    class _FakePdb:
        """
        Fake debugger class implementation that tracks which methods were called on it.
        """

        quitting = False
        calls = []

        def __init__(self, *args, **kwargs):
            self.calls.append("init")

        def reset(self):
            self.calls.append("reset")

        def interaction(self, *args):
            self.calls.append("interaction")

    @pytest.fixture(autouse=True)
    def cleanup_calls(self):
        self._FakePdb.calls.clear()

    def test_pdb_fixture(self, testdir, monkeypatch):
        testdir.makepyfile(
            """
            def test(subtests):
                with subtests.test():
                    assert 0
            """
        )
        self.runpytest_and_check_pdb(testdir, monkeypatch)

    def test_pdb_unittest(self, testdir, monkeypatch):
        testdir.makepyfile(
            """
            from unittest import TestCase

            class Test(TestCase):
                def test(self):
                    with self.subTest():
                        assert 0
            """
        )
        self.runpytest_and_check_pdb(testdir, monkeypatch)

    def runpytest_and_check_pdb(self, testdir, monkeypatch):
        # Install the fake pdb implementation in pytest_subtests so we can reference
        # it in the command line (any module would do).
        import pytest_subtests

        monkeypatch.setattr(pytest_subtests, "_CustomPdb", self._FakePdb, raising=False)
        result = testdir.runpytest("--pdb", "--pdbcls=pytest_subtests:_CustomPdb")

        # Ensure pytest entered in debugging mode when encountering the failing
        # assert.
        result.stdout.fnmatch_lines("*entering PDB*")
        assert self._FakePdb.calls == ["init", "reset", "interaction"]

===== pytest-subtests-0.11.0/tox.ini =====

[tox]
envlist = py37,py38,py39,py310,py311

[testenv]
passenv =
    USER
    USERNAME
    TRAVIS
    PYTEST_ADDOPTS
deps =
    pytest-xdist>=3.3.0
commands =
    pytest {posargs:tests}