pytest-subtests-0.6.0/.github/workflows/main.yml

name: build

on: [push, pull_request]

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        name: [
          "windows-py36",
          "windows-py37",
          "windows-py38",
          "windows-py39",
          "windows-py310",
          "ubuntu-py36",
          "ubuntu-py37",
          "ubuntu-py38",
          "ubuntu-py39",
          "ubuntu-py310",
        ]

        include:
          - name: "windows-py36"
            python: "3.6"
            os: windows-latest
            tox_env: "py36"
          - name: "windows-py37"
            python: "3.7"
            os: windows-latest
            tox_env: "py37"
          - name: "windows-py38"
            python: "3.8"
            os: windows-latest
            tox_env: "py38"
          - name: "windows-py39"
            python: "3.9"
            os: windows-latest
            tox_env: "py39"
          - name: "windows-py310"
            python: "3.10"
            os: windows-latest
            tox_env: "py310"
          - name: "ubuntu-py36"
            python: "3.6"
            os: ubuntu-latest
            tox_env: "py36"
          - name: "ubuntu-py37"
            python: "3.7"
            os: ubuntu-latest
            tox_env: "py37"
          - name: "ubuntu-py38"
            python: "3.8"
            os: ubuntu-latest
            tox_env: "py38"
          - name: "ubuntu-py39"
            python: "3.9"
            os: ubuntu-latest
            tox_env: "py39"
          - name: "ubuntu-py310"
            python: "3.10"
            os: ubuntu-latest
            tox_env: "py310"

    steps:
      - uses: actions/checkout@v1
      - name: Set up Python
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python }}
      - name: Install tox
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade tox setuptools
      - name: Test
        run: |
          tox -e ${{ matrix.tox_env }}

  deploy:
    if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags')
    runs-on: ubuntu-latest
    needs: [build]
    steps:
      - uses: actions/checkout@v1
      - name: Set up Python
        uses: actions/setup-python@v1
        with:
          python-version: "3.7"
      - name: Install wheel
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade wheel setuptools
      - name: Build package
        run: |
          python setup.py sdist bdist_wheel
      - name: Publish package to PyPI
        uses: pypa/gh-action-pypi-publish@master
        with:
          user: __token__
          password: ${{ secrets.pypi_token }}

pytest-subtests-0.6.0/.gitignore

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

pytest-subtests-0.6.0/.pre-commit-config.yaml

repos:
  - repo: https://github.com/psf/black
    rev: 21.12b0
    hooks:
      - id: black
        args: [--safe, --quiet]
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.1.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: debug-statements
  - repo: https://github.com/asottile/reorder_python_imports
    rev: v2.6.0
    hooks:
      - id: reorder-python-imports
  - repo: local
    hooks:
      - id: rst
        name: rst
        entry: rst-lint --encoding utf-8
        files: ^(CHANGELOG.rst|RELEASING.rst|README.rst)$
        language: python
        additional_dependencies: [pygments, restructuredtext_lint]

pytest-subtests-0.6.0/CHANGELOG.rst

CHANGELOG
=========

0.6.0 (2022-01-15)
------------------

* ``pytest>=6.0`` is now required.

* Added official support for Python 3.10.

* Dropped support for Python 3.5.

* Users no longer need to configure a warnings filter for the internal
  ``A private pytest class or function was used`` pytest warning (`#52`_).

* **Experimental**: passed subtests are now reported as ``SUBPASS`` (short form ``,``)
  instead of the generic ``PASSED``, and failed subtests as ``SUBFAIL`` (short form ``u``)
  instead of ``FAILED`` (`#30`_).

.. _#30: https://github.com/pytest-dev/pytest-subtests/pull/30
.. _#52: https://github.com/pytest-dev/pytest-subtests/pull/52

0.5.0 (2021-05-29)
------------------

* Added support for ``pytest.mark.xfail`` (`#40`_).

.. _#40: https://github.com/pytest-dev/pytest-subtests/pull/40

0.4.0 (2020-12-13)
------------------

* Added support for ``--pdb`` (`#22`_).

.. _#22: https://github.com/pytest-dev/pytest-subtests/issues/22

0.3.2 (2020-08-01)
------------------

* Fixed pytest 6.0 support.

0.3.1 (2020-05-20)
------------------

* Fixed pytest 5.4 support.

0.3.0 (2020-01-22)
------------------

* Dropped support for Python 3.4.

* ``subtests`` now correctly captures and displays stdout/stderr (`#18`_).

.. _#18: https://github.com/pytest-dev/pytest-subtests/issues/18

0.2.1 (2019-04-04)
------------------

* Fixed verbose output reporting on Linux (`#7`_).

.. _#7: https://github.com/pytest-dev/pytest-subtests/issues/7

0.2.0 (2019-04-03)
------------------

* Subtests are correctly reported with ``pytest-xdist>=1.28``.

0.1.0 (2019-04-01)
------------------

* First release to PyPI.
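The snippet below is an illustrative sketch of the behaviour described in the 0.5.0 and 0.6.0
entries above, adapted from this project's own test suite (``tests/test_subtests.py``); the
test name is made up for the example. Calling ``pytest.xfail()`` inside a ``subtests.test()``
block marks only that subtest as xfailed.

.. code-block:: python

    import pytest


    def test_xfail_in_subtests(subtests):
        # Adapted from TestFixture.test_xfail: even values xfail, odd values pass.
        for i in range(5):
            with subtests.test(msg="custom", i=i):
                if i % 2 == 0:
                    pytest.xfail("even number")
        # Per the plugin's own test expectations, this reports "1 passed, 3 xfailed".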
pytest-subtests-0.6.0/LICENSE

The MIT License (MIT)

Copyright (c) 2019 Bruno Oliveira

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

pytest-subtests-0.6.0/README.rst

===============
pytest-subtests
===============

unittest ``subTest()`` support and ``subtests`` fixture.

.. image:: https://img.shields.io/pypi/v/pytest-subtests.svg
    :target: https://pypi.org/project/pytest-subtests
    :alt: PyPI version

.. image:: https://img.shields.io/conda/vn/conda-forge/pytest-subtests.svg
    :target: https://anaconda.org/conda-forge/pytest-subtests

.. image:: https://img.shields.io/pypi/pyversions/pytest-subtests.svg
    :target: https://pypi.org/project/pytest-subtests
    :alt: Python versions

.. image:: https://github.com/pytest-dev/pytest-subtests/workflows/build/badge.svg
    :target: https://github.com/pytest-dev/pytest-subtests/actions

.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
    :target: https://github.com/ambv/black

----

This `pytest`_ plugin was generated with `Cookiecutter`_ along with `@hackebrot`_'s `cookiecutter-pytest-plugin`_ template.


Features
--------

* Adds support for `TestCase.subTest <https://docs.python.org/3/library/unittest.html#distinguishing-test-iterations-using-subtests>`__.
* New ``subtests`` fixture, providing similar functionality for pure pytest tests.


Installation
------------

You can install ``pytest-subtests`` via `pip`_ from `PyPI`_::

    $ pip install pytest-subtests


Usage
-----

unittest subTest() example
^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: python

    import unittest


    class T(unittest.TestCase):
        def test_foo(self):
            for i in range(5):
                with self.subTest("custom message", i=i):
                    self.assertEqual(i % 2, 0)


    if __name__ == "__main__":
        unittest.main()

**Output**

.. code-block::

    λ pytest .tmp\test-unit-subtest.py
    ======================== test session starts ========================
    ...
    collected 1 item

    .tmp\test-unit-subtest.py FF.                                  [100%]
    ============================= FAILURES ==============================
    _________________ T.test_foo [custom message] (i=1) _________________

    self =

        def test_foo(self):
            for i in range(5):
                with self.subTest('custom message', i=i):
    >               self.assertEqual(i % 2, 0)
    E               AssertionError: 1 != 0

    .tmp\test-unit-subtest.py:9: AssertionError
    _________________ T.test_foo [custom message] (i=3) _________________

    self =

        def test_foo(self):
            for i in range(5):
                with self.subTest('custom message', i=i):
    >               self.assertEqual(i % 2, 0)
    E               AssertionError: 1 != 0

    .tmp\test-unit-subtest.py:9: AssertionError
    ================ 2 failed, 1 passed in 0.07 seconds =================


``subtests`` fixture example
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: python

    def test(subtests):
        for i in range(5):
            with subtests.test(msg="custom message", i=i):
                assert i % 2 == 0

**Output**

.. code-block::

    λ pytest .tmp\test-subtest.py
    ======================== test session starts ========================
    ...
    collected 1 item

    .tmp\test-subtest.py .F.F..                                    [100%]

    ============================= FAILURES ==============================
    ____________________ test [custom message] (i=1) ____________________

    def test(subtests):
        for i in range(5):
            with subtests.test(msg='custom message', i=i):
    >           assert i % 2 == 0
    E           assert (1 % 2) == 0

    .tmp\test-subtest.py:4: AssertionError
    ____________________ test [custom message] (i=3) ____________________

    def test(subtests):
        for i in range(5):
            with subtests.test(msg='custom message', i=i):
    >           assert i % 2 == 0
    E           assert (3 % 2) == 0

    .tmp\test-subtest.py:4: AssertionError
    ================ 2 failed, 1 passed in 0.07 seconds =================


Contributing
------------

Contributions are very welcome. Tests can be run with `tox`_:

.. code-block::

    tox -e py37


License
-------

Distributed under the terms of the `MIT`_ license, "pytest-subtests" is free and open source software.


Issues
------

If you encounter any problems, please `file an issue`_ along with a detailed description.

.. _`Cookiecutter`: https://github.com/audreyr/cookiecutter
.. _`@hackebrot`: https://github.com/hackebrot
.. _`MIT`: http://opensource.org/licenses/MIT
.. _`BSD-3`: http://opensource.org/licenses/BSD-3-Clause
.. _`GNU GPL v3.0`: http://www.gnu.org/licenses/gpl-3.0.txt
.. _`Apache Software License 2.0`: http://www.apache.org/licenses/LICENSE-2.0
.. _`cookiecutter-pytest-plugin`: https://github.com/pytest-dev/cookiecutter-pytest-plugin
.. _`file an issue`: https://github.com/pytest-dev/pytest-subtests/issues
.. _`pytest`: https://github.com/pytest-dev/pytest
.. _`tox`: https://tox.readthedocs.io/en/latest/
.. _`pip`: https://pypi.org/project/pip/
.. _`PyPI`: https://pypi.org/project

pytest-subtests-0.6.0/RELEASING.rst

=========================
Releasing pytest-subtests
=========================

This document describes the steps to make a new ``pytest-subtests`` release.

Version
-------

``master`` should always be green and is a potential release candidate.

``pytest-subtests`` follows semantic versioning, so given that the current version is ``X.Y.Z``,
the next version number is determined by looking at the ``CHANGELOG.rst`` file:

- If there is any new feature, we must make a new **minor** release: the next release will be ``X.Y+1.0``.
- Otherwise it is just a **bug fix** release: ``X.Y.Z+1``.

Steps
-----

To publish a new release ``X.Y.Z``, the steps are as follows:

#. Create a new branch named ``release-X.Y.Z`` from the latest ``master``.

#.
Update the ``CHANGELOG.rst`` file with the new release information. #. Commit and push the branch for review. #. Once PR is **green** and **approved**, create and push a tag:: $ export VERSION=X.Y.Z $ git tag v$VERSION release-$VERSION $ git push git@github.com:pytest-dev/pytest-subtests.git v$VERSION That will build the package and publish it on ``PyPI`` automatically. pytest-subtests-0.6.0/pytest_subtests.py000066400000000000000000000165101417055345400205700ustar00rootroot00000000000000import sys import time from contextlib import contextmanager import attr import pytest from _pytest._code import ExceptionInfo from _pytest.capture import CaptureFixture from _pytest.capture import FDCapture from _pytest.capture import SysCapture from _pytest.outcomes import OutcomeException from _pytest.reports import TestReport from _pytest.runner import CallInfo from _pytest.runner import check_interactive_exception from _pytest.unittest import TestCaseFunction if sys.version_info[:2] < (3, 7): @contextmanager def nullcontext(): yield else: from contextlib import nullcontext @attr.s class SubTestContext: msg = attr.ib() kwargs = attr.ib() @attr.s(init=False) class SubTestReport(TestReport): context = attr.ib() @property def count_towards_summary(self): return not self.passed @property def head_line(self): _, _, domain = self.location return f"{domain} {self.sub_test_description()}" def sub_test_description(self): parts = [] if isinstance(self.context.msg, str): parts.append(f"[{self.context.msg}]") if self.context.kwargs: params_desc = ", ".join( f"{k}={v!r}" for (k, v) in sorted(self.context.kwargs.items()) ) parts.append(f"({params_desc})") return " ".join(parts) or "()" def _to_json(self): data = super()._to_json() del data["context"] data["_report_type"] = "SubTestReport" data["_subtest.context"] = attr.asdict(self.context) return data @classmethod def _from_json(cls, reportdict): report = super()._from_json(reportdict) context_data = reportdict["_subtest.context"] report.context = SubTestContext( msg=context_data["msg"], kwargs=context_data["kwargs"] ) return report @classmethod def _from_test_report(cls, test_report): return super()._from_json(test_report._to_json()) def _addSubTest(self, test_case, test, exc_info): if exc_info is not None: msg = test._message if isinstance(test._message, str) else None call_info = make_call_info( ExceptionInfo(exc_info), start=0, stop=0, duration=0, when="call" ) report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) sub_report = SubTestReport._from_test_report(report) sub_report.context = SubTestContext(msg, dict(test.params)) self.ihook.pytest_runtest_logreport(report=sub_report) if check_interactive_exception(call_info, sub_report): self.ihook.pytest_exception_interact( node=self, call=call_info, report=sub_report ) def pytest_configure(config): TestCaseFunction.addSubTest = _addSubTest TestCaseFunction.failfast = False def pytest_unconfigure(): if hasattr(TestCaseFunction, "_addSubTest"): del TestCaseFunction.addSubTest if hasattr(TestCaseFunction, "failfast"): del TestCaseFunction.failfast @pytest.fixture def subtests(request): capmam = request.node.config.pluginmanager.get_plugin("capturemanager") if capmam is not None: suspend_capture_ctx = capmam.global_and_fixture_disabled else: suspend_capture_ctx = nullcontext yield SubTests(request.node.ihook, suspend_capture_ctx, request) @attr.s class SubTests: ihook = attr.ib() suspend_capture_ctx = attr.ib() request = attr.ib() @property def item(self): return self.request.node @contextmanager 
def _capturing_output(self): option = self.request.config.getoption("capture", None) # capsys or capfd are active, subtest should not capture capman = self.request.config.pluginmanager.getplugin("capturemanager") capture_fixture_active = getattr(capman, "_capture_fixture", None) if option == "sys" and not capture_fixture_active: with ignore_pytest_private_warning(): fixture = CaptureFixture(SysCapture, self.request) elif option == "fd" and not capture_fixture_active: with ignore_pytest_private_warning(): fixture = CaptureFixture(FDCapture, self.request) else: fixture = None if fixture is not None: fixture._start() captured = Captured() try: yield captured finally: if fixture is not None: out, err = fixture.readouterr() fixture.close() captured.out = out captured.err = err @contextmanager def test(self, msg=None, **kwargs): start = time.time() precise_start = time.perf_counter() exc_info = None with self._capturing_output() as captured: try: yield except (Exception, OutcomeException): exc_info = ExceptionInfo.from_current() precise_stop = time.perf_counter() duration = precise_stop - precise_start stop = time.time() call_info = make_call_info( exc_info, start=start, stop=stop, duration=duration, when="call" ) report = self.ihook.pytest_runtest_makereport(item=self.item, call=call_info) sub_report = SubTestReport._from_test_report(report) sub_report.context = SubTestContext(msg, kwargs.copy()) captured.update_report(sub_report) with self.suspend_capture_ctx(): self.ihook.pytest_runtest_logreport(report=sub_report) if check_interactive_exception(call_info, sub_report): self.ihook.pytest_exception_interact( node=self.item, call=call_info, report=sub_report ) def make_call_info(exc_info, *, start, stop, duration, when): try: return CallInfo( None, exc_info, start=start, stop=stop, duration=duration, when=when ) except TypeError: # support for pytest<6: didn't have a duration parameter then return CallInfo(None, exc_info, start=start, stop=stop, when=when) @contextmanager def ignore_pytest_private_warning(): import warnings with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "A private pytest class or function was used.", category=pytest.PytestDeprecationWarning, ) yield @attr.s class Captured: out = attr.ib(default="", type=str) err = attr.ib(default="", type=str) def update_report(self, report): if self.out: report.sections.append(("Captured stdout call", self.out)) if self.err: report.sections.append(("Captured stderr call", self.err)) def pytest_report_to_serializable(report): if isinstance(report, SubTestReport): return report._to_json() def pytest_report_from_serializable(data): if data.get("_report_type") == "SubTestReport": return SubTestReport._from_json(data) @pytest.hookimpl(tryfirst=True) def pytest_report_teststatus(report): if report.when != "call" or not isinstance(report, SubTestReport): return if hasattr(report, "wasxfail"): return None outcome = report.outcome if report.passed: return outcome, ",", "SUBPASS" elif report.skipped: return outcome, "-", "SUBSKIP" elif outcome == "failed": return outcome, "u", "SUBFAIL" pytest-subtests-0.6.0/setup.py000066400000000000000000000027501417055345400164450ustar00rootroot00000000000000import codecs import os from setuptools import setup def read(fname): file_path = os.path.join(os.path.dirname(__file__), fname) return codecs.open(file_path, encoding="utf-8").read() setup( name="pytest-subtests", author="Bruno Oliveira", author_email="nicoddemus@gmail.com", maintainer="Bruno Oliveira", maintainer_email="nicoddemus@gmail.com", 
license="MIT", url="https://github.com/pytest-dev/pytest-subtests", description="unittest subTest() support and subtests fixture", long_description=read("README.rst"), py_modules=["pytest_subtests"], use_scm_version=True, setup_requires=["setuptools-scm", "setuptools>=40.0"], python_requires=">=3.6", install_requires=["pytest>=6.0"], classifiers=[ "Development Status :: 4 - Beta", "Framework :: Pytest", "Intended Audience :: Developers", "Topic :: Software Development :: Testing", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: Implementation :: CPython", "Operating System :: OS Independent", "License :: OSI Approved :: MIT License", ], entry_points={"pytest11": ["subtests = pytest_subtests"]}, ) pytest-subtests-0.6.0/tests/000077500000000000000000000000001417055345400160715ustar00rootroot00000000000000pytest-subtests-0.6.0/tests/conftest.py000066400000000000000000000000341417055345400202650ustar00rootroot00000000000000pytest_plugins = "pytester" pytest-subtests-0.6.0/tests/test_subtests.py000066400000000000000000000345601417055345400213660ustar00rootroot00000000000000import sys import pytest @pytest.mark.parametrize("mode", ["normal", "xdist"]) class TestFixture: """ Tests for ``subtests`` fixture. """ @pytest.fixture def simple_script(self, testdir): testdir.makepyfile( """ def test_foo(subtests): for i in range(5): with subtests.test(msg="custom", i=i): assert i % 2 == 0 """ ) def test_simple_terminal_normal(self, simple_script, testdir, mode): if mode == "normal": result = testdir.runpytest() expected_lines = ["collected 1 item"] else: pytest.importorskip("xdist") result = testdir.runpytest("-n1") expected_lines = ["gw0 [1]"] expected_lines += [ "* test_foo [[]custom[]] (i=1) *", "* test_foo [[]custom[]] (i=3) *", "* 2 failed, 1 passed in *", ] result.stdout.fnmatch_lines(expected_lines) def test_simple_terminal_verbose(self, simple_script, testdir, mode): if mode == "normal": result = testdir.runpytest("-v") expected_lines = [ "*collected 1 item", "test_simple_terminal_verbose.py::test_foo SUBPASS *100%*", "test_simple_terminal_verbose.py::test_foo SUBFAIL *100%*", "test_simple_terminal_verbose.py::test_foo SUBPASS *100%*", "test_simple_terminal_verbose.py::test_foo SUBFAIL *100%*", "test_simple_terminal_verbose.py::test_foo SUBPASS *100%*", "test_simple_terminal_verbose.py::test_foo PASSED *100%*", ] else: pytest.importorskip("xdist") result = testdir.runpytest("-n1", "-v") expected_lines = [ "gw0 [1]", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", ] expected_lines += [ "* test_foo [[]custom[]] (i=1) *", "* test_foo [[]custom[]] (i=3) *", "* 2 failed, 1 passed in *", ] result.stdout.fnmatch_lines(expected_lines) def test_skip(self, testdir, mode): testdir.makepyfile( """ import pytest def test_foo(subtests): for i in range(5): with subtests.test(msg="custom", i=i): if i % 2 == 0: pytest.skip('even number') """ ) if mode == "normal": result = testdir.runpytest() expected_lines = ["collected 1 item"] else: pytest.importorskip("xdist") 
result = testdir.runpytest("-n1") expected_lines = ["gw0 [1]"] expected_lines += ["* 1 passed, 3 skipped in *"] result.stdout.fnmatch_lines(expected_lines) def test_xfail(self, testdir, mode): testdir.makepyfile( """ import pytest def test_foo(subtests): for i in range(5): with subtests.test(msg="custom", i=i): if i % 2 == 0: pytest.xfail('even number') """ ) if mode == "normal": result = testdir.runpytest() expected_lines = ["collected 1 item"] else: pytest.importorskip("xdist") result = testdir.runpytest("-n1") expected_lines = ["gw0 [1]"] expected_lines += ["* 1 passed, 3 xfailed in *"] result.stdout.fnmatch_lines(expected_lines) class TestSubTest: """ Test Test.subTest functionality. """ @pytest.fixture def simple_script(self, testdir): return testdir.makepyfile( """ from unittest import TestCase, main class T(TestCase): def test_foo(self): for i in range(5): with self.subTest(msg="custom", i=i): self.assertEqual(i % 2, 0) if __name__ == '__main__': main() """ ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) def test_simple_terminal_normal(self, simple_script, testdir, runner): if runner == "unittest": result = testdir.run(sys.executable, simple_script) result.stderr.fnmatch_lines( [ "FAIL: test_foo (__main__.T) [custom] (i=1)", "AssertionError: 1 != 0", "FAIL: test_foo (__main__.T) [custom] (i=3)", "AssertionError: 1 != 0", "Ran 1 test in *", "FAILED (failures=2)", ] ) else: if runner == "pytest-normal": result = testdir.runpytest(simple_script) expected_lines = ["collected 1 item"] else: pytest.importorskip("xdist") result = testdir.runpytest(simple_script, "-n1") expected_lines = ["gw0 [1]"] result.stdout.fnmatch_lines( expected_lines + [ "* T.test_foo [[]custom[]] (i=1) *", "E * AssertionError: 1 != 0", "* T.test_foo [[]custom[]] (i=3) *", "E * AssertionError: 1 != 0", "* 2 failed, 1 passed in *", ] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) def test_simple_terminal_verbose(self, simple_script, testdir, runner): if runner == "unittest": result = testdir.run(sys.executable, simple_script, "-v") result.stderr.fnmatch_lines( [ "test_foo (__main__.T) ... 
", "FAIL: test_foo (__main__.T) [custom] (i=1)", "AssertionError: 1 != 0", "FAIL: test_foo (__main__.T) [custom] (i=3)", "AssertionError: 1 != 0", "Ran 1 test in *", "FAILED (failures=2)", ] ) else: if runner == "pytest-normal": result = testdir.runpytest(simple_script, "-v") expected_lines = [ "*collected 1 item", "test_simple_terminal_verbose.py::T::test_foo SUBFAIL *100%*", "test_simple_terminal_verbose.py::T::test_foo SUBFAIL *100%*", "test_simple_terminal_verbose.py::T::test_foo PASSED *100%*", ] else: pytest.importorskip("xdist") result = testdir.runpytest(simple_script, "-n1", "-v") expected_lines = [ "gw0 [1]", "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", "*gw0*100%* PASSED test_simple_terminal_verbose.py::T::test_foo*", ] result.stdout.fnmatch_lines( expected_lines + [ "* T.test_foo [[]custom[]] (i=1) *", "E * AssertionError: 1 != 0", "* T.test_foo [[]custom[]] (i=3) *", "E * AssertionError: 1 != 0", "* 2 failed, 1 passed in *", ] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) def test_skip(self, testdir, runner): p = testdir.makepyfile( """ from unittest import TestCase, main class T(TestCase): def test_foo(self): for i in range(5): with self.subTest(msg="custom", i=i): if i % 2 == 0: self.skipTest('even number') if __name__ == '__main__': main() """ ) if runner == "unittest": result = testdir.runpython(p) result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (skipped=3)"]) else: pytest.xfail("Not producing the expected results (#5)") result = testdir.runpytest(p) result.stdout.fnmatch_lines( ["collected 1 item", "* 3 skipped, 1 passed in *"] ) @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) @pytest.mark.xfail(reason="Not producing the expected results (#5)") def test_xfail(self, testdir, runner): p = testdir.makepyfile( """ import pytest from unittest import expectedFailure, TestCase, main class T(TestCase): @expectedFailure def test_foo(self): for i in range(5): with self.subTest(msg="custom", i=i): if i % 2 == 0: raise pytest.xfail('even number') if __name__ == '__main__': main() """ ) if runner == "unittest": result = testdir.runpython(p) result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (expected failures=3)"]) else: result = testdir.runpytest(p) result.stdout.fnmatch_lines( ["collected 1 item", "* 3 xfailed, 1 passed in *"] ) class TestCapture: def create_file(self, testdir): testdir.makepyfile( """ import sys def test(subtests): print() print('start test') with subtests.test(i='A'): print("hello stdout A") print("hello stderr A", file=sys.stderr) assert 0 with subtests.test(i='B'): print("hello stdout B") print("hello stderr B", file=sys.stderr) assert 0 print('end test') assert 0 """ ) def test_capturing(self, testdir): self.create_file(testdir) result = testdir.runpytest() result.stdout.fnmatch_lines( [ "*__ test (i='A') __*", "*Captured stdout call*", "hello stdout A", "*Captured stderr call*", "hello stderr A", "*__ test (i='B') __*", "*Captured stdout call*", "hello stdout B", "*Captured stderr call*", "hello stderr B", "*__ test __*", "*Captured stdout call*", "start test", "end test", ] ) def test_no_capture(self, testdir): self.create_file(testdir) result = testdir.runpytest("-s") result.stdout.fnmatch_lines( [ "start test", "hello stdout A", "uhello stdout B", "uend test", "*__ test (i='A') __*", "*__ test (i='B') __*", "*__ test __*", ] ) result.stderr.fnmatch_lines(["hello stderr A", "hello stderr B"]) 
@pytest.mark.parametrize("fixture", ["capsys", "capfd"]) def test_capture_with_fixture(self, testdir, fixture): testdir.makepyfile( r""" import sys def test(subtests, {fixture}): print('start test') with subtests.test(i='A'): print("hello stdout A") print("hello stderr A", file=sys.stderr) out, err = {fixture}.readouterr() assert out == 'start test\nhello stdout A\n' assert err == 'hello stderr A\n' """.format( fixture=fixture ) ) result = testdir.runpytest() result.stdout.fnmatch_lines( [ "*1 passed*", ] ) class TestDebugging: """Check --pdb support for subtests fixture and TestCase.subTest.""" class _FakePdb: """ Fake debugger class implementation that tracks which methods were called on it. """ quitting = False calls = [] def __init__(self, *args, **kwargs): self.calls.append("init") def reset(self): self.calls.append("reset") def interaction(self, *args): self.calls.append("interaction") @pytest.fixture(autouse=True) def cleanup_calls(self): self._FakePdb.calls.clear() def test_pdb_fixture(self, testdir, monkeypatch): testdir.makepyfile( """ def test(subtests): with subtests.test(): assert 0 """ ) self.runpytest_and_check_pdb(testdir, monkeypatch) def test_pdb_unittest(self, testdir, monkeypatch): testdir.makepyfile( """ from unittest import TestCase class Test(TestCase): def test(self): with self.subTest(): assert 0 """ ) self.runpytest_and_check_pdb(testdir, monkeypatch) def runpytest_and_check_pdb(self, testdir, monkeypatch): # Install the fake pdb implementation in pytest_subtests so we can reference # it in the command line (any module would do). import pytest_subtests monkeypatch.setattr(pytest_subtests, "_CustomPdb", self._FakePdb, raising=False) result = testdir.runpytest("--pdb", "--pdbcls=pytest_subtests:_CustomPdb") # Ensure pytest entered in debugging mode when encountering the failing # assert. result.stdout.fnmatch_lines("*entering PDB*") assert self._FakePdb.calls == ["init", "reset", "interaction"] pytest-subtests-0.6.0/tox.ini000066400000000000000000000002501417055345400162370ustar00rootroot00000000000000[tox] envlist = py36,py37,py38,py39,py310 [testenv] passenv = USER USERNAME TRAVIS PYTEST_ADDOPTS deps = pytest-xdist>=1.28 commands = pytest {posargs:tests}
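The ``tox.ini`` above lists ``pytest-xdist>=1.28`` as a test dependency, matching the 0.2.0
changelog entry about subtests being reported correctly under xdist. As a minimal usage sketch
(not part of the archive; the file name and worker count are arbitrary), the same ``subtests``
fixture works unchanged when tests are distributed across workers:

.. code-block:: python

    # test_demo.py -- illustrative only; run with e.g. "pytest -n 2 test_demo.py"
    # (requires pytest-xdist). Mirrors the simple_script fixture used by the
    # plugin's own tests.


    def test_demo(subtests):
        for i in range(5):
            with subtests.test(msg="custom", i=i):
                assert i % 2 == 0  # fails for i=1 and i=3, reported as two subtest failures

Running this under xdist should report the same two subtest failures as a plain ``pytest`` run,
since the plugin serializes its ``SubTestReport`` objects for transport between workers (see
``pytest_report_to_serializable`` in ``pytest_subtests.py``).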