pax_global_header00006660000000000000000000000064143302517130014511gustar00rootroot0000000000000052 comment=250ec45f5d72eca78ab0ea2d1571667d2c51f8b5 pytest-trio-0.8.0/000077500000000000000000000000001433025171300140215ustar00rootroot00000000000000pytest-trio-0.8.0/.coveragerc000066400000000000000000000001651433025171300161440ustar00rootroot00000000000000[run] branch=True source=pytest_trio [report] precision = 1 exclude_lines = pragma: no cover abc.abstractmethod pytest-trio-0.8.0/.github/000077500000000000000000000000001433025171300153615ustar00rootroot00000000000000pytest-trio-0.8.0/.github/workflows/000077500000000000000000000000001433025171300174165ustar00rootroot00000000000000pytest-trio-0.8.0/.github/workflows/ci.yml000066400000000000000000000046601433025171300205420ustar00rootroot00000000000000name: CI on: push: branches-ignore: - "dependabot/**" pull_request: jobs: Windows: name: 'Windows (${{ matrix.python }})' runs-on: 'windows-latest' strategy: fail-fast: false matrix: python: ['3.7', '3.8', '3.9', '3.10', '3.11'] steps: - name: Checkout uses: actions/checkout@v2 - name: Setup python uses: actions/setup-python@v2 with: python-version: '${{ matrix.python }}' - name: Run tests run: ./ci.sh shell: bash env: # Should match 'name:' up above JOB_NAME: 'Windows (${{ matrix.python }})' Ubuntu: name: 'Ubuntu (${{ matrix.python }}${{ matrix.extra_name }})' timeout-minutes: 10 runs-on: 'ubuntu-latest' strategy: fail-fast: false matrix: python: ['pypy-3.7', '3.7', 'pypy-3.8', '3.8', 'pypy-3.9', '3.9', '3.10', '3.11'] check_formatting: ['0'] check_docs: ['0'] extra_name: [''] include: - python: '3.9' check_formatting: '1' extra_name: ', check formatting' - python: '3.9' check_docs: '1' extra_name: ', check docs' steps: - name: Checkout uses: actions/checkout@v2 - name: Setup python uses: actions/setup-python@v2 if: "!endsWith(matrix.python, '-dev')" with: python-version: '${{ matrix.python }}' - name: Setup python (dev) uses: deadsnakes/action@v2.0.2 if: endsWith(matrix.python, '-dev') with: python-version: '${{ matrix.python }}' - name: Run tests run: ./ci.sh env: CHECK_FORMATTING: '${{ matrix.check_formatting }}' CHECK_DOCS: '${{ matrix.check_docs }}' # Should match 'name:' up above JOB_NAME: 'Ubuntu (${{ matrix.python }}${{ matrix.extra_name }})' macOS: name: 'macOS (${{ matrix.python }})' timeout-minutes: 10 runs-on: 'macos-latest' strategy: fail-fast: false matrix: python: ['3.7', '3.8', '3.9', '3.10', '3.11'] steps: - name: Checkout uses: actions/checkout@v2 - name: Setup python uses: actions/setup-python@v2 with: python-version: '${{ matrix.python }}' - name: Run tests run: ./ci.sh env: # Should match 'name:' up above JOB_NAME: 'macOS (${{ matrix.python }})' pytest-trio-0.8.0/.gitignore000066400000000000000000000011221433025171300160050ustar00rootroot00000000000000# Add any project-specific files here: # Sphinx docs docs/build/ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *~ \#* .#* # C extensions *.so # Distribution / packaging .Python /build/ /develop-eggs/ /dist/ /eggs/ /lib/ /lib64/ /parts/ /sdist/ /var/ *.egg-info/ .installed.cfg *.egg # Installer logs pip-log.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml .hypothesis/ .pytest_cache/ # Translations *.mo # Mr Developer .mr.developer.cfg .project .pydevproject # Rope .ropeproject # Django stuff: *.log *.pot pytest-trio-0.8.0/.readthedocs.yml000066400000000000000000000003271433025171300171110ustar00rootroot00000000000000# 
https://docs.readthedocs.io/en/latest/config-file/index.html version: 2 formats: - htmlzip - epub python: version: 3.7 install: - requirements: docs-requirements.txt sphinx: fail_on_warning: true pytest-trio-0.8.0/CHEATSHEET.rst000066400000000000000000000012161433025171300161700ustar00rootroot00000000000000Tips ==== To run tests ------------ * Install requirements: ``pip install -r test-requirements.txt`` (possibly in a virtualenv) * Actually run the tests: ``pytest pytest_trio`` * Format the code with ``black .`` To make a release ----------------- * Update the version in ``pytest_trio/_version.py`` * Run ``towncrier`` to collect your release notes. * Review your release notes. * Check everything in. * Double-check it all works, docs build, etc. * Build your sdist and wheel: ``python setup.py sdist bdist_wheel`` * Upload to PyPI: ``twine upload dist/*`` * Use ``git tag`` to tag your version. * Don't forget to ``git push --tags``. pytest-trio-0.8.0/CODE_OF_CONDUCT.md000066400000000000000000000001661433025171300166230ustar00rootroot00000000000000The Trio code of conduct applies to this project. See: https://trio.readthedocs.io/en/latest/code-of-conduct.html pytest-trio-0.8.0/CONTRIBUTING.md000066400000000000000000000002041433025171300162460ustar00rootroot00000000000000This is an official Trio project. For the Trio contributing guide, see: https://trio.readthedocs.io/en/latest/contributing.html pytest-trio-0.8.0/LICENSE000066400000000000000000000002711433025171300150260ustar00rootroot00000000000000This software is made available under the terms of *either* of the licenses found in LICENSE.APACHE2 or LICENSE.MIT. Contributions to are made under the terms of *both* these licenses. pytest-trio-0.8.0/LICENSE.APACHE2000066400000000000000000000261361433025171300160400ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pytest-trio-0.8.0/LICENSE.MIT000066400000000000000000000020261433025171300154560ustar00rootroot00000000000000The MIT License (MIT) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. pytest-trio-0.8.0/MANIFEST.in000066400000000000000000000002461433025171300155610ustar00rootroot00000000000000include README.rst CHEATSHEET.rst LICENSE* CODE_OF_CONDUCT* CONTRIBUTING* include .coveragerc include test-requirements.txt recursive-include docs * prune docs/build pytest-trio-0.8.0/README.rst000066400000000000000000000030431433025171300155100ustar00rootroot00000000000000pytest-trio =========== .. image:: https://img.shields.io/badge/chat-join%20now-blue.svg :target: https://gitter.im/python-trio/general :alt: Join chatroom .. image:: https://img.shields.io/badge/docs-read%20now-blue.svg :target: https://pytest-trio.readthedocs.io/en/latest/?badge=latest :alt: Documentation Status .. image:: https://img.shields.io/pypi/v/pytest-trio.svg :target: https://pypi.org/project/pytest-trio :alt: Latest PyPi version .. image:: https://github.com/python-trio/pytest-trio/workflows/CI/badge.svg?branch=master :target: https://github.com/python-trio/pytest-trio/actions :alt: Automated test status .. image:: https://codecov.io/gh/python-trio/pytest-trio/branch/master/graph/badge.svg :target: https://codecov.io/gh/python-trio/pytest-trio :alt: Test coverage This is a pytest plugin to help you test projects that use `Trio `__, a friendly library for concurrency and async I/O in Python. For an overview of features, see our `manual `__, or jump straight to the `quickstart guide `__. Vital statistics ---------------- **Documentation:** https://pytest-trio.readthedocs.io **Bug tracker and source code:** https://github.com/python-trio/pytest-trio **License:** MIT or Apache 2, your choice. **Code of conduct:** Contributors are requested to follow our `code of conduct `__ in all project spaces. pytest-trio-0.8.0/ci.sh000077500000000000000000000051251433025171300147560ustar00rootroot00000000000000#!/bin/bash set -ex -o pipefail # Log some general info about the environment uname -a env | sort # Curl's built-in retry system is not very robust; it gives up on lots of # network errors that we want to retry on. Wget might work better, but it's # not installed on azure pipelines's windows boxes. So... let's try some good # old-fashioned brute force. (This is also a convenient place to put options # we always want, like -f to tell curl to give an error if the server sends an # error response, and -L to follow redirects.) function curl-harder() { for BACKOFF in 0 1 2 4 8 15 15 15 15; do sleep $BACKOFF if curl -fL --connect-timeout 5 "$@"; then return 0 fi done return 1 } python -m pip install -U pip setuptools wheel python -m pip --version python setup.py sdist --formats=zip python -m pip install dist/*.zip if [ "$CHECK_FORMATTING" = "1" ]; then pip install black if ! black --check . 
; then cat <= 1.6.1 sphinx_rtd_theme sphinxcontrib-trio # Workaround for this weird issue: # https://travis-ci.org/python-trio/pytest-trio/jobs/407495415 attrs >= 17.4.0 # != 19.9.0 for https://github.com/twisted/towncrier/issues/180 # != 21.3.0 for https://github.com/twisted/towncrier/issues/346 towncrier != 19.9.0,!= 21.3.0 # pytest-trio's own dependencies trio >= 0.22.0 outcome >= 1.1.0 pytest >= 7.2.0 pytest-trio-0.8.0/docs-requirements.txt000066400000000000000000000042641433025171300202410ustar00rootroot00000000000000# # This file is autogenerated by pip-compile with python 3.8 # To update, run: # # pip-compile docs-requirements.in # alabaster==0.7.12 # via sphinx async-generator==1.10 # via trio attrs==22.1.0 # via # -r docs-requirements.in # outcome # pytest # trio babel==2.10.3 # via sphinx certifi==2022.9.24 # via requests charset-normalizer==2.1.1 # via requests click==8.1.3 # via # click-default-group # towncrier click-default-group==1.2.2 # via towncrier docutils==0.17.1 # via # sphinx # sphinx-rtd-theme exceptiongroup==1.0.0 # via # pytest # trio idna==3.4 # via # requests # trio imagesize==1.4.1 # via sphinx importlib-metadata==5.0.0 # via sphinx incremental==22.10.0 # via towncrier iniconfig==1.1.1 # via pytest jinja2==3.1.2 # via # sphinx # towncrier markupsafe==2.1.1 # via jinja2 outcome==1.2.0 # via # -r docs-requirements.in # trio packaging==21.3 # via # pytest # sphinx pluggy==1.0.0 # via pytest pygments==2.13.0 # via sphinx pyparsing==3.0.9 # via packaging pytest==7.2.0 # via -r docs-requirements.in pytz==2022.5 # via babel requests==2.28.1 # via sphinx sniffio==1.3.0 # via trio snowballstemmer==2.2.0 # via sphinx sortedcontainers==2.4.0 # via trio sphinx==5.3.0 # via # -r docs-requirements.in # sphinx-rtd-theme # sphinxcontrib-trio sphinx-rtd-theme==1.0.0 # via -r docs-requirements.in sphinxcontrib-applehelp==1.0.2 # via sphinx sphinxcontrib-devhelp==1.0.2 # via sphinx sphinxcontrib-htmlhelp==2.0.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx sphinxcontrib-qthelp==1.0.3 # via sphinx sphinxcontrib-serializinghtml==1.1.5 # via sphinx sphinxcontrib-trio==1.1.2 # via -r docs-requirements.in tomli==2.0.1 # via # pytest # towncrier towncrier==22.8.0 # via -r docs-requirements.in trio==0.22.0 # via -r docs-requirements.in urllib3==1.26.12 # via requests zipp==3.10.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: # setuptools pytest-trio-0.8.0/docs/000077500000000000000000000000001433025171300147515ustar00rootroot00000000000000pytest-trio-0.8.0/docs/Makefile000066400000000000000000000011451433025171300164120ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = pytest-trio SOURCEDIR = source BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
%: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) pytest-trio-0.8.0/docs/make.bat000066400000000000000000000014171433025171300163610ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=source set BUILDDIR=build set SPHINXPROJ=pytest-trio if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% :end popd pytest-trio-0.8.0/docs/source/000077500000000000000000000000001433025171300162515ustar00rootroot00000000000000pytest-trio-0.8.0/docs/source/_static/000077500000000000000000000000001433025171300176775ustar00rootroot00000000000000pytest-trio-0.8.0/docs/source/_static/.gitkeep000066400000000000000000000000001433025171300213160ustar00rootroot00000000000000pytest-trio-0.8.0/docs/source/_static/favicon-32.png000066400000000000000000000034521433025171300222600ustar00rootroot00000000000000PNG  IHDR szzsBIT|d pHYstEXtSoftwarewww.inkscape.org<IDATXV{Tsڀ QH.oX`KK[QY1bm) D &҅-,-k Qm-m!XRE?fΝaP w9wE'_oTО%[&Y>'ɓ$Zgo%YI<|=H25{Q/ YՏ(^L,@fU`%{'¸VmoPf7JBzAlwyP mrɗ fpR8@ᅖ1 5G[KK;t&arR[Ϭ.b!m>gv^\zs2DYͩT7~`uLf[ (#9 -_! jPu+>|dsHf~l.p^ROq:͈TlBHS%Sf{N-Vn4'?]bМ- \e '&+ԒmeVA8CF/ R 8Ehdp?GދZk"\aHzkvupfvG;ۧ9gck唷 .ύEtNCfwJ9ݭێŮJIC &uʋw *tJ܅V]exfd$uMOSWxR>` zN5MdPQ溓|t3$A.S#oꞁ6_lT) IϕL_0rIw(4cނ ^9BtARTDf"w)^(@_|lD.34>6B?Ⱥ8??vɐ'(+-q˧7'Ai@K\U  ހ]>> @=reu >=U<PiR @DAVm9}C4̃Kĵt _H@n/`"tښ.rl$4|>]@9Acr6#xW6G*,o2:b1Y^L\`. 2/vgdz*e}4@Q822;!3o:3Kj8R $ao<y;I!˵xfYKx&,z&6ܿ @s)-01hlQNYgXYpqƃ4]l7Y0o.0xa%_윍˝Lh7w}grw:qG;,?08hVÞ?.o=6y$4 u ۚowSgһ7MŌOum R~[pʛ厡ʼ ѡM[ou%E9=[;oܾkNt!"?moU5hycRyIENDB`pytest-trio-0.8.0/docs/source/_static/favicon.svg000066400000000000000000000233521433025171300220520ustar00rootroot00000000000000 image/svg+xml pytest-trio-0.8.0/docs/source/_static/hackrtd.css000066400000000000000000000065561433025171300220450ustar00rootroot00000000000000/* Temporary hack to work around bug in rtd theme 2.0 through 2.4 See https://github.com/rtfd/sphinx_rtd_theme/pull/382 */ pre { line-height: normal !important; } /* Make .. deprecation:: blocks visible * (by default they're entirely unstyled) */ .deprecated { background-color: #ffe13b; } /* Add a snakey triskelion ornament to
<hr>s
 * https://stackoverflow.com/questions/8862344/css-hr-with-ornament/18541258#18541258
 * but only do it to <hr>s in the content box, b/c the RTD popup control panel
 * thingummy also has an <hr>
in it, and putting the ornament on that looks * *really weird*. (In particular, the background color is wrong.) */ .rst-content hr:after { /* This .svg gets displayed on top of the middle of the hrule. It has a box * behind the logo that's colored to match the RTD theme body background * color (#fcfcfc), which hides the middle part of the hrule to make it * look like there's a gap in it. The size of the box determines the size * of the gap. */ background: url('ornament.svg') no-repeat top center; background-size: contain; content: ""; display: block; height: 30px; position: relative; top: -15px; } /* Hacks to make the upper-left logo area look nicer */ .wy-side-nav-search { /* Lighter background color to match logo */ background-color: #d2e7fa !important; } .wy-side-nav-search > a { color: #306998 !important; } .wy-side-nav-search > a.logo { display: block !important; padding-bottom: 0.809em !important; } .wy-side-nav-search > a img.logo { display: inline !important; padding: 20 !important; } .trio-version { display: inline; /* I *cannot* figure out how to get the version text vertically centered on the logo. Oh well... height: 32px; line-height: 32px; */ } .wy-side-nav-search > a { /* Mostly this is just to simplify things, so we don't have margin/padding * on both the and the inside it */ margin: 0 !important; padding: 0 !important; } /* Get rid of the weird super dark "Contents" label that wastes vertical space */ .wy-menu-vertical > p.caption { display: none !important; } /* I do not like RTD's use of Roboto Slab for headlines. So force it back to * Lato (or whatever fallback it's using if Lato isn't available for some * reason). I also experimented with using Montserrat to be extra obnoxiously * on brand, but honestly you couldn't really tell so there wasn't much point * in adding page weight for that, and this is going to match the body text * better. (Montserrat for body text *definitely* didn't look good, alas.) */ h1, h2, h3, h4, h5, h6, legend, .rst-content .toctree-wrapper p.caption { font-family: inherit !important; } /* Get rid of the horrible red for literal content */ .rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal { color: #222 !important; } /* Style the "Need help?" text just underneath the search box */ .trio-help-hint { line-height: normal; margin-bottom: 0; /* font-size: 12px; */ font-size: 80%; /* matches the "Search docs" box */ padding-top: 6px; color: #306998; text-align: center; } a.trio-help-hint, .trio-help-hint a:link, .trio-help-hint a:visited { color: inherit; /* Like text-decoration: underline, but with a thinner line */ text-decoration: none; border-bottom: 1px solid; } pytest-trio-0.8.0/docs/source/_static/ornament.svg000066400000000000000000000252171433025171300222520ustar00rootroot00000000000000 image/svg+xml pytest-trio-0.8.0/docs/source/_templates/000077500000000000000000000000001433025171300204065ustar00rootroot00000000000000pytest-trio-0.8.0/docs/source/_templates/layout.html000066400000000000000000000016131433025171300226120ustar00rootroot00000000000000{# https://stackoverflow.com/questions/25243482/how-to-add-sphinx-generated-index-to-the-sidebar-when-using-read-the-docs-theme #} {% extends "!layout.html" %} {% block sidebartitle %} {{ project }} {%- set nav_version = version %} {% if READTHEDOCS and current_version %} {%- set nav_version = current_version %} {% endif %} {# don't show the version on RTD if it's the default #} {% if nav_version != 'latest' %}
    <div class="version">
      {{ nav_version }}
    </div>
{% endif %}
{% include "searchbox.html" %}

  <p class="trio-help-hint">
    Need help? <a href="https://gitter.im/python-trio/general">Live chat</a>, <a href="https://trio.discourse.group">forum</a>, <a href="https://stackoverflow.com/questions/tagged/python-trio">StackOverflow</a>.
  </p>
{% endblock %} pytest-trio-0.8.0/docs/source/conf.py000066400000000000000000000154071433025171300175570ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Documentation build configuration file, created by # sphinx-quickstart on Sat Jan 21 19:11:14 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys # So autodoc can import our package sys.path.insert(0, os.path.abspath("../..")) # https://docs.readthedocs.io/en/stable/builds.html#build-environment if "READTHEDOCS" in os.environ: import glob if glob.glob("../../newsfragments/*.*.rst"): print("-- Found newsfragments; running towncrier --", flush=True) import subprocess subprocess.run( ["towncrier", "--yes", "--date", "not released yet"], cwd="../..", check=True, ) # Warn about all references to unknown targets nitpicky = True # Except for these ones, which we expect to point to unknown targets: nitpick_ignore = [ # Format is ("sphinx reference type", "string"), e.g.: ("py:obj", "bytes-like"), ] # XX hack the RTD theme until # https://github.com/rtfd/sphinx_rtd_theme/pull/382 # is shipped (should be in the release after 0.2.4) # ...note that this has since grown to contain a bunch of other CSS hacks too # though. def setup(app): app.add_css_file("hackrtd.css") # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinxcontrib_trio", ] intersphinx_mapping = { "python": ("https://docs.python.org/3", None), "trio": ("https://trio.readthedocs.io/en/stable", None), } autodoc_member_order = "bysource" # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The master toctree document. master_doc = "index" # General information about the project. project = "pytest-trio" copyright = "The pytest-trio authors" author = "The pytest-trio authors" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. import pytest_trio version = pytest_trio.__version__ # The full version, including alpha/beta/rc tags. release = version html_favicon = "_static/favicon-32.png" html_logo = "../../logo/wordmark-transparent.svg" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. 
language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "default" highlight_language = "python3" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # This avoids a warning by the epub builder that it can't figure out # the MIME type for our favicon. suppress_warnings = ["epub.unknown_project_files"] # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # We have to set this ourselves, not only because it's useful for local # testing, but also because if we don't then RTD will throw away our # html_theme_options. import sphinx_rtd_theme html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { # default is 2 # show deeper nesting in the RTD theme's sidebar TOC # https://stackoverflow.com/questions/27669376/ # I'm not 100% sure this actually does anything with our current # versions/settings... "navigation_depth": 4, "logo_only": False, "prev_next_buttons_location": "both", } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = "pytest-triodoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, "pytest-trio.tex", "pytest-trio Documentation", author, "manual"), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, "pytest-trio", "pytest-trio Documentation", [author], 1), ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "pytest-trio", "pytest-trio Documentation", author, "pytest-trio", "pytest plugin for Trio", "Miscellaneous", ), ] pytest-trio-0.8.0/docs/source/history.rst000066400000000000000000000130711433025171300205060ustar00rootroot00000000000000Release history =============== .. currentmodule:: pytest_trio .. 
towncrier release notes start pytest-trio 0.8.0 (2022-11-01) ------------------------------ Features ~~~~~~~~ - If a test raises an ``ExceptionGroup`` (or nested ``ExceptionGroup``\ s) with only a single 'leaf' exception from ``pytest.xfail()`` or ``pytest.skip()``\ , we now unwrap it to have the desired effect on Pytest. ``ExceptionGroup``\ s with two or more leaf exceptions, even of the same type, are not changed and will be treated as ordinary test failures. See `pytest-dev/pytest#9680 `__ for design discussion. This feature is particularly useful if you've enabled `the new strict_exception_groups=True option `__. (`#104 `__) Bugfixes ~~~~~~~~ - Fix an issue where if two fixtures are being set up concurrently, and one crashes and the other hangs, then the test as a whole would hang, rather than being cancelled and unwound after the crash. (`#120 `__) Misc ~~~~ - Trio 0.22.0 deprecated ``MultiError`` in favor of the standard-library (or `backported `__) ``ExceptionGroup`` type; ``pytest-trio`` now uses ``ExceptionGroup`` exclusively and therefore requires Trio 0.22.0 or later. (`#128 `__) - Dropped support for end-of-life Python 3.6, and the ``async_generator`` library necessary to support it, and started testing on Python 3.10 and 3.11. (`#129 `__) pytest-trio 0.7.0 (2020-10-15) ------------------------------ Features ~~~~~~~~ - Support added for :ref:`alternative Trio run functions ` via the ``trio_run`` configuration variable and ``@pytest.mark.trio(run=...)``. Presently supports Trio and QTrio. (`#105 `__) Deprecations and Removals ~~~~~~~~~~~~~~~~~~~~~~~~~ - Python 3.5 support removed. (`#96 `__) pytest-trio 0.6.0 (2020-05-20) ---------------------------------- Features ~~~~~~~~ - Incompatible change: if you use ``yield`` inside a Trio fixture, and the ``yield`` gets cancelled (for example, due to a background task crashing), then the ``yield`` will now raise :exc:`trio.Cancelled`. See :ref:`cancel-yield` for details. Also, in this same case, pytest-trio will now reliably mark the test as failed, even if the fixture doesn't go on to raise an exception. (`#75 `__) - Updated for compatibility with Trio v0.15.0. pytest-trio 0.5.2 (2019-02-13) ------------------------------ Features ~~~~~~~~ - pytest-trio now makes the Trio scheduler deterministic while running inside a Hypothesis test. Hopefully you won't see any change, but if you had scheduler-dependent bugs Hypothesis will be more effective now. (`#73 `__) - Updated for compatibility with trio v0.11.0. pytest-trio 0.5.1 (2018-09-28) ------------------------------ Bugfixes ~~~~~~~~ - The pytest 3.8.1 release broke pytest-trio's handling of trio tests defined as class methods. We fixed it again. (`#64 `__) pytest-trio 0.5.0 (2018-08-26) ------------------------------ This is a major release, including a rewrite of large portions of the internals. We believe it should be backwards compatible with existing projects. Major new features include: * "trio mode": no more writing ``@pytest.mark.trio`` everywhere! * it's now safe to use nurseries inside fixtures (`#55 `__) * new ``@trio_fixture`` decorator to explicitly mark a fixture as a trio fixture * a number of easy-to-make mistakes are now caught and raise informative errors * the :data:`nursery` fixture is now 87% more magical For more details, see the manual. Oh right, speaking of which: we finally have a manual! You should read it. 
pytest-trio 0.4.2 (2018-06-29) ------------------------------ Features ~~~~~~~~ - pytest-trio now integrates with `Hypothesis `_ to support ``@given`` on async tests using Trio. (`#42 `__) pytest-trio 0.4.1 (2018-04-14) ------------------------------ No significant changes. pytest-trio 0.4.0 (2018-04-14) ------------------------------ - Fix compatibility with trio 0.4.0 (`#25 `__) pytest-trio 0.3.0 (2018-01-03) ------------------------------ Features ~~~~~~~~ - Add ``nursery`` fixture and improve teardown handling for yield fixture (`#25 `__) pytest-trio 0.2.0 (2017-12-15) ------------------------------ - Heavy improvements, add async yield fixture, fix bugs, add tests etc. (`#17 `__) Deprecations and Removals ~~~~~~~~~~~~~~~~~~~~~~~~~ - Remove unused_tcp_port{,_factory} fixtures (`#15 `__) pytest-trio 0.1.1 (2017-12-08) ------------------------------ Disable intersphinx for trio (cause crash in CI for the moment due to 404 in readthedoc). pytest-trio 0.1.0 (2017-12-08) ------------------------------ Initial release. pytest-trio-0.8.0/docs/source/index.rst000066400000000000000000000046151433025171300201200ustar00rootroot00000000000000.. documentation master file, created by sphinx-quickstart on Sat Jan 21 19:11:14 2017. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. =================================== pytest-trio: Pytest plugin for trio =================================== This is a pytest plugin to help you test projects that use `Trio `__, a friendly library for concurrency and async I/O in Python. Features include: * Async tests without the boilerplate: just write ``async def test_whatever(): ...``. * Useful fixtures included: use :data:`autojump_clock` for easy testing of code with timeouts, or :data:`nursery` to easily set up background tasks. * Write your own async fixtures: set up an async database connection or start a server inside a fixture, and then use it in your tests. * If you have multiple async fixtures, pytest-trio will even do setup/teardown concurrently whenever possible. (Though honestly, we're not sure whether this is a good idea or not and might remove it in the future. If it makes your tests harder to debug, or conversely provides you with big speedups, `please let us know `__.) * Integration with the fabulous `Hypothesis `__ library, so your async tests can use property-based testing: just use ``@given`` like you're used to. * Support for testing projects that use Trio exclusively and want to use pytest-trio everywhere, and also for testing projects that support multiple async libraries and only want to enable pytest-trio's features for a subset of their test suite. Vital statistics ================ * Install: ``pip install pytest-trio`` * Documentation: https://pytest-trio.readthedocs.io * Issue tracker, source code: https://github.com/python-trio/pytest-trio * License: MIT or Apache 2, your choice * Contributor guide: https://trio.readthedocs.io/en/latest/contributing.html * Code of conduct: Contributors are requested to follow our `code of conduct `__ in all project spaces. .. toctree:: :maxdepth: 2 quickstart.rst reference.rst .. 
toctree:: :maxdepth: 1 history.rst ==================== Indices and tables ==================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` * :ref:`glossary` pytest-trio-0.8.0/docs/source/quickstart.rst000066400000000000000000000363231433025171300212040ustar00rootroot00000000000000Quickstart ========== Enabling Trio mode and running your first async tests ----------------------------------------------------- .. note:: If you used `cookiecutter-trio `__ to set up your project, then pytest-trio and Trio mode are already configured! You can write ``async def test_whatever(): ...`` and it should just work. Feel free to skip to the next section. Let's make a temporary directory to work in, and write two trivial tests: one that we expect should pass, and one that we expect should fail:: # test_example.py import trio async def test_sleep(): start_time = trio.current_time() await trio.sleep(1) end_time = trio.current_time() assert end_time - start_time >= 1 async def test_should_fail(): assert False If we run this under pytest normally, then the tests are skipped and we get a warning explaining how pytest itself does not directly support async def tests. Note that in versions of pytest prior to v4.4.0 the tests end up being reported as passing with other warnings despite not actually having been properly run. .. code-block:: none $ pytest test_example.py ======================== test session starts ========================= platform linux -- Python 3.8.5, pytest-6.0.1, py-1.9.0, pluggy-0.13.1 rootdir: /tmp collected 2 items test_example.py ss [100%] ========================== warnings summary ========================== test_example.py::test_sleep test_example.py::test_should_fail .../_pytest/python.py:169: PytestUnhandledCoroutineWarning: async def functions are not natively supported and have been skipped. You need to install a suitable plugin for your async framework, for example: - pytest-asyncio - pytest-trio - pytest-tornasync - pytest-twisted warnings.warn(PytestUnhandledCoroutineWarning(msg.format(nodeid))) -- Docs: https://docs.pytest.org/en/stable/warnings.html =================== 2 skipped, 2 warnings in 0.26s =================== Here's the fix: 1. Install pytest-trio: ``pip install pytest-trio`` 2. In your project root, create a file called ``pytest.ini`` with contents: .. code-block:: none [pytest] trio_mode = true And we're done! Let's try running pytest again: .. code-block:: none $ pip install pytest-trio $ cat <pytest.ini [pytest] trio_mode = true EOF $ pytest test_example.py ======================== test session starts ========================= platform linux -- Python 3.8.5, pytest-6.0.1, py-1.9.0, pluggy-0.13.1 rootdir: /tmp, configfile: pytest.ini plugins: trio-0.6.0 collected 2 items test_example.py .F [100%] ============================== FAILURES ============================== __________________________ test_should_fail __________________________ async def test_should_fail(): > assert False E assert False test_example.py:11: AssertionError ====================== short test summary info ======================= FAILED test_example.py::test_should_fail - assert False ==================== 1 failed, 1 passed in 1.23s ===================== Notice that now it says ``plugins: trio``, which means that pytest-trio is installed, and the results make sense: the good test passed, the bad test failed, no warnings, and it took just over 1 second, like we'd expect. Trio's magic autojump clock --------------------------- Tests involving time are often slow and flaky. 
But we can fix that. Just add the ``autojump_clock`` fixture to your test, and it will run in a mode where Trio's clock is virtualized and deterministic. Essentially, the clock doesn't move, except that whenever all tasks are blocked waiting, it jumps forward until the next time when something will happen:: # Notice the 'autojump_clock' argument: that's all it takes! async def test_sleep_efficiently_and_reliably(autojump_clock): start_time = trio.current_time() await trio.sleep(1) end_time = trio.current_time() assert end_time - start_time == 1 In the version of this test we saw before that used real time, at the end we had to use a ``>=`` comparison, in order to account for scheduler jitter and so forth. If there were a bug that caused :func:`trio.sleep` to take 10 seconds, our test wouldn't have noticed. But now we're using virtual time, so the call to ``await trio.sleep(1)`` takes *exactly* 1 virtual second, and the ``==`` test will pass every time. Before, we had to wait around for the test to complete; now, it completes essentially instantaneously. (Try it!) And, while here our example is super simple, its integration with Trio's core scheduling logic allows this to work for arbitrarily complex programs (as long as they aren't interacting with the outside world). Async fixtures -------------- We can write async fixtures:: @pytest.fixture async def db_connection(): return await some_async_db_library.connect(...) async def test_example(db_connection): await db_connection.execute("SELECT * FROM ...") If you need to run teardown code, you can use ``yield``, just like a regular pytest fixture:: # DB connection that wraps each test in a transaction and rolls it # back afterwards @pytest.fixture async def rollback_db_connection(): # Setup code connection = await some_async_db_library.connect(...) await connection.execute("START TRANSACTION") # The value of this fixture yield connection # Teardown code, executed after the test is done await connection.execute("ROLLBACK") .. _server-fixture-example: Running a background server from a fixture ------------------------------------------ Here's some code to implement an echo server. It's supposed to take in arbitrary data, and then send it back out again:: async def echo_server_handler(stream): while True: data = await stream.receive_some(1000) if not data: break await stream.send_all(data) # Usage: await trio.serve_tcp(echo_server_handler, ...) Now we need to test it, to make sure it's working correctly. In fact, since this is such complicated and sophisticated code, we're going to write lots of tests for it. And they'll all follow the same basic pattern: we'll start the echo server running in a background task, then connect to it, send it some test data, and see how it responds. Here's a first attempt:: # Let's cross our fingers and hope no-one else is using this port... PORT = 14923 # Don't copy this -- we can do better async def test_attempt_1(): async with trio.open_nursery() as nursery: # Start server running in the background nursery.start_soon( partial(trio.serve_tcp, echo_server_handler, port=PORT) ) # Connect to the server. echo_client = await trio.open_tcp_stream("127.0.0.1", PORT) # Send some test data, and check that it gets echoed back async with echo_client: for test_byte in [b"a", b"b", b"c"]: await echo_client.send_all(test_byte) assert await echo_client.receive_some(1) == test_byte This will mostly work, but it has a few problems. 
The most obvious one is that when we run it, even if everything works perfectly, it will hang at the end of the test - we never shut down the server, so the nursery block will wait forever for it to exit. To avoid this, we should cancel the nursery at the end of the test: .. code-block:: python3 :emphasize-lines: 7,20,21 # Let's cross our fingers and hope no-one else is using this port... PORT = 14923 # Don't copy this -- we can do better async def test_attempt_2(): async with trio.open_nursery() as nursery: try: # Start server running in the background nursery.start_soon( partial(trio.serve_tcp, echo_server_handler, port=PORT) ) # Connect to the server. echo_client = await trio.open_tcp_stream("127.0.0.1", PORT) # Send some test data, and check that it gets echoed back async with echo_client: for test_byte in [b"a", b"b", b"c"]: await echo_client.send_all(test_byte) assert await echo_client.receive_some(1) == test_byte finally: nursery.cancel_scope.cancel() In fact, this pattern is *so* common, that pytest-trio provides a handy :data:`nursery` fixture to let you skip the boilerplate. Just add ``nursery`` to your test function arguments, and pytest-trio will open a nursery, pass it in to your function, and then cancel it for you afterwards: .. code-block:: python3 :emphasize-lines: 5 # Let's cross our fingers and hope no-one else is using this port... PORT = 14923 # Don't copy this -- we can do better async def test_attempt_3(nursery): # Start server running in the background nursery.start_soon( partial(trio.serve_tcp, echo_server_handler, port=PORT) ) # Connect to the server. echo_client = await trio.open_tcp_stream("127.0.0.1", PORT) # Send some test data, and check that it gets echoed back async with echo_client: for test_byte in [b"a", b"b", b"c"]: await echo_client.send_all(test_byte) assert await echo_client.receive_some(1) == test_byte Next problem: we have a race condition. We spawn a background task to call ``serve_tcp``, and then immediately try to connect to that server. Sometimes this will work fine. But it takes a little while for the server to start up and be ready to accept connections - so other times, randomly, our connection attempt will happen too quickly, and error out. After all - ``nursery.start_soon`` only promises that the task will be started *soon*, not that it has actually happened. So this test will be flaky, and flaky tests are the worst. Fortunately, Trio makes this easy to solve, by switching to using ``await nursery.start(...)``. You can `read its docs for full details `__, but basically the idea is that both ``nursery.start_soon(...)`` and ``await nursery.start(...)`` create background tasks, but only ``start`` waits for the new task to finish getting itself set up. This requires some cooperation from the background task: it has to notify ``nursery.start`` when it's ready. Fortunately, :func:`trio.serve_tcp` already knows how to cooperate with ``nursery.start``, so we can write: .. code-block:: python3 :emphasize-lines: 6-10 # Let's cross our fingers and hope no-one else is using this port... 
PORT = 14923 # Don't copy this -- we can do better async def test_attempt_4(nursery): # Start server running in the background # AND wait for it to finish starting up before continuing await nursery.start( partial(trio.serve_tcp, echo_server_handler, port=PORT) ) # Connect to the server echo_client = await trio.open_tcp_stream("127.0.0.1", PORT) async with echo_client: for test_byte in [b"a", b"b", b"c"]: await echo_client.send_all(test_byte) assert await echo_client.receive_some(1) == test_byte That solves our race condition. Next issue: hardcoding the port number like this is a bad idea, because port numbers are a machine-wide resource, so if we're unlucky some other program might already be using it. What we really want to do is to tell :func:`~trio.serve_tcp` to pick a random port that no-one else is using. It turns out that this is easy: if you request port 0, then the operating system will pick an unused one for you automatically. Problem solved! But wait... if the operating system is picking the port for us, how do we know which one it picked, so we can connect to it later? Well, there's no way to predict the port ahead of time. But after :func:`~trio.serve_tcp` has opened a port, it can check and see what it got. So we need some way to pass this data back out of :func:`~trio.serve_tcp`. Fortunately, ``nursery.start`` handles this too: it lets the task pass out a piece of data after it has started. And it just so happens that what :func:`~trio.serve_tcp` passes out is a list of :class:`~trio.SocketListener` objects. And there's a handy function called :func:`trio.testing.open_stream_to_socket_listener` that can take a :class:`~trio.SocketListener` and make a connection to it. Putting it all together: .. code-block:: python3 :emphasize-lines: 1,8,13-16 from trio.testing import open_stream_to_socket_listener # Don't copy this -- it finally works, but we can still do better! async def test_attempt_5(nursery): # Start server running in the background # AND wait for it to finish starting up before continuing # AND find out where it's actually listening listeners = await nursery.start( partial(trio.serve_tcp, echo_server_handler, port=0) ) # Connect to the server. # There might be multiple listeners (example: IPv4 and # IPv6), but we don't care which one we connect to, so we # just use the first. echo_client = await open_stream_to_socket_listener(listeners[0]) async with echo_client: for test_byte in [b"a", b"b", b"c"]: await echo_client.send_all(test_byte) assert await echo_client.receive_some(1) == test_byte Now, this works - but there's still a lot of boilerplate. Remember, we need to write lots of tests for this server, and we don't want to have to copy-paste all that stuff into every test. Let's factor out the setup into a fixture:: @pytest.fixture async def echo_client(nursery): listeners = await nursery.start( partial(trio.serve_tcp, echo_server_handler, port=0) ) echo_client = await open_stream_to_socket_listener(listeners[0]) async with echo_client: yield echo_client And now in tests, all we have to do is request the ``echo_client`` fixture, and we get a background server and a client stream connected to it. So here's our complete, final version:: # Final version -- copy this! 
from functools import partial import pytest import trio from trio.testing import open_stream_to_socket_listener # The code being tested: async def echo_server_handler(stream): while True: data = await stream.receive_some(1000) if not data: break await stream.send_all(data) # The fixture: @pytest.fixture async def echo_client(nursery): listeners = await nursery.start( partial(trio.serve_tcp, echo_server_handler, port=0) ) echo_client = await open_stream_to_socket_listener(listeners[0]) async with echo_client: yield echo_client # A test using the fixture: async def test_final(echo_client): for test_byte in [b"a", b"b", b"c"]: await echo_client.send_all(test_byte) assert await echo_client.receive_some(1) == test_byte No hangs, no race conditions, simple, clean, and reusable. pytest-trio-0.8.0/docs/source/reference.rst000066400000000000000000000353221433025171300207460ustar00rootroot00000000000000Reference ========= Trio mode --------- Most users will want to enable "Trio mode". Without Trio mode: * Pytest-trio only handles tests that have been decorated with ``@pytest.mark.trio`` * Pytest-trio only handles fixtures if they're async *and* used by a test that's decorated with ``@pytest.mark.trio``, or if they're decorated with ``@pytest_trio.trio_fixture`` (instead of ``@pytest.fixture``). When Trio mode is enabled, two extra things happen: * Async tests automatically have the ``trio`` mark added, so you don't have to do it yourself. * Async fixtures using ``@pytest.fixture`` automatically get converted to Trio fixtures. (The main effect of this is that it helps you catch mistakes like using an async fixture with a non-async test.) There are two ways to enable Trio mode. The first option is to **use a pytest configuration file**. The exact rules for how pytest finds configuration files are `a bit complicated `__, but you want to end up with something like: .. code-block:: ini # pytest.ini [pytest] trio_mode = true The second option is **use a conftest.py file**. Inside your tests directory, create a file called ``conftest.py``, with the following contents:: # conftest.py from pytest_trio.enable_trio_mode import * This does exactly the same thing as setting ``trio_mode = true`` in ``pytest.ini``, except for two things: * Some people like to ship their tests as part of their library, so they (or their users) can test the final installed software by running ``pytest --pyargs PACKAGENAME``. In this mode, ``pytest.ini`` files don't work, but ``conftest.py`` files do. * Enabling Trio mode in ``pytest.ini`` always enables it globally for your entire testsuite. Enabling it in ``conftest.py`` only enables it for test files that are in the same directory as the ``conftest.py``, or its subdirectories. If you have software that uses multiple async libraries, then you can use ``conftest.py`` to enable Trio mode for just the part of your testsuite that uses Trio; or, if you need even finer-grained control, you can leave Trio mode disabled and use ``@pytest.mark.trio`` explicitly on all your Trio tests. Trio fixtures ------------- Normally, pytest runs fixture code before starting the test, and teardown code afterwards. For technical reasons, we can't wrap this whole process in :func:`trio.run` – only the test itself. As a workaround, pytest-trio introduces the concept of a "Trio fixture", which acts like a normal fixture for most purposes, but actually does the setup and teardown inside the test's call to :func:`trio.run`. 
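
For example, here's a minimal sketch (invented for illustration, not taken
from the examples above) of a fixture that ``await``\s during both setup and
teardown, which only a Trio fixture can do, precisely because it runs inside
the test's call to :func:`trio.run`::

    import pytest
    import trio

    @pytest.fixture
    async def message_channel():
        # Setup runs inside the test's call to trio.run(), so it can await:
        send, receive = trio.open_memory_channel(1)
        await send.send("hello")
        yield receive
        # Teardown runs there too, after the test finishes:
        await send.aclose()

    @pytest.mark.trio
    async def test_message_channel(message_channel):
        assert await message_channel.receive() == "hello"
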
The following fixtures are treated as Trio fixtures: * Any function decorated with ``@pytest_trio.trio_fixture``. * Any async function decorated with ``@pytest.fixture``, *if* Trio mode is enabled *or* this fixture is being requested by a Trio test. * Any fixture which depends on a Trio fixture. The most notable difference between regular fixtures and Trio fixtures is that regular fixtures can't use Trio APIs, but Trio fixtures can. Most of the time you don't need to worry about this, because you normally only call Trio APIs from async functions, and when Trio mode is enabled, all async fixtures are automatically Trio fixtures. However, if for some reason you do want to use Trio APIs from a synchronous fixture, then you'll have to use ``@pytest_trio.trio_fixture``:: # This fixture is not very useful # But it is an example where @pytest.fixture doesn't work @pytest_trio.trio_fixture def trio_time(): return trio.current_time() Only Trio tests can use Trio fixtures. If you have a regular (synchronous) test that tries to use a Trio fixture, then that's an error. And finally, regular fixtures can be `scoped to the test, class, module, or session `__, but Trio fixtures **must be test scoped**. Class, module, and session scope are not supported. .. _cancel-yield: An important note about ``yield`` fixtures ------------------------------------------ Like any pytest fixture, Trio fixtures can contain both setup and teardown code separated by a ``yield``:: @pytest.fixture async def my_fixture(): ... setup code ... yield ... teardown code ... When pytest-trio executes this fixture, it creates a new task, and runs the setup code until it reaches the ``yield``. Then the fixture's task goes to sleep. Once the test has finished, the fixture task wakes up again and resumes at the ``yield``, so it can execute the teardown code. So the ``yield`` in a fixture is sort of like calling ``await wait_for_test_to_finish()``. And in Trio, any ``await``\-able operation can be cancelled. For example, we could put a timeout on the ``yield``:: @pytest.fixture async def my_fixture(): ... setup code ... with trio.move_on_after(5): yield # this yield gets cancelled after 5 seconds ... teardown code ... Now if the test takes more than 5 seconds to execute, this fixture will cancel the ``yield``. That's kind of a strange thing to do, but there's another version of this that's extremely common. Suppose your fixture spawns a background task, and then the background task raises an exception. Whenever a background task raises an exception, it automatically cancels everything inside the nursery's scope – which includes our ``yield``:: @pytest.fixture async def my_fixture(nursery): nursery.start_soon(function_that_raises_exception) yield # this yield gets cancelled after the background task crashes ... teardown code ... If you use fixtures with background tasks, you'll probably end up cancelling one of these ``yield``\s sooner or later. So what happens if the ``yield`` gets cancelled? First, pytest-trio assumes that something has gone wrong and there's no point in continuing the test. If the top-level test function is running, then it cancels it. Then, pytest-trio waits for the test function to finish, and then begins tearing down fixtures as normal. During this teardown process, it will eventually reach the fixture that cancelled its ``yield``. This fixture gets resumed to execute its teardown logic, but with a special twist: since the ``yield`` was cancelled, the ``yield`` raises :exc:`trio.Cancelled`. 
Now, here's the punchline: this means that in our examples above, the
teardown code might not be executed at all!

**This is different from how pytest fixtures normally work.** Normally, the
``yield`` in a pytest fixture never raises an exception, so you can be
certain that any code you put after it will execute as normal. But if you
have a fixture with background tasks, and they crash, then your ``yield``
might raise an exception, and Python will skip executing the code after the
``yield``.

In our experience, most fixtures are fine with this, and it prevents some
`weird problems `__ that can happen otherwise. But it's something to be
aware of. If you have a fixture where the ``yield`` might be cancelled but
you still need to run teardown code, then you can use a ``finally`` block::

    @pytest.fixture
    async def my_fixture(nursery):
        nursery.start_soon(function_that_crashes)
        try:
            # This yield could be cancelled...
            yield
        finally:
            # But this code will run anyway
            ... teardown code ...

(But, watch out: the teardown code is still running in a cancelled context,
so if it has any ``await``\s it could raise :exc:`trio.Cancelled` again.)

Or if you use ``with`` to handle teardown, then you don't have to worry
about this, because ``with`` blocks always perform cleanup even if there's
an exception::

    @pytest.fixture
    async def my_fixture(nursery):
        with get_obj_that_must_be_torn_down() as obj:
            nursery.start_soon(function_that_crashes, obj)
            # This could raise trio.Cancelled...
            # ...but that's OK, the 'with' block will still tear down 'obj'
            yield obj

Concurrent setup/teardown
-------------------------

If your test uses multiple fixtures, then for speed, pytest-trio will try
to run their setup and teardown code concurrently whenever this is possible
while respecting the fixture dependencies.

Here's an example, where a test depends on ``fix_b`` and ``fix_c``, and
these both depend on ``fix_a``::

    @trio_fixture
    def fix_a():
        ...

    @trio_fixture
    def fix_b(fix_a):
        ...

    @trio_fixture
    def fix_c(fix_a):
        ...

    @pytest.mark.trio
    async def test_example(fix_b, fix_c):
        ...

When running ``test_example``, pytest-trio will perform the following
sequence of actions:

1. Set up ``fix_a``.
2. Set up ``fix_b`` and ``fix_c``, concurrently.
3. Run the test.
4. Tear down ``fix_b`` and ``fix_c``, concurrently.
5. Tear down ``fix_a``.

We're `seeking feedback `__ on whether this feature's benefits outweigh its
negatives.

Handling of ContextVars
-----------------------

The :mod:`contextvars` module lets you create
:class:`~contextvars.ContextVar` objects to represent task-local variables.
Normally, in Trio, each task gets its own :class:`~contextvars.Context`, so
that changes to :class:`~contextvars.ContextVar` objects are only visible
inside the task that performs them. But pytest-trio overrides this, and for
each test it uses a single :class:`~contextvars.Context` which is shared by
all fixtures and the test function itself.

The benefit of this is that you can set :class:`~contextvars.ContextVar`
values inside a fixture, and your settings will be visible in dependent
fixtures and the test itself. For example, `trio-asyncio `__ uses a
:class:`~contextvars.ContextVar` to hold the current asyncio loop object,
so this lets you open a loop inside a fixture and then use it inside other
fixtures or the test itself.

The downside is that if two fixtures are run concurrently (see previous
section), and both mutate the same :class:`~contextvars.ContextVar`, then
there will be a race condition and the final value will be unpredictable.
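To make the shared-context behaviour concrete, here's a small sketch (the
names are illustrative; the pattern mirrors the semantics described above)::

    import contextvars
    import pytest

    current_user = contextvars.ContextVar("current_user")

    @pytest.fixture
    async def logged_in_user():
        # Runs in the same Context as the test, so the test sees this value.
        token = current_user.set("alice")
        yield "alice"
        current_user.reset(token)

    @pytest.mark.trio
    async def test_sees_fixture_value(logged_in_user):
        assert current_user.get() == "alice"

(If two *concurrently*-run fixtures both called ``current_user.set``, their
writes would race, as just described.)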
If you make one fixture depend on the other, then this will force an ordering and make the final value predictable again. Built-in fixtures ----------------- These fixtures are automatically available to any code using pytest-trio. .. data:: autojump_clock A :class:`trio.testing.MockClock`, configured with ``rate=0, autojump_threshold=0``. .. data:: mock_clock A :class:`trio.testing.MockClock`, with its default configuration (``rate=0, autojump_threshold=inf``). What makes these particularly useful is that whenever pytest-trio runs a test, it checks the fixtures to see if one of them is a :class:`trio.abc.Clock` object. If so, it passes that object to :func:`trio.run`. So if your test requests one of these fixtures, it automatically uses that clock. If you implement your own :class:`~trio.abc.Clock`, and implement a fixture that returns it, then it will work the same way. Of course, like any pytest fixture, you also get the actual object available. For example, you can call :meth:`~trio.testing.MockClock.jump`:: async def test_time_travel(mock_clock): assert trio.current_time() == 0 mock_clock.jump(10) assert trio.current_time() == 10 .. data:: nursery A nursery created and managed by pytest-trio itself, which surrounds the test/fixture that requested it, and is automatically cancelled after the test/fixture completes. Basically, these are equivalent:: # Boring way async def test_with_background_task(): async with trio.open_nursery() as nursery: try: ... finally: nursery.cancel_scope.cancel() # Fancy way async def test_with_background_task(nursery): ... For a fixture, the cancellation always happens after the fixture completes its teardown phase. (Or if it doesn't have a teardown phase, then the cancellation happens after the teardown phase *would* have happened.) This fixture is even more magical than most pytest fixtures, because if it gets requested several times within the same test, then it creates multiple nurseries, one for each fixture/test that requested it. See :ref:`server-fixture-example` for an example of how this can be used. Integration with the Hypothesis library --------------------------------------- There isn't too much to say here, since the obvious thing just works:: from hypothesis import given import hypothesis.strategies as st @given(st.binary()) async def test_trio_and_hypothesis(data): ... Under the hood, this requires some coordination between Hypothesis and pytest-trio. Hypothesis runs your test multiple times with different examples of random data. For each example, pytest-trio calls :func:`trio.run` again (so you get a fresh clean Trio environment), sets up any Trio fixtures, runs the actual test, and then tears down any Trio fixtures. Notice that this is a bit different than regular pytest fixtures, which are `instantiated once and then re-used for all `__. Most of the time this shouldn't matter (and `is probably what you want anyway `__), but in some unusual cases it could surprise you. And this only applies to Trio fixtures – if a Trio test uses a mix of regular fixtures and Trio fixtures, then the regular fixtures will be reused, while the Trio fixtures will be repeatedly reinstantiated. Also, pytest-trio only handles ``@given``\-based tests. If you want to write `stateful tests `__ for Trio-based libraries, then check out `hypothesis-trio `__. .. 
_trio-run-config:

Using alternative Trio runners
------------------------------

If you are working with a library that provides integration with Trio, such
as via :ref:`guest mode `, it can be used with pytest-trio as well. Setting
``trio_run`` in the pytest configuration makes your choice the global
default for both tests explicitly marked with ``@pytest.mark.trio`` and
those automatically marked by Trio mode. ``trio_run`` presently supports
``trio`` and ``qtrio``.

.. code-block:: ini

   # pytest.ini
   [pytest]
   trio_mode = true
   trio_run = qtrio

.. code-block:: python

   import pytest

   @pytest.mark.trio
   async def test():
       assert True

If you want more granular control or need to use a specific function, it
can be passed directly to the marker.

.. code-block:: python

   import pytest

   @pytest.mark.trio(run=qtrio.run)
   async def test():
       assert True
pytest-trio-0.8.0/logo/000077500000000000000000000000001433025171300147615ustar00rootroot00000000000000pytest-trio-0.8.0/logo/wordmark-transparent.svg000066400000000000000000000260041433025171300216710ustar00rootroot00000000000000pytest-trio-0.8.0/newsfragments/000077500000000000000000000000001433025171300167045ustar00rootroot00000000000000pytest-trio-0.8.0/newsfragments/.gitkeep000066400000000000000000000000001433025171300203230ustar00rootroot00000000000000pytest-trio-0.8.0/newsfragments/README.rst000066400000000000000000000025611433025171300203770ustar00rootroot00000000000000Adding newsfragments
====================

This directory collects "newsfragments": short files that each contain a
snippet of ReST-formatted text that will be added to the next release
notes. This should be a description of aspects of the change (if any) that
are relevant to users. (This contrasts with your commit message and PR
description, which are a description of the change as relevant to people
working on the code itself.)

Each file should be named like ``<ISSUE>.<TYPE>.rst``, where ``<ISSUE>`` is
an issue number, and ``<TYPE>`` is one of:

* ``feature``
* ``bugfix``
* ``doc``
* ``removal``
* ``misc``

So for example: ``123.feature.rst``, ``456.bugfix.rst``

If your PR fixes an issue, use that number here. If there is no issue, then
after you submit the PR and get the PR number you can add a newsfragment
using that instead.

Note that the ``towncrier`` tool will automatically reflow your text, so
don't try to do any fancy formatting. You can install ``towncrier`` and
then run ``towncrier --draft`` if you want to get a preview of how your
change will look in the final release notes.

Making releases
===============

``pip install towncrier``, then run ``towncrier``. (You can use ``towncrier
--draft`` to get a preview of what this will do.)

You can configure ``towncrier`` (for example: customizing the different
types of changes) by modifying ``pyproject.toml``.
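For example, a hypothetical newsfragment (say ``123.bugfix.rst``; the
number and text here are made up purely for illustration) would contain
just the user-facing description::

   Fixed a hang that could occur when a fixture's background task crashed
   during teardown.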
pytest-trio-0.8.0/pyproject.toml000066400000000000000000000004311433025171300167330ustar00rootroot00000000000000[tool.towncrier] package = "pytest_trio" filename = "docs/source/history.rst" directory = "newsfragments" title_format = "pytest-trio {version} ({project_date})" underlines = ["-", "~", "^"] issue_format = "`#{issue} `__" pytest-trio-0.8.0/pytest.ini000066400000000000000000000003531433025171300160530ustar00rootroot00000000000000[pytest] addopts = -ra -v --pyargs pytest_trio --verbose --cov filterwarnings = error default::pytest.PytestAssertRewriteWarning default::pytest.PytestDeprecationWarning default::pytest.PytestUnraisableExceptionWarning pytest-trio-0.8.0/pytest_trio/000077500000000000000000000000001433025171300164065ustar00rootroot00000000000000pytest-trio-0.8.0/pytest_trio/__init__.py000066400000000000000000000002111433025171300205110ustar00rootroot00000000000000"""Top-level package for pytest-trio.""" from ._version import __version__ from .plugin import trio_fixture __all__ = ["trio_fixture"] pytest-trio-0.8.0/pytest_trio/_tests/000077500000000000000000000000001433025171300177075ustar00rootroot00000000000000pytest-trio-0.8.0/pytest_trio/_tests/__init__.py000066400000000000000000000000001433025171300220060ustar00rootroot00000000000000pytest-trio-0.8.0/pytest_trio/_tests/conftest.py000066400000000000000000000000361433025171300221050ustar00rootroot00000000000000pytest_plugins = ["pytester"] pytest-trio-0.8.0/pytest_trio/_tests/helpers.py000066400000000000000000000011421433025171300217210ustar00rootroot00000000000000import pytest def enable_trio_mode_via_pytest_ini(testdir): testdir.makefile(".ini", pytest="[pytest]\ntrio_mode = true\n") def enable_trio_mode_trio_run_via_pytest_ini(testdir): testdir.makefile(".ini", pytest="[pytest]\ntrio_mode = true\ntrio_run = trio\n") def enable_trio_mode_via_conftest_py(testdir): testdir.makeconftest("from pytest_trio.enable_trio_mode import *") enable_trio_mode = pytest.mark.parametrize( "enable_trio_mode", [ enable_trio_mode_via_pytest_ini, enable_trio_mode_trio_run_via_pytest_ini, enable_trio_mode_via_conftest_py, ], ) pytest-trio-0.8.0/pytest_trio/_tests/test_async_fixture.py000066400000000000000000000057071433025171300242140ustar00rootroot00000000000000import pytest def test_single_async_fixture(testdir): testdir.makepyfile( """ import pytest import trio @pytest.fixture async def fix1(): await trio.sleep(0) return 'fix1' @pytest.mark.trio async def test_simple(fix1): assert fix1 == 'fix1' """ ) result = testdir.runpytest() result.assert_outcomes(passed=1) def test_async_fixture_recomputed_for_each_test(testdir): testdir.makepyfile( """ import pytest import trio counter = 0 @pytest.fixture async def fix1(): global counter await trio.sleep(0) counter += 1 return counter @pytest.mark.trio async def test_first(fix1): assert fix1 == 1 @pytest.mark.trio async def test_second(fix1): assert fix1 == 2 """ ) result = testdir.runpytest() result.assert_outcomes(passed=2) def test_nested_async_fixture(testdir): testdir.makepyfile( """ import pytest import trio @pytest.fixture async def fix1(): await trio.sleep(0) return 'fix1' @pytest.fixture async def fix2(fix1): await trio.sleep(0) return 'fix2(%s)' % fix1 @pytest.mark.trio async def test_simple(fix2): assert fix2 == 'fix2(fix1)' @pytest.mark.trio async def test_both(fix1, fix2): assert fix1 == 'fix1' assert fix2 == 'fix2(fix1)' """ ) result = testdir.runpytest() result.assert_outcomes(passed=2) def test_async_within_sync_fixture(testdir): testdir.makepyfile( """ import pytest import trio 
        @pytest.fixture
        async def async_fix():
            await trio.sleep(0)
            return 42

        @pytest.fixture
        def sync_fix(async_fix):
            return async_fix

        @pytest.mark.trio
        async def test_simple(sync_fix):
            assert sync_fix == 42
    """
    )
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)


# In pytest, ERROR status occurs when an exception is raised in fixture code.
# The trouble is our async fixtures must be run within a trio context, hence
# they are actually run just before the test, providing no way to tell the
# difference between an exception coming from the real test and one coming
# from an async fixture...
@pytest.mark.xfail(reason="Not implemented yet")
def test_raise_in_async_fixture_cause_pytest_error(testdir):
    testdir.makepyfile(
        """
        import pytest

        @pytest.fixture
        async def fix1():
            raise ValueError('Ouch !')

        @pytest.mark.trio
        async def test_base(fix1):
            pass  # Crash should have occurred before arriving here
    """
    )
    result = testdir.runpytest()
    result.assert_outcomes(errors=1)
pytest-trio-0.8.0/pytest_trio/_tests/test_async_yield_fixture.py000066400000000000000000000154651433025171300254020ustar00rootroot00000000000000def test_single_async_yield_fixture(testdir):
    testdir.makepyfile(
        """
        import pytest
        import trio

        events = []

        @pytest.fixture
        async def fix1():
            events.append('fix1 setup')
            await trio.sleep(0)

            yield 'fix1'

            await trio.sleep(0)
            events.append('fix1 teardown')

        def test_before():
            assert not events

        @pytest.mark.trio
        async def test_actual_test(fix1):
            assert events == ['fix1 setup']
            assert fix1 == 'fix1'

        def test_after():
            assert events == [
                'fix1 setup',
                'fix1 teardown',
            ]
    """
    )
    result = testdir.runpytest()
    result.assert_outcomes(passed=3)


def test_nested_async_yield_fixture(testdir):
    testdir.makepyfile(
        """
        import pytest
        import trio

        events = []

        @pytest.fixture
        async def fix2():
            events.append('fix2 setup')
            await trio.sleep(0)

            yield 'fix2'

            await trio.sleep(0)
            events.append('fix2 teardown')

        @pytest.fixture
        async def fix1(fix2):
            events.append('fix1 setup')
            await trio.sleep(0)

            yield 'fix1'

            await trio.sleep(0)
            events.append('fix1 teardown')

        def test_before():
            assert not events

        @pytest.mark.trio
        async def test_actual_test(fix1):
            assert events == [
                'fix2 setup',
                'fix1 setup',
            ]
            assert fix1 == 'fix1'

        def test_after():
            assert events == [
                'fix2 setup',
                'fix1 setup',
                'fix1 teardown',
                'fix2 teardown',
            ]
    """
    )
    result = testdir.runpytest()
    result.assert_outcomes(passed=3)


def test_async_yield_fixture_within_sync_fixture(testdir):
    testdir.makepyfile(
        """
        import pytest
        import trio

        events = []

        @pytest.fixture
        async def fix2():
            events.append('fix2 setup')
            await trio.sleep(0)

            yield 'fix2'

            await trio.sleep(0)
            events.append('fix2 teardown')

        @pytest.fixture
        def fix1(fix2):
            return 'fix1'

        def test_before():
            assert not events

        @pytest.mark.trio
        async def test_actual_test(fix1):
            assert events == [
                'fix2 setup',
            ]
            assert fix1 == 'fix1'

        def test_after():
            assert events == [
                'fix2 setup',
                'fix2 teardown',
            ]
    """
    )
    result = testdir.runpytest()
    result.assert_outcomes(passed=3)


def test_async_yield_fixture_within_sync_yield_fixture(testdir):
    testdir.makepyfile(
        """
        import pytest
        import trio

        events = []

        @pytest.fixture
        async def fix2():
            events.append('fix2 setup')
            await trio.sleep(0)

            yield 'fix2'

            await trio.sleep(0)
            events.append('fix2 teardown')

        @pytest.fixture
        def fix1(fix2):
            events.append('fix1 setup')
            yield 'fix1'
            events.append('fix1 teardown')

        def test_before():
            assert not events

        @pytest.mark.trio
        async def test_actual_test(fix1):
            assert events == [
                'fix2 setup',
                'fix1 setup',
            ]
            assert fix1 == 'fix1'

        def test_after():
            assert events == [
                'fix2
setup', 'fix1 setup', 'fix1 teardown', 'fix2 teardown', ] """ ) result = testdir.runpytest() result.assert_outcomes(passed=3) def test_async_yield_fixture_with_multiple_yields(testdir): testdir.makepyfile( """ import pytest import trio @pytest.fixture async def fix1(): await trio.sleep(0) yield 'good' await trio.sleep(0) yield 'bad' @pytest.mark.trio async def test_actual_test(fix1): pass """ ) result = testdir.runpytest() # TODO: should trigger error instead of failure # result.assert_outcomes(errors=1) result.assert_outcomes(failed=1) def test_async_yield_fixture_with_nursery(testdir): testdir.makepyfile( """ import pytest import trio async def handle_client(stream): while True: buff = await stream.receive_some(4) await stream.send_all(buff) @pytest.fixture async def server(): async with trio.open_nursery() as nursery: listeners = await nursery.start(trio.serve_tcp, handle_client, 0) yield listeners[0] nursery.cancel_scope.cancel() @pytest.mark.trio async def test_actual_test(server): stream = await trio.testing.open_stream_to_socket_listener(server) await stream.send_all(b'ping') rep = await stream.receive_some(4) assert rep == b'ping' """ ) result = testdir.runpytest() result.assert_outcomes(passed=1) def test_async_yield_fixture_crashed_teardown_allow_other_teardowns(testdir): testdir.makepyfile( """ import pytest import trio setup_events = set() teardown_events = set() @pytest.fixture async def good_fixture(): async with trio.open_nursery() as nursery: setup_events.add('good_fixture setup') yield None teardown_events.add('good_fixture teardown') @pytest.fixture async def bad_fixture(): async with trio.open_nursery() as nursery: setup_events.add('bad_fixture setup') yield None teardown_events.add('bad_fixture teardown') raise RuntimeError('Crash during fixture teardown') def test_before(): assert not setup_events assert not teardown_events @pytest.mark.trio async def test_actual_test(bad_fixture, good_fixture): pass def test_after(): assert setup_events == { 'good_fixture setup', 'bad_fixture setup', } assert teardown_events == { 'bad_fixture teardown', 'good_fixture teardown', } """ ) result = testdir.runpytest() result.assert_outcomes(failed=1, passed=2) result.stdout.re_match_lines( [r"(E\W+| +\| )RuntimeError: Crash during fixture teardown"] ) pytest-trio-0.8.0/pytest_trio/_tests/test_basic.py000066400000000000000000000054141433025171300224050ustar00rootroot00000000000000import pytest def test_async_test_is_executed(testdir): testdir.makepyfile( """ import pytest import trio async_test_called = False @pytest.mark.trio async def test_base(): global async_test_called await trio.sleep(0) async_test_called = True def test_check_async_test_called(): assert async_test_called """ ) result = testdir.runpytest("-s") result.assert_outcomes(passed=2) def test_async_test_as_class_method(testdir): testdir.makepyfile( """ import pytest import trio async_test_called = False @pytest.fixture async def fix(): await trio.sleep(0) return 'fix' class TestInClass: @pytest.mark.trio async def test_base(self, fix): global async_test_called assert fix == 'fix' await trio.sleep(0) async_test_called = True def test_check_async_test_called(): assert async_test_called """ ) result = testdir.runpytest() result.assert_outcomes(passed=2) @pytest.mark.xfail(reason="Raises pytest internal error so far...") def test_sync_function_with_trio_mark(testdir): testdir.makepyfile( """ import pytest @pytest.mark.trio def test_invalid(): pass """ ) result = testdir.runpytest() result.assert_outcomes(errors=1) def 
test_skip_and_xfail(testdir): testdir.makepyfile( """ import functools import pytest import trio trio.run = functools.partial(trio.run, strict_exception_groups=True) @pytest.mark.trio async def test_xfail(): pytest.xfail() @pytest.mark.trio async def test_skip(): pytest.skip() async def callback(fn): fn() async def fail(): raise RuntimeError @pytest.mark.trio async def test_xfail_and_fail(): async with trio.open_nursery() as nursery: nursery.start_soon(callback, pytest.xfail) nursery.start_soon(fail) @pytest.mark.trio async def test_skip_and_fail(): async with trio.open_nursery() as nursery: nursery.start_soon(callback, pytest.skip) nursery.start_soon(fail) @pytest.mark.trio async def test_xfail_and_skip(): async with trio.open_nursery() as nursery: nursery.start_soon(callback, pytest.skip) nursery.start_soon(callback, pytest.xfail) """ ) result = testdir.runpytest("-s") result.assert_outcomes(skipped=1, xfailed=1, failed=3) pytest-trio-0.8.0/pytest_trio/_tests/test_clock_fixture.py000066400000000000000000000004741433025171300241660ustar00rootroot00000000000000import pytest import trio @pytest.mark.trio async def test_sleep_with_autojump_clock(autojump_clock): assert trio.current_time() == 0 for i in range(10): start_time = trio.current_time() await trio.sleep(i) end_time = trio.current_time() assert end_time - start_time == i pytest-trio-0.8.0/pytest_trio/_tests/test_contextvars.py000066400000000000000000000013441433025171300237020ustar00rootroot00000000000000import pytest from pytest_trio import trio_fixture import contextvars cv = contextvars.ContextVar("cv", default=None) @trio_fixture def cv_checker(): assert cv.get() is None yield assert cv.get() is None @trio_fixture def cv_setter(cv_checker): assert cv.get() is None token = cv.set("cv_setter") yield assert cv.get() == "cv_setter2" cv.reset(token) assert cv.get() is None @trio_fixture def cv_setter2(cv_setter): assert cv.get() == "cv_setter" # Intentionally leak, so can check that this is visible back in cv_setter cv.set("cv_setter2") yield assert cv.get() == "cv_setter2" @pytest.mark.trio async def test_contextvars(cv_setter2): assert cv.get() == "cv_setter2" pytest-trio-0.8.0/pytest_trio/_tests/test_fixture_mistakes.py000066400000000000000000000100461433025171300247070ustar00rootroot00000000000000import pytest from pytest_trio import trio_fixture from .helpers import enable_trio_mode def test_trio_fixture_with_non_trio_test(testdir): testdir.makepyfile( """ import trio from pytest_trio import trio_fixture import pytest @trio_fixture def trio_time(): return trio.current_time() @pytest.fixture def indirect_trio_time(trio_time): return trio_time + 1 @pytest.mark.trio async def test_async(mock_clock, trio_time, indirect_trio_time): assert trio_time == 0 assert indirect_trio_time == 1 def test_sync(trio_time): pass def test_sync_indirect(indirect_trio_time): pass """ ) result = testdir.runpytest() result.assert_outcomes(passed=1, errors=2) result.stdout.fnmatch_lines(["*: Trio fixtures can only be used by Trio tests*"]) def test_trio_fixture_with_wrong_scope_without_trio_mode(testdir): # There's a trick here: when you have a non-function-scope fixture, it's # not instantiated for any particular function (obviously). So... when our # pytest_fixture_setup hook tries to check for marks, it can't normally # see @pytest.mark.trio. So... it's actually almost impossible to have an # async fixture get treated as a Trio fixture *and* have it be # non-function-scope. 
But, class-scoped fixtures can see marks on the # class, so this is one way (the only way?) it can happen: testdir.makepyfile( """ import pytest @pytest.fixture(scope="class") async def async_class_fixture(): pass @pytest.mark.trio class TestFoo: async def test_foo(self, async_class_fixture): pass """ ) result = testdir.runpytest() result.assert_outcomes(errors=1) result.stdout.fnmatch_lines(["*: Trio fixtures must be function-scope*"]) @enable_trio_mode def test_trio_fixture_with_wrong_scope_in_trio_mode(testdir, enable_trio_mode): enable_trio_mode(testdir) testdir.makepyfile( """ import pytest @pytest.fixture(scope="session") async def async_session_fixture(): pass async def test_whatever(async_session_fixture): pass """ ) result = testdir.runpytest() result.assert_outcomes(errors=1) result.stdout.fnmatch_lines(["*: Trio fixtures must be function-scope*"]) @enable_trio_mode def test_async_fixture_with_sync_test_in_trio_mode(testdir, enable_trio_mode): enable_trio_mode(testdir) testdir.makepyfile( """ import pytest @pytest.fixture async def async_fixture(): pass def test_whatever(async_fixture): pass """ ) result = testdir.runpytest() result.assert_outcomes(errors=1) result.stdout.fnmatch_lines(["*: Trio fixtures can only be used by Trio tests*"]) @enable_trio_mode def test_fixture_cancels_test_but_doesnt_raise(testdir, enable_trio_mode): enable_trio_mode(testdir) testdir.makepyfile( """ import pytest import trio @pytest.fixture async def async_fixture(): with trio.CancelScope() as cscope: cscope.cancel() yield async def test_whatever(async_fixture): pass """ ) result = testdir.runpytest() result.assert_outcomes(failed=1) result.stdout.fnmatch_lines(["*async_fixture*cancelled the test*"]) @enable_trio_mode def test_too_many_clocks(testdir, enable_trio_mode): enable_trio_mode(testdir) testdir.makepyfile( """ import pytest @pytest.fixture def extra_clock(mock_clock): return mock_clock async def test_whatever(mock_clock, extra_clock): pass """ ) result = testdir.runpytest() result.assert_outcomes(failed=1) result.stdout.fnmatch_lines( ["*ValueError: Expected at most one Clock in kwargs, got *"] ) pytest-trio-0.8.0/pytest_trio/_tests/test_fixture_names.py000066400000000000000000000011431433025171300241700ustar00rootroot00000000000000import pytest from pytest_trio import trio_fixture import trio @trio_fixture def fixture_with_unique_name(nursery): nursery.start_soon(trio.sleep_forever) @pytest.mark.trio async def test_fixture_names(fixture_with_unique_name): # This might be a bit fragile ... if we rearrange the nursery hierarchy # somehow so it breaks, then we can make it more robust. 
    task = trio.lowlevel.current_task()
    assert task.name == "<test 'test_fixture_names'>"
    sibling_names = {task.name for task in task.parent_nursery.child_tasks}
    assert "<fixture 'fixture_with_unique_name'>" in sibling_names
pytest-trio-0.8.0/pytest_trio/_tests/test_fixture_nursery.py000066400000000000000000000010031433025171300245700ustar00rootroot00000000000000import pytest
import trio


async def handle_client(stream):
    while True:
        buff = await stream.receive_some(4)
        await stream.send_all(buff)


@pytest.fixture
async def server(nursery):
    listeners = await nursery.start(trio.serve_tcp, handle_client, 0)
    return listeners[0]


@pytest.mark.trio
async def test_try(server):
    stream = await trio.testing.open_stream_to_socket_listener(server)
    await stream.send_all(b"ping")
    rep = await stream.receive_some(4)
    assert rep == b"ping"
pytest-trio-0.8.0/pytest_trio/_tests/test_fixture_ordering.py000066400000000000000000000251371433025171300247060ustar00rootroot00000000000000import pytest


# Tests that:
# - leaf_fix gets set up first and torn down last
# - the two fix_concurrent_{1,2} fixtures run their setup/teardown code
#   at the same time -- their execution can be interleaved.
def test_fixture_basic_ordering(testdir):
    testdir.makepyfile(
        """
        import pytest
        from pytest_trio import trio_fixture
        from trio.testing import Sequencer

        setup_events = []
        teardown_events = []

        @trio_fixture
        def seq():
            return Sequencer()

        @pytest.fixture
        async def leaf_fix():
            setup_events.append("leaf_fix setup")

            yield

            teardown_events.append("leaf_fix teardown")

            assert teardown_events == [
                "fix_concurrent_1 teardown 1",
                "fix_concurrent_2 teardown 1",
                "fix_concurrent_1 teardown 2",
                "fix_concurrent_2 teardown 2",
                "leaf_fix teardown",
            ]

        @pytest.fixture
        async def fix_concurrent_1(leaf_fix, seq):
            async with seq(0):
                setup_events.append("fix_concurrent_1 setup 1")
            async with seq(2):
                setup_events.append("fix_concurrent_1 setup 2")

            yield

            async with seq(4):
                teardown_events.append("fix_concurrent_1 teardown 1")
            async with seq(6):
                teardown_events.append("fix_concurrent_1 teardown 2")

        @pytest.fixture
        async def fix_concurrent_2(leaf_fix, seq):
            async with seq(1):
                setup_events.append("fix_concurrent_2 setup 1")
            async with seq(3):
                setup_events.append("fix_concurrent_2 setup 2")

            yield

            async with seq(5):
                teardown_events.append("fix_concurrent_2 teardown 1")
            async with seq(7):
                teardown_events.append("fix_concurrent_2 teardown 2")

        @pytest.mark.trio
        async def test_root(fix_concurrent_1, fix_concurrent_2):
            assert setup_events == [
                "leaf_fix setup",
                "fix_concurrent_1 setup 1",
                "fix_concurrent_2 setup 1",
                "fix_concurrent_1 setup 2",
                "fix_concurrent_2 setup 2",
            ]
            assert teardown_events == []
    """
    )
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)


def test_nursery_fixture_teardown_ordering(testdir):
    testdir.makepyfile(
        """
        import pytest
        from pytest_trio import trio_fixture
        import trio
        from trio.testing import wait_all_tasks_blocked

        events = []

        async def record_cancel(msg):
            try:
                await trio.sleep_forever()
            finally:
                events.append(msg)

        @pytest.fixture
        def fix0():
            yield

            assert events == [
                "test",
                "test cancel",
                "fix2 teardown",
                "fix2 cancel",
                "fix1 teardown",
                "fix1 cancel",
            ]

        @trio_fixture
        def fix1(nursery):
            nursery.start_soon(record_cancel, "fix1 cancel")

            yield

            events.append("fix1 teardown")

        @trio_fixture
        def fix2(fix1, nursery):
            nursery.start_soon(record_cancel, "fix2 cancel")

            yield

            events.append("fix2 teardown")

        @pytest.mark.trio
        async def test_root(fix2, nursery):
            nursery.start_soon(record_cancel, "test cancel")
            await wait_all_tasks_blocked()
            events.append("test")
    """
    )
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)


def test_error_collection(testdir):
    # We want to make sure that pytest ultimately reports all the different
    # exceptions. We call .upper() on all the exceptions so that we have
    # tokens to look for in the output corresponding to each exception, where
    # those tokens don't appear at all in the source (so we can't get a false
    # positive due to pytest printing out the source file).
    # We sleep at the beginning of all the fixtures b/c currently if any
    # fixture crashes, we skip setting up unrelated fixtures whose setup
    # hasn't even started yet. Maybe we shouldn't? But for now the sleeps make
    # sure that all the fixtures have started before any of them start
    # crashing.
    testdir.makepyfile(
        """
        import pytest
        from pytest_trio import trio_fixture
        import trio

        test_started = False

        @trio_fixture
        async def crash_nongen():
            with trio.CancelScope(shield=True):
                await trio.sleep(2)
            raise RuntimeError("crash_nongen".upper())

        @trio_fixture
        async def crash_early_agen():
            with trio.CancelScope(shield=True):
                await trio.sleep(2)
            raise RuntimeError("crash_early_agen".upper())
            yield

        @trio_fixture
        async def crash_late_agen():
            yield
            raise RuntimeError("crash_late_agen".upper())

        async def crash(when, token):
            with trio.CancelScope(shield=True):
                await trio.sleep(when)
            raise RuntimeError(token.upper())

        @trio_fixture
        def crash_background(nursery):
            nursery.start_soon(crash, 1, "crash_background_early")
            nursery.start_soon(crash, 3, "crash_background_late")

        @pytest.mark.trio
        async def test_all_the_crashes(
            autojump_clock,
            crash_nongen,
            crash_early_agen,
            crash_late_agen,
            crash_background,
        ):
            global test_started
            test_started = True

        def test_followup():
            assert not test_started
    """
    )
    result = testdir.runpytest()
    result.assert_outcomes(passed=1, failed=1)
    result.stdout.fnmatch_lines_random(
        [
            "*CRASH_NONGEN*",
            "*CRASH_EARLY_AGEN*",
            "*CRASH_LATE_AGEN*",
            "*CRASH_BACKGROUND_EARLY*",
            "*CRASH_BACKGROUND_LATE*",
        ]
    )


@pytest.mark.parametrize("bgmode", ["nursery fixture", "manual nursery"])
def test_background_crash_cancellation_propagation(bgmode, testdir):
    crashyfix_using_nursery_fixture = """
        @trio_fixture
        def crashyfix(nursery):
            nursery.start_soon(crashy)
            with pytest.raises(trio.Cancelled):
                yield  # We should be cancelled here
            teardown_deadlines["crashyfix"] = trio.current_effective_deadline()
    """

    crashyfix_using_manual_nursery = """
        @trio_fixture
        async def crashyfix():
            async with trio.open_nursery() as nursery:
                nursery.start_soon(crashy)
                with pytest.raises(trio.Cancelled):
                    yield  # We should be cancelled here
                teardown_deadlines["crashyfix"] = trio.current_effective_deadline()
    """

    if bgmode == "nursery fixture":
        crashyfix = crashyfix_using_nursery_fixture
    else:
        crashyfix = crashyfix_using_manual_nursery

    testdir.makepyfile(
        """
        import pytest
        from pytest_trio import trio_fixture
        import trio

        teardown_deadlines = {}
        final_time = None

        async def crashy():
            await trio.sleep(1)
            raise RuntimeError

        CRASHYFIX_HERE

        @trio_fixture
        def sidefix():
            yield  # We should NOT be cancelled here
            teardown_deadlines["sidefix"] = trio.current_effective_deadline()

        @trio_fixture
        def userfix(crashyfix):
            yield
            # Currently we should NOT be cancelled here... though maybe this
            # should change?
            teardown_deadlines["userfix"] = trio.current_effective_deadline()

        @pytest.mark.trio
        async def test_it(userfix, sidefix, autojump_clock):
            try:
                await trio.sleep_forever()
            finally:
                global final_time
                final_time = trio.current_time()

        def test_post():
            assert teardown_deadlines == {
                "crashyfix": -float("inf"),
                "sidefix": float("inf"),
                "userfix": float("inf"),
            }
            assert final_time == 1
    """.replace(
            "CRASHYFIX_HERE", crashyfix
        )
    )
    result = testdir.runpytest()
    result.assert_outcomes(passed=1, failed=1)


# See the thread starting at
# https://github.com/python-trio/pytest-trio/pull/77#issuecomment-499979536
# for details on the real case that this was minimized from
def test_complex_cancel_interaction_regression(testdir):
    testdir.makepyfile(
        """
        import pytest
        import trio
        from contextlib import asynccontextmanager

        async def die_soon():
            raise RuntimeError('oops'.upper())

        @asynccontextmanager
        async def async_finalizer():
            try:
                yield
            finally:
                await trio.sleep(0)

        @pytest.fixture
        async def fixture(nursery):
            async with trio.open_nursery() as nursery1:
                async with async_finalizer():
                    async with trio.open_nursery() as nursery2:
                        nursery2.start_soon(die_soon)
                        yield
                nursery1.cancel_scope.cancel()

        @pytest.mark.trio
        async def test_try(fixture):
            await trio.sleep_forever()
    """
    )
    result = testdir.runpytest()
    result.assert_outcomes(passed=0, failed=1)
    result.stdout.fnmatch_lines_random(["*OOPS*"])


# Makes sure that a fixture crashing while another fixture hangs doesn't
# deadlock the whole test run.
# See https://github.com/python-trio/pytest-trio/issues/120
def test_fixtures_crash_and_hang_concurrently(testdir):
    testdir.makepyfile(
        """
        import trio
        import pytest


        @pytest.fixture
        async def hanging_fixture():
            print("hanging_fixture:start")
            await trio.Event().wait()
            yield
            print("hanging_fixture:end")


        @pytest.fixture
        async def exploding_fixture():
            print("exploding_fixture:start")
            raise Exception
            yield
            print("exploding_fixture:end")


        @pytest.mark.trio
        async def test_fails_right_away(exploding_fixture):
            ...


        @pytest.mark.trio
        async def test_fails_needs_some_scopes(exploding_fixture, hanging_fixture):
            ...
    """
    )
    result = testdir.runpytest()
    result.assert_outcomes(passed=0, failed=2)
pytest-trio-0.8.0/pytest_trio/_tests/test_hypothesis_interaction.py000066400000000000000000000030011433025171300261100ustar00rootroot00000000000000import pytest
import trio
from trio.tests.test_scheduler_determinism import (
    scheduler_trace,
    test_the_trio_scheduler_is_not_deterministic,
    test_the_trio_scheduler_is_deterministic_if_seeded,
)
from hypothesis import given, settings, strategies as st

from pytest_trio.plugin import _trio_test_runner_factory

# deadline=None avoids unpredictable warnings/errors when CI happens to be
# slow (example: https://travis-ci.org/python-trio/pytest-trio/jobs/406738296)
# max_examples=5 speeds things up a bit
our_settings = settings(deadline=None, max_examples=5)


@our_settings
@given(st.integers())
@pytest.mark.trio
async def test_mark_inner(n):
    assert isinstance(n, int)


@our_settings
@pytest.mark.trio
@given(st.integers())
async def test_mark_outer(n):
    assert isinstance(n, int)


@our_settings
@pytest.mark.parametrize("y", [1, 2])
@given(x=st.none())
@pytest.mark.trio
async def test_mark_and_parametrize(x, y):
    assert x is None
    assert y in (1, 2)


def test_the_trio_scheduler_is_deterministic_under_hypothesis():
    traces = []

    @our_settings
    @given(st.integers())
    @pytest.mark.trio
    async def inner(_):
        traces.append(await scheduler_trace())

    # The pytest.mark.trio doesn't do its magic thing to
    # inner functions, so we invoke it explicitly here.
inner.hypothesis.inner_test = _trio_test_runner_factory( None, inner.hypothesis.inner_test ) inner() # Tada, now it's a sync function! assert len(traces) >= 5 assert len(set(traces)) == 1 pytest-trio-0.8.0/pytest_trio/_tests/test_sync_fixture.py000066400000000000000000000063441433025171300240510ustar00rootroot00000000000000import pytest @pytest.fixture def sync_fix(): return "sync_fix" @pytest.mark.trio async def test_single_sync_fixture(sync_fix): assert sync_fix == "sync_fix" def test_single_yield_fixture(testdir): testdir.makepyfile( """ import pytest events = [] @pytest.fixture def fix1(): events.append('fixture setup') yield 'fix1' events.append('fixture teardown') def test_before(): assert not events @pytest.mark.trio async def test_actual_test(fix1): assert events == ['fixture setup'] assert fix1 == 'fix1' def test_after(): assert events == [ 'fixture setup', 'fixture teardown', ] """ ) result = testdir.runpytest() result.assert_outcomes(passed=3) def test_single_yield_fixture_with_async_deps(testdir): testdir.makepyfile( """ import pytest import trio events = [] @pytest.fixture async def fix0(): events.append('fix0 setup') await trio.sleep(0) return 'fix0' @pytest.fixture def fix1(fix0): events.append('fix1 setup') yield 'fix1 - ' + fix0 events.append('fix1 teardown') def test_before(): assert not events @pytest.mark.trio async def test_actual_test(fix1): assert events == ['fix0 setup', 'fix1 setup'] assert fix1 == 'fix1 - fix0' def test_after(): assert events == [ 'fix0 setup', 'fix1 setup', 'fix1 teardown', ] """ ) result = testdir.runpytest() result.assert_outcomes(passed=3) def test_sync_yield_fixture_crashed_teardown_allow_other_teardowns(testdir): testdir.makepyfile( """ import pytest import trio setup_events = set() teardown_events = set() @pytest.fixture async def force_async_fixture(): pass @pytest.fixture def good_fixture(force_async_fixture): setup_events.add('good_fixture setup') yield teardown_events.add('good_fixture teardown') @pytest.fixture def bad_fixture(force_async_fixture): setup_events.add('bad_fixture setup') yield teardown_events.add('bad_fixture teardown') raise RuntimeError('Crash during fixture teardown') def test_before(): assert not setup_events assert not teardown_events @pytest.mark.trio async def test_actual_test(bad_fixture, good_fixture): pass def test_after(): assert setup_events == { 'good_fixture setup', 'bad_fixture setup', } assert teardown_events == { 'bad_fixture teardown', 'good_fixture teardown', } """ ) result = testdir.runpytest() result.assert_outcomes(failed=1, passed=2) result.stdout.re_match_lines( [r"(E\W+| +\| )RuntimeError: Crash during fixture teardown"] ) pytest-trio-0.8.0/pytest_trio/_tests/test_trio_mode.py000066400000000000000000000077161433025171300233140ustar00rootroot00000000000000import pytest from .helpers import enable_trio_mode test_text = """ import pytest import trio from hypothesis import given, settings, strategies async def test_pass(): await trio.sleep(0) async def test_fail(): await trio.sleep(0) assert False @settings(deadline=None, max_examples=5) @given(strategies.binary()) async def test_hypothesis_pass(b): await trio.sleep(0) assert isinstance(b, bytes) @settings(deadline=None, max_examples=5) @given(strategies.binary()) async def test_hypothesis_fail(b): await trio.sleep(0) assert isinstance(b, int) """ @enable_trio_mode def test_trio_mode(testdir, enable_trio_mode): enable_trio_mode(testdir) testdir.makepyfile(test_text) result = testdir.runpytest() result.assert_outcomes(passed=2, failed=2) # This is 
faking qtrio due to real qtrio's dependence on either # PyQt5 or PySide2. They are both large and require special # handling in CI. The testing here is able to focus on the # pytest-trio features with just this minimal substitute. qtrio_text = """ import trio fake_used = False def run(*args, **kwargs): global fake_used fake_used = True return trio.run(*args, **kwargs) """ def test_trio_mode_and_qtrio_run_configuration(testdir): testdir.makefile(".ini", pytest="[pytest]\ntrio_mode = true\ntrio_run = qtrio\n") testdir.makepyfile(qtrio=qtrio_text) test_text = """ import qtrio import trio async def test_fake_qtrio_used(): await trio.sleep(0) assert qtrio.fake_used """ testdir.makepyfile(test_text) result = testdir.runpytest() result.assert_outcomes(passed=1) def test_trio_mode_and_qtrio_marker(testdir): testdir.makefile(".ini", pytest="[pytest]\ntrio_mode = true\n") testdir.makepyfile(qtrio=qtrio_text) test_text = """ import pytest import qtrio import trio @pytest.mark.trio(run=qtrio.run) async def test_fake_qtrio_used(): await trio.sleep(0) assert qtrio.fake_used """ testdir.makepyfile(test_text) result = testdir.runpytest() result.assert_outcomes(passed=1) def test_qtrio_just_run_configuration(testdir): testdir.makefile(".ini", pytest="[pytest]\ntrio_run = qtrio\n") testdir.makepyfile(qtrio=qtrio_text) test_text = """ import pytest import qtrio import trio @pytest.mark.trio async def test_fake_qtrio_used(): await trio.sleep(0) assert qtrio.fake_used """ testdir.makepyfile(test_text) result = testdir.runpytest() result.assert_outcomes(passed=1) def test_invalid_trio_run_fails(testdir): run_name = "invalid_trio_run" testdir.makefile( ".ini", pytest=f"[pytest]\ntrio_mode = true\ntrio_run = {run_name}\n" ) test_text = """ async def test(): pass """ testdir.makepyfile(test_text) result = testdir.runpytest() result.assert_outcomes() result.stdout.fnmatch_lines( [ f"*ValueError: {run_name!r} not valid for 'trio_run' config. 
Must be one of: *" ] ) def test_closest_explicit_run_wins(testdir): testdir.makefile(".ini", pytest=f"[pytest]\ntrio_mode = true\ntrio_run = trio\n") testdir.makepyfile(qtrio=qtrio_text) test_text = """ import pytest import pytest_trio import qtrio @pytest.mark.trio(run='should be ignored') @pytest.mark.trio(run=qtrio.run) async def test(): assert qtrio.fake_used """ testdir.makepyfile(test_text) result = testdir.runpytest() result.assert_outcomes(passed=1) def test_ini_run_wins_with_blank_marker(testdir): testdir.makefile(".ini", pytest=f"[pytest]\ntrio_mode = true\ntrio_run = qtrio\n") testdir.makepyfile(qtrio=qtrio_text) test_text = """ import pytest import pytest_trio import qtrio @pytest.mark.trio async def test(): assert qtrio.fake_used """ testdir.makepyfile(test_text) result = testdir.runpytest() result.assert_outcomes(passed=1) pytest-trio-0.8.0/pytest_trio/_version.py000066400000000000000000000001311433025171300205770ustar00rootroot00000000000000# This file is imported from __init__.py and exec'd from setup.py __version__ = "0.8.0" pytest-trio-0.8.0/pytest_trio/enable_trio_mode.py000066400000000000000000000004501433025171300222460ustar00rootroot00000000000000__all__ = ["pytest_collection_modifyitems", "pytest_fixture_setup"] from .plugin import automark, handle_fixture def pytest_collection_modifyitems(items): automark(items) def pytest_fixture_setup(fixturedef, request): return handle_fixture(fixturedef, request, force_trio_mode=True) pytest-trio-0.8.0/pytest_trio/plugin.py000066400000000000000000000527561433025171300202750ustar00rootroot00000000000000"""pytest-trio implementation.""" import sys from functools import wraps, partial from collections.abc import Coroutine, Generator from contextlib import asynccontextmanager from inspect import isasyncgen, isasyncgenfunction, iscoroutinefunction import contextvars import outcome import pytest import trio from trio.abc import Clock, Instrument from trio.testing import MockClock from _pytest.outcomes import Skipped, XFailed if sys.version_info[:2] < (3, 11): from exceptiongroup import BaseExceptionGroup ################################################################ # Basic setup ################################################################ try: from hypothesis import register_random except ImportError: # pragma: no cover pass else: # On recent versions of Hypothesis, make the Trio scheduler deterministic # even though it uses a module-scoped Random instance. This works # regardless of whether or not the random_module strategy is used. register_random(trio._core._run._r) # We also have to enable determinism, which is disabled by default # due to a small performance impact - but fine to enable in testing. # See https://github.com/python-trio/trio/pull/890/ for details. trio._core._run._ALLOW_DETERMINISTIC_SCHEDULING = True def pytest_addoption(parser): parser.addini( "trio_mode", "should pytest-trio handle all async functions?", type="bool", default=False, ) parser.addini( "trio_run", "what runner should pytest-trio use? [trio, qtrio]", default="trio", ) def pytest_configure(config): # So that it shows up in 'pytest --markers' output: config.addinivalue_line( "markers", "trio: mark the test as an async trio test; it will be run using trio.run", ) ################################################################ # Core support for trio fixtures and trio tests ################################################################ # This is more complicated than you might expect. 
# The first complication is that all of pytest's machinery for setting up,
# running a test, and then tearing it down again is synchronous. But we want
# to have async setup, async tests, and async teardown.
#
# Our trick: from pytest's point of view, trio fixtures return an unevaluated
# placeholder value, a TrioFixture object. This contains all the information
# needed to do the actual setup/teardown, but doesn't actually perform these
# operations.
#
# Then, pytest runs what it thinks of as "the test", we enter trio, and use
# our own logic to set up the trio fixtures, run the actual test, and then
# tear down the trio fixtures. This works pretty well, though it has some
# limitations:
# - trio fixtures have to be test-scoped
# - normally pytest considers a fixture crash to be an ERROR, but when a trio
#   fixture crashes, it gets classified as a FAIL.
#
# The other major complication is that we really want to allow trio fixtures
# to yield inside a nursery. (See gh-55 for more discussion.) And then while
# the fixture function is suspended, a task inside that nursery might crash.
#
# Why is this a problem? Two reasons. First, a technical one: Trio's cancel
# scope machinery assumes that it can inject a Cancelled exception into any
# code inside the cancel scope, and that exception will eventually make its
# way back to the 'with' block.
#
# A fixture that yields inside a nursery violates this rule: the cancel scope
# remains "active" from when the fixture yields until when it's reentered, but
# if a Cancelled exception is raised during this time, then it *won't* go into
# the fixture. (And we can't throw it in there either, because that's just not
# how pytest fixtures work. Whoops.)
#
# And second, our setup/test/teardown process needs to account for the
# possibility that any fixture's background task might crash at any moment,
# and do something sensible with it.
#
# You should think of fixtures as a dependency graph: each fixture *uses*
# zero or more other fixtures, and is *used by* zero or more other fixtures.
# A fixture should be set up before any of its dependees are set up, and torn
# down once all of its dependees have terminated.
# At the root of this dependency graph, we have the test itself,
# which is just like a fixture except that instead of having a separate setup
# and teardown phase, it runs straight through.
#
# To implement this, we isolate each fixture into its own task: this makes
# sure that crashes in one can't trigger implicit cancellation in another.
# Then we use trio.Event objects to implement the ordering described above.
#
# If a fixture crashes, whether during setup, teardown, or in a background
# task at any other point, then we mark the whole test run as "crashed". When
# a run is "crashed", two things happen: (1) if any fixtures or the test
# itself haven't started yet, then we don't start them, and treat them as if
# they've already exited. (2) if the test is running, we cancel it. That's
# all. In particular, if a fixture has a background crash, we don't propagate
# that to any other fixtures, we still follow the normal teardown sequence,
# and so on – but since the test is cancelled, the teardown sequence should
# start immediately.
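#
# To illustrate the ordering rules concretely (a sketch only; the fixture
# names are hypothetical, echoing the example in docs/source/reference.rst):
# given fixtures where fix_b and fix_c both use fix_a, and a test that uses
# fix_b and fix_c, the sequence is:
#
#   1. Set up fix_a
#   2. Set up fix_b and fix_c, concurrently
#   3. Run the test
#   4. Tear down fix_b and fix_c, concurrently
#   5. Tear down fix_a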
canary = contextvars.ContextVar("pytest-trio canary")


class TrioTestContext:
    def __init__(self):
        self.crashed = False
        # This holds cancel scopes for whatever setup steps are currently
        # running -- initially it's the fixtures that are in the middle of
        # evaluating themselves, and then once fixtures are set up it's the
        # test itself. Basically, at any given moment, it's the stuff we need
        # to cancel if we want to start tearing down our fixture DAG.
        self.active_cancel_scopes = set()
        self.fixtures_with_errors = set()
        self.fixtures_with_cancel = set()
        self.error_list = []

    def crash(self, fixture, exc):
        if exc is None:
            self.fixtures_with_cancel.add(fixture)
        else:
            self.error_list.append(exc)
            self.fixtures_with_errors.add(fixture)
        self.crashed = True
        for cscope in self.active_cancel_scopes:
            cscope.cancel()


class TrioFixture:
    """
    Represent a fixture that needs to be run in a trio context to be resolved.

    The name is actually a misnomer, because we use it to represent the actual
    test itself as well, since the test is basically just a fixture with no
    dependents and no teardown.
    """

    def __init__(self, name, func, pytest_kwargs, is_test=False):
        self.name = name
        self._func = func
        self._pytest_kwargs = pytest_kwargs
        self._is_test = is_test
        self._teardown_done = trio.Event()

        # These attrs are all accessed from other objects:

        # Downstream users read this value.
        self.fixture_value = None

        # This event notifies downstream users that we're done setting up.
        # Invariant: if this is set, then either fixture_value is usable *or*
        # test_ctx.crashed is True.
        self.setup_done = trio.Event()

        # Downstream users *modify* this value, by adding their _teardown_done
        # events to it, so we know who we need to wait for before tearing
        # down.
        self.user_done_events = set()

    def register_and_collect_dependencies(self):
        # Returns the set of all TrioFixtures that this fixture depends on,
        # directly or indirectly, and sets up all their user_done_events.
        deps = set()
        deps.add(self)
        for value in self._pytest_kwargs.values():
            if isinstance(value, TrioFixture):
                value.user_done_events.add(self._teardown_done)
                deps.update(value.register_and_collect_dependencies())
        return deps

    @asynccontextmanager
    async def _fixture_manager(self, test_ctx):
        __tracebackhide__ = True
        try:
            async with trio.open_nursery() as nursery_fixture:
                try:
                    yield nursery_fixture
                finally:
                    nursery_fixture.cancel_scope.cancel()
        except BaseException as exc:
            test_ctx.crash(self, exc)
        finally:
            self.setup_done.set()
            self._teardown_done.set()

    async def run(self, test_ctx, contextvars_ctx):
        __tracebackhide__ = True

        # This is a gross hack. I guess Trio should provide a context=
        # argument to start_soon/start?
        task = trio.lowlevel.current_task()
        assert canary not in task.context
        task.context = contextvars_ctx
        # Force a yield so we pick up the new context
        await trio.sleep(0)
        # Check that it worked, since technically trio doesn't *guarantee*
        # that sleep(0) will actually yield.
        assert canary.get() == "in correct context"

        # This 'with' block handles the nursery fixture lifetime, the
        # _teardown_done event, and crashing the context if there's an
        # unhandled exception.
        async with self._fixture_manager(test_ctx) as nursery_fixture:
            # Resolve our kwargs
            resolved_kwargs = {}
            for name, value in self._pytest_kwargs.items():
                if isinstance(value, TrioFixture):
                    await value.setup_done.wait()
                    if value.fixture_value is NURSERY_FIXTURE_PLACEHOLDER:
                        resolved_kwargs[name] = nursery_fixture
                    else:
                        resolved_kwargs[name] = value.fixture_value
                else:
                    resolved_kwargs[name] = value

            # If something's already crashed before we're ready to start, then
            # there's no point in even setting up.
            if test_ctx.crashed:
                return

            # Run actual fixture setup step
            # If another fixture crashes while we're in the middle of setting
            # up, we want to be cancelled immediately, so we'll save an
            # encompassing cancel scope where test_ctx.crash can find it.
            test_ctx.active_cancel_scopes.add(nursery_fixture.cancel_scope)
            if self._is_test:
                # Tests are exactly like fixtures, except that they have to
                # be regular async functions.
                assert not self.user_done_events
                func_value = None
                assert not test_ctx.crashed
                await self._func(**resolved_kwargs)
            else:
                func_value = self._func(**resolved_kwargs)
                if isinstance(func_value, Coroutine):
                    self.fixture_value = await func_value
                elif isasyncgen(func_value):
                    self.fixture_value = await func_value.asend(None)
                elif isinstance(func_value, Generator):
                    self.fixture_value = func_value.send(None)
                else:
                    # Regular synchronous function
                    self.fixture_value = func_value

            # Now that we're done setting up, we don't want crashes to cancel
            # us immediately; instead we want them to cancel our downstream
            # dependents, and then eventually let us clean up normally. So
            # remove this from the set of cancel scopes affected by
            # test_ctx.crash.
            test_ctx.active_cancel_scopes.remove(nursery_fixture.cancel_scope)

            # self.fixture_value is ready, so notify users that they can
            # continue. (Or, maybe we crashed and were cancelled, in which
            # case our users will check test_ctx.crashed and immediately exit,
            # which is fine too.)
            self.setup_done.set()

            # Wait for users to be finished
            #
            # At this point we're in a very strange state: if the fixture
            # yielded inside a nursery or cancel scope, then we are still
            # "inside" that scope even though its with block is not on the
            # stack. In particular this means that if they get cancelled, then
            # our waiting might get a Cancelled error that we cannot really
            # deal with – it should get thrown back into the fixture
            # generator, but pytest fixture generators don't work that way:
            # https://github.com/python-trio/pytest-trio/issues/55
            # And besides, we can't start tearing down until all our users
            # have finished.
            #
            # So if we get an exception here, we crash the context (which
            # cancels the test and starts the cleanup process), save any
            # exception that *isn't* Cancelled (because if it's Cancelled then
            # we can't route it to the right place, and anyway the teardown
            # code will get it again if it matters), and then use a shield to
            # keep waiting for the teardown to finish without having to worry
            # about cancellation.
            yield_outcome = outcome.Value(None)
            try:
                for event in self.user_done_events:
                    await event.wait()
            except BaseException as exc:
                assert isinstance(exc, trio.Cancelled)
                yield_outcome = outcome.Error(exc)
                test_ctx.crash(self, None)
                with trio.CancelScope(shield=True):
                    for event in self.user_done_events:
                        await event.wait()

            # Do our teardown
            if isasyncgen(func_value):
                try:
                    await yield_outcome.asend(func_value)
                except StopAsyncIteration:
                    pass
                else:
                    raise RuntimeError("too many yields in fixture")
            elif isinstance(func_value, Generator):
                try:
                    yield_outcome.send(func_value)
                except StopIteration:
                    pass
                else:
                    raise RuntimeError("too many yields in fixture")


def _trio_test(run):
    """Use:

    @trio_test
    async def test_whatever():
        await ...

    Also: if a pytest fixture is passed in that subclasses the ``Clock`` abc,
    then that clock is passed to ``trio.run()``.
    """

    def decorator(fn):
        @wraps(fn)
        def wrapper(**kwargs):
            __tracebackhide__ = True
            clocks = {k: c for k, c in kwargs.items() if isinstance(c, Clock)}
            if not clocks:
                clock = None
            elif len(clocks) == 1:
                clock = list(clocks.values())[0]
            else:
                raise ValueError(
                    f"Expected at most one Clock in kwargs, got {clocks!r}"
                )
            instruments = [i for i in kwargs.values() if isinstance(i, Instrument)]
            try:
                return run(partial(fn, **kwargs), clock=clock, instruments=instruments)
            except BaseExceptionGroup as eg:
                queue = [eg]
                leaves = []
                while queue:
                    ex = queue.pop()
                    if isinstance(ex, BaseExceptionGroup):
                        queue.extend(ex.exceptions)
                    else:
                        leaves.append(ex)
                if len(leaves) == 1:
                    if isinstance(leaves[0], XFailed):
                        pytest.xfail()
                    if isinstance(leaves[0], Skipped):
                        pytest.skip()
                # Since our leaf exceptions don't consist of exactly one
                # 'magic' skipped or xfailed exception, re-raise the whole
                # group.
                raise

        return wrapper

    return decorator


def _trio_test_runner_factory(item, testfunc=None):
    if testfunc:
        run = trio.run
    else:
        testfunc = item.obj

        for marker in item.iter_markers("trio"):
            maybe_run = marker.kwargs.get("run")
            if maybe_run is not None:
                run = maybe_run
                break
        else:
            # no marker found that explicitly specifies the runner, so use
            # the config
            run = choose_run(config=item.config)

    if getattr(testfunc, "_trio_test_runner_wrapped", False):
        # We have already wrapped this, perhaps because we combined Hypothesis
        # with pytest.mark.parametrize
        return testfunc

    if not iscoroutinefunction(testfunc):
        pytest.fail("test function `%r` is marked trio but is not async" % item)

    @_trio_test(run=run)
    async def _bootstrap_fixtures_and_run_test(**kwargs):
        __tracebackhide__ = True

        test_ctx = TrioTestContext()
        test = TrioFixture(
            "<test {!r}>".format(testfunc.__name__), testfunc, kwargs, is_test=True
        )

        contextvars_ctx = contextvars.copy_context()
        contextvars_ctx.run(canary.set, "in correct context")

        async with trio.open_nursery() as nursery:
            for fixture in test.register_and_collect_dependencies():
                nursery.start_soon(
                    fixture.run, test_ctx, contextvars_ctx, name=fixture.name
                )

        silent_cancellers = (
            test_ctx.fixtures_with_cancel - test_ctx.fixtures_with_errors
        )
        if silent_cancellers:
            for fixture in silent_cancellers:
                test_ctx.error_list.append(
                    RuntimeError(
                        "{} cancelled the test but didn't "
                        "raise an error".format(fixture.name)
                    )
                )

        if len(test_ctx.error_list) == 1:
            raise test_ctx.error_list[0]
        elif test_ctx.error_list:
            raise BaseExceptionGroup(
                "errors in async test and trio fixtures", test_ctx.error_list
            )

    _bootstrap_fixtures_and_run_test._trio_test_runner_wrapped = True
    return _bootstrap_fixtures_and_run_test
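
# To see how the pieces above fit together from the user's side, a test like
# this (a hypothetical test file, not part of this plugin):
#
#     @pytest.mark.trio
#     async def test_sleep(autojump_clock):
#         await trio.sleep(10)  # returns "instantly" under the autojump clock
#
# is routed through _trio_test_runner_factory by the hook below, wrapped by
# _trio_test (which spots the MockClock among the kwargs and hands it to
# trio.run, or to whatever runner the marker/config selected), and executed
# as the root TrioFixture of the fixture DAG.
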
################################################################
# Hooking up the test/fixture machinery to pytest
################################################################


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
    if item.get_closest_marker("trio") is not None:
        if hasattr(item.obj, "hypothesis"):
            # If it's a Hypothesis test, we go in a layer deeper and wrap the
            # inner test function instead.
            item.obj.hypothesis.inner_test = _trio_test_runner_factory(
                item, item.obj.hypothesis.inner_test
            )
        elif getattr(item.obj, "is_hypothesis_test", False):  # pragma: no cover
            pytest.fail(
                "test function `%r` is using Hypothesis, but pytest-trio "
                "only works with Hypothesis 3.64.0 or later." % item
            )
        else:
            item.obj = _trio_test_runner_factory(item)

    yield


# It's intentionally impossible to use this to create a non-function-scoped
# fixture (since that would require exposing a way to pass scope= to
# pytest.fixture).
def trio_fixture(func):
    func._force_trio_fixture = True
    return pytest.fixture(func)


def _is_trio_fixture(func, coerce_async, kwargs):
    if getattr(func, "_force_trio_fixture", False):
        return True
    if coerce_async and (iscoroutinefunction(func) or isasyncgenfunction(func)):
        return True
    if any(isinstance(value, TrioFixture) for value in kwargs.values()):
        return True
    return False


def handle_fixture(fixturedef, request, force_trio_mode):
    is_trio_test = request.node.get_closest_marker("trio") is not None
    if force_trio_mode:
        is_trio_mode = True
    else:
        is_trio_mode = request.node.config.getini("trio_mode")
    coerce_async = is_trio_test or is_trio_mode
    kwargs = {name: request.getfixturevalue(name) for name in fixturedef.argnames}
    if _is_trio_fixture(fixturedef.func, coerce_async, kwargs):
        if request.scope != "function":
            raise RuntimeError("Trio fixtures must be function-scope")
        if not is_trio_test:
            raise RuntimeError("Trio fixtures can only be used by Trio tests")
        fixture = TrioFixture(
            "<fixture {!r}>".format(fixturedef.argname),
            fixturedef.func,
            kwargs,
        )
        fixturedef.cached_result = (fixture, request.param_index, None)
        return fixture


def pytest_fixture_setup(fixturedef, request):
    return handle_fixture(fixturedef, request, force_trio_mode=False)


################################################################
# Trio mode
################################################################


def automark(items, run=trio.run):
    for item in items:
        if hasattr(item.obj, "hypothesis"):
            test_func = item.obj.hypothesis.inner_test
        else:
            test_func = item.obj

        if iscoroutinefunction(test_func):
            item.add_marker(pytest.mark.trio(run=run))


def choose_run(config):
    run_string = config.getini("trio_run")

    if run_string == "trio":
        run = trio.run
    elif run_string == "qtrio":
        import qtrio

        run = qtrio.run
    else:
        raise ValueError(
            f"{run_string!r} not valid for 'trio_run' config."
            + " Must be one of: trio, qtrio"
        )

    return run
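
# Trio mode is switched on via the ini options consumed here and in the hook
# below -- e.g. in a pytest.ini (a sketch; 'qtrio' is only valid if qtrio is
# installed):
#
#     [pytest]
#     trio_mode = true
#     trio_run = qtrio
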
+ " Must be one of: trio, qtrio" ) return run def pytest_collection_modifyitems(config, items): if config.getini("trio_mode"): automark(items, run=choose_run(config=config)) ################################################################ # Built-in fixtures ################################################################ class NURSERY_FIXTURE_PLACEHOLDER: pass @pytest.fixture def mock_clock(): return MockClock() @pytest.fixture def autojump_clock(): return MockClock(autojump_threshold=0) @trio_fixture def nursery(request): return NURSERY_FIXTURE_PLACEHOLDER pytest-trio-0.8.0/setup.py000066400000000000000000000033571433025171300155430ustar00rootroot00000000000000from setuptools import setup, find_packages exec(open("pytest_trio/_version.py", encoding="utf-8").read()) LONG_DESC = open("README.rst", encoding="utf-8").read() setup( name="pytest-trio", version=__version__, description="Pytest plugin for trio", url="https://github.com/python-trio/pytest-trio", long_description=open("README.rst").read(), author="Emmanuel Leblond", author_email="emmanuel.leblond@gmail.com", license="MIT OR Apache-2.0", packages=find_packages(), entry_points={"pytest11": ["trio = pytest_trio.plugin"]}, install_requires=[ "trio >= 0.22.0", # for ExceptionGroup support "outcome >= 1.1.0", "pytest >= 7.2.0", # for ExceptionGroup support ], keywords=[ "async", "pytest", "testing", "trio", ], python_requires=">=3.7", classifiers=[ "License :: OSI Approved :: MIT License", "License :: OSI Approved :: Apache Software License", "Operating System :: POSIX :: Linux", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: System :: Networking", "Topic :: Software Development :: Testing", "Framework :: Hypothesis", "Framework :: Pytest", "Framework :: Trio", ], ) pytest-trio-0.8.0/test-requirements.txt000066400000000000000000000000631433025171300202610ustar00rootroot00000000000000pytest==7.2.0 pytest-cov==4.0.0 hypothesis==6.56.4