==> anyio-4.11.0/.github/ISSUE_TEMPLATE/bug_report.yaml <==

name: Bug Report
description: File a bug report
labels: ["bug"]
body:
  - type: markdown
    attributes:
      value: >
        If you observed a crash in the library, or saw unexpected behavior in it, report
        your findings here.
  - type: checkboxes
    attributes:
      label: Things to check first
      options:
        - label: >
            I have searched the existing issues and didn't find my bug already reported
            there
          required: true
        - label: >
            I have checked that my bug is still present in the latest release
          required: true
  - type: input
    id: anyio-version
    attributes:
      label: AnyIO version
      description: What version of AnyIO were you running?
    validations:
      required: true
  - type: input
    id: python-version
    attributes:
      label: Python version
      description: What version of Python were you running?
    validations:
      required: true
  - type: textarea
    id: what-happened
    attributes:
      label: What happened?
      description: >
        Unless you are reporting a crash, tell us what you expected to happen instead.
    validations:
      required: true
  - type: textarea
    id: mwe
    attributes:
      label: How can we reproduce the bug?
      description: >
        In order to investigate the bug, we need to be able to reproduce it on our own.
        Please create a
        [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example)
        that demonstrates the problem. List any third party libraries required for this,
        but avoid using them unless absolutely necessary.
    validations:
      required: true
==> anyio-4.11.0/.github/ISSUE_TEMPLATE/config.yml <==

blank_issues_enabled: false
contact_links:
  - name: GitHub Discussions
    url: https://github.com/agronholm/anyio/discussions/categories/q-a
    about: Technical support forum
  - name: Support chat on Gitter
    url: https://gitter.im/python-trio/AnyIO
    about: Technical support chat
==> anyio-4.11.0/.github/ISSUE_TEMPLATE/features_request.yaml <==

name: Feature request
description: Suggest a new feature
labels: ["enhancement"]
body:
  - type: markdown
    attributes:
      value: >
        If you have thought of a new feature that would increase the usefulness of this
        project, please use this form to send us your idea.
  - type: checkboxes
    attributes:
      label: Things to check first
      options:
        - label: >
            I have searched the existing issues and didn't find my feature already
            requested there
          required: true
  - type: textarea
    id: feature
    attributes:
      label: Feature description
      description: >
        Describe the feature in detail. The more specific the description you can give,
        the easier it should be to implement this feature.
    validations:
      required: true
  - type: textarea
    id: usecase
    attributes:
      label: Use case
      description: >
        Explain why you need this feature, and why you think it would be useful to
        others too.
    validations:
      required: true
==> anyio-4.11.0/.github/pull_request_template.md <==
## Changes

Fixes #.

## Checklist

If this is a user-facing code change, like a bugfix or a new feature, please ensure that
you've fulfilled the following conditions (where applicable):

- [ ] You've added tests (in `tests/`) which would fail without your patch
- [ ] You've updated the documentation (in `docs/`, in case of behavior changes or new
  features)
- [ ] You've added a new changelog entry (in `docs/versionhistory.rst`).

If this is a trivial change, like a typo fix or a code reformatting, then you can ignore
these instructions.

### Updating the changelog

If there are no entries after the last release, use `**UNRELEASED**` as the version.

If, say, your patch fixes issue #123, the entry should look like this:

```
- Fix big bad boo-boo in task groups
  (`#123 <https://github.com/agronholm/anyio/issues/123>`_; PR by @yourgithubaccount)
```

If there's no issue linked, just link to your pull request instead by updating the
changelog after you've created the PR.
==> anyio-4.11.0/.github/workflows/publish.yml <==

name: Publish packages to PyPI
on:
  push:
    tags:
      - "[0-9]+.[0-9]+.[0-9]+"
      - "[0-9]+.[0-9]+.[0-9]+.post[0-9]+"
      - "[0-9]+.[0-9]+.[0-9]+[a-b][0-9]+"
      - "[0-9]+.[0-9]+.[0-9]+rc[0-9]+"

jobs:
  build:
    name: Build the source tarball and the wheel
    runs-on: ubuntu-latest
    environment: release
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.x
      - name: Install dependencies
        run: pip install build
      - name: Create packages
        run: python -m build
      - name: Archive packages
        uses: actions/upload-artifact@v4
        with:
          name: dist
          path: dist

  publish:
    name: Publish build artifacts to PyPI
    needs: build
    runs-on: ubuntu-latest
    environment: release
    permissions:
      id-token: write
    steps:
      - name: Retrieve packages
        uses: actions/download-artifact@v4
      - name: Upload packages
        uses: pypa/gh-action-pypi-publish@release/v1

  release:
    name: Create a GitHub release
    needs: build
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v4
      - id: changelog
        uses: agronholm/release-notes@v1
        with:
          path: docs/versionhistory.rst
      - uses: ncipollo/release-action@v1
        with:
          body: ${{ steps.changelog.outputs.changelog }}
==> anyio-4.11.0/.github/workflows/test-downstream.yml <==

---
name: Test against downstream projects

on:
  workflow_dispatch:

jobs:
  starlette:
    name: "Starlette on Python ${{ matrix.python-version }}"
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.9", "3.13"]
    steps:
      - uses: actions/checkout@v5
        with:
          repository: Kludex/starlette
      - uses: astral-sh/setup-uv@v6
        with:
          python-version: ${{ matrix.python-version }}
          enable-cache: true
      - name: Install dependencies
        run: |
          scripts/install
          pip install anyio[trio]@git+https://github.com/agronholm/anyio.git@${{ github.ref_name }}
      - name: Run tests
        run: scripts/test
      - name: Enforce coverage
        run: scripts/coverage

  httpcore:
    name: "Httpcore on Python ${{ matrix.python-version }}"
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.9", "3.13"]
    steps:
      - uses: actions/checkout@v5
        with:
          repository: encode/httpcore
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v6
        with:
          python-version: "${{ matrix.python-version }}"
      - name: Install dependencies
        run: |
          scripts/install
          pip install anyio[trio]@git+https://github.com/agronholm/anyio.git@${{ github.ref_name }}
      - name: Run tests
        run: scripts/test
      - name: Enforce coverage
        run: scripts/coverage

  fastapi:
    name: "FastAPI on Python ${{ matrix.python-version }}"
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.9", "3.13"]
    env:
      UV_SYSTEM_PYTHON: 1
    steps:
      - uses: actions/checkout@v5
        with:
          repository: tiangolo/fastapi
      - uses: actions/setup-python@v6
        with:
          python-version: "${{ matrix.python-version }}"
      - name: Setup uv
        uses: astral-sh/setup-uv@v5
        with:
          version: "0.4.15"
          enable-cache: true
          cache-dependency-glob: |
            requirements**.txt
            pyproject.toml
      - name: Install dependencies
        run: |
          uv pip install -r requirements-tests.txt
          uv pip install anyio[trio]@git+https://github.com/agronholm/anyio.git@${{ github.ref_name }}
      - name: Run tests
        run: pytest -W ignore::ResourceWarning
        env:
          PYTHONPATH: ./docs_src

  litestar:
    name: "Litestar on Python ${{ matrix.python-version }}"
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.9", "3.13"]
    steps:
      - uses: actions/checkout@v5
        with:
          repository: litestar-org/litestar
      - name: Set up python ${{ matrix.python-version }}
        uses: actions/setup-python@v6
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.8"
          enable-cache: true
      - name: Install dependencies
        run: |
          uv sync
          uv pip install anyio[trio]@git+https://github.com/agronholm/anyio.git@${{ github.ref_name }}
      - name: Test
        run: uv run pytest docs/examples tests -n auto

  mcp:
    name: "Anthropic MCP on Python ${{ matrix.python-version }}"
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10", "3.13"]
        dep-resolution: ["lowest-direct", "highest"]
    steps:
      - uses: actions/checkout@v5
        with:
          repository: modelcontextprotocol/python-sdk
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          enable-cache: true
          version: "0.7.2"
      - name: Install the project
        run: |
          uv sync --frozen --all-extras --python ${{ matrix.python-version }} --resolution ${{ matrix.dep-resolution }}
          uv pip install anyio[trio]@git+https://github.com/agronholm/anyio.git@${{ github.ref_name }}
      - name: Run pytest
        run: uv run --frozen --no-sync pytest
==> anyio-4.11.0/.github/workflows/test.yml <==

name: test suite
on:
  push:
    branches: [master]
  pull_request:

jobs:
  changed-files:
    runs-on: ubuntu-latest
    outputs:
      workflow-changed: ${{ steps.changed-files.outputs.workflow_any_changed }}
      pyproject-changed: ${{ steps.changed-files.outputs.pyproject_any_changed }}
      src-changed: ${{ steps.changed-files.outputs.src_any_changed }}
      tests-changed: ${{ steps.changed-files.outputs.tests_any_changed }}
      docs-changed: ${{ steps.changed-files.outputs.doc_any_changed }}
    steps:
      - uses: actions/checkout@v4
      - name: Get changed files by category
        id: changed-files
        uses: tj-actions/changed-files@v46
        with:
          files_yaml: |
            workflow:
              - .github/workflows/test.yml
            pyproject:
              - pyproject.toml
            src:
              - src/**
            tests:
              - tests/**
            doc:
              - README.rst
              - docs/**

  pyright:
    runs-on: ubuntu-latest
    needs: changed-files
    if: |
      ${{
        (needs.changed-files.outputs.workflow-changed == 'true')
        || (needs.changed-files.outputs.src-changed == 'true')
      }}
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.x
      - uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: pip-pyright
      - name: Install dependencies
        run: pip install -e . pyright
      - name: Run pyright
        run: pyright --ignoreexternal --verifytypes anyio

  test:
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14", pypy-3.11]
        include:
          - os: macos-latest
            python-version: "3.9"
          - os: macos-latest
            python-version: "3.13"
          - os: windows-latest
            python-version: "3.9"
          - os: windows-latest
            python-version: "3.13"
    runs-on: ${{ matrix.os }}
    needs: changed-files
    if: |
      ${{
        (needs.changed-files.outputs.workflow-changed == 'true')
        || (needs.changed-files.outputs.pyproject-changed == 'true')
        || (needs.changed-files.outputs.src-changed == 'true')
        || (needs.changed-files.outputs.tests-changed == 'true')
      }}
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
          allow-prereleases: true
          cache: pip
          cache-dependency-path: pyproject.toml
      - name: Install the project and its dependencies
        run: pip install --group test -e .
      - name: Patch /etc/hosts
        if: runner.os != 'Windows'
        run: |
          echo "1.2.3.4 xn--fa-hia.de" | sudo tee -a /etc/hosts
          echo "5.6.7.8 fass.de" | sudo tee -a /etc/hosts
      - name: Patch C:\Windows\System32\drivers\etc\hosts
        if: runner.os == 'Windows'
        run: |
          Add-Content -Path C:\Windows\System32\drivers\etc\hosts -Value "1.2.3.4 xn--fa-hia.de"
          Add-Content -Path C:\Windows\System32\drivers\etc\hosts -Value "5.6.7.8 fass.de"
      - name: Test with pytest
        run: coverage run -m pytest -v
        timeout-minutes: 5
        env:
          PYTEST_DISABLE_PLUGIN_AUTOLOAD: 1
      - name: Generate coverage report
        run: coverage xml
      - name: Upload Coverage
        uses: coverallsapp/github-action@v2
        with:
          parallel: true
          file: coverage.xml

  docs:
    runs-on: ubuntu-latest
    needs: changed-files
    if: |
      ${{
        (needs.changed-files.outputs.workflow-changed == 'true')
        || (needs.changed-files.outputs.pyproject-changed == 'true')
        || (needs.changed-files.outputs.src-changed == 'true')
        || (needs.changed-files.outputs.docs-changed == 'true')
      }}
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
          cache: pip
          cache-dependency-path: pyproject.toml
      - name: Install the project and its dependencies
        run: pip install --group doc -e .
      - name: Build documentation
        run: sphinx-build -W docs build/sphinx

  coveralls:
    name: Finish Coveralls
    needs: test
    runs-on: ubuntu-latest
    steps:
      - name: Finished
        uses: coverallsapp/github-action@v2
        with:
          parallel-finished: true
==> anyio-4.11.0/.gitignore <==

*.egg-info
*.dist-info
*.pyc
build
dist
docs/_build
venv*/
__pycache__
.coverage
.pytest_cache/
.mypy_cache/
.ruff_cache/
.hypothesis/
.eggs/
.tox
.idea
.cache
.local
==> anyio-4.11.0/.pre-commit-config.yaml <==

# This is the configuration file for pre-commit (https://pre-commit.com/).
# To use:
# * Install pre-commit (https://pre-commit.com/#installation)
# * Copy this file as ".pre-commit-config.yaml"
# * Run "pre-commit install".
repos:
  - repo: https://github.com/adrienverge/yamllint
    rev: v1.37.1
    hooks:
      - id: yamllint
        args: ['-d {extends: relaxed, rules: {line-length: disable}}', '-s']
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v6.0.0
    hooks:
      - id: check-toml
      - id: check-yaml
      - id: debug-statements
      - id: end-of-file-fixer
      - id: mixed-line-ending
        args: ["--fix=lf"]
      - id: trailing-whitespace
  - repo: https://github.com/codespell-project/codespell
    rev: v2.4.1
    hooks:
      - id: codespell
        additional_dependencies:
          - tomli
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.13.1
    hooks:
      - id: ruff-check
        args: [--fix, --show-fixes]
      - id: ruff-format
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.18.2
    hooks:
      - id: mypy
        additional_dependencies:
          - pytest
          - trio >= 0.26
          - packaging
  - repo: https://github.com/pre-commit/pygrep-hooks
    rev: v1.10.0
    hooks:
      - id: rst-backticks
      - id: rst-directive-colons
      - id: rst-inline-touching-normal
==> anyio-4.11.0/.readthedocs.yml <==

version: 2
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
  jobs:
    install:
      - python -m pip install --no-cache-dir "pip >= 25.1"
      - python -m pip install --upgrade --upgrade-strategy only-if-needed --no-cache-dir --group doc .

sphinx:
  configuration: docs/conf.py
  fail_on_warning: true
==> anyio-4.11.0/LICENSE <==

The MIT License (MIT)
Copyright (c) 2018 Alex Grönholm
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==> anyio-4.11.0/README.rst <==

.. image:: https://github.com/agronholm/anyio/actions/workflows/test.yml/badge.svg
   :target: https://github.com/agronholm/anyio/actions/workflows/test.yml
   :alt: Build Status
.. image:: https://coveralls.io/repos/github/agronholm/anyio/badge.svg?branch=master
   :target: https://coveralls.io/github/agronholm/anyio?branch=master
   :alt: Code Coverage
.. image:: https://readthedocs.org/projects/anyio/badge/?version=latest
   :target: https://anyio.readthedocs.io/en/latest/?badge=latest
   :alt: Documentation
.. image:: https://badges.gitter.im/gitterHQ/gitter.svg
   :target: https://gitter.im/python-trio/AnyIO
   :alt: Gitter chat
AnyIO is an asynchronous networking and concurrency library that works on top of either asyncio_ or
Trio_. It implements Trio-like `structured concurrency`_ (SC) on top of asyncio and works in harmony
with the native SC of Trio itself.
Applications and libraries written against AnyIO's API will run unmodified on either asyncio_ or
Trio_. AnyIO can also be adopted into a library or application incrementally – bit by bit, no full
refactoring necessary. It will blend in with the native libraries of your chosen backend.
To find out why you might want to use AnyIO's APIs instead of asyncio's, you can read
about it `here <https://anyio.readthedocs.io/en/stable/why.html>`_.
Documentation
-------------
View full documentation at: https://anyio.readthedocs.io/
Features
--------
AnyIO offers the following functionality:
* Task groups (nurseries_ in trio terminology)
* High-level networking (TCP, UDP and UNIX sockets)

  * `Happy eyeballs`_ algorithm for TCP connections (more robust than that of asyncio on
    Python 3.8)
  * async/await style UDP sockets (unlike asyncio where you still have to use Transports
    and Protocols)

* A versatile API for byte streams and object streams
* Inter-task synchronization and communication (locks, conditions, events, semaphores,
  object streams)
* Worker threads
* Subprocesses
* Subinterpreter support for code parallelization (on Python 3.13 and later)
* Asynchronous file I/O (using worker threads)
* Signal handling
AnyIO also comes with its own pytest_ plugin which also supports asynchronous fixtures.
It even works with the popular Hypothesis_ library.
.. _asyncio: https://docs.python.org/3/library/asyncio.html
.. _Trio: https://github.com/python-trio/trio
.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
.. _nurseries: https://trio.readthedocs.io/en/stable/reference-core.html#nurseries-and-spawning
.. _Happy eyeballs: https://en.wikipedia.org/wiki/Happy_Eyeballs
.. _pytest: https://docs.pytest.org/en/latest/
.. _Hypothesis: https://hypothesis.works/
==> anyio-4.11.0/docs/api.rst <==

API reference
=============
Event loop
----------
.. autofunction:: anyio.run
.. autofunction:: anyio.get_all_backends
.. autofunction:: anyio.get_cancelled_exc_class
.. autofunction:: anyio.sleep
.. autofunction:: anyio.sleep_forever
.. autofunction:: anyio.sleep_until
.. autofunction:: anyio.current_time
Asynchronous resources
----------------------
.. autofunction:: anyio.aclose_forcefully
.. autoclass:: anyio.abc.AsyncResource
Typed attributes
----------------
.. autofunction:: anyio.typed_attribute
.. autoclass:: anyio.TypedAttributeSet
.. autoclass:: anyio.TypedAttributeProvider
Timeouts and cancellation
-------------------------
.. autofunction:: anyio.move_on_after
.. autofunction:: anyio.fail_after
.. autofunction:: anyio.current_effective_deadline
.. autoclass:: anyio.CancelScope
Task groups
-----------
.. autofunction:: anyio.create_task_group
.. autoclass:: anyio.abc.TaskGroup
.. autoclass:: anyio.abc.TaskStatus
Running code in worker threads
------------------------------
.. autofunction:: anyio.to_thread.run_sync
.. autofunction:: anyio.to_thread.current_default_thread_limiter
Running code in subinterpreters
-------------------------------
.. autofunction:: anyio.to_interpreter.run_sync
.. autofunction:: anyio.to_interpreter.current_default_interpreter_limiter
Running code in worker processes
--------------------------------
.. autofunction:: anyio.to_process.run_sync
.. autofunction:: anyio.to_process.current_default_process_limiter
Running asynchronous code from other threads
--------------------------------------------
.. autofunction:: anyio.from_thread.run
.. autofunction:: anyio.from_thread.run_sync
.. autofunction:: anyio.from_thread.check_cancelled
.. autofunction:: anyio.from_thread.start_blocking_portal
.. autoclass:: anyio.from_thread.BlockingPortal
.. autoclass:: anyio.from_thread.BlockingPortalProvider
Async file I/O
--------------
.. autofunction:: anyio.open_file
.. autofunction:: anyio.wrap_file
.. autoclass:: anyio.AsyncFile
.. autoclass:: anyio.Path
Temporary files and directories
-------------------------------
.. autofunction:: anyio.mkstemp
.. autofunction:: anyio.mkdtemp
.. autofunction:: anyio.gettempdir
.. autofunction:: anyio.gettempdirb
.. autoclass:: anyio.TemporaryFile
.. autoclass:: anyio.NamedTemporaryFile
.. autoclass:: anyio.SpooledTemporaryFile
.. autoclass:: anyio.TemporaryDirectory
Context manager mix-in classes
------------------------------
.. autoclass:: anyio.ContextManagerMixin
   :special-members: __contextmanager__
.. autoclass:: anyio.AsyncContextManagerMixin
   :special-members: __asynccontextmanager__
Streams and stream wrappers
---------------------------
.. autofunction:: anyio.create_memory_object_stream
.. autoclass:: anyio.abc.UnreliableObjectReceiveStream()
.. autoclass:: anyio.abc.UnreliableObjectSendStream()
.. autoclass:: anyio.abc.UnreliableObjectStream()
.. autoclass:: anyio.abc.ObjectReceiveStream()
.. autoclass:: anyio.abc.ObjectSendStream()
.. autoclass:: anyio.abc.ObjectStream()
.. autoclass:: anyio.abc.ByteReceiveStream
.. autoclass:: anyio.abc.ByteSendStream
.. autoclass:: anyio.abc.ByteStream
.. autoclass:: anyio.abc.Listener
.. autoclass:: anyio.abc.ObjectStreamConnectable
.. autoclass:: anyio.abc.ByteStreamConnectable
.. autodata:: anyio.abc.AnyUnreliableByteReceiveStream
.. autodata:: anyio.abc.AnyUnreliableByteSendStream
.. autodata:: anyio.abc.AnyUnreliableByteStream
.. autodata:: anyio.abc.AnyByteReceiveStream
.. autodata:: anyio.abc.AnyByteSendStream
.. autodata:: anyio.abc.AnyByteStream
.. autodata:: anyio.abc.AnyByteStreamConnectable
.. autoclass:: anyio.streams.buffered.BufferedByteReceiveStream
.. autoclass:: anyio.streams.buffered.BufferedByteStream
.. autoclass:: anyio.streams.file.FileStreamAttribute
.. autoclass:: anyio.streams.file.FileReadStream
.. autoclass:: anyio.streams.file.FileWriteStream
.. autoclass:: anyio.streams.memory.MemoryObjectReceiveStream
.. autoclass:: anyio.streams.memory.MemoryObjectSendStream
.. autoclass:: anyio.streams.memory.MemoryObjectStreamStatistics
.. autoclass:: anyio.streams.stapled.MultiListener
.. autoclass:: anyio.streams.stapled.StapledByteStream
.. autoclass:: anyio.streams.stapled.StapledObjectStream
.. autoclass:: anyio.streams.text.TextReceiveStream
.. autoclass:: anyio.streams.text.TextSendStream
.. autoclass:: anyio.streams.text.TextStream
.. autoclass:: anyio.streams.text.TextConnectable
.. autoclass:: anyio.streams.tls.TLSAttribute
.. autoclass:: anyio.streams.tls.TLSStream
.. autoclass:: anyio.streams.tls.TLSListener
.. autoclass:: anyio.streams.tls.TLSConnectable
Sockets and networking
----------------------
.. autofunction:: anyio.as_connectable
.. autofunction:: anyio.connect_tcp
.. autofunction:: anyio.connect_unix
.. autofunction:: anyio.create_tcp_listener
.. autofunction:: anyio.create_unix_listener
.. autofunction:: anyio.create_udp_socket
.. autofunction:: anyio.create_connected_udp_socket
.. autofunction:: anyio.getaddrinfo
.. autofunction:: anyio.getnameinfo
.. autofunction:: anyio.wait_readable
.. autofunction:: anyio.wait_socket_readable
.. autofunction:: anyio.wait_socket_writable
.. autofunction:: anyio.wait_writable
.. autoclass:: anyio.abc.SocketAttribute
.. autoclass:: anyio.abc.SocketStream()
.. autoclass:: anyio.abc.SocketListener()
.. autoclass:: anyio.abc.UDPSocket()
.. autoclass:: anyio.abc.ConnectedUDPSocket()
.. autoclass:: anyio.abc.UNIXSocketStream()
.. autoclass:: anyio.abc.UNIXDatagramSocket()
.. autoclass:: anyio.abc.ConnectedUNIXDatagramSocket()
.. autoclass:: anyio.TCPConnectable
.. autoclass:: anyio.UNIXConnectable
Subprocesses
------------
.. autofunction:: anyio.run_process
.. autofunction:: anyio.open_process
.. autoclass:: anyio.abc.Process
Synchronization
---------------
.. autoclass:: anyio.Event
.. autoclass:: anyio.Lock
.. autoclass:: anyio.Condition
.. autoclass:: anyio.Semaphore
.. autoclass:: anyio.CapacityLimiter
.. autoclass:: anyio.ResourceGuard
.. autoclass:: anyio.LockStatistics
.. autoclass:: anyio.EventStatistics
.. autoclass:: anyio.ConditionStatistics
.. autoclass:: anyio.CapacityLimiterStatistics
.. autoclass:: anyio.SemaphoreStatistics
Operating system signals
------------------------
.. autofunction:: anyio.open_signal_receiver
Low level operations
--------------------
.. autofunction:: anyio.lowlevel.checkpoint
.. autofunction:: anyio.lowlevel.checkpoint_if_cancelled
.. autofunction:: anyio.lowlevel.cancel_shielded_checkpoint
.. autofunction:: anyio.lowlevel.current_token
.. autoclass:: anyio.lowlevel.RunVar
.. autoclass:: anyio.lowlevel.EventLoopToken
Testing and debugging
---------------------
.. autoclass:: anyio.TaskInfo
.. autoclass:: anyio.pytest_plugin.FreePortFactory
.. autofunction:: anyio.get_current_task
.. autofunction:: anyio.get_running_tasks
.. autofunction:: anyio.wait_all_tasks_blocked
Exceptions
----------
.. autoexception:: anyio.BrokenResourceError
.. autoexception:: anyio.BrokenWorkerInterpreter
.. autoexception:: anyio.BrokenWorkerProcess
.. autoexception:: anyio.BusyResourceError
.. autoexception:: anyio.ClosedResourceError
.. autoexception:: anyio.ConnectionFailed
.. autoexception:: anyio.DelimiterNotFound
.. autoexception:: anyio.EndOfStream
.. autoexception:: anyio.IncompleteRead
.. autoexception:: anyio.NoEventLoopError
.. autoexception:: anyio.RunFinishedError
.. autoexception:: anyio.TypedAttributeLookupError
.. autoexception:: anyio.WouldBlock
==> anyio-4.11.0/docs/basics.rst <==

The basics
==========
.. py:currentmodule:: anyio
AnyIO requires Python 3.9 or later to run. It is recommended that you set up a
virtualenv_ when developing or playing around with AnyIO.
Installation
------------
To install AnyIO, run:

.. code-block:: bash

    pip install anyio

To install a supported version of Trio_, you can install it as an extra like this:

.. code-block:: bash

    pip install anyio[trio]
Running async programs
----------------------
The simplest possible AnyIO program looks like this::
    from anyio import run

    async def main():
        print('Hello, world!')

    run(main)
This will run the program above on the default backend (asyncio). To explicitly specify
which backend to run on, you can use the ``backend`` argument, like so::
    run(main, backend='trio')
    run(main, backend='asyncio')
But AnyIO code is not required to be run via :func:`run`. You can just as well use the
native ``run()`` function of the backend library::
    import sniffio
    import trio

    from anyio import sleep

    async def main():
        print('Hello')
        await sleep(1)
        print("I'm running on", sniffio.current_async_library())

    trio.run(main)
.. versionchanged:: 4.0.0
   On the ``asyncio`` backend, ``anyio.run()`` now uses a back-ported version of
   :class:`asyncio.Runner` on Pythons older than 3.11.
.. _backend options:
Backend specific options
------------------------
Any options exclusive to a specific backend can be passed with the ``backend_options``
argument to :func:`run`::
run(main, backend="asyncio", backend_options={"debug": True})
run(
main,
backend="trio",
backend_options={"restrict_keyboard_interrupt_to_checkpoints": True}
)
Here is the list of supported options for each backend:
**Asyncio**:

* options covered in the documentation of :class:`asyncio.Runner`
* ``use_uvloop`` (``bool``, default=False): Use the faster uvloop_ event loop
  implementation, if available (this is a shorthand for passing
  ``loop_factory=uvloop.new_event_loop``, and is ignored if ``loop_factory`` is passed
  a value other than ``None``)

**Trio**: options covered in the
`official documentation
<https://trio.readthedocs.io/en/stable/reference-core.html#trio.run>`_
.. versionchanged:: 3.2.0
   The default value of ``use_uvloop`` was changed to ``False``.

.. versionchanged:: 4.0.0
   The ``policy`` option was replaced with ``loop_factory``.
.. _uvloop: https://pypi.org/project/uvloop/
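Based on the shorthand described above, the following two calls should be equivalent (a
sketch; it assumes uvloop_ is installed and ``main`` is defined as before)::

    import uvloop

    run(main, backend="asyncio", backend_options={"use_uvloop": True})
    run(main, backend="asyncio", backend_options={"loop_factory": uvloop.new_event_loop})
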
Using native async libraries
----------------------------
AnyIO lets you mix and match code written for AnyIO and code written for the
asynchronous framework of your choice. There are a few rules to keep in mind however:
* You can only use "native" libraries for the backend you're running, so you cannot, for
example, use a library written for Trio_ together with a library written for asyncio.
* Tasks spawned by these "native" libraries on backends other than Trio_ are not subject
to the cancellation rules enforced by AnyIO
* Threads spawned outside of AnyIO cannot use :func:`.from_thread.run` to call
asynchronous code
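
As a minimal sketch of the first rule, asyncio-native code can be awaited freely as long
as the program is running on the asyncio backend::

    import asyncio

    from anyio import run

    async def main():
        # Fine here, because we're running on the asyncio backend
        await asyncio.sleep(0.1)

    run(main)  # the default backend is asyncio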
.. seealso:: :ref:`asyncio cancellation`
.. _virtualenv: https://docs.python-guide.org/dev/virtualenvs/
.. _Trio: https://github.com/python-trio/trio
==> anyio-4.11.0/docs/cancellation.rst <==

Cancellation and timeouts
=========================
.. py:currentmodule:: anyio
The ability to cancel tasks is the foremost advantage of the asynchronous programming
model. Threads, on the other hand, cannot be forcibly killed and shutting them down will
require perfect cooperation from the code running in them.
Cancellation in AnyIO follows the model established by the Trio_ framework. This means
that cancellation of tasks is done via so-called *cancel scopes*. Cancel scopes are used
as context managers and can be nested. Cancelling a cancel scope cancels all cancel
scopes nested within it. If a task is waiting on something, it is cancelled immediately.
If the task is just starting, it will run until it first tries to run an operation
requiring waiting, such as :func:`~sleep`.
A task group contains its own cancel scope. The entire task group can be cancelled by
cancelling this scope::
    from anyio import create_task_group, get_cancelled_exc_class, sleep, run

    async def waiter(index: int):
        try:
            await sleep(1)
        except get_cancelled_exc_class():
            print(f"Waiter {index} cancelled")
            raise

    async def taskfunc():
        async with create_task_group() as tg:
            # Start a couple tasks and wait until they are blocked
            tg.start_soon(waiter, 1)
            tg.start_soon(waiter, 2)
            await sleep(0.1)

            # Cancel the scope and exit the task group
            tg.cancel_scope.cancel()

    run(taskfunc)

    # Output:
    # Waiter 1 cancelled
    # Waiter 2 cancelled
.. _Trio: https://trio.readthedocs.io/en/latest/reference-core.html#cancellation-and-timeouts
.. _asyncio cancellation:
Differences between asyncio and AnyIO cancellation semantics
------------------------------------------------------------
Asyncio employs a type of cancellation called *edge cancellation*. This means that when
a task is cancelled, a :exc:`~asyncio.CancelledError` is raised in the task and the task
then gets to handle it however it likes, even opting to ignore it entirely. In contrast,
tasks that either explicitly use a cancel scope, or are spawned from an AnyIO task
group, use *level cancellation*. This means that as long as a task remains within an
*effectively cancelled* cancel scope, it will get hit with a cancellation exception any
time it hits a *yield point* (usually by awaiting something, or through
``async with ...`` or ``async for ...``).
This can cause difficulties when running code written for asyncio that does not expect
to get cancelled repeatedly. For example, :class:`asyncio.Condition` was written in such
a way that it suppresses cancellation exceptions until it is able to reacquire the
underlying lock. This can lead to a busy-wait_ loop that needlessly consumes a lot of
CPU time.
.. _busy-wait: https://en.wikipedia.org/wiki/Busy_waiting
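
Here's a minimal sketch of level cancellation in action; it behaves the same way on
either backend::

    from anyio import CancelScope, get_cancelled_exc_class, sleep, run

    async def main():
        with CancelScope() as scope:
            scope.cancel()
            try:
                await sleep(1)  # the first yield point after the cancellation
            except get_cancelled_exc_class():
                print('cancelled at the yield point')
                # Swallowing this would only get us cancelled again at the next
                # yield point, so always re-raise it
                raise

    run(main)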
Timeouts
--------
Networked operations can often take a long time, and you usually want to set up some
kind of a timeout to ensure that your application doesn't stall forever. There are two
principal ways to do this: :func:`~move_on_after` and :func:`~fail_after`. Both are used
as synchronous context managers. The difference between these two is that the former
simply exits the context block prematurely on a timeout, while the other raises a
:exc:`TimeoutError`.
Both methods create a new cancel scope, and you can check the deadline by accessing the
:attr:`~.CancelScope.deadline` attribute. Note, however, that an outer cancel scope
may have an earlier deadline than your current cancel scope. To check the actual
deadline, you can use the :func:`~current_effective_deadline` function.
Here's how you typically use timeouts::
    from anyio import create_task_group, move_on_after, sleep, run

    async def main():
        async with create_task_group() as tg:
            with move_on_after(1) as scope:
                print('Starting sleep')
                await sleep(2)
                print('This should never be printed')

            # The cancelled_caught property will be True if timeout was reached
            print('Exited cancel scope, cancelled =', scope.cancelled_caught)

    run(main)
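
To see how an outer scope's earlier deadline takes precedence, here's a small sketch
comparing a scope's own deadline to the effective one::

    from anyio import current_effective_deadline, move_on_after, run

    async def main():
        with move_on_after(1):  # outer deadline: about 1 second from now
            with move_on_after(5) as inner:  # inner deadline: about 5 seconds from now
                # The outer deadline expires first, so it is the effective one
                print(inner.deadline > current_effective_deadline())  # True

    run(main)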
.. note:: It's recommended not to directly cancel a scope from :func:`~fail_after`, as
   that may currently result in :exc:`TimeoutError` being erroneously raised if exiting
   the scope is delayed long enough for the deadline to be exceeded.
Shielding
---------
There are cases where you want to shield your task from cancellation, at least
temporarily. The most important such use case is performing shutdown procedures on
asynchronous resources.
To accomplish this, open a new cancel scope with the ``shield=True`` argument::
    from anyio import CancelScope, create_task_group, sleep, run

    async def external_task():
        print('Started sleeping in the external task')
        await sleep(1)
        print('This line should never be seen')

    async def main():
        async with create_task_group() as tg:
            with CancelScope(shield=True) as scope:
                tg.start_soon(external_task)
                tg.cancel_scope.cancel()
                print('Started sleeping in the host task')
                await sleep(1)
                print('Finished sleeping in the host task')

    run(main)
The shielded block will be exempt from cancellation except when the shielded block
itself is being cancelled. Shielding a cancel scope is often best combined with
:func:`~move_on_after` or :func:`~fail_after`, both of which also accept
``shield=True``::
    async def do_something(resource):
        try:
            ...
        except BaseException:
            # Here we wait 10 seconds for resource.aclose() to complete,
            # but if the operation doesn't complete within that period, we move on
            # and re-raise the caught exception anyway
            with move_on_after(10, shield=True):
                await resource.aclose()

            raise
.. _finalization:
Finalization
------------
Sometimes you may want to perform cleanup operations in response to the failure of the
operation::
    async def do_something():
        try:
            await run_async_stuff()
        except BaseException:
            # (perform cleanup)
            raise
In some specific cases, you might only want to catch the cancellation exception. This is
tricky because each async framework has its own exception class for that and AnyIO
cannot control which exception is raised in the task when it's cancelled. To work around
that, AnyIO provides a way to retrieve the exception class specific to the currently
running async framework, using :func:`~get_cancelled_exc_class`::
    from anyio import get_cancelled_exc_class

    async def do_something():
        try:
            await run_async_stuff()
        except get_cancelled_exc_class():
            # (perform cleanup)
            raise
.. warning:: Always reraise the cancellation exception if you catch it. Failing to do so
   may cause undefined behavior in your application.
If you need to use ``await`` during finalization, you need to enclose it in a shielded
cancel scope, or the operation will be cancelled immediately since it's in an already
cancelled scope::
    async def do_something():
        try:
            await run_async_stuff()
        except get_cancelled_exc_class():
            with CancelScope(shield=True):
                await some_cleanup_function()

            raise
Specifying the reason for cancellation
--------------------------------------
To help with debugging, it is possible to specify a reason why you're cancelling a
cancel scope::
    async def do_something():
        with CancelScope() as scope:
            scope.cancel("Testing cancellation")
            try:
                await sleep(1)
            except get_cancelled_exc_class() as exc:
                print(exc)  # Print the cancellation message
                raise  # Always re-raise cancellation exceptions!
While the exact resulting message from the cancellation exception varies by the event
loop implementation, it will contain at least the following pieces of information:
* The cancellation reason (if one was given)
* The task name where :meth:`CancelScope.cancel` was called (if cancelled from a task)
.. note:: Calling :meth:`~CancelScope.cancel` on an already cancelled scope will not
   change the cancel message.
.. _cancel_scope_stack_corruption:
Avoiding cancel scope stack corruption
--------------------------------------
When using cancel scopes, it is important that they are entered and exited in LIFO (last
in, first out) order within each task. This is usually not an issue since cancel scopes
are normally used as context managers. However, in certain situations, cancel scope
stack corruption might still occur:
* Manually calling ``CancelScope.__enter__()`` and ``CancelScope.__exit__()``, usually
  from another context manager class, in the wrong order
* Using cancel scopes with ``[Async]ExitStack`` in a manner that couldn't be achieved by
  nesting them as context managers
* Using the low level coroutine protocol to execute parts of the coroutine function in
  different cancel scopes
* Yielding in an async generator while enclosed in a cancel scope
Remember that task groups contain their own cancel scopes so the same list of risky
situations applies to them too.
As an example, the following code is highly dubious::
    # Bad!
    async def some_generator():
        async with create_task_group() as tg:
            tg.start_soon(foo)
            yield
The problem with this code is that it violates structural concurrency: what happens if
the spawned task raises an exception? The host task would be cancelled as a result, but
the host task might be long gone by the time that happens. Even if it weren't, any
enclosing ``try...except`` in the generator would not be triggered. Unfortunately there
is currently no way to automatically detect this condition in AnyIO, so in practice you
may simply experience some weird behavior in your application as a consequence of
running code like above.
Depending on how they are used, this pattern is, however, *usually* safe to use in
asynchronous context managers, so long as you make sure that the same host task keeps
running throughout the entire enclosed code block::
    from contextlib import asynccontextmanager

    # Okay in most cases!
    @asynccontextmanager
    async def some_context_manager():
        async with create_task_group() as tg:
            tg.start_soon(foo)
            yield
Prior to AnyIO 3.6, this usage pattern was also invalid in pytest's asynchronous
generator fixtures. Starting from 3.6, however, each async generator fixture is run from
start to end in the same task, making it possible to have task groups or cancel scopes
safely straddle the ``yield``.
When you're implementing the async context manager protocol manually and your async
context manager needs to use other context managers, you may find it convenient to use
:class:`AsyncContextManagerMixin` in order to avoid cumbersome code that calls
``__aenter__()`` and ``__aexit__()`` directly::
    from __future__ import annotations

    from collections.abc import AsyncGenerator
    from contextlib import asynccontextmanager
    from typing import Self

    from anyio import AsyncContextManagerMixin, create_task_group

    class MyAsyncContextManager(AsyncContextManagerMixin):
        @asynccontextmanager
        async def __asynccontextmanager__(self) -> AsyncGenerator[Self]:
            async with create_task_group() as tg:
                ...  # launch tasks
                yield self
.. seealso:: :doc:`contextmanagers`
==> anyio-4.11.0/docs/conf.py <==

#!/usr/bin/env python3
from __future__ import annotations
from importlib.metadata import version as get_version
from packaging.version import parse
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "sphinx_tabs.tabs",
    "sphinx_autodoc_typehints",
    "sphinx_rtd_theme",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "AnyIO"
author = "Alex Grönholm"
copyright = "2018, " + author
v = parse(get_version("anyio"))
version = v.base_version
release = v.public
language = "en"
exclude_patterns = ["_build"]
pygments_style = "sphinx"
autodoc_default_options = {"members": True, "show-inheritance": True}
autodoc_mock_imports = ["_typeshed", "pytest", "_pytest"]
todo_include_todos = False
html_theme = "sphinx_rtd_theme"
htmlhelp_basename = "anyiodoc"
intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
==> anyio-4.11.0/docs/contextmanagers.rst <==

Context manager mix-in classes
==============================
.. py:currentmodule:: anyio
Python classes that want to offer context management functionality normally implement
``__enter__()`` and ``__exit__()`` (for synchronous context managers) or
``__aenter__()`` and ``__aexit__()`` (for asynchronous context managers). While this
offers precise control and re-entrancy support, embedding *other* context managers in
this logic can be very error prone. To make this easier, AnyIO provides two context
manager mix-in classes, :class:`ContextManagerMixin` and
:class:`AsyncContextManagerMixin`. These classes provide implementations of
``__enter__()`` and ``__exit__()``, or ``__aenter__()`` and ``__aexit__()``, that offer
another way to implement context managers, similar to
:func:`@contextmanager <contextlib.contextmanager>` and
:func:`@asynccontextmanager <contextlib.asynccontextmanager>` - a generator-based
approach where the ``yield`` statement signals that the context has been entered.
Here's a trivial example of how to use the mix-in classes:
.. tabs::

   .. code-tab:: python Synchronous

      from collections.abc import Generator
      from contextlib import contextmanager
      from typing import Self

      from anyio import ContextManagerMixin

      class MyContextManager(ContextManagerMixin):
          @contextmanager
          def __contextmanager__(self) -> Generator[Self]:
              print("entering context")
              yield self
              print("exiting context")

   .. code-tab:: python Asynchronous

      from collections.abc import AsyncGenerator
      from contextlib import asynccontextmanager
      from typing import Self

      from anyio import AsyncContextManagerMixin

      class MyAsyncContextManager(AsyncContextManagerMixin):
          @asynccontextmanager
          async def __asynccontextmanager__(self) -> AsyncGenerator[Self]:
              print("entering context")
              yield self
              print("exiting context")
When should I use the contextmanager mix-in classes?
----------------------------------------------------
When embedding other context managers, a common mistake is forgetting about error
handling when entering the context. Consider this example::
    from typing import Self

    from anyio import create_task_group

    class MyBrokenContextManager:
        async def __aenter__(self) -> Self:
            self._task_group = await create_task_group().__aenter__()
            # BOOM: missing the "arg" argument here to my_background_func!
            self._task_group.start_soon(self.my_background_func)
            return self

        async def __aexit__(self, exc_type, exc_value, traceback) -> bool | None:
            return await self._task_group.__aexit__(exc_type, exc_value, traceback)

        async def my_background_func(self, arg: int) -> None:
            ...
It's easy to think that you have everything covered with ``__aexit__()`` here, but what
if something goes wrong in ``__aenter__()``? The ``__aexit__()`` method will never be
called.
The mix-in classes solve this problem by providing a robust implementation of
``__enter__()``/``__exit__()`` or ``__aenter__()``/``__aexit__()`` that handles errors
correctly. Thus, the above code should be written as::
    from collections.abc import AsyncGenerator
    from contextlib import asynccontextmanager
    from typing import Self

    from anyio import AsyncContextManagerMixin, create_task_group

    class MyBetterContextManager(AsyncContextManagerMixin):
        @asynccontextmanager
        async def __asynccontextmanager__(self) -> AsyncGenerator[Self]:
            async with create_task_group() as task_group:
                # Still crashes, but at least now the task group is exited
                task_group.start_soon(self.my_background_func)
                yield self

        async def my_background_func(self, arg: int) -> None:
            ...
.. seealso:: :ref:`cancel_scope_stack_corruption`
Inheriting context manager classes
----------------------------------
Here's how you would call the superclass implementation from a subclass:
.. tabs::

   .. code-tab:: python Synchronous

      from collections.abc import Generator
      from contextlib import contextmanager
      from typing import Self

      from anyio import ContextManagerMixin

      class SuperclassContextManager(ContextManagerMixin):
          @contextmanager
          def __contextmanager__(self) -> Generator[Self]:
              print("superclass entered")
              try:
                  yield self
              finally:
                  print("superclass exited")

      class SubclassContextManager(SuperclassContextManager):
          @contextmanager
          def __contextmanager__(self) -> Generator[Self]:
              print("subclass entered")
              try:
                  with super().__contextmanager__():
                      yield self
              finally:
                  print("subclass exited")

   .. code-tab:: python Asynchronous

      from collections.abc import AsyncGenerator
      from contextlib import asynccontextmanager
      from typing import Self

      from anyio import AsyncContextManagerMixin

      class SuperclassContextManager(AsyncContextManagerMixin):
          @asynccontextmanager
          async def __asynccontextmanager__(self) -> AsyncGenerator[Self]:
              print("superclass entered")
              try:
                  yield self
              finally:
                  print("superclass exited")

      class SubclassContextManager(SuperclassContextManager):
          @asynccontextmanager
          async def __asynccontextmanager__(self) -> AsyncGenerator[Self]:
              print("subclass entered")
              try:
                  async with super().__asynccontextmanager__():
                      yield self
              finally:
                  print("subclass exited")
==> anyio-4.11.0/docs/contributing.rst <==

Contributing to AnyIO
=====================
If you wish to contribute a fix or feature to AnyIO, please follow the following
guidelines.
When you make a pull request against the main AnyIO codebase, Github runs the AnyIO test
suite against your modified code. Before making a pull request, you should ensure that
the modified code passes tests locally. To that end, the use of tox_ is recommended. The
default tox run first runs ``pre-commit`` and then the actual test suite. To run the
checks on all environments in parallel, invoke tox with ``tox -p``.
To build the documentation, run ``tox -e docs`` which will generate a directory named
``build`` in which you may view the formatted HTML documentation.
AnyIO uses pre-commit_ to perform several code style/quality checks. It is recommended
to activate pre-commit_ on your local clone of the repository (using
``pre-commit install``) to ensure that your changes will pass the same checks on GitHub.
.. _tox: https://tox.readthedocs.io/en/latest/install.html
.. _pre-commit: https://pre-commit.com/#installation
Making a pull request on Github
-------------------------------
To get your changes merged to the main codebase, you need a Github account.
#. Fork the repository (if you don't have your own fork of it yet) by navigating to the
   `main AnyIO repository`_ and clicking on "Fork" near the top right corner.
#. Clone the forked repository to your local machine with
   ``git clone git@github.com:yourusername/anyio``.
#. Create a branch for your pull request, like ``git checkout -b myfixname``
#. Make the desired changes to the code base.
#. Commit your changes locally. If your changes close an existing issue, add the text
   ``Fixes XXX.`` or ``Closes XXX.`` to the commit message (where XXX is the issue
   number).
#. Push the changeset(s) to your forked repository (``git push``)
#. Navigate to Pull requests page on the original repository (not your fork) and click
   "New pull request"
#. Click on the text "compare across forks".
#. Select your own fork as the head repository and then select the correct branch name.
#. Click on "Create pull request".
If you have trouble, consult the `pull request making guide`_ on opensource.com.
.. _main AnyIO repository: https://github.com/agronholm/anyio
.. _pull request making guide:
   https://opensource.com/article/19/7/create-pull-request-github
==> anyio-4.11.0/docs/faq.rst <==

Frequently Asked Questions
==========================
Why is Curio not supported as a backend?
----------------------------------------
Curio_ was supported in AnyIO before v3.0. Support for it was dropped for two reasons:
#. Its interface allowed only coroutine functions to access the Curio_ kernel. This
   forced AnyIO to follow suit in its own API design, making it difficult to adapt
   existing applications that relied on synchronous callbacks to use AnyIO. It also
   interfered with the goal of matching Trio's API in functions with the same purpose
   (e.g. ``Event.set()``).
#. The maintainer specifically requested Curio_ support to be removed from AnyIO
   (`issue 185 <https://github.com/agronholm/anyio/issues/185>`_).

.. _Curio: https://github.com/dabeaz/curio
Why is Twisted not supported as a backend?
------------------------------------------
The minimum requirement to support Twisted_ would be for sniffio_ to be able to detect a
running Twisted event loop (and be able to tell when Twisted_ is being run on top of its
asyncio reactor). This is not currently supported in sniffio_, so AnyIO cannot support
Twisted either.
There is a Twisted `issue `_ that you can
follow if you're interested in Twisted support in AnyIO.
.. _Twisted: https://twistedmatrix.com/trac/
.. _sniffio: https://github.com/python-trio/sniffio
==> anyio-4.11.0/docs/fileio.rst <==

Asynchronous file I/O support
=============================
.. py:currentmodule:: anyio
AnyIO provides asynchronous wrappers for blocking file operations. These wrappers run
blocking operations in worker threads.
Example::
    from anyio import open_file, run

    async def main():
        async with await open_file('/some/path/somewhere') as f:
            contents = await f.read()
            print(contents)

    run(main)
The wrappers also support asynchronous iteration of the file line by line, just as the
standard file objects support synchronous iteration::
    from anyio import open_file, run

    async def main():
        async with await open_file('/some/path/somewhere') as f:
            async for line in f:
                print(line, end='')

    run(main)
To wrap an existing open file object as an asynchronous file, you can use
:func:`.wrap_file`::
    from anyio import wrap_file, run

    async def main():
        with open('/some/path/somewhere') as f:
            async for line in wrap_file(f):
                print(line, end='')

    run(main)
.. note:: Closing the wrapper also closes the underlying synchronous file object.
.. seealso:: :ref:`FileStreams`
Asynchronous path operations
----------------------------
AnyIO provides an asynchronous version of the :class:`pathlib.Path` class. It differs
from the original in a number of ways:

* Operations that perform disk I/O (like :meth:`~pathlib.Path.read_bytes`) are run in a
  worker thread and thus require an ``await``
* Methods like :meth:`~pathlib.Path.glob` return an asynchronous iterator that yields
  asynchronous :class:`~.Path` objects
* Properties and methods that normally return :class:`pathlib.Path` objects return
  :class:`~.Path` objects instead
* Methods and properties from the Python 3.10 API are available on all versions
* Use as a context manager is not supported, as it is deprecated in pathlib
For example, to create a file with binary content::
    from anyio import Path, run

    async def main():
        path = Path('/foo/bar')
        await path.write_bytes(b'hello, world')

    run(main)
Asynchronously iterating a directory contents can be done as follows::
    from anyio import Path, run

    async def main():
        # Print the contents of every file (assumed to be text) in the directory /foo/bar
        dir_path = Path('/foo/bar')
        async for path in dir_path.iterdir():
            if await path.is_file():
                print(await path.read_text())
                print('---------------------')

    run(main)
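
Similarly, :meth:`~pathlib.Path.glob` can be iterated asynchronously (a sketch)::

    from anyio import Path, run

    async def main():
        dir_path = Path('/foo/bar')
        # glob() returns an asynchronous iterator of Path objects
        async for path in dir_path.glob('*.txt'):
            print(path.name)

    run(main)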
==> anyio-4.11.0/docs/index.rst <==

AnyIO
=====
.. include:: ../README.rst
The manual
----------
.. toctree::
   :maxdepth: 2

   basics
   tasks
   cancellation
   synchronization
   streams
   typedattrs
   networking
   threads
   subprocesses
   subinterpreters
   fileio
   tempfile
   signals
   contextmanagers
   testing
   api
   migration
   why
   faq
   support
   contributing
   versionhistory
==> anyio-4.11.0/docs/migration.rst <==

Migrating from AnyIO 3 to AnyIO 4
=================================
.. py:currentmodule:: anyio
The non-standard exception group class was removed
--------------------------------------------------
AnyIO 3 had its own ``ExceptionGroup`` class which predated the :pep:`654` exception
group classes. This class has now been removed in favor of the built-in
:exc:`BaseExceptionGroup` and :exc:`ExceptionGroup` classes. If your code was either
raising the old ``ExceptionGroup`` exception or catching it, you need to make the switch
to these standard classes. Otherwise you can ignore this part.
If you're targeting Python releases older than 3.11, you need to use the exceptiongroup_
backport and import one of those classes from ``exceptiongroup``. The only difference
between :exc:`BaseExceptionGroup` and :exc:`ExceptionGroup` is that the latter can
only contain exceptions derived from :exc:`Exception`, and likewise can be caught with
``except Exception:``.
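
A minimal sketch of the conditional import this implies::

    import sys

    if sys.version_info < (3, 11):
        # On older Pythons, the standard exception group classes come from the backport
        from exceptiongroup import BaseExceptionGroup, ExceptionGroup
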
Task groups now wrap single exceptions in groups
------------------------------------------------
The most prominent backwards incompatible change in AnyIO 4 was that task groups now
always raise exception groups when either the host task or any child tasks raise an
exception (other than a cancellation exception). Previously, an exception group was only
raised when more than one exception needed to be raised from the task group. The
practical consequence is that if your code previously expected to catch a specific kind
of exception falling out of a task group, you now need to either switch to the
``except*`` syntax (if you're fortunate enough to work solely with Python 3.11 or
later), or use the ``catch()`` context manager from the exceptiongroup_ backport.
So, if you had code like this::
    try:
        await function_using_a_taskgroup()
    except ValueError as exc:
        ...
The Python 3.11+ equivalent would look almost the same::
    try:
        await function_using_a_taskgroup()
    except* ValueError as excgrp:
        # Note: excgrp is an ExceptionGroup now!
        ...
If you need to stay compatible with older Python releases, you need to use the
backport::
    from exceptiongroup import ExceptionGroup, catch

    def handle_value_errors(excgrp: ExceptionGroup) -> None:
        ...

    with catch({ValueError: handle_value_errors}):
        await function_using_a_taskgroup()
This difference often comes up in test suites too. For example, if you had this before
in a pytest-based test suite::
    with pytest.raises(ValueError):
        await function_using_a_taskgroup()
You now need to change it to::
    from exceptiongroup import ExceptionGroup

    with pytest.raises(ExceptionGroup) as exc:
        await function_using_a_taskgroup()

    assert len(exc.value.exceptions) == 1
    assert isinstance(exc.value.exceptions[0], ValueError)
If you need to stay compatible with both AnyIO 3 and 4, you can use the following
compatibility code to "collapse" single-exception groups by unwrapping them::
    import sys
    from contextlib import contextmanager
    from typing import Generator

    has_exceptiongroups = True
    if sys.version_info < (3, 11):
        try:
            from exceptiongroup import BaseExceptionGroup
        except ImportError:
            has_exceptiongroups = False

    @contextmanager
    def collapse_excgroups() -> Generator[None, None, None]:
        try:
            yield
        except BaseException as exc:
            if has_exceptiongroups:
                while isinstance(exc, BaseExceptionGroup) and len(exc.exceptions) == 1:
                    exc = exc.exceptions[0]

            raise exc
Syntax for type annotated memory object streams has changed
-----------------------------------------------------------
Where previously, creating type annotated memory object streams worked by passing the
desired type as the second argument::
send, receive = create_memory_object_stream(100, int)
In 4.0, :class:`create_memory_object_stream() <create_memory_object_stream>` is a class
masquerading as a function, so you need to parametrize it::
send, receive = create_memory_object_stream[int](100)
If you didn't parametrize your memory object streams before, then you don't need to make
any changes in this regard.
Event loop factories instead of event loop policies
----------------------------------------------------
If you're using a custom asyncio event loop policy with :func:`run`, you need to switch
to passing an *event loop factory*, that is, a callable that returns a new event loop.
Using uvloop_ as an example, code like the following::
anyio.run(main, backend_options={"event_loop_policy": uvloop.EventLoopPolicy()})
should be converted into::
anyio.run(main, backend_options={"loop_factory": uvloop.new_event_loop})
Make sure not to actually call the factory function!
.. _exceptiongroup: https://pypi.org/project/exceptiongroup/
.. _uvloop: https://github.com/MagicStack/uvloop
Migrating from AnyIO 2 to AnyIO 3
=================================
AnyIO 3 changed some functions and methods in a way that needs some adaptation in your
code. All deprecated functions and methods will be removed in AnyIO 4.
Asynchronous functions converted to synchronous
-----------------------------------------------
AnyIO 3 changed several previously asynchronous functions and methods into regular ones
for two reasons:
#. to better serve use cases where synchronous callbacks are used by third party
libraries
#. to better match the API of Trio_
The following functions and methods were changed:
* :func:`current_time`
* :func:`current_effective_deadline`
* :meth:`CancelScope.cancel() <.CancelScope.cancel>`
* :meth:`CapacityLimiter.acquire_nowait`
* :meth:`CapacityLimiter.acquire_on_behalf_of_nowait`
* :meth:`Condition.release`
* :meth:`Event.set`
* :func:`get_current_task`
* :func:`get_running_tasks`
* :meth:`Lock.release`
* :meth:`MemoryObjectReceiveStream.receive_nowait()
<.streams.memory.MemoryObjectReceiveStream.receive_nowait>`
* :meth:`MemoryObjectSendStream.send_nowait()
<.streams.memory.MemoryObjectSendStream.send_nowait>`
* :func:`open_signal_receiver`
* :meth:`Semaphore.release`
When migrating to AnyIO 3, simply remove the ``await`` from each call to these.
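For example, with :func:`current_time` (a minimal before/after sketch)::

    # AnyIO 2
    now = await anyio.current_time()

    # AnyIO 3
    now = anyio.current_time()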
.. note:: For backwards compatibility reasons, :func:`current_time`,
   :func:`current_effective_deadline` and :func:`get_running_tasks` return objects that
   are awaitable versions of their original types (:class:`float` for the first two,
   :class:`list` for the last). These awaitable versions are subclasses of the original
   types, so they should behave like their originals, but if you absolutely need the
   pristine original types, you can either use ``maybe_async()`` or call ``float()`` /
   ``list()`` on the returned value as appropriate.
The following async context managers changed to regular context managers:
* :func:`fail_after`
* :func:`move_on_after`
* ``open_cancel_scope()`` (now just ``CancelScope()``)
When migrating, just change ``async with`` into a plain ``with``.
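For example, with :func:`fail_after` (a minimal before/after sketch; ``do_something()``
is a placeholder for your own coroutine function)::

    # AnyIO 2
    async with fail_after(5):
        await do_something()

    # AnyIO 3
    with fail_after(5):
        await do_something()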
With the exception of
:meth:`MemoryObjectReceiveStream.receive_nowait()
<.streams.memory.MemoryObjectReceiveStream.receive_nowait>`,
all of them can still be used like before – they will emit a :exc:`DeprecationWarning`
when used this way on AnyIO 3, however.
If you're writing a library that needs to be compatible with both major releases, you
will need to use the compatibility functions added in AnyIO 2.2: ``maybe_async()`` and
``maybe_async_cm()``. These will let you safely use functions/methods and context
managers (respectively) regardless of which major release is currently installed.
Example 1 – setting an event::
from anyio.abc import Event
from anyio import maybe_async
async def foo(event: Event):
await maybe_async(event.set())
...
Example 2 – opening a cancel scope::
from anyio import CancelScope, maybe_async_cm
async def foo():
async with maybe_async_cm(CancelScope()) as scope:
...
.. _Trio: https://github.com/python-trio/trio
Starting tasks
--------------
The ``TaskGroup.spawn()`` coroutine method has been deprecated in favor of the
synchronous method :meth:`.TaskGroup.start_soon` (which mirrors ``start_soon()`` in
Trio's nurseries). If you're fully migrating to AnyIO 3, simply switch to calling the
new method (and remove the ``await``).
If your code needs to work with both AnyIO 2 and 3, you can keep using
``TaskGroup.spawn()`` (until AnyIO 4) and suppress the deprecation warning::
    import warnings

    from anyio import create_task_group

    async def foo():
        async with create_task_group() as tg:
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', DeprecationWarning)
                await tg.spawn(otherfunc)
Blocking portal changes
-----------------------
AnyIO now **requires** :func:`.from_thread.start_blocking_portal` to be used as a
context manager::
from anyio import sleep
from anyio.from_thread import start_blocking_portal
with start_blocking_portal() as portal:
portal.call(sleep, 1)
As with ``TaskGroup.spawn()``, the ``BlockingPortal.spawn_task()`` method has also been
renamed to :meth:`~from_thread.BlockingPortal.start_task_soon`, so as to be consistent
with task groups.
The ``create_blocking_portal()`` factory function was also deprecated in favor of
instantiating :class:`~from_thread.BlockingPortal` directly.
For code requiring cross compatibility, catching the deprecation warning (as above)
should work.
Synchronization primitives
--------------------------
Synchronization primitive factories (``create_event()`` etc.) were deprecated in favor
of instantiating the classes directly. So convert code like this::
from anyio import create_event
async def main():
event = create_event()
into this::
from anyio import Event
async def main():
event = Event()
or, if you need to work with both AnyIO 2 and 3::
try:
from anyio import Event
create_event = Event
except ImportError:
from anyio import create_event
from anyio.abc import Event
async def foo() -> Event:
return create_event()
Threading functions moved
-------------------------
Threading functions were restructured to submodules, following the example of Trio:
* ``current_default_worker_thread_limiter`` →
:func:`.to_thread.current_default_thread_limiter`
(NOTE: the function was renamed too!)
* ``run_sync_in_worker_thread()`` → :func:`.to_thread.run_sync`
* ``run_async_from_thread()`` → :func:`.from_thread.run`
* ``run_sync_from_thread()`` → :func:`.from_thread.run_sync`
The old versions are still in place but emit deprecation warnings when called.
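For example (a minimal before/after sketch; ``do_work`` is a placeholder for your own
blocking function)::

    # AnyIO 2
    result = await run_sync_in_worker_thread(do_work)

    # AnyIO 3
    from anyio import to_thread

    result = await to_thread.run_sync(do_work)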
anyio-4.11.0/docs/networking.rst 0000664 0000000 0000000 00000024477 15064462627 0016617 0 ustar 00root root 0000000 0000000 Using sockets and streams
=========================
.. py:currentmodule:: anyio
Networking capabilities are arguably the most important part of any asynchronous
library. AnyIO contains its own high level implementation of networking on top of low
level primitives offered by each of its supported backends.
Currently AnyIO offers the following networking functionality:
* TCP sockets (client + server)
* UNIX domain sockets (client + server)
* UDP sockets
* UNIX datagram sockets
More exotic forms of networking such as raw sockets and SCTP are currently not
supported.
.. warning:: Unlike the standard BSD sockets interface and most other networking
libraries, AnyIO (from 2.0 onwards) signals the end of any stream by raising the
:exc:`~EndOfStream` exception instead of returning an empty bytes object.
Working with TCP sockets
------------------------
TCP (Transmission Control Protocol) is the most commonly used protocol on the Internet.
It allows one to connect to a port on a remote host and send and receive data in a
reliable manner.
To connect to a listening TCP socket somewhere, you can use :func:`~connect_tcp`::
from anyio import connect_tcp, run
async def main():
async with await connect_tcp('hostname', 1234) as client:
await client.send(b'Client\n')
response = await client.receive()
print(response)
run(main)
As a convenience, you can also use :func:`~connect_tcp` to establish a TLS session with
the peer after connection, by passing ``tls=True`` or by passing a nonempty value for
either ``ssl_context`` or ``tls_hostname``.
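For example, a TLS connection to an HTTPS server could be made like this (a minimal
sketch; the host name is a placeholder)::

    from anyio import connect_tcp, run

    async def main():
        async with await connect_tcp('example.org', 443, tls=True) as client:
            await client.send(b'HEAD / HTTP/1.1\r\nHost: example.org\r\n\r\n')
            print(await client.receive())

    run(main)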
To receive incoming TCP connections, you first create a TCP listener with
:func:`create_tcp_listener` and call :meth:`~.abc.Listener.serve` on it::
from anyio import create_tcp_listener, run
async def handle(client):
async with client:
name = await client.receive(1024)
await client.send(b'Hello, %s\n' % name)
async def main():
listener = await create_tcp_listener(local_port=1234)
await listener.serve(handle)
run(main)
See the section on :ref:`TLS` for more information.
Working with UNIX sockets
-------------------------
UNIX domain sockets are a form of interprocess communication on UNIX-like operating
systems. They cannot be used to connect to remote hosts and do not work on Windows.
The API for UNIX domain sockets is much like the one for TCP sockets, except that
instead of host/port combinations, you use file system paths.
This is what the client from the TCP example looks like when converted to use UNIX
sockets::
from anyio import connect_unix, run
async def main():
async with await connect_unix('/tmp/mysock') as client:
await client.send(b'Client\n')
response = await client.receive(1024)
print(response)
run(main)
And the listener::
from anyio import create_unix_listener, run
async def handle(client):
async with client:
name = await client.receive(1024)
await client.send(b'Hello, %s\n' % name)
async def main():
listener = await create_unix_listener('/tmp/mysock')
await listener.serve(handle)
run(main)
.. note:: The UNIX socket listener does not remove the socket file it creates, so you
   may need to delete it manually.
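A cleanup sketch, reusing the socket path from the examples above::

    from pathlib import Path

    # Remove a stale socket file before (or after) binding a listener to it
    Path('/tmp/mysock').unlink(missing_ok=True)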
Sending and receiving file descriptors
++++++++++++++++++++++++++++++++++++++
UNIX sockets can be used to pass open file descriptors (sockets and files) to another
process. The receiving end can then use either :func:`os.fdopen` or
:class:`socket.socket` to get a usable file or socket object, respectively.
The following is an example where a client connects to a UNIX socket server and receives
the descriptor of a file opened on the server, reads the contents of the file and then
prints them on standard output.
Client::
import os
from anyio import connect_unix, run
async def main():
async with await connect_unix('/tmp/mysock') as client:
_, fds = await client.receive_fds(0, 1)
with os.fdopen(fds[0]) as file:
print(file.read())
run(main)
Server::
from pathlib import Path
from anyio import create_unix_listener, run
async def handle(client):
async with client:
with path.open('r') as file:
await client.send_fds(b'this message is ignored', [file])
async def main():
listener = await create_unix_listener('/tmp/mysock')
await listener.serve(handle)
path = Path('/tmp/examplefile')
path.write_text('Test file')
run(main)
Working with UDP sockets
------------------------
UDP (User Datagram Protocol) is a way of sending packets over the network without
features like connections, retries or error correction.
For example, if you wanted to create a UDP "hello" service that just reads a packet and
then sends a packet to the sender with the contents prepended with "Hello, ", you would
do this::
import socket
from anyio import create_udp_socket, run
async def main():
async with await create_udp_socket(
family=socket.AF_INET, local_port=1234
) as udp:
async for packet, (host, port) in udp:
await udp.sendto(b'Hello, ' + packet, host, port)
run(main)
.. note:: If you are testing on your local machine or don't know which socket family to
   use, it is a good idea to replace ``family=socket.AF_INET`` with
   ``local_host='localhost'`` in the previous example.
If your use case involves sending lots of packets to a single destination, you can still
"connect" your UDP socket to a specific host and port to avoid having to pass the
address and port every time you send data to the peer::
from anyio import create_connected_udp_socket, run
async def main():
async with await create_connected_udp_socket(
remote_host='hostname', remote_port=1234) as udp:
await udp.send(b'Hi there!\n')
run(main)
Working with UNIX datagram sockets
----------------------------------
UNIX datagram sockets are a subset of UNIX domain sockets, with the difference being
that while UNIX sockets implement reliable communication of a continuous byte stream
(similarly to TCP), UNIX datagram sockets implement communication of data packets
(similarly to UDP).
The API for UNIX datagram sockets is modeled after the one for UDP sockets, except that
instead of host/port combinations, you use file system paths - here is the UDP "hello"
service example written with UNIX datagram sockets::
from anyio import create_unix_datagram_socket, run
async def main():
async with await create_unix_datagram_socket(
local_path='/tmp/mysock'
) as unix_dg:
async for packet, path in unix_dg:
await unix_dg.sendto(b'Hello, ' + packet, path)
run(main)
.. note:: If ``local_path`` is not set, the UNIX datagram socket will be bound on an
unnamed address, and will generally not be able to receive datagrams from other UNIX
datagram sockets.
Similarly to UDP sockets, if your use case involves sending lots of packets to a single
destination, you can "connect" your UNIX datagram socket to a specific path to avoid
having to pass the path every time you send data to the peer::
from anyio import create_connected_unix_datagram_socket, run
async def main():
async with await create_connected_unix_datagram_socket(
remote_path='/dev/log'
) as unix_dg:
await unix_dg.send(b'Hi there!\n')
run(main)
Wrapping existing sockets as streams or listeners
-------------------------------------------------
In some cases, you might want to create a socket in third party code and wrap that as an
AnyIO stream or socket listener. For that, various class methods exist:
* :meth:`.abc.SocketListener.from_socket`
* :meth:`.abc.SocketStream.from_socket`
* :meth:`.abc.UNIXSocketStream.from_socket`
* :meth:`.abc.UDPSocket.from_socket`
* :meth:`.abc.ConnectedUDPSocket.from_socket`
* :meth:`.abc.UNIXDatagramSocket.from_socket`
* :meth:`.abc.ConnectedUNIXDatagramSocket.from_socket`
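For example, a socket created with the standard library could be wrapped like this (a
minimal sketch, assuming ``from_socket()`` accepts an already connected socket object)::

    import socket

    from anyio import run
    from anyio.abc import SocketStream

    async def main():
        # The socket might come from third party code; created here for brevity
        raw_sock = socket.create_connection(('example.org', 80))
        stream = await SocketStream.from_socket(raw_sock)
        async with stream:
            await stream.send(b'HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n')
            print(await stream.receive())

    run(main)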
.. _connectables:
Abstracting remote connections using Connectables
-------------------------------------------------
AnyIO offers a hierarchy of classes implementing either the
:class:`.abc.ObjectStreamConnectable` or :class:`.abc.ByteStreamConnectable` interfaces
which lets developers abstract out the connection mechanism for network clients.
For example, you could create a network client class like this::
from os import PathLike
from ssl import SSLContext
from anyio.abc import ByteStreamConnectable, as_connectable
class MyNetworkClient:
def __init__(
self,
connectable: ByteStreamConnectable | tuple[str, int] | str | PathLike[str],
tls: bool | SSLContext = False
):
self.connectable = as_connectable(connectable, tls)
async def __aenter__(self):
# Connect to the remote and enter the stream's context manager
self._stream = await self.connectable.connect()
await self._stream.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
# Exit the stream's context manager, thus disconnecting it
await self._stream.__aexit__(exc_type, exc_val, exc_tb)
Here's a dissection of the type annotation for ``connectable``:
* :class:`.abc.ByteStreamConnectable`: allows for any arbitrary bytestream connectable
* ``tuple[str, int]``: TCP host/port
* ``str | PathLike[str]``: file system path to a UNIX socket
The :func:`as_connectable` function is a convenience that lets users instantiate your
client without the hassle of manually instantiating a connectable like
:class:`TCPConnectable` or :class:`UNIXConnectable`.
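Users of the class above could then connect in several ways (a sketch; the addresses are
placeholders)::

    client = MyNetworkClient(('example.org', 1234))  # TCP
    client = MyNetworkClient('/run/myservice.sock')  # UNIX domain socket
    client = MyNetworkClient(('example.org', 1234), tls=True)  # TCP + TLS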
So why bother jumping through these extra hoops? Because it gives users the flexibility
of using more exotic transports, such as:
* Mock streams (for testing)
* Interceptor streams (for testing network delays, stalled connections, etc.)
* SOCKS proxies
* HTTP tunneling via ``CONNECT``
In particular, tunneling using AnyIO streams rather than external connectors has the
advantage of allowing passthrough of information such as the peer address via
:meth:`~TypedAttributeProvider.extra`.
In addition to that, it greatly simplifies adding support for TLS and transports other
than TCP.
anyio-4.11.0/docs/signals.rst 0000664 0000000 0000000 00000005015 15064462627 0016053 0 ustar 00root root 0000000 0000000 Receiving operating system signals
==================================
.. py:currentmodule:: anyio
You may occasionally find it useful to receive signals sent to your application in a
meaningful way. For example, when you receive a ``signal.SIGTERM`` signal, your
application is expected to shut down gracefully. Likewise, ``SIGHUP`` is often used as a
means to ask the application to reload its configuration.
AnyIO provides a simple mechanism for you to receive the signals you're interested in::
import signal
from anyio import open_signal_receiver, run
async def main():
with open_signal_receiver(signal.SIGTERM, signal.SIGHUP) as signals:
async for signum in signals:
if signum == signal.SIGTERM:
return
elif signum == signal.SIGHUP:
print('Reloading configuration')
run(main)
.. note:: Signal handlers can only be installed in the main thread, so they will not
work when the event loop is being run through :class:`~.from_thread.BlockingPortal`,
for instance.
.. note:: Windows does not natively support signals, so do not rely on this in a
   cross-platform application.
Handling KeyboardInterrupt and SystemExit
-----------------------------------------
By default, different backends handle the Ctrl+C (or Ctrl+Break on Windows) key
combination and external termination (:exc:`KeyboardInterrupt` and :exc:`SystemExit`,
respectively) differently: Trio raises the relevant exception inside the application
while asyncio shuts down all the tasks and exits. If you need to do your own cleanup in
these situations, you will need to install a signal handler::
import signal
from anyio import open_signal_receiver, create_task_group, run
from anyio.abc import CancelScope
async def signal_handler(scope: CancelScope):
with open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
async for signum in signals:
if signum == signal.SIGINT:
print('Ctrl+C pressed!')
else:
print('Terminated!')
scope.cancel()
return
async def main():
async with create_task_group() as tg:
tg.start_soon(signal_handler, tg.cancel_scope)
... # proceed with starting the actual application logic
run(main)
.. note:: Windows does not support the :data:`~signal.SIGTERM` signal so if you need a
mechanism for graceful shutdown on Windows, you will have to find another way.
anyio-4.11.0/docs/streams.rst 0000664 0000000 0000000 00000031131 15064462627 0016067 0 ustar 00root root 0000000 0000000 Streams
=======
.. py:currentmodule:: anyio
A "stream" in AnyIO is a simple interface for transporting information from one place to
another. It can mean either in-process communication or sending data over a network.
AnyIO divides streams into two categories: byte streams and object streams.
Byte streams ("Streams" in Trio lingo) are objects that receive and/or send chunks of
bytes. They are modelled after the limitations of stream sockets, meaning that message
boundaries are not respected.
``.send(b'hello ')`` and then ``.send(b'world')``, the other end will receive the data
chunked in any arbitrary way, like (``b'hello'`` and ``b' world'``), ``b'hello world'``
or (``b'hel'``, ``b'lo wo'``, ``b'rld'``).
Object streams ("Channels" in Trio lingo), on the other hand, deal with Python objects.
The most commonly used implementation of these is the memory object stream. The exact
semantics of object streams vary a lot by implementation.
Many stream implementations wrap other streams. Of these, some can wrap any
bytes-oriented streams, meaning ``ObjectStream[bytes]`` and ``ByteStream``. This enables
many interesting use cases.
.. _memory object streams:
Memory object streams
---------------------
Memory object streams are intended for implementing a producer-consumer pattern with
multiple tasks. Using :func:`~create_memory_object_stream`, you get a pair of object
streams: one for sending, one for receiving. They essentially work like queues, but with
support for closing and asynchronous iteration.
By default, memory object streams are created with a buffer size of 0. This means that
:meth:`~.streams.memory.MemoryObjectSendStream.send` will block until there's another
task that calls :meth:`~.streams.memory.MemoryObjectReceiveStream.receive`. You can set
the buffer size to a value of your choosing when creating the stream. It is also
possible to have an unbounded buffer by passing :data:`math.inf` as the buffer size but
this is not recommended.
Memory object streams can be cloned by calling the ``clone()`` method. Each clone can be
closed separately, but each end of the stream is only considered closed once all of its
clones have been closed. For example, if you have two clones of the receive stream, the
send stream will start raising :exc:`~BrokenResourceError` only when both receive
streams have been closed.
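For example (a minimal sketch using the synchronous stream methods)::

    from anyio import create_memory_object_stream

    send, receive1 = create_memory_object_stream[str](1)
    receive2 = receive1.clone()
    receive1.close()

    # The send stream still works because receive2 remains open
    send.send_nowait('hello')
    print(receive2.receive_nowait())  # 'hello'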
Multiple tasks can send and receive on the same memory object stream (or its clones) but
each sent item is only ever delivered to a single recipient.
The receive ends of memory object streams can be iterated using the async iteration
protocol. The loop exits when all clones of the send stream have been closed.
Example::
from anyio import create_task_group, create_memory_object_stream, run
from anyio.streams.memory import MemoryObjectReceiveStream
async def process_items(receive_stream: MemoryObjectReceiveStream[str]) -> None:
async with receive_stream:
async for item in receive_stream:
print('received', item)
async def main():
# The [str] specifies the type of the objects being passed through the
# memory object stream. This is a bit of trick, as create_memory_object_stream
# is actually a class masquerading as a function.
send_stream, receive_stream = create_memory_object_stream[str]()
async with create_task_group() as tg:
tg.start_soon(process_items, receive_stream)
async with send_stream:
for num in range(10):
await send_stream.send(f'number {num}')
run(main)
In contrast to other AnyIO streams (but in line with Trio's Channels), memory object
streams can be closed synchronously, using either the ``close()`` method or by using the
stream as a context manager::
from anyio.streams.memory import MemoryObjectSendStream
def synchronous_callback(send_stream: MemoryObjectSendStream[str]) -> None:
with send_stream:
send_stream.send_nowait('hello')
Stapled streams
---------------
A stapled stream combines any mutually compatible receive and send stream together,
forming a single bidirectional stream.
It comes in two variants:
* :class:`~.streams.stapled.StapledByteStream` (combines a
:class:`~.abc.ByteReceiveStream` with a :class:`~.abc.ByteSendStream`)
* :class:`~.streams.stapled.StapledObjectStream` (combines an
:class:`~.abc.ObjectReceiveStream` with a compatible :class:`~.abc.ObjectSendStream`)
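For example, stapling together the two ends of a memory object stream yields a loopback
object stream (a minimal sketch)::

    from anyio import create_memory_object_stream, run
    from anyio.streams.stapled import StapledObjectStream

    async def main():
        send, receive = create_memory_object_stream[bytes](1)
        async with StapledObjectStream(send, receive) as stream:
            await stream.send(b'hello')
            print(await stream.receive())  # b'hello'

    run(main)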
Buffered byte streams
---------------------
A buffered byte stream wraps an existing bytes-oriented receive stream and provides
certain amenities that require buffering, such as receiving an exact number of bytes, or
receiving until the given delimiter is found.
Example::
from anyio import run, create_memory_object_stream
from anyio.streams.buffered import BufferedByteReceiveStream
async def main():
send, receive = create_memory_object_stream[bytes](4)
buffered = BufferedByteReceiveStream(receive)
for part in b'hel', b'lo, ', b'wo', b'rld!':
await send.send(part)
result = await buffered.receive_exactly(8)
print(repr(result))
result = await buffered.receive_until(b'!', 10)
print(repr(result))
run(main)
The above script gives the following output::
b'hello, w'
b'orld'
.. tip:: In some cases, you may need to inject data directly into the buffer. You can do
that with the :meth:`~.streams.buffered.BufferedByteReceiveStream.feed_data` method.
Text streams
------------
Text streams wrap existing receive/send streams and encode/decode strings to bytes and
vice versa.
Example::
from anyio import run, create_memory_object_stream
from anyio.streams.text import TextReceiveStream, TextSendStream
async def main():
bytes_send, bytes_receive = create_memory_object_stream[bytes](1)
text_send = TextSendStream(bytes_send)
await text_send.send('åäö')
result = await bytes_receive.receive()
print(repr(result))
text_receive = TextReceiveStream(bytes_receive)
await bytes_send.send(result)
result = await text_receive.receive()
print(repr(result))
run(main)
The above script gives the following output::
b'\xc3\xa5\xc3\xa4\xc3\xb6'
'åäö'
.. _FileStreams:
File streams
------------
File streams read from or write to files on the file system. They can be useful for
substituting a file for another source of data, or writing output to a file for logging
or debugging purposes.
Example::
from anyio import run
from anyio.streams.file import FileReadStream, FileWriteStream
async def main():
path = '/tmp/testfile'
async with await FileWriteStream.from_path(path) as stream:
await stream.send(b'Hello, World!')
async with await FileReadStream.from_path(path) as stream:
async for chunk in stream:
print(chunk.decode(), end='')
print()
run(main)
.. versionadded:: 3.0
.. _TLS:
TLS streams
-----------
TLS (Transport Layer Security), the successor to SSL (Secure Sockets Layer), is the
supported way of providing authenticity and confidentiality for TCP streams in AnyIO.
TLS is typically established right after the connection has been made. The handshake
involves the following steps:
* Sending the certificate to the peer (usually just by the server)
* Checking the peer certificate(s) against trusted CA certificates
* Checking that the peer host name matches the certificate
Obtaining a server certificate
******************************
There are three principal ways you can get an X.509 certificate for your server:
#. Create a self signed certificate
#. Use certbot_ or a similar software to automatically obtain certificates from
`Let's Encrypt`_
#. Buy one from a certificate vendor
The first option is probably the easiest, but this requires that any client
connecting to your server adds the self signed certificate to their list of trusted
certificates. This is of course impractical outside of local development and is strongly
discouraged in production use.
The second option is nowadays the recommended method, as long as you have an environment
where running certbot_ or similar software can automatically replace the certificate
with a newer one when necessary, and that you don't need any extra features like class 2
validation.
The third option may be your only valid choice when you have special requirements for
the certificate that only a certificate vendor can fulfill, or that automatically
renewing the certificates is not possible or practical in your environment.
.. _certbot: https://certbot.eff.org/
.. _Let's Encrypt: https://letsencrypt.org/
Using self signed certificates
******************************
To create a self signed certificate for ``localhost``, you can use the openssl_ command
line tool:
.. code-block:: bash
openssl req -x509 -newkey rsa:2048 -subj '/CN=localhost' -keyout key.pem -out cert.pem -nodes -days 365
This creates a (2048 bit) private RSA key (``key.pem``) and a certificate (``cert.pem``)
matching the host name "localhost". The certificate will be valid for one year with
these settings.
To set up a server using this key-certificate pair::
import ssl
from anyio import create_tcp_listener, run
from anyio.streams.tls import TLSListener
async def handle(client):
async with client:
name = await client.receive()
await client.send(b'Hello, %s\n' % name)
async def main():
# Create a context for the purpose of authenticating clients
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
# Load the server certificate and private key
context.load_cert_chain(certfile='cert.pem', keyfile='key.pem')
# Create the listener and start serving connections
listener = TLSListener(await create_tcp_listener(local_port=1234), context)
await listener.serve(handle)
run(main)
Connecting to this server can then be done as follows::
import ssl
from anyio import connect_tcp, run
async def main():
# These two steps are only required for certificates that are not trusted by the
# installed CA certificates on your machine, so you can skip this part if you
# use Let's Encrypt or a commercial certificate vendor
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
context.load_verify_locations(cafile='cert.pem')
async with await connect_tcp('localhost', 1234, ssl_context=context) as client:
await client.send(b'Client\n')
response = await client.receive()
print(response)
run(main)
.. _openssl: https://www.openssl.org/
Creating self-signed certificates on the fly
********************************************
When testing your TLS enabled service, it would be convenient to generate the
certificates on the fly. To this end, you can use the trustme_ library::
import ssl
import pytest
import trustme
@pytest.fixture(scope='session')
def ca():
return trustme.CA()
@pytest.fixture(scope='session')
def server_context(ca):
server_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ca.issue_cert('localhost').configure_cert(server_context)
return server_context
@pytest.fixture(scope='session')
def client_context(ca):
client_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
ca.configure_trust(client_context)
return client_context
You can then pass the server and client contexts from the above fixtures to
:class:`~.streams.tls.TLSListener`, :meth:`~.streams.tls.TLSStream.wrap` or whatever you
use on either side.
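For example, a client side test might wrap a TCP stream using the ``client_context``
fixture (a sketch; the server setup is assumed to happen elsewhere)::

    import pytest

    from anyio import connect_tcp
    from anyio.streams.tls import TLSStream

    pytestmark = pytest.mark.anyio

    async def test_tls_connection(client_context):
        stream = await connect_tcp('localhost', 1234)
        tls_stream = await TLSStream.wrap(
            stream, hostname='localhost', ssl_context=client_context
        )
        async with tls_stream:
            ...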
.. _trustme: https://pypi.org/project/trustme/
Dealing with ragged EOFs
************************
According to the `TLS standard`_, encrypted connections should end with a closing
handshake. This practice prevents so-called `truncation attacks`_. However, widely
available implementations of protocols such as HTTP ignore this requirement, because
the protocol-level closing signal would make the shutdown handshake redundant.
AnyIO follows the standard by default (unlike the Python standard library's :mod:`ssl`
module). The practical implication of this is that if you're implementing a protocol
that is expected to skip the TLS closing handshake, you need to pass the
``standard_compatible=False`` option to :meth:`~.streams.tls.TLSStream.wrap` or
:class:`~.streams.tls.TLSListener`.
.. _TLS standard: https://tools.ietf.org/html/draft-ietf-tls-tls13-28
.. _truncation attacks: https://en.wikipedia.org/wiki/Transport_Layer_Security
#Attacks_against_TLS/SSL
anyio-4.11.0/docs/subinterpreters.rst 0000664 0000000 0000000 00000003454 15064462627 0017660 0 ustar 00root root 0000000 0000000 Working with subinterpreters
============================
.. py:currentmodule:: anyio
Subinterpreters offer a middle ground between worker threads and worker processes. They
allow you to utilize multiple CPU cores to run Python code while avoiding the overhead
and complexities of spawning subprocesses.
.. warning:: Subinterpreter support is considered **experimental**. The underlying
Python API for managing subinterpreters has not been finalized yet, and has had
little real-world testing. As such, it is not recommended to use this feature for
anything important yet.
Running a function in a worker interpreter
------------------------------------------
Running functions in a worker interpreter makes sense when:
* The code you want to run in parallel is CPU intensive
* The code is either pure Python code, or extension code that does not release the
Global Interpreter Lock (GIL)
If the code you're trying to run only does blocking network I/O or file I/O, then
you're better off using :doc:`worker threads <threads>` instead.
This is done by using :func:`.to_interpreter.run_sync`::
import time
from anyio import run, to_interpreter
from yourothermodule import cpu_intensive_function
async def main():
result = await to_interpreter.run_sync(
cpu_intensive_function, 'Hello, ', 'world!'
)
print(result)
run(main)
Limitations
-----------
* Subinterpreters are only supported on Python 3.13 or later
* Code in the ``__main__`` module cannot be run with this (as a consequence, this
applies to any functions defined in the REPL)
* The target functions cannot react to cancellation
* Unlike with threads, the code running in the subinterpreter cannot share mutable data
with other interpreters/threads (however, sharing *immutable* data is fine)
anyio-4.11.0/docs/subprocesses.rst 0000664 0000000 0000000 00000007665 15064462627 0017150 0 ustar 00root root 0000000 0000000 Using subprocesses
==================
.. py:currentmodule:: anyio
AnyIO allows you to run arbitrary executables in subprocesses, either as a one-shot call
or by opening a process handle for you that gives you more control over the subprocess.
You can either give the command as a string, in which case it is passed to your default
shell (equivalent to ``shell=True`` in :func:`subprocess.run`), or as a sequence of
strings (``shell=False``) in which case the executable is the first item in the sequence
and the rest are arguments passed to it.
Running one-shot commands
-------------------------
To run an external command with one call, use :func:`~run_process`::
from anyio import run_process, run
async def main():
result = await run_process('ps')
print(result.stdout.decode())
run(main)
The snippet above runs the ``ps`` command within a shell. To run it directly::
from anyio import run_process, run
async def main():
result = await run_process(['ps'])
print(result.stdout.decode())
run(main)
Working with processes
----------------------
When you have more complex requirements for your interaction with subprocesses, you can
launch one with :func:`~open_process`::
from anyio import open_process, run
from anyio.streams.text import TextReceiveStream
async def main():
async with await open_process(['ps']) as process:
async for text in TextReceiveStream(process.stdout):
print(text)
run(main)
See the API documentation of :class:`~.abc.Process` for more information.
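You can also write to the process's standard input (a minimal sketch, assuming a POSIX
``cat`` executable)::

    from anyio import open_process, run

    async def main():
        async with await open_process(['cat']) as process:
            await process.stdin.send(b'hello\n')
            await process.stdin.aclose()  # signal EOF so "cat" exits
            print(await process.stdout.receive())
            await process.wait()

    run(main)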
.. _RunInProcess:
Running functions in worker processes
-------------------------------------
When you need to run CPU intensive code, worker processes are better than threads
because, with the exception of the experimental free-threaded builds of Python 3.13 and
later, current implementations of Python cannot run Python code in multiple threads at
once.
Exceptions to this rule are:
#. Blocking I/O operations
#. C extension code that explicitly releases the Global Interpreter Lock
#. :doc:`Subinterpreter workers <subinterpreters>`
(experimental; available on Python 3.13 and later)
If the code you wish to run does not belong in this category, it's best to use worker
processes instead in order to take advantage of multiple CPU cores.
This is done by using :func:`.to_process.run_sync`::
import time
from anyio import run, to_process
def cpu_intensive_function(arg1, arg2):
time.sleep(1)
return arg1 + arg2
async def main():
result = await to_process.run_sync(cpu_intensive_function, 'Hello, ', 'world!')
print(result)
# This check is important when the application uses to_process.run_sync()
if __name__ == '__main__':
run(main)
Technical details
*****************
There are some limitations regarding the arguments and return values passed:
* the arguments must be pickleable (using the highest available protocol)
* the return value must be pickleable (using the highest available protocol)
* the target callable must be importable (lambdas and inner functions won't work)
Other considerations:
* Even ``cancellable=False`` runs can be cancelled before the request has been sent to
the worker process
* If a cancellable call is cancelled during execution on the worker process, the worker
process will be killed
* The worker process imports the parent's ``__main__`` module, so guarding for any
import time side effects using ``if __name__ == '__main__':`` is required to avoid
infinite recursion
* ``sys.stdin``, ``sys.stdout`` and ``sys.stderr`` are redirected to ``/dev/null``, so
  :func:`print` and :func:`input` won't work
* Worker processes terminate after 5 minutes of inactivity, or when the event loop is
finished
* On asyncio, either :func:`asyncio.run` or :func:`anyio.run` must be used for proper
cleanup to happen
* Multiprocessing-style synchronization primitives are currently not available
anyio-4.11.0/docs/support.rst 0000664 0000000 0000000 00000001342 15064462627 0016126 0 ustar 00root root 0000000 0000000 Getting help
============
If you are having trouble with AnyIO, make sure you've first checked the
:doc:`FAQ <faq>` to see if your question is answered there. If not, you have a couple of
ways to get support:
* Post a question on `Stack Overflow`_ and use the ``anyio`` tag
* Join the `python-trio/AnyIO`_ room on Gitter
.. _Stack Overflow: https://stackoverflow.com/
.. _python-trio/AnyIO: https://gitter.im/python-trio/AnyIO
Reporting bugs
==============
If you're fairly certain that you have discovered a bug, you can `file an issue`_ on
Github. If you feel unsure, come talk to us first! The issue tracker is **not** the
proper venue for asking support questions.
.. _file an issue: https://github.com/agronholm/anyio/issues
anyio-4.11.0/docs/synchronization.rst 0000664 0000000 0000000 00000020717 15064462627 0017662 0 ustar 00root root 0000000 0000000 Using synchronization primitives
================================
.. py:currentmodule:: anyio
Synchronization primitives are objects that are used by tasks to communicate and
coordinate with each other. They are useful for things like distributing workload,
notifying other tasks and guarding access to shared resources.
.. note:: AnyIO primitives are not thread-safe, therefore they should not be used
directly from worker threads. Use :func:`~from_thread.run_sync` for that.
Events
------
Events (:class:`Event`) are used to notify tasks that something they've been waiting to
happen has happened. An event object can have multiple listeners and they are all
notified when the event is triggered.
Example::
from anyio import Event, create_task_group, run
async def notify(event):
event.set()
async def main():
event = Event()
async with create_task_group() as tg:
tg.start_soon(notify, event)
await event.wait()
print('Received notification!')
run(main)
# Output:
# Received notification!
.. note:: Unlike standard library Events, AnyIO events cannot be reused, and must be
replaced instead. This practice prevents a class of race conditions, and matches the
semantics of the Trio library.
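For example, instead of "clearing" an event for reuse, replace it with a fresh one (a
minimal sketch)::

    from anyio import Event

    event = Event()
    event.set()

    # There is no event.clear(); create a new Event instead
    event = Event()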
Semaphores
----------
Semaphores (:class:`Semaphore`) are used for limiting access to a shared resource. A
semaphore starts with a maximum value, which is decremented each time the semaphore is
acquired by a task and incremented when it is released. If the value drops to zero, any
attempt to acquire the semaphore will block until another task frees it.
Example::
from anyio import Semaphore, create_task_group, sleep, run
async def use_resource(tasknum, semaphore):
async with semaphore:
print(f"Task number {tasknum} is now working with the shared resource")
await sleep(1)
async def main():
semaphore = Semaphore(2)
async with create_task_group() as tg:
for num in range(10):
tg.start_soon(use_resource, num, semaphore)
run(main)
# Output:
# Task number 0 is now working with the shared resource
# Task number 1 is now working with the shared resource
# Task number 2 is now working with the shared resource
# Task number 3 is now working with the shared resource
# Task number 4 is now working with the shared resource
# Task number 5 is now working with the shared resource
# Task number 6 is now working with the shared resource
# Task number 7 is now working with the shared resource
# Task number 8 is now working with the shared resource
# Task number 9 is now working with the shared resource
.. tip:: If the performance of semaphores is critical for you, you could pass
``fast_acquire=True`` to :class:`Semaphore`. This has the effect of skipping the
:func:`~.lowlevel.cancel_shielded_checkpoint` call in :meth:`Semaphore.acquire` if
there is no contention (acquisition succeeds immediately). This could, in some cases,
lead to the task never yielding control back to the event loop if you use the
semaphore in a loop that does not have other yield points.
Locks
-----
Locks (:class:`Lock`) are used to guard shared resources to ensure sole access to a
single task at once. They function much like semaphores with a maximum value of 1,
except that only the task that acquired the lock is allowed to release it.
Example::
from anyio import Lock, create_task_group, sleep, run
async def use_resource(tasknum, lock):
async with lock:
print('Task number', tasknum, 'is now working with the shared resource')
await sleep(1)
async def main():
lock = Lock()
async with create_task_group() as tg:
for num in range(4):
tg.start_soon(use_resource, num, lock)
run(main)
# Output:
# Task number 0 is now working with the shared resource
# Task number 1 is now working with the shared resource
# Task number 2 is now working with the shared resource
# Task number 3 is now working with the shared resource
.. tip:: If the performance of locks is critical for you, you could pass
``fast_acquire=True`` to :class:`Lock`. This has the effect of skipping the
:func:`~.lowlevel.cancel_shielded_checkpoint` call in :meth:`Lock.acquire` if there
is no contention (acquisition succeeds immediately). This could, in some cases, lead
to the task never yielding control back to the event loop if you use the lock in a
loop that does not have other yield points.
Conditions
----------
A condition is basically a combination of an event and a lock. It first acquires a lock
and then waits for a notification from the event. Once the condition receives a
notification, it releases the lock. The notifying task can also choose to wake up more
than one listener at once, or even all of them.
Like :class:`Lock`, :class:`Condition` also requires that the task which locked it also
be the one to release it.
Example::
from anyio import Condition, create_task_group, sleep, run
async def listen(tasknum, condition):
async with condition:
await condition.wait()
print('Woke up task number', tasknum)
async def main():
condition = Condition()
async with create_task_group() as tg:
for tasknum in range(6):
tg.start_soon(listen, tasknum, condition)
await sleep(1)
async with condition:
condition.notify(1)
await sleep(1)
async with condition:
condition.notify(2)
await sleep(1)
async with condition:
condition.notify_all()
run(main)
# Output:
# Woke up task number 0
# Woke up task number 1
# Woke up task number 2
# Woke up task number 3
# Woke up task number 4
# Woke up task number 5
.. _capacity-limiters:
Capacity limiters
-----------------
Capacity limiters (:class:`CapacityLimiter`) are like semaphores except that a single
borrower (the current task by default) can only hold a single token at a time. It is
also possible to borrow a token on behalf of any arbitrary object, so long as that object
is hashable.
It is recommended to use capacity limiters instead of semaphores unless you intend to
allow a task to acquire multiple tokens from the same object. AnyIO uses capacity
limiters to limit the number of threads spawned.
The number of total tokens available for tasks to acquire can be adjusted by assigning
the desired value to the ``total_tokens`` property. If the value is higher than the
previous one, it will automatically wake up the appropriate number of waiting tasks.
Example::
from anyio import CapacityLimiter, create_task_group, sleep, run
async def use_resource(tasknum, limiter):
async with limiter:
print(f"Task number {tasknum} is now working with the shared resource")
await sleep(1)
async def main():
limiter = CapacityLimiter(2)
async with create_task_group() as tg:
for num in range(10):
tg.start_soon(use_resource, num, limiter)
run(main)
# Output:
# Task number 0 is now working with the shared resource
# Task number 1 is now working with the shared resource
# Task number 2 is now working with the shared resource
# Task number 3 is now working with the shared resource
# Task number 4 is now working with the shared resource
# Task number 5 is now working with the shared resource
# Task number 6 is now working with the shared resource
# Task number 7 is now working with the shared resource
# Task number 8 is now working with the shared resource
# Task number 9 is now working with the shared resource
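The ``total_tokens`` property and borrowing on behalf of another object could be used
like this (a minimal sketch; ``some_key`` stands in for any hashable object)::

    from anyio import CapacityLimiter

    async def adjust_and_borrow(limiter: CapacityLimiter) -> None:
        limiter.total_tokens = 4  # wakes up waiting tasks if raised

        some_key = 'some-resource'
        await limiter.acquire_on_behalf_of(some_key)
        try:
            ...
        finally:
            limiter.release_on_behalf_of(some_key)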
Resource guards
---------------
Some resources, such as sockets, are very sensitive about concurrent use and should not
allow even attempts to be used concurrently. For such cases, :class:`ResourceGuard` is
the appropriate solution::
    from anyio import ResourceGuard

    class Resource:
        def __init__(self):
            self._guard = ResourceGuard()

        async def do_something(self) -> None:
            with self._guard:
                ...
Now, if another task tries calling the ``do_something()`` method on the same
``Resource`` instance before the first call has finished, that will raise a
:exc:`BusyResourceError`.
Queues
------
In place of queues, AnyIO offers a more powerful construct:
:ref:`memory object streams `.
anyio-4.11.0/docs/tasks.rst 0000664 0000000 0000000 00000017073 15064462627 0015547 0 ustar 00root root 0000000 0000000 Creating and managing tasks
===========================
.. py:currentmodule:: anyio
A *task* is a unit of execution that lets you do many things concurrently that need
waiting on. This works so that while you can have any number of tasks, the asynchronous
event loop can only run one of them at a time. When the task encounters an ``await``
statement that requires the task to sleep until something happens, the event loop is
then free to work on another task. When the thing the first task was waiting on is
complete, the event loop will resume the execution of that task on the first opportunity
it gets.
Task handling in AnyIO loosely follows the Trio_ model. Tasks can be created (*spawned*)
using *task groups*. A task group is an asynchronous context manager that makes sure
that all its child tasks are finished one way or another after the context block is
exited. If a child task, or the code in the enclosed context block raises an exception,
all child tasks are cancelled. Otherwise the context manager just waits until all child
tasks have exited before proceeding.
Here's a demonstration::
from anyio import sleep, create_task_group, run
async def sometask(num: int) -> None:
print('Task', num, 'running')
await sleep(1)
print('Task', num, 'finished')
async def main() -> None:
async with create_task_group() as tg:
for num in range(5):
tg.start_soon(sometask, num)
print('All tasks finished!')
run(main)
.. _Trio: https://trio.readthedocs.io/en/latest/reference-core.html
#tasks-let-you-do-multiple-things-at-once
.. _start_initialize:
Starting and initializing tasks
-------------------------------
Sometimes it is very useful to be able to wait until a task has successfully initialized
itself. For example, when starting network services, you can have your task start the
listener and then signal the caller that initialization is done. That way, the caller
can now start another task that depends on that service being up and running. Also, if
the socket bind fails or something else goes wrong during initialization, the exception
will be propagated to the caller which can then catch and handle it.
This can be done with :meth:`TaskGroup.start() <.abc.TaskGroup.start>`::
from anyio import (
TASK_STATUS_IGNORED,
create_task_group,
connect_tcp,
create_tcp_listener,
run,
)
from anyio.abc import TaskStatus
async def handler(stream):
...
async def start_some_service(
port: int, *, task_status: TaskStatus[None] = TASK_STATUS_IGNORED
):
async with await create_tcp_listener(
local_host="127.0.0.1", local_port=port
) as listener:
task_status.started()
await listener.serve(handler)
async def main():
async with create_task_group() as tg:
await tg.start(start_some_service, 5000)
async with await connect_tcp("127.0.0.1", 5000) as stream:
...
run(main)
The target coroutine function **must** call ``task_status.started()`` because the task
that is calling :meth:`TaskGroup.start() <.abc.TaskGroup.start>` will be blocked
until then. If the spawned task never calls it, then the
:meth:`TaskGroup.start() <.abc.TaskGroup.start>` call will raise a ``RuntimeError``.
.. note:: Unlike :meth:`~.abc.TaskGroup.start_soon`, :meth:`~.abc.TaskGroup.start` needs
an ``await``.
Handling multiple errors in a task group
----------------------------------------
It is possible for more than one task to raise an exception in a task group. This can
happen when a task reacts to cancellation by entering either an exception handler block
or a ``finally:`` block and raises an exception there. This raises the question: which
exception is propagated from the task group context manager? The answer is "both". In
practice this means that a special exception, :exc:`ExceptionGroup` (or
:exc:`BaseExceptionGroup`) is raised which contains both exception objects.
To catch such exceptions potentially nested in groups, special measures are required.
On Python 3.11 and later, you can use the ``except*`` syntax to catch multiple
exceptions::
from anyio import create_task_group
try:
async with create_task_group() as tg:
tg.start_soon(some_task)
tg.start_soon(another_task)
except* ValueError as excgroup:
for exc in excgroup.exceptions:
... # handle each ValueError
except* KeyError as excgroup:
for exc in excgroup.exceptions:
... # handle each KeyError
If compatibility with older Python versions is required, you can use the ``catch()``
function from the exceptiongroup_ package::
from anyio import create_task_group
from exceptiongroup import ExceptionGroup, catch
def handle_valueerror(excgroup: ExceptionGroup) -> None:
for exc in excgroup.exceptions:
... # handle each ValueError
def handle_keyerror(excgroup: ExceptionGroup) -> None:
for exc in excgroup.exceptions:
... # handle each KeyError
with catch({
ValueError: handle_valueerror,
KeyError: handle_keyerror
}):
async with create_task_group() as tg:
tg.start_soon(some_task)
tg.start_soon(another_task)
If you need to set local variables in the handlers, declare them as ``nonlocal``::
async def yourfunc():
somevariable: str | None = None
def handle_valueerror(exc):
nonlocal somevariable
somevariable = 'whatever'
with catch({
ValueError: handle_valueerror,
KeyError: handle_keyerror
}):
async with create_task_group() as tg:
tg.start_soon(some_task)
tg.start_soon(another_task)
print(f"{somevariable=}")
.. _exceptiongroup: https://pypi.org/project/exceptiongroup/
Context propagation
-------------------
Whenever a new task is spawned, `context`_ will be copied to the new task. It is
important to note *which* context will be copied to the newly spawned task. It is not
the context of the task group's host task that will be copied, but the context of the
task that calls :meth:`TaskGroup.start() <.abc.TaskGroup.start>` or
:meth:`TaskGroup.start_soon() <.abc.TaskGroup.start_soon>`.
.. _context: https://docs.python.org/3/library/contextvars.html
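For example (a minimal sketch using :mod:`contextvars`)::

    from contextvars import ContextVar

    from anyio import create_task_group, run

    var: ContextVar[str] = ContextVar('var', default='unset')

    async def child() -> None:
        print(var.get())  # prints 'set by the spawning task'

    async def main() -> None:
        async with create_task_group() as tg:
            var.set('set by the spawning task')
            tg.start_soon(child)

    run(main)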
Differences with asyncio.TaskGroup
----------------------------------
The :class:`asyncio.TaskGroup` class, added in Python 3.11, is very similar in design to
the AnyIO :class:`~.abc.TaskGroup` class. The asyncio counterpart has some important
differences in its semantics, however:
* The task group itself is instantiated directly, rather than using a factory function
* Tasks are spawned solely through :meth:`~asyncio.TaskGroup.create_task`; there is no
``start()`` or ``start_soon()`` method
* The :meth:`~asyncio.TaskGroup.create_task` method returns a task object which can be
awaited on (or cancelled)
* Tasks spawned via :meth:`~asyncio.TaskGroup.create_task` can only be cancelled
individually (there is no ``cancel()`` method or similar in the task group)
* When a task spawned via :meth:`~asyncio.TaskGroup.create_task` is cancelled before its
coroutine has started running, it will not get a chance to handle the cancellation
exception
* :class:`asyncio.TaskGroup` does not allow starting new tasks after an exception in
one of the tasks has triggered a shutdown of the task group
* Tasks spawned from :class:`asyncio.TaskGroup` use different cancellation semantics
(see the notes on :ref:`asyncio cancellation semantics `)
anyio-4.11.0/docs/tempfile.rst 0000664 0000000 0000000 00000006371 15064462627 0016226 0 ustar 00root root 0000000 0000000 Asynchronous Temporary File and Directory
=========================================
.. py:currentmodule:: anyio
This module provides asynchronous wrappers for handling temporary files and directories
using the :mod:`tempfile` module. The asynchronous methods execute blocking operations in worker threads.
Temporary File
--------------
:class:`TemporaryFile` creates a temporary file that is automatically deleted upon closure.
**Example:**
.. code-block:: python
from anyio import TemporaryFile, run
async def main():
async with TemporaryFile(mode="w+") as f:
await f.write("Temporary file content")
await f.seek(0)
print(await f.read()) # Output: Temporary file content
run(main)
Named Temporary File
--------------------
:class:`NamedTemporaryFile` works similarly to :class:`TemporaryFile`, but the file has a visible name in the filesystem.
**Example:**
.. code-block:: python
from anyio import NamedTemporaryFile, run
async def main():
async with NamedTemporaryFile(mode="w+", delete=True) as f:
print(f"Temporary file name: {f.name}")
await f.write("Named temp file content")
await f.seek(0)
print(await f.read())
run(main)
Spooled Temporary File
----------------------
:class:`SpooledTemporaryFile` is useful when temporary data is small and should be kept in memory rather than written to disk.
**Example:**
.. code-block:: python
from anyio import SpooledTemporaryFile, run
async def main():
async with SpooledTemporaryFile(max_size=1024, mode="w+") as f:
await f.write("Spooled temp file content")
await f.seek(0)
print(await f.read())
run(main)
Temporary Directory
-------------------
:class:`TemporaryDirectory` provides an asynchronous way to create temporary directories.
**Example:**
.. code-block:: python
from anyio import TemporaryDirectory, run
async def main():
async with TemporaryDirectory() as temp_dir:
print(f"Temporary directory path: {temp_dir}")
run(main)
Low-Level Temporary File and Directory Creation
-----------------------------------------------
For more control, the module provides lower-level functions:
- :func:`mkstemp` - Creates a temporary file and returns a tuple of file descriptor and path.
- :func:`mkdtemp` - Creates a temporary directory and returns the directory path.
- :func:`gettempdir` - Returns the path of the default temporary directory.
- :func:`gettempdirb` - Returns the path of the default temporary directory in bytes.
**Example:**
.. code-block:: python
from anyio import mkstemp, mkdtemp, gettempdir, run
import os
async def main():
fd, path = await mkstemp(suffix=".txt", prefix="mkstemp_", text=True)
print(f"Created temp file: {path}")
temp_dir = await mkdtemp(prefix="mkdtemp_")
print(f"Created temp dir: {temp_dir}")
print(f"Default temp dir: {await gettempdir()}")
os.remove(path)
run(main)
.. note::
Using these functions requires manual cleanup of the created files and directories.
.. seealso::
- Python Standard Library: :mod:`tempfile` (`official documentation <https://docs.python.org/3/library/tempfile.html>`_)
anyio-4.11.0/docs/testing.rst 0000664 0000000 0000000 00000024553 15064462627 0016100 0 ustar 00root root 0000000 0000000 Testing with AnyIO
==================
AnyIO provides built-in support for testing your library or application in the form of a
pytest_ plugin. This plugin is part of the AnyIO distribution, so nothing extra needs to
be installed to use it.
.. _pytest: https://docs.pytest.org/en/latest/
Creating asynchronous tests
---------------------------
Pytest does not natively support running asynchronous test functions, so they have to be
marked for the AnyIO pytest plugin to pick them up. This can be done in one of three
ways:
#. Setting the ``anyio_mode = "auto"`` option in the pytest configuration
#. Using the ``pytest.mark.anyio`` marker
#. Using the ``anyio_backend`` fixture, either directly or via another fixture
The simplest way is thus the following:
.. code-block:: toml
[tool.pytest.ini_options]
anyio_mode = "auto"
.. note:: This does not work if ``pytest-asyncio`` is installed and configured to use
its own ``auto`` mode, as it will conflict with the AnyIO plugin. To prevent this
from happening, you can remove the ``asyncio_mode`` option from your pytest
configuration, thus making ``pytest-asyncio`` use its default strict mode.
In case your AnyIO tests need to coexist with other async test plugins, the next best
option is to use the ``pytest.mark.anyio`` marker::
import pytest
# This is the same as using the @pytest.mark.anyio on all test functions in the module
pytestmark = pytest.mark.anyio
async def test_something():
...
.. note:: The marker only affects asynchronous test functions.
Marking modules, classes or functions with this marker has the same effect as applying
the ``pytest.mark.usefixtures('anyio_backend')`` on them.
Thus, you can also require the fixture directly in your tests and fixtures::
import pytest
async def test_something(anyio_backend):
...
Specifying the backends to run on
---------------------------------
The ``anyio_backend`` fixture determines the backends and their options that tests and
fixtures are run with. The AnyIO pytest plugin comes with a function scoped fixture with
this name which runs everything on all supported backends.
If you want to change the backends/options for the entire project, put something like this
in your top level ``conftest.py``::
@pytest.fixture
def anyio_backend():
return 'asyncio'
If you want to specify different options for the selected backend, you can do so by
passing a tuple of (backend name, options dict)::
@pytest.fixture(params=[
pytest.param(('asyncio', {'use_uvloop': True}), id='asyncio+uvloop'),
pytest.param(('asyncio', {'use_uvloop': False}), id='asyncio'),
pytest.param(('trio', {'restrict_keyboard_interrupt_to_checkpoints': True}), id='trio')
])
def anyio_backend(request):
return request.param
If you need to run a single test on a specific backend, you can use
``@pytest.mark.parametrize`` (remember to add the ``anyio_backend`` parameter to the
actual test function, or pytest will complain)::
@pytest.mark.parametrize('anyio_backend', ['asyncio'])
async def test_on_asyncio_only(anyio_backend):
...
Because the ``anyio_backend`` fixture can return either a string or a tuple, there are
two additional function-scoped fixtures (which themselves depend on the
``anyio_backend`` fixture) provided for your convenience:
* ``anyio_backend_name``: the name of the backend (e.g. ``asyncio``)
* ``anyio_backend_options``: the dictionary of option keywords used to run the backend
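For example, a test could use ``anyio_backend_name`` to skip itself on a particular
backend (a minimal sketch)::

    import pytest

    async def test_something(anyio_backend_name):
        if anyio_backend_name == 'trio':
            pytest.skip('not applicable on trio')
        ...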
Asynchronous fixtures
---------------------
The plugin also supports coroutine functions as fixtures, for the purpose of setting up
and tearing down asynchronous services used for tests.
There are two ways to get the AnyIO pytest plugin to run your asynchronous fixtures:
#. Use them in AnyIO enabled tests (see the first section)
#. Use the ``anyio_backend`` fixture (or any other fixture using it) in the fixture
itself
The simplest way is using the first option::
import pytest
pytestmark = pytest.mark.anyio
@pytest.fixture
async def server():
server = await setup_server()
yield server
await server.shutdown()
async def test_server(server):
result = await server.do_something()
assert result == 'foo'
For ``autouse=True`` fixtures, you may need to use the other approach::
@pytest.fixture(autouse=True)
async def server(anyio_backend):
server = await setup_server()
yield
await server.shutdown()
async def test_server():
result = await client.do_something_on_the_server()
assert result == 'foo'
Using async fixtures with higher scopes
---------------------------------------
For async fixtures with scopes other than ``function``, you will need to define your own
``anyio_backend`` fixture because the default ``anyio_backend`` fixture is function
scoped::
@pytest.fixture(scope='module')
def anyio_backend():
return 'asyncio'
@pytest.fixture(scope='module')
async def server(anyio_backend):
server = await setup_server()
yield
await server.shutdown()
Built-in utility fixtures
-------------------------
Some useful pytest fixtures are provided to make testing network services easier:
* ``free_tcp_port_factory``: session scoped fixture returning a callable
(:class:`~.pytest_plugin.FreePortFactory`) that generates unused TCP port numbers
* ``free_udp_port_factory``: session scoped fixture returning a callable
(:class:`~.pytest_plugin.FreePortFactory`) that generates unused UDP port numbers
* ``free_tcp_port``: function level fixture that invokes the ``free_tcp_port_factory``
fixture to generate a free TCP port number
* ``free_udp_port``: function level fixture that invokes the ``free_udp_port_factory``
fixture to generate a free UDP port number
The use of these fixtures, in place of hard-coded port numbers, will help avoid errors
due to a port already being allocated. In particular, they are a must for running
multiple instances of the same test suite concurrently, whether via ``pytest-xdist``,
``tox`` or similar tools that can run the test suite in multiple interpreters in
parallel.
For example, you could set up a network listener on a free port and then connect
to it::
from anyio import connect_tcp, create_task_group, create_tcp_listener
from anyio.abc import SocketStream
async def test_echo(free_tcp_port: int) -> None:
async def handle(client_stream: SocketStream) -> None:
async with client_stream:
payload = await client_stream.receive()
await client_stream.send(payload[::-1])
async with (
await create_tcp_listener(local_port=free_tcp_port) as listener,
create_task_group() as tg
):
tg.start_soon(listener.serve, handle)
async with await connect_tcp("127.0.0.1", free_tcp_port) as stream:
await stream.send(b"hello")
assert await stream.receive() == b"olleh"
tg.cancel_scope.cancel()
.. warning:: It is possible in rare cases, particularly in local development, that
another process could bind to the port returned by one of these fixtures before your
code can do the same, leading to an :exc:`OSError` with the ``EADDRINUSE`` code. It
is advisable to just rerun the test if this happens.
This is mostly useful with APIs that don't natively offer any way to bind to ephemeral
ports (and retrieve those ports after binding). If you're working with AnyIO's own APIs,
however, you could make use of this native capability::
from anyio import connect_tcp, create_task_group, create_tcp_listener
from anyio.abc import SocketAttribute, SocketStream
async def test_echo() -> None:
async def handle(client_stream: SocketStream) -> None:
async with client_stream:
payload = await client_stream.receive()
await client_stream.send(payload[::-1])
async with (
await create_tcp_listener(local_host="127.0.0.1") as listener,
create_task_group() as tg
):
tg.start_soon(listener.serve, handle)
port = listener.extra(SocketAttribute.local_port)
async with await connect_tcp("127.0.0.1", port) as stream:
await stream.send(b"hello")
assert await stream.receive() == b"olleh"
tg.cancel_scope.cancel()
.. versionadded:: 4.9.0
Technical details
-----------------
The fixtures and tests are run by a "test runner", implemented separately for each
backend. The test runner keeps an event loop open during the request, making it possible
for code in fixtures to communicate with the code in the tests (and each other).
The test runner is created when the first matching async test or fixture is about to be
run, and shut down when that same fixture is being torn down or the test has finished
running. As such, if no higher-order (scoped ``class`` or higher) async fixtures are
used, a separate test runner is created for each matching test. Conversely, if even one
async fixture, scoped higher than ``function``, is shared across all tests, only one
test runner will be created during the test session.
Context variable propagation
++++++++++++++++++++++++++++
The asynchronous test runner runs all async fixtures and tests in the same task, so
context variables set in async fixtures or tests, within an async test runner, will
affect other async fixtures and tests within the same runner. However, these context
variables are **not** carried over to synchronous tests and fixtures, or to other async
test runners.
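The following sketch illustrates this behavior (the ``request_id`` variable and the
fixture are hypothetical)::

    from contextvars import ContextVar

    import pytest

    pytestmark = pytest.mark.anyio

    request_id = ContextVar('request_id')

    @pytest.fixture
    async def set_request_id():
        request_id.set('test-123')

    async def test_request_id(set_request_id):
        # The fixture and the test run in the same task, so the value is visible
        assert request_id.get() == 'test-123'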
Comparison with other async test runners
++++++++++++++++++++++++++++++++++++++++
The ``pytest-asyncio`` library only works with asyncio code. Like the AnyIO pytest
plugin, it can be made to support higher order fixtures (by specifying a higher order
``event_loop`` fixture). However, it runs the setup and teardown phases of each async
fixture in a new async task per operation, making context variable propagation
impossible and preventing task groups and cancel scopes from functioning properly.
The ``pytest-trio`` library, made for testing Trio projects, works only with Trio code.
Additionally, it only supports function scoped async fixtures. Another significant
difference from the AnyIO pytest plugin is that ``pytest-trio`` attempts to run the
setup and teardown of async fixtures concurrently when their dependency graphs allow
it.
anyio-4.11.0/docs/threads.rst 0000664 0000000 0000000 00000025014 15064462627 0016046 0 ustar 00root root 0000000 0000000 Working with threads
====================
.. py:currentmodule:: anyio
Practical asynchronous applications occasionally need to run network, file or
computationally expensive operations. Such operations would normally block the
asynchronous event loop, leading to performance issues. The solution is to run such code
in *worker threads*. Using worker threads lets the event loop continue running other
tasks while the worker thread runs the blocking call.
Running a function in a worker thread
-------------------------------------
To run a (synchronous) callable in a worker thread::
import time
from anyio import to_thread, run
async def main():
await to_thread.run_sync(time.sleep, 5)
run(main)
By default, tasks are shielded from cancellation while they are waiting for a worker
thread to finish. You can pass the ``abandon_on_cancel=True`` parameter to allow such tasks to
be cancelled. Note, however, that the thread will still continue running – only its
outcome will be ignored.
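For example (a minimal sketch), the following cancels the wait after 2 seconds while the
abandoned worker thread keeps sleeping in the background::

    import time

    from anyio import move_on_after, to_thread, run

    async def main():
        with move_on_after(2):
            # The await is cancelled right away, but time.sleep(5) keeps
            # running in the abandoned worker thread
            await to_thread.run_sync(time.sleep, 5, abandon_on_cancel=True)
        print('moved on, but the thread is still sleeping')

    run(main)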
.. seealso:: :ref:`RunInProcess`
Calling asynchronous code from a worker thread
----------------------------------------------
If you need to call a coroutine function from a worker thread, you can do this::
from anyio import from_thread, sleep, to_thread, run
def blocking_function():
from_thread.run(sleep, 5)
async def main():
await to_thread.run_sync(blocking_function)
run(main)
.. note:: The worker thread must have been spawned using :func:`~to_thread.run_sync` for
this to work.
Calling synchronous code from a worker thread
---------------------------------------------
Occasionally you may need to call synchronous code in the event loop thread from a
worker thread. Common cases include setting asynchronous events or sending data to a
memory object stream. Because these methods aren't thread safe, you need to arrange for
them to be called inside the event loop thread using :func:`~.from_thread.run_sync`::
import time
from anyio import Event, from_thread, to_thread, run
def worker(event):
time.sleep(1)
from_thread.run_sync(event.set)
async def main():
event = Event()
await to_thread.run_sync(worker, event)
await event.wait()
run(main)
Accessing the event loop from a foreign thread
----------------------------------------------
If you need to run code in the event loop from a thread that is not an AnyIO worker
thread (that wasn't spawned by :func:`anyio.to_thread.run_sync`), there are two ways you
can do this:
#. Obtain an *event loop token* from :func:`~.lowlevel.current_token` and then pass that
as ``token`` to either :func:`~.from_thread.run` or :func:`~.from_thread.run_sync`
(whichever is appropriate)
#. Run a :class:`~.from_thread.BlockingPortal` in an existing task and make the portal
object available to the external thread
The first method is the easier one::
from threading import Thread
from anyio import Event, run, from_thread
from anyio.lowlevel import current_token
def external_func(event, token):
# Enter the event loop using the given token to set the asynchronous event
from_thread.run_sync(event.set, token=token)
async def main():
event = Event()
# Start a new thread, independent of AnyIO's worker threads
thread = Thread(target=external_func, args=[event, current_token()])
thread.start()
# Wait for the external thread to set the event
await event.wait()
run(main)
The next section will demonstrate how to do the same with blocking portals.
Running code from threads using blocking portals
------------------------------------------------
Blocking portals (:class:`~.from_thread.BlockingPortal`) offer a somewhat more
comprehensive array of functionality for accessing event loops from other threads than
just running one-off functions with :func:`~.from_thread.run` or
:func:`~.from_thread.run_sync`. A blocking portal runs its own task group, allowing the
portal to spawn new tasks and thus offer extra functionality that requires task
spawning, such as wrapping asynchronous context managers.
Starting a blocking portal
++++++++++++++++++++++++++
There are two principal ways to create a blocking portal:
#. Running it in a task in an existing event loop
#. Starting a dedicated event loop in a new thread
The first option involves using a :class:`~.BlockingPortal` instance as an async context
manager and keeping it open::
from anyio import to_thread, run
from anyio.from_thread import BlockingPortal
async def async_func() -> None:
print("This runs on the event loop")
def sync_func_run_in_thread(portal: BlockingPortal) -> None:
portal.call(async_func)
async def main():
async with BlockingPortal() as portal:
# Here the portal stays open until the worker thread has run the function
await to_thread.run_sync(sync_func_run_in_thread, portal)
run(main)
The second option involves using :func:`~.from_thread.start_blocking_portal` to launch a new
event loop in its own dedicated thread::
from anyio.from_thread import start_blocking_portal
async def async_func() -> None:
print("This runs on the event loop")
with start_blocking_portal() as portal:
portal.call(async_func)
.. note:: The event loop is shut down as soon as you exit the context manager.
Spawning tasks
++++++++++++++
To spawn a task from the blocking portal, you can use
:meth:`~.BlockingPortal.start_task_soon`. It will return a
:class:`~concurrent.futures.Future` object that you can wait on to get the result when
the task finishes::
from concurrent.futures import as_completed
from anyio import sleep
from anyio.from_thread import start_blocking_portal
async def long_running_task(index):
await sleep(1)
print(f'Task {index} running...')
await sleep(index)
return f'Task {index} return value'
with start_blocking_portal() as portal:
futures = [portal.start_task_soon(long_running_task, i) for i in range(1, 5)]
for future in as_completed(futures):
print(future.result())
Cancelling tasks spawned this way can be done by cancelling the returned
:class:`~concurrent.futures.Future`.
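For example (a minimal sketch)::

    from anyio import sleep
    from anyio.from_thread import start_blocking_portal

    async def long_running_task():
        await sleep(100)

    with start_blocking_portal() as portal:
        future = portal.start_task_soon(long_running_task)
        future.cancel()  # cancels the task in the event loop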
Blocking portals also have a method similar to
:meth:`TaskGroup.start() <.abc.TaskGroup.start>`:
:meth:`~.BlockingPortal.start_task` which, like its counterpart, waits for the callable
to signal readiness by calling ``task_status.started()``::
from anyio import sleep, TASK_STATUS_IGNORED
from anyio.from_thread import start_blocking_portal
async def service_task(*, task_status=TASK_STATUS_IGNORED):
task_status.started('STARTED')
await sleep(1)
return 'DONE'
with start_blocking_portal() as portal:
future, start_value = portal.start_task(service_task)
print('Task has started with value', start_value)
return_value = future.result()
print('Task has finished with return value', return_value)
Using asynchronous context managers
+++++++++++++++++++++++++++++++++++
You can use :meth:`~.BlockingPortal.wrap_async_context_manager` to wrap an asynchronous
context manager as a synchronous one::
from anyio.from_thread import start_blocking_portal
class AsyncContextManager:
async def __aenter__(self):
print('entering')
async def __aexit__(self, exc_type, exc_val, exc_tb):
print('exiting with', exc_type)
async_cm = AsyncContextManager()
with start_blocking_portal() as portal, portal.wrap_async_context_manager(async_cm):
print('inside the context manager block')
.. note:: You cannot use wrapped async context managers in synchronous callbacks inside
the event loop thread.
Starting an on-demand, shared blocking portal
+++++++++++++++++++++++++++++++++++++++++++++
If you're building a synchronous API that needs to start a blocking portal on demand,
you might need a more efficient solution than just starting a blocking portal for each
call. To that end, you can use :class:`~.from_thread.BlockingPortalProvider`::
from anyio.from_thread import BlockingPortalProvider
class MyAPI:
def __init__(self, async_obj) -> None:
self._async_obj = async_obj
self._portal_provider = BlockingPortalProvider()
def do_stuff(self) -> None:
with self._portal_provider as portal:
portal.call(self._async_obj.do_async_stuff)
Now, no matter how many threads call the ``do_stuff()`` method on a ``MyAPI`` instance
at the same time, the same blocking portal will be used to handle the async calls
inside. It's easy to see that this is much more efficient than having each call spawn
its own blocking portal.
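A hypothetical usage sketch (``async_obj`` is assumed to be defined elsewhere)::

    from concurrent.futures import ThreadPoolExecutor

    api = MyAPI(async_obj)
    with ThreadPoolExecutor(max_workers=5) as executor:
        # All of these calls share the same blocking portal
        for _ in range(5):
            executor.submit(api.do_stuff)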
Context propagation
-------------------
When running functions in worker threads, the current context is copied to the worker
thread. Therefore any context variables available on the task will also be available to
the code running on the thread. As always with context variables, any changes made to
them will not propagate back to the calling asynchronous task.
When calling asynchronous code from worker threads, context is again copied to the task
that calls the target function in the event loop thread.
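A minimal sketch demonstrating this behavior::

    from contextvars import ContextVar

    from anyio import to_thread, run

    user = ContextVar('user', default='nobody')

    def get_user():
        # The context was copied, so the value set in the task is visible here
        return user.get()

    async def main():
        user.set('alice')
        print(await to_thread.run_sync(get_user))  # prints 'alice'

    run(main)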
Adjusting the default maximum worker thread count
-------------------------------------------------
The default AnyIO worker thread limiter has a value of **40**, meaning that any calls
to :func:`.to_thread.run_sync` without an explicit ``limiter`` argument will cause a
maximum of 40 threads to be spawned. You can adjust this limit like this::
from anyio import to_thread
async def foo():
# Set the maximum number of worker threads to 60
to_thread.current_default_thread_limiter().total_tokens = 60
.. note:: AnyIO's default thread pool limiter does not affect the default thread pool
executor on :mod:`asyncio`.
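If you'd rather not modify the default limiter, you can instead pass a dedicated
:class:`CapacityLimiter` via the ``limiter`` argument of :func:`.to_thread.run_sync`.
A minimal sketch::

    import time
    from functools import partial

    from anyio import CapacityLimiter, create_task_group, to_thread, run

    async def main():
        # Allow at most 4 of these calls to run in worker threads concurrently
        limiter = CapacityLimiter(4)
        async with create_task_group() as tg:
            for _ in range(10):
                tg.start_soon(partial(to_thread.run_sync, time.sleep, 1, limiter=limiter))

    run(main)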
Reacting to cancellation in worker threads
------------------------------------------
While there is no mechanism in Python to cancel code running in a thread, AnyIO provides
a mechanism that allows user code to voluntarily check if the host task's scope has been
cancelled, and if it has, raise a cancellation exception. This can be done by simply
calling :func:`from_thread.check_cancelled`::
import time
from anyio import to_thread, from_thread, move_on_after
def sync_function():
while True:
from_thread.check_cancelled()
print("Not cancelled yet")
time.sleep(1)
async def foo():
with move_on_after(3):
await to_thread.run_sync(sync_function)
anyio-4.11.0/docs/typedattrs.rst 0000664 0000000 0000000 00000005633 15064462627 0016624 0 ustar 00root root 0000000 0000000 Using typed attributes
======================
.. py:currentmodule:: anyio
On AnyIO, streams and listeners can be layered on top of each other to provide extra
functionality. But when you want to look up information from one of the layers down
below, you might have to traverse the entire chain to find what you're looking for,
which is highly inconvenient. To address this, AnyIO has a system of *typed attributes*
where you can look for a specific attribute by its unique key. If a stream or listener
wrapper does not have the attribute you're looking for, it will look it up in the
wrapped instance, and that wrapper can look in its wrapped instance and so on, until the
attribute is either found or the end of the chain is reached. This also lets wrappers
override attributes from the wrapped objects when necessary.
A common use case is finding the IP address of the remote side of a TCP connection when
the stream may be either :class:`~.abc.SocketStream` or
:class:`~.streams.tls.TLSStream`. To get the remote address, you would call the stream's
:meth:`~TypedAttributeProvider.extra` method and pass
:attr:`SocketAttribute.remote_address <.abc.SocketAttribute.remote_address>` as the
argument::
from anyio import connect_tcp
from anyio.abc import SocketAttribute
async def connect(host, port, tls: bool):
stream = await connect_tcp(host, port, tls=tls)
print('Connected to', stream.extra(SocketAttribute.remote_address))
Each typed attribute provider class should document the set of attributes it provides on
its own.
Defining your own typed attributes
----------------------------------
By convention, typed attributes are stored together in a container class with other
attributes of the same category::
from anyio import TypedAttributeSet, typed_attribute
class MyTypedAttribute(TypedAttributeSet):
string_valued_attribute: str = typed_attribute()
some_float_attribute: float = typed_attribute()
To provide values for these attributes, implement the
:meth:`~.TypedAttributeProvider.extra_attributes` property in your class::
from collections.abc import Callable, Mapping
from typing import Any

from anyio import TypedAttributeProvider

class MyAttributeProvider(TypedAttributeProvider):
    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {
            MyTypedAttribute.string_valued_attribute: lambda: 'my attribute value',
            MyTypedAttribute.some_float_attribute: lambda: 6.492
        }
If your class inherits from another typed attribute provider, make sure you include its
attributes in the return value::
class AnotherAttributeProvider(MyAttributeProvider):
@property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
return {
**super().extra_attributes,
MyTypedAttribute.string_valued_attribute: lambda: 'overridden attribute value'
}
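With these providers in place, attribute lookups are done through
:meth:`~.TypedAttributeProvider.extra` (a minimal sketch)::

    provider = AnotherAttributeProvider()
    print(provider.extra(MyTypedAttribute.string_valued_attribute))  # overridden value
    print(provider.extra(MyTypedAttribute.some_float_attribute))  # 6.492, from the parent class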
anyio-4.11.0/docs/versionhistory.rst 0000664 0000000 0000000 00000170125 15064462627 0017527 0 ustar 00root root 0000000 0000000 Version history
===============
This library adheres to `Semantic Versioning 2.0 <https://semver.org/>`_.
**4.11.0**
- Added support for cancellation reasons (the ``reason`` parameter to
``CancelScope.cancel()``)
(`#975 `_)
- Bumped the minimum version of Trio to v0.31.0
- Added the ability to enter the event loop from foreign (non-worker) threads by
passing the return value of ``anyio.lowlevel.current_token()`` to
``anyio.from_thread.run()`` and ``anyio.from_thread.run_sync()`` as the ``token``
keyword argument (`#256 `_)
- Added pytest option (``anyio_mode = "auto"``) to make the pytest plugin automatically
handle all async tests
(`#971 `_)
- Added the ``anyio.Condition.wait_for()`` method for feature parity with asyncio
(`#974 `_)
- Changed the default type argument of ``anyio.abc.TaskStatus`` from ``Any`` to ``None``
(`#964 `_)
- Fixed TCP listener behavior to guarantee the same ephemeral port is used for all
socket listeners when ``local_port=0``
(`#857 `_; PR by @11kkw and @agronholm)
- Fixed inconsistency between Trio and asyncio where a TCP stream that previously
raised a ``BrokenResourceError`` on ``send()`` would still raise
``BrokenResourceError`` after the stream was closed on asyncio, but
``ClosedResourceError`` on Trio. They now both raise a ``ClosedResourceError`` in this
scenario. (`#671 `_)
**4.10.0**
- Added the ``feed_data()`` method to the ``BufferedByteReceiveStream`` class, allowing
users to inject data directly into the buffer
- Added various class methods to wrap existing sockets as listeners or socket streams:
* ``SocketListener.from_socket()``
* ``SocketStream.from_socket()``
* ``UNIXSocketStream.from_socket()``
* ``UDPSocket.from_socket()``
* ``ConnectedUDPSocket.from_socket()``
* ``UNIXDatagramSocket.from_socket()``
* ``ConnectedUNIXDatagramSocket.from_socket()``
- Added a hierarchy of connectable stream classes for transparently connecting to
various remote or local endpoints for exchanging bytes or objects
- Added context manager mix-in classes (``anyio.ContextManagerMixin`` and
``anyio.AsyncContextManagerMixin``) to help write classes that embed other context
managers, particularly cancel scopes or task groups
(`#905 `_; PR by @agronholm and
@tapetersen)
- Added the ability to specify the thread name in ``start_blocking_portal()``
(`#818 `_; PR by @davidbrochart)
- Added ``anyio.notify_closing`` to allow waking ``anyio.wait_readable``
and ``anyio.wait_writable`` before closing a socket. Among other things,
this prevents an OSError on the ``ProactorEventLoop``.
(`#896 `_; PR by @graingert)
- Incorporated several documentation improvements from the EuroPython 2025 sprint
(special thanks to the sprinters: Emmanuel Okedele, Jan Murre, Euxenia Miruna Goia and
Christoffer Fjord)
- Added a documentation page explaining why one might want to use AnyIO's APIs instead
of asyncio's
- Updated the ``to_interpreters`` module to use the public ``concurrent.interpreters``
API on Python 3.14 or later
- Fixed ``anyio.Path.copy()`` and ``anyio.Path.copy_into()`` failing on Python 3.14.0a7
- Fixed return annotation of ``__aexit__`` on async context managers. CMs which can
suppress exceptions should return ``bool``, or ``None`` otherwise.
(`#913 `_; PR by @Enegg)
- Fixed rollover boundary check in ``SpooledTemporaryFile`` so that rollover
only occurs when the buffer size exceeds ``max_size``
(`#915 `_; PR by @11kkw)
- Migrated testing and documentation dependencies from extras to dependency groups
- Fixed compatibility of ``anyio.to_interpreter`` with Python 3.14.0b2
(`#926 `_; PR by @hroncok)
- Fixed ``SyntaxWarning`` on Python 3.14 about ``return`` in ``finally``
(`#816 `_)
- Fixed RunVar name conflicts. RunVar instances with the same name should not share
storage (`#880 `_; PR by @vimfu)
- Renamed the ``BrokenWorkerIntepreter`` exception to ``BrokenWorkerInterpreter``.
The old name is available as a deprecated alias.
(`#938 `_; PR by @ayussh-verma)
- Fixed an edge case in ``CapacityLimiter`` on asyncio where a task waiting to acquire
  the limiter gets cancelled and is then granted a token before the cancellation is
  delivered, causing it to fail to notify the next waiting task
(`#947 `_)
**4.9.0**
- Added async support for temporary file handling
(`#344 `_; PR by @11kkw)
- Added 4 new fixtures for the AnyIO ``pytest`` plugin:
* ``free_tcp_port_factory``: session scoped fixture returning a callable that
generates unused TCP port numbers
* ``free_udp_port_factory``: session scoped fixture returning a callable that
generates unused UDP port numbers
* ``free_tcp_port``: function scoped fixture that invokes the
``free_tcp_port_factory`` fixture to generate a free TCP port number
* ``free_udp_port``: function scoped fixture that invokes the
``free_udp_port_factory`` fixture to generate a free UDP port number
- Added ``stdin`` argument to ``anyio.run_process()`` akin to what
``anyio.open_process()``, ``asyncio.create_subprocess_…()``, ``trio.run_process()``,
and ``subprocess.run()`` already accept (PR by @jmehnle)
- Added the ``info`` property to ``anyio.Path`` on Python 3.14
- Changed ``anyio.getaddrinfo()`` to ignore (invalid) IPv6 name resolution results when
IPv6 support is disabled in Python
- Changed ``EndOfStream`` raised from ``MemoryObjectReceiveStream.receive()`` to leave
out the ``AttributeError`` from the exception chain which was merely an implementation
detail and caused some confusion
- Fixed traceback formatting growing quadratically with level of ``TaskGroup``
nesting on asyncio due to exception chaining when raising ``ExceptionGroups``
in ``TaskGroup.__aexit__``
(`#863 `_; PR by @tapetersen)
- Fixed ``anyio.Path.iterdir()`` making a blocking call in Python 3.13
(`#873 `_; PR by @cbornet and
@agronholm)
- Fixed ``connect_tcp()`` producing cyclic references in tracebacks when raising
exceptions (`#809 `_; PR by @graingert)
- Fixed ``anyio.to_thread.run_sync()`` needlessly holding on to references of the
context, function, arguments and others until the next work item on asyncio
(PR by @Wankupi)
**4.8.0**
- Added **experimental** support for running functions in subinterpreters on Python
3.13 and later
- Added support for the ``copy()``, ``copy_into()``, ``move()`` and ``move_into()``
methods in ``anyio.Path``, available in Python 3.14
- Changed ``TaskGroup`` on asyncio to always spawn tasks non-eagerly, even if using a
task factory created via ``asyncio.create_eager_task_factory()``, to preserve expected
Trio-like task scheduling semantics (PR by @agronholm and @graingert)
- Configure ``SO_RCVBUF``, ``SO_SNDBUF`` and ``TCP_NODELAY`` on the selector
thread waker socket pair (this should improve the performance of ``wait_readable()``
and ``wait_writable()`` when using the ``ProactorEventLoop``)
(`#836 `_; PR by @graingert)
- Fixed ``AssertionError`` when using ``nest-asyncio``
(`#840 `_)
- Fixed return type annotation of various context managers' ``__exit__`` method
(`#847 `_; PR by @Enegg)
**4.7.0**
- Updated ``TaskGroup`` to work with asyncio's eager task factories
(`#764 `_)
- Added the ``wait_readable()`` and ``wait_writable()`` functions which will accept
an object with a ``.fileno()`` method or an integer handle, and deprecated
their now obsolete versions (``wait_socket_readable()`` and
``wait_socket_writable()``) (PR by @davidbrochart)
- Changed ``EventAdapter`` (an ``Event`` with no bound async backend) to allow ``set()``
to work even before an async backend is bound to it
(`#819 `_)
- Added support for ``wait_readable()`` and ``wait_writable()`` on ``ProactorEventLoop``
(used on asyncio + Windows by default)
- Fixed a misleading ``ValueError`` in the context of DNS failures
(`#815 `_; PR by @graingert)
- Fixed the return type annotations of ``readinto()`` and ``readinto1()`` methods in the
``anyio.AsyncFile`` class
(`#825 `_)
- Fixed ``TaskInfo.has_pending_cancellation()`` on asyncio returning false positives in
cleanup code on Python >= 3.11
(`#832 `_; PR by @gschaffner)
- Fixed cancelled cancel scopes on asyncio calling ``asyncio.Task.uncancel`` when
propagating a ``CancelledError`` on exit to a cancelled parent scope
(`#790 `_; PR by @gschaffner)
**4.6.2**
- Fixed regression caused by (`#807 `_)
that prevented the use of parametrized async fixtures
**4.6.1**
This release contains all the changes from both v4.5.1 and v4.6.0, plus:
- Fixed TaskGroup and CancelScope producing cyclic references in tracebacks
when raising exceptions (`#806 `_)
(PR by @graingert)
**4.6.0**
This release is the successor to v4.5.0 with Python 3.8 support dropped, and does not
contain the changes from v4.5.1.
- Dropped support for Python 3.8
(as `#698 `_ cannot be resolved
without cancel message support)
- Fixed 100% CPU use on asyncio while waiting for an exiting task group to finish while
said task group is within a cancelled cancel scope
(`#695 `_)
- Fixed cancel scopes on asyncio not propagating ``CancelledError`` on exit when the
enclosing cancel scope has been effectively cancelled
(`#698 `_)
- Fixed asyncio task groups not yielding control to the event loop at exit if there were
no child tasks to wait on
- Fixed inconsistent task uncancellation with asyncio cancel scopes belonging to a
task group when said task group has child tasks running
**4.5.1**
As Python 3.8 support was dropped in v4.6.0, this interim release was created to bring a
regression fix to Python 3.8, and adds a few other fixes also present in v4.6.1.
- Fixed acquiring a lock twice in the same task on asyncio hanging instead of raising a
``RuntimeError`` (`#798 `_)
- Fixed an async fixture's ``self`` being different than the test's ``self`` in
class-based tests (`#633 `_)
(PR by @agronholm and @graingert)
- Fixed ``TypeError`` with ``TLSStream`` on Windows when a certificate verification
error occurs when using a `truststore `_
SSL certificate (`#795 `_)
- Corrected documentation on ``anyio.Path`` regarding the limitations imposed by the
current Python version on several of its methods, and made the ``is_junction`` method
unavailable on Python versions earlier than 3.12
(`#794 `_)
**4.5.0**
- Improved the performance of ``anyio.Lock`` and ``anyio.Semaphore`` on asyncio (even up
to 50 %)
- Added the ``fast_acquire`` parameter to ``anyio.Lock`` and ``anyio.Semaphore`` to
further boost performance at the expense of safety (``acquire()`` will not yield
control back if there is no contention)
- Added support for the ``from_uri()``, ``full_match()``, ``parser`` methods/properties
in ``anyio.Path``, newly added in Python 3.13
(`#737 `_)
- Added support for more keyword arguments for ``run_process()`` and ``open_process()``:
``startupinfo``, ``creationflags``, ``pass_fds``, ``user``, ``group``,
``extra_groups`` and ``umask``
(`#742 `_)
- Improved the type annotations and support for ``PathLike`` in ``run_process()`` and
``open_process()`` to allow for path-like arguments, just like ``subprocess.Popen``
- Changed the ``ResourceWarning`` from an unclosed memory object stream to include its
address for easier identification
- Changed ``start_blocking_portal()`` to always use daemonic threads, to accommodate the
"loitering event loop" use case
- Bumped the minimum version of Trio to v0.26.1
- Fixed ``__repr__()`` of ``MemoryObjectItemReceiver``, when ``item`` is not defined
(`#767 `_; PR by @Danipulok)
- Fixed ``to_process.run_sync()`` failing to initialize if ``__main__.__file__`` pointed
to a file in a nonexistent directory
(`#696 `_)
- Fixed ``AssertionError: feed_data after feed_eof`` on asyncio when a subprocess is
closed early, before its output has been read
(`#490 `_)
- Fixed ``TaskInfo.has_pending_cancellation()`` on asyncio not respecting shielded
scopes (`#771 `_; PR by @gschaffner)
- Fixed ``SocketStream.receive()`` returning ``bytearray`` instead of ``bytes`` when
using asyncio with ``ProactorEventLoop`` (Windows)
(`#776 `_)
- Fixed quitting the debugger in a pytest test session while in an active task group
failing the test instead of exiting the test session (because the exit exception
arrives in an exception group)
- Fixed support for Linux abstract namespaces in UNIX sockets that was broken in v4.2
(`#781 `_; PR by @tapetersen)
- Fixed ``KeyboardInterrupt`` (ctrl+c) hanging the asyncio pytest runner
**4.4.0**
- Added the ``BlockingPortalProvider`` class to aid with constructing synchronous
counterparts to asynchronous interfaces that would otherwise require multiple blocking
portals
- Added ``__slots__`` to ``AsyncResource`` so that child classes can use ``__slots__``
(`#733 `_; PR by Justin Su)
- Added the ``TaskInfo.has_pending_cancellation()`` method
- Fixed two bugs with ``TaskGroup.start()`` on asyncio:
* Fixed erroneous ``RuntimeError: called 'started' twice on the same task status``
when cancelling a task in a TaskGroup created with the ``start()`` method before
the first checkpoint is reached after calling ``task_status.started()``
(`#706 `_; PR by Dominik Schwabe)
* Fixed the entire task group being cancelled if a ``TaskGroup.start()`` call gets
cancelled (`#685 `_,
`#710 `_)
- Fixed a race condition that caused crashes when multiple event loops of the same
backend were running in separate threads and simultaneously attempted to use AnyIO for
their first time (`#425 `_; PR by David
Jiricek and Ganden Schaffner)
- Fixed cancellation delivery on asyncio incrementing the wrong cancel scope's
cancellation counter when cascading a cancel operation to a child scope, thus failing
to uncancel the host task (`#716 `_)
- Fixed erroneous ``TypedAttributeLookupError`` if a typed attribute getter raises
``KeyError``
- Fixed the asyncio backend not respecting the ``PYTHONASYNCIODEBUG`` environment
variable when setting the ``debug`` flag in ``anyio.run()``
- Fixed ``SocketStream.receive()`` not detecting EOF on asyncio if there is also data in
the read buffer (`#701 `_)
- Fixed ``MemoryObjectStream`` dropping an item if the item is delivered to a recipient
that is waiting to receive an item but has a cancellation pending
(`#728 `_)
- Emit a ``ResourceWarning`` for ``MemoryObjectReceiveStream`` and
``MemoryObjectSendStream`` that were garbage collected without being closed (PR by
Andrey Kazantcev)
- Fixed ``MemoryObjectSendStream.send()`` not raising ``BrokenResourceError`` when the
last corresponding ``MemoryObjectReceiveStream`` is closed while waiting to send a
falsey item (`#731 `_; PR by Ganden
Schaffner)
**4.3.0**
- Added support for the Python 3.12 ``walk_up`` keyword argument in
``anyio.Path.relative_to()`` (PR by Colin Taylor)
- Fixed passing ``total_tokens`` to ``anyio.CapacityLimiter()`` as a keyword argument
not working on the ``trio`` backend
(`#515 `_)
- Fixed ``Process.aclose()`` not performing the minimum level of necessary cleanup when
cancelled. Previously:
- Cancellation of ``Process.aclose()`` could leak an orphan process
- Cancellation of ``run_process()`` could very briefly leak an orphan process.
- Cancellation of ``Process.aclose()`` or ``run_process()`` on Trio could leave
standard streams unclosed
(PR by Ganden Schaffner)
- Fixed ``Process.stdin.aclose()``, ``Process.stdout.aclose()``, and
``Process.stderr.aclose()`` not including a checkpoint on asyncio (PR by Ganden
Schaffner)
- Fixed documentation on how to provide your own typed attributes
**4.2.0**
- Add support for ``byte``-based paths in ``connect_unix``, ``create_unix_listeners``,
``create_unix_datagram_socket``, and ``create_connected_unix_datagram_socket``. (PR by
Lura Skye)
- Enabled the ``Event`` and ``CapacityLimiter`` classes to be instantiated outside an
event loop thread
- Broadly improved/fixed the type annotations. Among other things, many functions and
methods that take variadic positional arguments now make use of PEP 646
``TypeVarTuple`` to allow the positional arguments to be validated by static type
checkers. These changes affected numerous methods and functions, including:
* ``anyio.run()``
* ``TaskGroup.start_soon()``
* ``anyio.from_thread.run()``
* ``anyio.from_thread.run_sync()``
* ``anyio.to_thread.run_sync()``
* ``anyio.to_process.run_sync()``
* ``BlockingPortal.call()``
* ``BlockingPortal.start_task_soon()``
* ``BlockingPortal.start_task()``
(also resolves `#560 `_)
- Fixed various type annotations of ``anyio.Path`` to match Typeshed:
* ``anyio.Path.__lt__()``
* ``anyio.Path.__le__()``
* ``anyio.Path.__gt__()``
* ``anyio.Path.__ge__()``
* ``anyio.Path.__truediv__()``
* ``anyio.Path.__rtruediv__()``
* ``anyio.Path.hardlink_to()``
* ``anyio.Path.samefile()``
* ``anyio.Path.symlink_to()``
* ``anyio.Path.with_segments()``
(PR by Ganden Schaffner)
- Fixed adjusting the total number of tokens in a ``CapacityLimiter`` on asyncio failing
to wake up tasks waiting to acquire the limiter in certain edge cases (fixed with help
from Egor Blagov)
- Fixed ``loop_factory`` and ``use_uvloop`` options not being used on the asyncio
backend (`#643 `_)
- Fixed cancellation propagating on asyncio from a task group to child tasks if the task
hosting the task group is in a shielded cancel scope
(`#642 `_)
**4.1.0**
- Adapted to API changes made in Trio v0.23:
- Call ``trio.to_thread.run_sync()`` using the ``abandon_on_cancel`` keyword argument
instead of ``cancellable``
- Removed a checkpoint when exiting a task group
- Renamed the ``cancellable`` argument in ``anyio.to_thread.run_sync()`` to
``abandon_on_cancel`` (and deprecated the old parameter name)
- Bumped minimum version of Trio to v0.23
- Added support for voluntary thread cancellation via
``anyio.from_thread.check_cancelled()``
- Exposed the ``ResourceGuard`` class in the public API
(`#627 `_)
- Fixed ``RuntimeError: Runner is closed`` when running higher-scoped async generator
fixtures in some cases (`#619 `_)
- Fixed discrepancy between ``asyncio`` and ``trio`` where reraising a cancellation
exception in an ``except*`` block would incorrectly bubble out of its cancel scope
(`#634 `_)
**4.0.0**
- **BACKWARDS INCOMPATIBLE** Replaced AnyIO's own ``ExceptionGroup`` class with the PEP
654 ``BaseExceptionGroup`` and ``ExceptionGroup``
- **BACKWARDS INCOMPATIBLE** Changes to cancellation semantics:
- Any exceptions raising out of a task groups are now nested inside an
``ExceptionGroup`` (or ``BaseExceptionGroup`` if one or more ``BaseException`` were
included)
- Fixed task group not raising a cancellation exception on asyncio at exit if no child
tasks were spawned and an outer cancellation scope had been cancelled before
- Ensured that exiting a ``TaskGroup`` always hits a yield point, regardless of
whether there are running child tasks to be waited on
- On asyncio, cancel scopes will defer cancelling tasks that are scheduled to resume
with a finished future
- On asyncio and Python 3.9/3.10, cancel scopes now only suppress cancellation
exceptions if the cancel message matches the scope
- Task groups on all backends now raise a single cancellation exception when an outer
cancel scope is cancelled, and no exceptions other than cancellation exceptions are
raised in the group
- **BACKWARDS INCOMPATIBLE** Changed the pytest plugin to run all tests and fixtures in
the same task, allowing fixtures to set context variables for tests and other fixtures
- **BACKWARDS INCOMPATIBLE** Changed ``anyio.Path.relative_to()`` and
``anyio.Path.is_relative_to()`` to only accept one argument, as passing multiple
arguments is deprecated as of Python 3.12
- **BACKWARDS INCOMPATIBLE** Dropped support for spawning tasks from old-style coroutine
functions (``@asyncio.coroutine``)
- **BACKWARDS INCOMPATIBLE** The ``policy`` option on the ``asyncio`` backend was
changed to ``loop_factory`` to accommodate ``asyncio.Runner``
- Changed ``anyio.run()`` to use ``asyncio.Runner`` (or a back-ported version of it on
Pythons older than 3.11) on the ``asyncio`` backend
- Dropped support for Python 3.7
- Added support for Python 3.12
- Bumped minimum version of trio to v0.22
- Added the ``anyio.Path.is_junction()`` and ``anyio.Path.walk()`` methods
- Added ``create_unix_datagram_socket`` and ``create_connected_unix_datagram_socket`` to
create UNIX datagram sockets (PR by Jean Hominal)
- Fixed ``from_thread.run`` and ``from_thread.run_sync`` not setting sniffio on asyncio.
As a result:
- Fixed ``from_thread.run_sync`` failing when used to call sniffio-dependent functions
on asyncio
- Fixed ``from_thread.run`` failing when used to call sniffio-dependent functions on
asyncio from a thread running trio or curio
- Fixed deadlock when using ``from_thread.start_blocking_portal(backend="asyncio")``
in a thread running trio or curio (PR by Ganden Schaffner)
- Improved type annotations:
- The ``item_type`` argument of ``create_memory_object_stream`` was deprecated.
To indicate the item type handled by the stream, use
``create_memory_object_stream[T_Item]()`` instead. Type checking should no longer
fail when annotating memory object streams with uninstantiable item types (PR by
Ganden Schaffner)
- Added the ``CancelScope.cancelled_caught`` property which tells users if the cancel
scope suppressed a cancellation exception
- Fixed ``fail_after()`` raising an unwarranted ``TimeoutError`` when the cancel scope
was cancelled before reaching its deadline
- Fixed ``MemoryObjectReceiveStream.receive()`` causing the receiving task on asyncio to
remain in a cancelled state if the operation was cancelled after an item was queued to
be received by the task (but before the task could actually receive the item)
- Fixed ``TaskGroup.start()`` on asyncio not responding to cancellation from the outside
- Fixed tasks started from ``BlockingPortal`` not notifying synchronous listeners
(``concurrent.futures.wait()``) when they're cancelled
- Removed unnecessary extra waiting cycle in ``Event.wait()`` on asyncio in the case
where the event was not yet set
- Fixed processes spawned by ``anyio.to_process()`` being "lost" as unusable to the
process pool when processes that have idled over 5 minutes are pruned as part of the
``to_process.run_sync()`` call, leading to increased memory consumption
(PR by Anael Gorfinkel)
Changes since 4.0.0rc1:
- Fixed the type annotation of ``TaskGroup.start_soon()`` to accept any awaitables
(already in v3.7.0 but was missing from 4.0.0rc1)
- Changed ``CancelScope`` to also consider the cancellation count (in addition to the
cancel message) on asyncio to determine if a cancellation exception should be
swallowed on scope exit, to combat issues where third party libraries catch the
``CancelledError`` and raise another, thus erasing the original cancel message
- Worked around a `CPython bug `_ that
caused ``TLSListener.handle_handshake_error()`` on asyncio to log ``"NoneType: None"``
instead of the error (PR by Ganden Schaffner)
- Re-added the ``item_type`` argument to ``create_memory_object_stream()`` (but using it
raises a deprecation warning and does nothing with regards to the static types of the
returned streams)
- Fixed processes spawned by ``anyio.to_process()`` being "lost" as unusable to the
process pool when processes that have idled over 5 minutes are pruned as part of the
``to_process.run_sync()`` call, leading to increased memory consumption
(PR by Anael Gorfinkel)
**3.7.1**
- Fixed sending large buffers via UNIX stream sockets on asyncio
- Fixed several minor documentation issues (broken links to classes, missing classes or
attributes)
**3.7.0**
- Dropped support for Python 3.6
- Improved type annotations:
- Several functions and methods that were previously annotated as accepting
``Coroutine[Any, Any, Any]`` as the return type of the callable have been amended to
accept ``Awaitable[Any]`` instead, to allow a slightly broader set of coroutine-like
inputs, like ``async_generator_asend`` objects returned from the ``asend()`` method
of async generators, and to match the ``trio`` annotations:
- ``anyio.run()``
- ``anyio.from_thread.run()``
- ``TaskGroup.start_soon()``
- ``TaskGroup.start()``
- ``BlockingPortal.call()``
- ``BlockingPortal.start_task_soon()``
- ``BlockingPortal.start_task()``
Note that this change involved only changing the type annotations; run-time
functionality was not altered.
- The ``TaskStatus`` class is now a generic protocol, and should be parametrized to
indicate the type of the value passed to ``task_status.started()``
- The ``Listener`` class is now covariant in its stream type
- ``create_memory_object_stream()`` now allows passing only ``item_type``
- Object receive streams are now covariant and object send streams are correspondingly
contravariant
- Changed ``TLSAttribute.shared_ciphers`` to match the documented semantics of
``SSLSocket.shared_ciphers`` of always returning ``None`` for client-side streams
- Fixed ``CapacityLimiter`` on the asyncio backend to order waiting tasks in the FIFO
order (instead of LIFO) (PR by Conor Stevenson)
- Fixed ``CancelScope.cancel()`` not working on asyncio if called before entering the
scope
- Fixed ``open_signal_receiver()`` inconsistently yielding integers instead of
``signal.Signals`` instances on the ``trio`` backend
- Fixed ``to_thread.run_sync()`` hanging on asyncio if the target callable raises
``StopIteration``
- Fixed ``start_blocking_portal()`` raising an unwarranted
``RuntimeError: This portal is not running`` if a task raises an exception that causes
the event loop to be closed
- Fixed ``current_effective_deadline()`` not returning ``-inf`` on asyncio when the
currently active cancel scope has been cancelled (PR by Ganden Schaffner)
- Fixed the ``OP_IGNORE_UNEXPECTED_EOF`` flag in an SSL context created by default in
``TLSStream.wrap()`` being inadvertently set on Python 3.11.3 and 3.10.11
- Fixed ``CancelScope`` to properly handle asyncio task uncancellation on Python 3.11
(PR by Nikolay Bryskin)
- Fixed ``OSError`` when trying to use ``create_tcp_listener()`` to bind to a link-local
IPv6 address (and worked around related bugs in ``uvloop``)
- Worked around a `PyPy bug `_
when using ``anyio.getaddrinfo()`` for IPv6 link-local addresses containing
interface names
**3.6.2**
- Pinned Trio to < 0.22 to avoid incompatibility with AnyIO's ``ExceptionGroup`` class
causing ``AttributeError: 'NonBaseMultiError' object has no attribute '_exceptions'``
**3.6.1**
- Fixed exception handler in the asyncio test runner not properly handling a context
that does not contain the ``exception`` key
**3.6.0**
- Fixed ``TypeError`` in ``get_current_task()`` on asyncio when using a custom ``Task``
factory
- Updated type annotations on ``run_process()`` and ``open_process()``:
* ``command`` now accepts bytes and sequences of bytes
* ``stdin``, ``stdout`` and ``stderr`` now accept file-like objects
(PR by John T. Wodder II)
- Changed the pytest plugin to run both the setup and teardown phases of asynchronous
generator fixtures within a single task to enable use cases such as cancel scopes and
task groups where a context manager straddles the ``yield``
**3.5.0**
- Added ``start_new_session`` keyword argument to ``run_process()`` and
``open_process()`` (PR by Jordan Speicher)
- Fixed deadlock in synchronization primitives on asyncio which can happen if a task
acquiring a primitive is hit with a native (not AnyIO) cancellation with just the
right timing, leaving the next acquiring task waiting forever
(`#398 `_)
- Added workaround for bpo-46313_ to enable compatibility with OpenSSL 3.0
.. _bpo-46313: https://bugs.python.org/issue46313
**3.4.0**
- Added context propagation to/from worker threads in ``to_thread.run_sync()``,
``from_thread.run()`` and ``from_thread.run_sync()``
(`#363 `_; partially based on a PR by
Sebastián Ramírez)
**NOTE**: Requires Python 3.7 to work properly on asyncio!
- Fixed race condition in ``Lock`` and ``Semaphore`` classes when a task waiting on
``acquire()`` is cancelled while another task is waiting to acquire the same primitive
(`#387 `_)
- Fixed async context manager's ``__aexit__()`` method not being called in
``BlockingPortal.wrap_async_context_manager()`` if the host task is cancelled
(`#381 `_; PR by Jonathan Slenders)
- Fixed worker threads being marked as being event loop threads in sniffio
- Fixed task parent ID not getting set to the correct value on asyncio
- Enabled the test suite to run without IPv6 support, trio or pytest plugin autoloading
**3.3.4**
- Fixed ``BrokenResourceError`` instead of ``EndOfStream`` being raised in ``TLSStream``
when the peer abruptly closes the connection while ``TLSStream`` is receiving data
with ``standard_compatible=False`` set
**3.3.3**
- Fixed UNIX socket listener not setting accepted sockets to non-blocking mode on
asyncio
- Changed unconnected UDP sockets to be always bound to a local port (on "any"
interface) to avoid errors on asyncio + Windows
**3.3.2**
- Fixed cancellation problem on asyncio where level-triggered cancellation for **all**
parent cancel scopes would not resume after exiting a shielded nested scope
(`#370 `_)
**3.3.1**
- Added missing documentation for the ``ExceptionGroup.exceptions`` attribute
- Changed the asyncio test runner not to use uvloop by default (to match the behavior of
``anyio.run()``)
- Fixed ``RuntimeError`` on asyncio when a ``CancelledError`` is raised from a task
spawned through a ``BlockingPortal``
(`#357 `_)
- Fixed asyncio warning about a ``Future`` with an exception that was never retrieved
which happened when a socket was already written to but the peer abruptly closed the
connection
**3.3.0**
- Added asynchronous ``Path`` class
- Added the ``wrap_file()`` function for wrapping existing files as asynchronous file
objects
- Relaxed the type of the ``path`` initializer argument to ``FileReadStream`` and
``FileWriteStream`` so they accept any path-like object (including the new
asynchronous ``Path`` class)
- Dropped unnecessary dependency on the ``async_generator`` library
- Changed the generics in ``AsyncFile`` so that the methods correctly return either
``str`` or ``bytes`` based on the argument to ``open_file()``
- Fixed an asyncio bug where under certain circumstances, a stopping worker thread would
still accept new assignments, leading to a hang
**3.2.1**
- Fixed idle thread pruning on asyncio sometimes causing an expired worker thread to be
assigned a task
**3.2.0**
- Added Python 3.10 compatibility
- Added the ability to close memory object streams synchronously (including support for
use as a synchronous context manager)
- Changed the default value of the ``use_uvloop`` asyncio backend option to ``False`` to
prevent unsafe event loop policy changes in different threads
- Fixed ``to_thread.run_sync()`` hanging on the second call on asyncio when used with
``loop.run_until_complete()``
- Fixed ``to_thread.run_sync()`` prematurely marking a worker thread inactive when a
task awaiting on the result is cancelled
- Fixed ``ResourceWarning`` about an unclosed socket when UNIX socket connect fails on
asyncio
- Fixed the type annotation of ``open_signal_receiver()`` as a synchronous context
manager
- Fixed the type annotation of ``DeprecatedAwaitable(|List|Float).__await__`` to match
the ``typing.Awaitable`` protocol
**3.1.0**
- Added ``env`` and ``cwd`` keyword arguments to ``run_process()`` and ``open_process()``.
- Added support for mutation of ``CancelScope.shield`` (PR by John Belmonte)
- Added the ``sleep_forever()`` and ``sleep_until()`` functions
- Changed asyncio task groups so that if the host and child tasks have only raised
``CancelledErrors``, just one ``CancelledError`` will now be raised instead of an
``ExceptionGroup``, allowing asyncio to ignore it when it propagates out of the task
- Changed task names to be converted to ``str`` early on asyncio (PR by Thomas Grainger)
- Fixed ``sniffio._impl.AsyncLibraryNotFoundError: unknown async library, or not in
async context`` on asyncio and Python 3.6 when ``to_thread.run_sync()`` is used from
``loop.run_until_complete()``
- Fixed odd ``ExceptionGroup: 0 exceptions were raised in the task group`` appearing
under certain circumstances on asyncio
- Fixed ``wait_all_tasks_blocked()`` returning prematurely on asyncio when a previously
blocked task is cancelled (PR by Thomas Grainger)
- Fixed declared return type of ``TaskGroup.start()`` (it was declared as ``None``, but
anything can be returned from it)
- Fixed ``TextStream.extra_attributes`` raising ``AttributeError`` (PR by Thomas
Grainger)
- Fixed ``await maybe_async(current_task())`` returning ``None`` (PR by Thomas Grainger)
- Fixed: ``pickle.dumps(current_task())`` now correctly raises ``TypeError`` instead of
pickling to ``None`` (PR by Thomas Grainger)
- Fixed return type annotation of ``Event.wait()`` (``bool`` → ``None``) (PR by Thomas
Grainger)
- Fixed return type annotation of ``RunVar.get()`` to return either the type of the
default value or the type of the contained value (PR by Thomas Grainger)
- Fixed a deprecation warning message to refer to ``maybe_async()`` and not
``maybe_awaitable()`` (PR by Thomas Grainger)
- Filled in argument and return types for all functions and methods previously missing
them (PR by Thomas Grainger)
**3.0.1**
- Fixed ``to_thread.run_sync()`` raising ``RuntimeError`` on asyncio when no "root" task
could be found for setting up a cleanup callback. This was a problem at least on
Tornado and possibly also Twisted in asyncio compatibility mode. The life of worker
threads is now bound to the host task of the topmost cancel scope hierarchy
starting from the current one, or if no cancel scope is active, the current task.
**3.0.0**
- Curio support has been dropped (see the :doc:`FAQ ` as to why)
- API changes:
* **BACKWARDS INCOMPATIBLE** Submodules under ``anyio.abc.`` have been made private
(use only ``anyio.abc`` from now on).
* **BACKWARDS INCOMPATIBLE** The following method was previously a coroutine method
and has been converted into a synchronous one:
* ``MemoryObjectReceiveStream.receive_nowait()``
* The following functions and methods are no longer asynchronous but can still be
awaited on (doing so will emit a deprecation warning):
* ``current_time()``
* ``current_effective_deadline()``
* ``get_current_task()``
* ``get_running_tasks()``
* ``CancelScope.cancel()``
* ``CapacityLimiter.acquire_nowait()``
* ``CapacityLimiter.acquire_on_behalf_of_nowait()``
* ``Condition.release()``
* ``Event.set()``
* ``Lock.release()``
* ``MemoryObjectSendStream.send_nowait()``
* ``Semaphore.release()``
* The following functions now return synchronous context managers instead of
asynchronous context managers (and emit deprecation warnings if used as async
context managers):
* ``fail_after()``
* ``move_on_after()``
* ``open_cancel_scope()`` (now just ``CancelScope()``; see below)
* ``open_signal_receiver()``
* The following functions and methods have been renamed/moved (will now emit
deprecation warnings when you use them by their old names):
* ``create_blocking_portal()`` → ``anyio.from_thread.BlockingPortal()``
* ``create_capacity_limiter()`` → ``anyio.CapacityLimiter()``
* ``create_event()`` → ``anyio.Event()``
* ``create_lock()`` → ``anyio.Lock()``
* ``create_condition()`` → ``anyio.Condition()``
* ``create_semaphore()`` → ``anyio.Semaphore()``
* ``current_default_worker_thread_limiter()`` →
``anyio.to_thread.current_default_thread_limiter()``
* ``open_cancel_scope()`` → ``anyio.CancelScope()``
* ``run_sync_in_worker_thread()`` → ``anyio.to_thread.run_sync()``
* ``run_async_from_thread()`` → ``anyio.from_thread.run()``
* ``run_sync_from_thread()`` → ``anyio.from_thread.run_sync()``
* ``BlockingPortal.spawn_task`` → ``BlockingPortal.start_task_soon``
* ``CapacityLimiter.set_total_tokens()`` → ``limiter.total_tokens = ...``
* ``TaskGroup.spawn()`` → ``TaskGroup.start_soon()``
* **BACKWARDS INCOMPATIBLE** ``start_blocking_portal()`` must now be used as a context
manager (it no longer returns a ``BlockingPortal``, but a context manager that yields
one)
* **BACKWARDS INCOMPATIBLE** The ``BlockingPortal.stop_from_external_thread()`` method
was removed (use ``portal.call(portal.stop)`` instead now)
* **BACKWARDS INCOMPATIBLE** The ``SocketStream`` and ``SocketListener`` classes were
made non-generic
* Made all non-frozen dataclasses hashable with ``eq=False``
* Removed ``__slots__`` from ``BlockingPortal``
See the :doc:`migration documentation ` for instructions on how to deal
with these changes.
- Improvements to running synchronous code:
* Added the ``run_sync_from_thread()`` function
* Added the ``run_sync_in_process()`` function for running code in worker processes
(big thanks to Richard Sheridan for his help on this one!)
- Improvements to sockets and streaming:
* Added the ``UNIXSocketStream`` class which is capable of sending and receiving file
descriptors
* Added the ``FileReadStream`` and ``FileWriteStream`` classes
* ``create_unix_listener()`` now removes any existing socket at the given path before
proceeding (instead of raising ``OSError: Address already in use``)
- Improvements to task groups and cancellation:
* Added the ``TaskGroup.start()`` method and a corresponding
``BlockingPortal.start_task()`` method
* Added the ``name`` argument to ``BlockingPortal.start_task_soon()``
(renamed from ``BlockingPortal.spawn_task()``)
* Changed ``CancelScope.deadline`` to be writable
* Added the following functions in the ``anyio.lowlevel`` module:
* ``checkpoint()``
* ``checkpoint_if_cancelled()``
* ``cancel_shielded_checkpoint()``
- Improvements and changes to synchronization primitives:
* Added the ``Lock.acquire_nowait()``, ``Condition.acquire_nowait()`` and
``Semaphore.acquire_nowait()`` methods
* Added the ``statistics()`` method to ``Event``, ``Lock``, ``Condition``, ``Semaphore``,
``CapacityLimiter``, ``MemoryObjectReceiveStream`` and ``MemoryObjectSendStream``
* ``Lock`` and ``Condition`` can now only be released by the task that acquired them.
This behavior is now consistent on all backends whereas previously only Trio
enforced this.
* The ``CapacityLimiter.total_tokens`` property is now writable and
``CapacityLimiter.set_total_tokens()`` has been deprecated
* Added the ``max_value`` property to ``Semaphore``
- Asyncio specific improvements (big thanks to Thomas Grainger for his effort on most of
these!):
* Cancel scopes are now properly enforced with native asyncio coroutine functions
(without any explicit AnyIO checkpoints)
* Changed the asyncio ``CancelScope`` to raise a ``RuntimeError`` if a cancel scope is
being exited before it was even entered
* Changed the asyncio test runner to capture unhandled exceptions from asynchronous
callbacks and unbound native tasks which are then raised after the test function (or
async fixture setup or teardown) completes
* Changed the asyncio ``TaskGroup.start_soon()`` (formerly ``spawn()``) method to call
the target function immediately before starting the task, for consistency across
backends
* Changed the asyncio ``TaskGroup.start_soon()`` (formerly ``spawn()``) method to
avoid the use of a coroutine wrapper on Python 3.8+ and added a hint for hiding the
wrapper in tracebacks on earlier Pythons (supported by Pytest, Sentry etc.)
* Changed the default thread limiter on asyncio to use a ``RunVar`` so it is scoped
to the current event loop, thus avoiding potential conflict among multiple running
event loops
* Thread pooling is now used on asyncio with ``run_sync_in_worker_thread()``
* Fixed ``current_effective_deadline()`` raising ``KeyError`` on asyncio when no
cancel scope is active
- Added the ``RunVar`` class for scoping variables to the running event loop
**2.2.0**
- Added the ``maybe_async()`` and ``maybe_async_cm()`` functions to facilitate forward
compatibility with AnyIO 3
- Fixed socket stream bug on asyncio where receiving a half-close from the peer would
shut down the entire connection
- Fixed native task names not being set on asyncio on Python 3.8+
- Fixed ``TLSStream.send_eof()`` raising ``ValueError`` instead of the expected
``NotImplementedError``
- Fixed ``open_signal_receiver()`` on asyncio and curio hanging if the cancel scope was
cancelled before the function could run
- Fixed Trio test runner causing unwarranted test errors on ``BaseException``
(PR by Matthias Urlichs)
- Fixed formatted output of ``ExceptionGroup`` containing too many newlines
**2.1.0**
- Added the ``spawn_task()`` and ``wrap_async_context_manager()`` methods to
``BlockingPortal``
- Added the ``handshake_timeout`` and ``error_handler`` parameters to ``TLSListener``
- Fixed ``Event`` objects on the trio backend not inheriting from ``anyio.abc.Event``
- Fixed ``run_sync_in_worker_thread()`` raising ``UnboundLocalError`` on asyncio when
cancelled
- Fixed ``send()`` on socket streams not raising any exception on asyncio, and raising
an unwrapped ``BrokenPipeError`` on trio and curio, when the peer has disconnected
- Fixed ``MemoryObjectSendStream.send()`` raising ``BrokenResourceError`` when the last
receiver is closed right after receiving the item
- Fixed ``ValueError: Invalid file descriptor: -1`` when closing a ``SocketListener`` on
asyncio
**2.0.2**
- Fixed one more case of
``AttributeError: 'async_generator_asend' object has no attribute 'cr_await'`` on
asyncio
**2.0.1**
- Fixed broken ``MultiListener.extra()`` (PR by daa)
- Fixed ``TLSStream`` returning an empty bytes object instead of raising ``EndOfStream``
when trying to receive from the stream after a closing handshake
- Fixed ``AttributeError`` when cancelling a task group's scope inside an async test
fixture on asyncio
- Fixed ``wait_all_tasks_blocked()`` raising ``AttributeError`` on asyncio if a native
task is waiting on an async generator's ``asend()`` method
**2.0.0**
- General new features:
- Added support for subprocesses
- Added support for "blocking portals" which allow running functions in the event loop
thread from external threads
- Added the ``anyio.aclose_forcefully()`` function for closing asynchronous resources
as quickly as possible
- General changes/fixes:
- **BACKWARDS INCOMPATIBLE** Some functions have been renamed or removed (see further
below for socket/fileio API changes):
- ``finalize()`` → (removed; use ``contextlib.aclosing()`` instead)
- ``receive_signals()`` → ``open_signal_receiver()``
- ``run_in_thread()`` → ``run_sync_in_worker_thread()``
- ``current_default_thread_limiter()`` → ``current_default_worker_thread_limiter()``
- ``ResourceBusyError`` → ``BusyResourceError``
- **BACKWARDS INCOMPATIBLE** Exception classes were moved to the top level package
- Dropped support for Python 3.5
- Bumped minimum versions of trio and curio to v0.16 and v1.4, respectively
- Changed the ``repr()`` of ``ExceptionGroup`` to match trio's ``MultiError``
- Backend specific changes and fixes:
- ``asyncio``: Added support for ``ProactorEventLoop``. This allows asyncio
applications to use AnyIO on Windows even without using AnyIO as the entry point.
- ``asyncio``: The asyncio backend now uses ``asyncio.run()`` behind the scenes which
properly shuts down async generators and cancels any leftover native tasks
- ``curio``: Worked around the limitation where a task can only be cancelled twice
(any cancellations beyond that were ignored)
- ``asyncio`` + ``curio``: A cancellation check now calls ``sleep(0)``, allowing the
scheduler to switch to a different task
- ``asyncio`` + ``curio``: Host name resolution now uses `IDNA 2008`_ (with UTS 46
compatibility mapping, just like trio)
- ``asyncio`` + ``curio``: Fixed a bug where a task group would abandon its subtasks
if its own cancel scope was cancelled while it was waiting for subtasks to finish
- ``asyncio`` + ``curio``: Fixed recursive tracebacks when a single exception from an
inner task group is reraised in an outer task group
- Socket/stream changes:
- **BACKWARDS INCOMPATIBLE** The stream class structure was completely overhauled.
There are now separate abstract base classes for receive and send streams, byte
streams and reliable and unreliable object streams. Stream wrappers are much better
supported by this new ABC structure and a new "typed extra attribute" system that
lets you query the wrapper chain for the attributes you want via ``.extra(...)``.
- **BACKWARDS INCOMPATIBLE** Socket server functionality has been refactored into a
network-agnostic listener system
- **BACKWARDS INCOMPATIBLE** TLS functionality has been split off from
``SocketStream`` and can now work over any bidirectional bytes-based stream – you
can now establish a TLS encrypted communications pathway over UNIX sockets or even
memory object streams. The ``TLSRequired`` exception has also been removed as it is
no longer necessary.
- **BACKWARDS INCOMPATIBLE** Buffering functionality (``receive_until()`` and
``receive_exactly()``) was split off from ``SocketStream`` into a stream wrapper
class (``anyio.streams.buffered.BufferedByteReceiveStream``)
- **BACKWARDS INCOMPATIBLE** IPv6 addresses are now reported as 2-tuples. If the
original 4-tuple form contains a nonzero scope ID, it is appended to the address with
``%`` as the separator.
- **BACKWARDS INCOMPATIBLE** Byte streams (including socket streams) now raise
``EndOfStream`` instead of returning an empty bytes object when the stream has been
closed from the other end
- **BACKWARDS INCOMPATIBLE** The socket API has the following changes:
- ``create_tcp_server()`` → ``create_tcp_listener()``
- ``create_unix_server()`` → ``create_unix_listener()``
- ``create_udp_socket()`` had some of its parameters changed:
- ``interface`` → ``local_address``
- ``port`` → ``local_port``
- ``reuse_address`` was replaced with ``reuse_port`` (and sets ``SO_REUSEPORT``
instead of ``SO_REUSEADDR``)
- ``connect_tcp()`` had some of its parameters changed:
- ``address`` → ``remote_address``
- ``port`` → ``remote_port``
- ``bind_host`` → ``local_address``
- ``bind_port`` → (removed)
- ``autostart_tls`` → ``tls``
- ``tls_hostname`` (new parameter, used when you want to match the certificate
against something other than ``remote_address``)
- ``connect_tcp()`` now returns a ``TLSStream`` if TLS was enabled
- ``notify_socket_closing()`` was removed, as it is no longer used by AnyIO
- ``SocketStream`` has changes to its methods and attributes:
- ``address`` → ``.extra(SocketAttribute.local_address)``
- ``alpn_protocol`` → ``.extra(TLSAttribute.alpn_protocol)``
- ``close()`` → ``aclose()``
- ``get_channel_binding`` → ``.extra(TLSAttribute.channel_binding_tls_unique)``
- ``cipher`` → ``.extra(TLSAttribute.cipher)``
- ``getpeercert`` → ``.extra(SocketAttribute.peer_certificate)`` or
``.extra(SocketAttribute.peer_certificate_binary)``
- ``getsockopt()`` → ``.extra(SocketAttribute.raw_socket).getsockopt(...)``
- ``peer_address`` → ``.extra(SocketAttribute.remote_address)``
- ``receive_chunks()`` → (removed; use ``async for`` on the stream instead)
- ``receive_delimited_chunks()`` → (removed)
- ``receive_exactly()`` → ``BufferedReceiveStream.receive_exactly()``
- ``receive_some()`` → ``receive()``
- ``receive_until()`` → ``BufferedReceiveStream.receive_until()``
- ``send_all()`` → ``send()``
- ``setsockopt()`` → ``.extra(SocketAttribute.raw_socket).setsockopt(...)``
- ``shared_ciphers`` → ``.extra(TLSAttribute.shared_ciphers)``
- ``server_side`` → ``.extra(TLSAttribute.server_side)``
- ``start_tls()`` → ``stream = TLSStream.wrap(...)``
- ``tls_version`` → ``.extra(TLSAttribute.tls_version)``
- ``UDPSocket`` has changes to its methods and attributes:
- ``address`` → ``.extra(SocketAttribute.local_address)``
- ``getsockopt()`` → ``.extra(SocketAttribute.raw_socket).getsockopt(...)``
- ``port`` → ``.extra(SocketAttribute.local_port)``
- ``receive()`` no longer takes a maximum bytes argument
- ``receive_packets()`` → (removed; use ``async for`` on the UDP socket instead)
- ``send()`` → now requires an (address, port) tuple as the destination, for
compatibility with the new ``UnreliableObjectStream`` interface. The
``sendto()`` method works like the old ``send()`` method.
- ``setsockopt()`` → ``.extra(SocketAttribute.raw_socket).setsockopt(...)``
- **BACKWARDS INCOMPATIBLE** Renamed the ``max_size`` parameter to ``max_bytes``
wherever it occurred (this was inconsistently named ``max_bytes`` in some subclasses
before)
- Added memory object streams as a replacement for queues
- Added stream wrappers for encoding/decoding unicode strings
- Support for the ``SO_REUSEPORT`` option (allows binding more than one socket to the
same address/port combination, as long as they all have this option set) has been
added to TCP listeners and UDP sockets
- The ``send_eof()`` method was added to all (bidirectional) streams
- File I/O changes:
- **BACKWARDS INCOMPATIBLE** Asynchronous file I/O functionality now uses a common
code base (``anyio.AsyncFile``) instead of backend-native classes
- **BACKWARDS INCOMPATIBLE** The File I/O API has changes to its functions and
methods:
- ``aopen()`` → ``open_file()``
- ``AsyncFile.close()`` → ``AsyncFile.aclose()``
- Task synchronization changes:
- **BACKWARDS INCOMPATIBLE** Queues were replaced by memory object streams
- **BACKWARDS INCOMPATIBLE** Added the ``acquire()`` and ``release()`` methods to the
``Lock``, ``Condition`` and ``Semaphore`` classes
- **BACKWARDS INCOMPATIBLE** Removed the ``Event.clear()`` method. You must now
replace the event object with a new one rather than clear the old one.
- Fixed ``Condition.wait()`` not working on asyncio and curio (PR by Matt Westcott)
- Testing changes:
- **BACKWARDS INCOMPATIBLE** Removed the ``--anyio-backends`` command line option for
the pytest plugin. Use the ``-k`` option to do ad-hoc filtering, and the
``anyio_backend`` fixture to control which backends the tests are run against by
default.
- The pytest plugin was refactored to run the test and all its related async fixtures
inside the same event loop, making async fixtures much more useful
- Fixed Hypothesis support in the pytest plugin (it was not actually running the
Hypothesis tests at all)
.. _IDNA 2008: https://tools.ietf.org/html/rfc5895
**1.4.0**
- Added async name resolution functions (``anyio.getaddrinfo()`` and
``anyio.getnameinfo()``)
- Added the ``family`` and ``reuse_address`` parameters to ``anyio.create_udp_socket()``
(Enables multicast support; test contributed by Matthias Urlichs)
- Fixed ``fail_after(0)`` not raising a timeout error on asyncio and curio
- Fixed ``move_on_after()`` and ``fail_after()`` getting stuck on curio in some
circumstances
- Fixed socket operations not allowing timeouts to cancel the task
- Fixed API documentation on ``Stream.receive_until()`` which claimed that the delimiter
would be included in the returned data when it really isn't
- Harmonized the default task names across all backends
- ``wait_all_tasks_blocked()`` no longer considers tasks waiting on ``sleep(0)`` to be
blocked on asyncio and curio
- Fixed the type of the ``address`` parameter in ``UDPSocket.send()`` to include
``IPAddress`` objects (which were already supported by the backing implementation)
- Fixed ``UDPSocket.send()`` to resolve host names using ``anyio.getaddrinfo()`` before
calling ``socket.sendto()`` to avoid blocking on synchronous name resolution
- Switched to using ``anyio.getaddrinfo()`` for name lookups
**1.3.1**
- Fixed warnings caused by trio 0.15
- Worked around a compatibility issue between uvloop and Python 3.9 (missing
``shutdown_default_executor()`` method)
**1.3.0**
- Fixed compatibility with Curio 1.0
- Made it possible to exercise fine-grained control over which AnyIO backends and
backend options are used with each test
- Added the ``address`` and ``peer_address`` properties to the ``SocketStream``
interface
**1.2.3**
- Repackaged release (v1.2.2 contained extra files from an experimental
branch which broke imports)
**1.2.2**
- Fixed ``CancelledError`` leaking from a cancel scope on asyncio if the task previously
received a cancellation exception
- Fixed ``AttributeError`` when cancelling a generator-based task (asyncio)
- Fixed ``wait_all_tasks_blocked()`` not working with generator-based tasks (asyncio)
- Fixed an unnecessary delay in ``connect_tcp()`` if an earlier attempt succeeds
- Fixed ``AssertionError`` in ``connect_tcp()`` if multiple connection attempts succeed
simultaneously
**1.2.1**
- Fixed cancellation errors leaking from a task group when they are contained in an
exception group
- Fixed trio v0.13 compatibility on Windows
- Fixed inconsistent queue capacity across backends when capacity was defined as 0
(trio = 0, others = infinite)
- Fixed socket creation failure crashing ``connect_tcp()``
**1.2.0**
- Added the possibility to parametrize regular pytest test functions against the
selected list of backends
- Added the ``set_total_tokens()`` method to ``CapacityLimiter``
- Added the ``anyio.current_default_thread_limiter()`` function
- Added the ``cancellable`` parameter to ``anyio.run_in_thread()``
- Implemented the Happy Eyeballs (:rfc:`6555`) algorithm for ``anyio.connect_tcp()``
- Fixed ``KeyError`` on asyncio and curio where entering and exiting a cancel scope
happens in different tasks
- Fixed deprecation warnings on Python 3.8 about the ``loop`` argument of
``asyncio.Event()``
- Forced the use of ``WindowsSelectorEventLoopPolicy`` in ``asyncio.run`` when running
the asyncio backend on Windows, to keep network functionality working
- Worker threads are now spawned with ``daemon=True`` on all backends, not just trio
- Dropped support for trio v0.11
**1.1.0**
- Added the ``lock`` parameter to ``anyio.create_condition()`` (PR by Matthias Urlichs)
- Added async iteration for queues (PR by Matthias Urlichs)
- Added capacity limiters
- Added the possibility of using capacity limiters for limiting the maximum number of
threads
- Fixed compatibility with trio v0.12
- Fixed IPv6 support in ``create_tcp_server()``, ``connect_tcp()`` and
``create_udp_socket()``
- Fixed mishandling of task cancellation while the task is running a worker thread on
asyncio and curio
**1.0.0**
- Fixed pathlib2_ compatibility with ``anyio.aopen()``
- Fixed timeouts not propagating from nested scopes on asyncio and curio (PR by Matthias
Urlichs)
- Fixed incorrect call order in socket close notifications on asyncio (mostly affecting
Windows)
- Prefixed backend module names with an underscore to better indicate privateness
.. _pathlib2: https://pypi.org/project/pathlib2/
**1.0.0rc2**
- Fixed some corner cases of cancellation where behavior on asyncio and curio did not
match with that of trio. Thanks to Joshua Oreman for help with this.
- Fixed ``current_effective_deadline()`` not taking shielded cancellation scopes into
account on asyncio and curio
- Fixed task cancellation not happening right away on asyncio and curio when a cancel
scope is entered when the deadline has already passed
- Fixed exception group containing only cancellation exceptions not being swallowed by a
timed out cancel scope on asyncio and curio
- Added the ``current_time()`` function
- Replaced ``CancelledError`` with ``get_cancelled_exc_class()``
- Added support for Hypothesis_
- Added support for :pep:`561`
- Use uvloop for the asyncio backend by default when available (but only on CPython)
.. _Hypothesis: https://hypothesis.works/
**1.0.0rc1**
- Fixed ``setsockopt()`` passing options to the underlying method in the wrong manner
- Fixed cancellation propagation from nested task groups
- Fixed ``get_running_tasks()`` returning tasks from other event loops
- Added the ``parent_id`` attribute to ``anyio.TaskInfo``
- Added the ``get_current_task()`` function
- Added guards to protect against concurrent read/write from/to sockets by multiple
tasks
- Added the ``notify_socket_close()`` function
**1.0.0b2**
- Added introspection of running tasks via ``anyio.get_running_tasks()``
- Added the ``getsockopt()`` and ``setsockopt()`` methods to the ``SocketStream`` API
- Fixed mishandling of large buffers by ``BaseSocket.sendall()``
- Fixed compatibility with (and upgraded minimum required version to) trio v0.11
**1.0.0b1**
- Initial release
anyio-4.11.0/docs/why.rst 0000664 0000000 0000000 00000063123 15064462627 0015226 0 ustar 00root root 0000000 0000000 Why you should be using AnyIO APIs instead of asyncio APIs
==========================================================
.. py:currentmodule:: anyio
AnyIO is not just a compatibility layer for bridging asyncio and Trio_. First, it comes with its own diverse set of
Trio-inspired APIs which have been designed to be a step up from asyncio. Second, asyncio has numerous design issues
and missing features that AnyIO fixes for you. There are therefore strong merits in switching to AnyIO APIs even if
you are developing an application rather than a library.
Design problems with task management
++++++++++++++++++++++++++++++++++++
While the :class:`asyncio.TaskGroup` class, introduced in Python 3.11, is a major step towards structured concurrency,
it only provides a very narrow API that severely limits its usefulness.
First and foremost, the :class:`asyncio.TaskGroup` class does not offer any way to cancel, or even list, all of the
contained tasks, so in order to do that, you would still have to keep track of every task you create. This also makes
it problematic to pass the task group to child tasks, as tracking the tasks becomes a lot more tedious in such cases.
Secondly, while AnyIO (and Trio_) has long provided a way to wait until a newly launched task signals readiness,
:class:`asyncio.TaskGroup` still does not provide any such mechanism, leaving users to devise their own, often
error-prone methods to achieve this.
How does AnyIO fix these problems?
----------------------------------
An AnyIO task group contains its own cancel scope which can be used to cancel all the child tasks, regardless of where
they were launched from. Furthermore, if the task group's cancel scope is cancelled, any tasks launched from the task
group since then are *also* automatically subject to cancellation, thus ensuring that nothing can accidentally hang the
task group and prevent it from exiting.
As for tasks signalling readiness, :ref:`here ` is an example of waiting until a child task is
ready.
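For illustration, a minimal sketch of the readiness pattern (the ``server`` function and the port number here are made
up for the example)::

    import anyio
    from anyio.abc import TaskStatus

    async def server(task_status: TaskStatus[int] = anyio.TASK_STATUS_IGNORED) -> None:
        port = 8000  # stand-in for real setup work, e.g. binding a listener
        task_status.started(port)  # signals readiness; the value is returned by start()
        await anyio.sleep_forever()

    async def main() -> None:
        async with anyio.create_task_group() as tg:
            port = await tg.start(server)  # suspends until started() is called
            print("server ready on port", port)
            tg.cancel_scope.cancel()

    anyio.run(main)
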
.. note:: In all fairness, AnyIO's task groups have their own ergonomics issues, like the inability to retrieve the
   tasks' return values and not being easily able to cancel individual tasks. This is something that
   `#890 <https://github.com/agronholm/anyio/issues/890>`_ aims to rectify.
Design problems with cancellation
+++++++++++++++++++++++++++++++++
The most significant problems with asyncio relate to its handling of cancellation. Asyncio employs a cancellation
mechanism where cancelling a task schedules a :exc:`~asyncio.CancelledError` exception to be raised in the task (once).
This mechanism is called *edge cancellation*.
The most common problem with edge cancellation is that if the task catches the :exc:`~asyncio.CancelledError` (which
often happens by accident when the user code has an ``except BaseException:`` block and doesn't re-raise the
exception), then no further action is taken, and the task keeps happily running until it is explicitly cancelled
again::

    import asyncio

    async def sleeper():
        try:
            await asyncio.sleep(1)
        except BaseException:
            pass  # the first cancellation is caught here

        # This call will never return unless the task is cancelled again
        await asyncio.sleep(float("inf"))

    async def main():
        async with asyncio.TaskGroup() as tg:
            task = tg.create_task(sleeper())
            await asyncio.sleep(0)  # ensure that the task reaches the first sleep()
            task.cancel()

        print("done")

    # Execution hangs
    asyncio.run(main())

Another issue is that if a task that has already been scheduled to resume with a value (that is, the ``await`` is
about to yield a result) is cancelled, a :exc:`~asyncio.CancelledError` will instead be raised in the task's coroutine
when it resumes, potentially causing the awaited result to be lost, even if the task catches the exception::

    import asyncio

    async def receive(f):
        print(await f)
        await asyncio.sleep(1)
        print("The task will be cancelled before this is printed")

    async def main():
        f = asyncio.get_running_loop().create_future()
        task = asyncio.create_task(receive(f))
        await asyncio.sleep(0)  # make sure the task has started
        f.set_result("hello")
        task.cancel()

        # The "hello" result is lost due to the cancellation
        try:
            await task
        except asyncio.CancelledError:
            pass

    # No output
    asyncio.run(main())

Similarly, if a newly created task is cancelled, its coroutine function may never get to run and thus react to the
cancellation. While the :external+python:doc:`asyncio documentation ` claims that:

    Tasks can easily and safely be cancelled. When a task is cancelled, :exc:`~asyncio.CancelledError` will be raised
    in the task at the next opportunity.

This is simply **not true** for tasks that are cancelled before they have had a chance to start! This is problematic in
cases where the newly launched task is responsible for managing a resource. If the task is cancelled without getting to
handle the :exc:`~asyncio.CancelledError`, it won't have a chance to close the managed resource::

    import asyncio

    class Resource:
        async def __aenter__(self):
            return self

        async def __aexit__(self, exc_type, exc_val, exc_tb):
            # Here would be the code that cleanly closes the resource
            print("closed")

    async def handle_resource(resource):
        async with resource:
            ...

    async def main():
        async with asyncio.TaskGroup() as tg:
            task = tg.create_task(handle_resource(Resource()))
            task.cancel()

    # No output
    asyncio.run(main())

.. note:: :func:`Eager task factories ` and, likewise, tasks started with
   ``eager_start=True`` do not suffer from this particular issue, as there is no opportunity to cancel the task
   before its first iteration.
Asyncio cancellation shielding is a major footgun
-------------------------------------------------
Asyncio has a function, :func:`asyncio.shield`, to shield a coroutine from cancellation. It launches a new task in such
a way that the cancellation of the task awaiting on it will not propagate to the new task.
The trouble with this is that if the host task (the task that awaits for the shielded operation to complete) **is**
cancelled, the shielded task is orphaned. If the shielded task then raises an exception, only a warning might be printed
on the console, but the exception will not propagate anywhere. Worse yet, since asyncio only holds weak references to
each task, there is nothing preventing the shielded task from being garbage collected, mid-execution::

    import asyncio
    import gc

    async def shielded_task():
        fut = asyncio.get_running_loop().create_future()
        await fut

    async def host_task():
        await asyncio.shield(shielded_task())

    async def main():
        async with asyncio.TaskGroup() as tg:
            task = tg.create_task(host_task())
            await asyncio.sleep(0)  # allow the host task to start
            task.cancel()
            await asyncio.sleep(0)  # allow the cancellation to take effect on the host task
            gc.collect()

    # Prints warning: Task was destroyed but it is pending!
    asyncio.run(main())

To make matters even worse, the shielding only prevents indirect cancellation through the host task. If the event loop
is shut down, it will automatically cancel all tasks, including the supposedly shielded one::

    import asyncio
    import signal

    async def finalizer():
        await asyncio.sleep(1)
        print("Finalizer done")

    async def main():
        ...  # the business logic goes here
        asyncio.get_running_loop().call_soon(signal.raise_signal, signal.SIGINT)  # simulate ctrl+C
        await asyncio.shield(finalizer())

    # Prints a traceback containing a KeyboardInterrupt and a CancelledError, but not the
    # "Finalizer done" message
    asyncio.run(main())

A good practical example of the issues with :func:`~asyncio.shield` can be drawn from the `Python Redis client`_ where
the incorrect use of this function was responsible for a `significant outage of ChatGPT`_. The point here is not to lay
blame on the downstream developers, but to demonstrate that :func:`~asyncio.shield` is difficult, if not impossible, to
use correctly for any practical purpose.
.. _Python Redis client: https://github.com/redis/redis-py
.. _significant outage of ChatGPT: https://openai.com/index/march-20-chatgpt-outage/
How does AnyIO fix these problems?
----------------------------------
To provide more precise and predictable cancellation control, AnyIO (and Trio_) uses *cancel scopes*. Cancel scopes
delimit sections of a coroutine function to be cancelled together. Cancel scopes are stateful in nature, meaning that
once a cancel scope has been cancelled, it stays that way. On asyncio, AnyIO cancel scopes work by cancelling the
enclosed task(s) every time they try to await on something, for as long as the task's active cancel scope is
*effectively cancelled* (i.e. either directly or via an ancestor scope). This mechanism of stateful cancellation is
called *level cancellation*.
AnyIO's cancel scopes have two notable differences from asyncio's cancellation:
#. Cancel scopes never try to cancel a task when it's scheduled to resume with a value
#. Cancel scopes always allow the task a chance to react to the cancellation
In addition to providing the ability to cancel specific code sections, cancel scopes also provide two important
features: shielding and timeouts.
Shielding a section of code from cancellation also works in a more straightforward fashion – not by launching another
task, but by preventing the propagation of cancellation from the parent cancel scope to the shielded scope.
Cancel scopes with a set *deadline* are roughly equivalent to :func:`asyncio.timeout`, except for the level cancellation
semantics and the ability to combine timeouts with shielding to easily implement finalization with a timeout. The
:func:`move_on_after` context manager is often used for this purpose.
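As a sketch of that pattern, cleanup can be both shielded from outside cancellation and capped with a timeout in one
go (the ``resource`` object here is hypothetical)::

    import anyio

    async def close_resource(resource) -> None:
        # Survives cancellation of the enclosing scope, but gives up after 5 seconds
        with anyio.move_on_after(5, shield=True):
            await resource.aclose()
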
.. note:: Shielded cancel scopes only protect against cancellation by other cancel scopes, not direct calls to
   :meth:`~asyncio.Task.cancel`.
The first asyncio example above demonstrated how a task cancellation is only delivered once, unless explicitly repeated.
But with AnyIO's cancel scopes, every attempt to yield control to the event loop from a cancelled task results in a new
:exc:`~asyncio.CancelledError`::

    import asyncio

    import anyio

    async def sleeper():
        try:
            await asyncio.sleep(1)
        except BaseException:
            pass  # the first cancellation is caught here

        # This will raise another CancelledError
        await asyncio.sleep(float("inf"))

    async def main():
        async with anyio.create_task_group() as tg:
            tg.start_soon(sleeper)
            await asyncio.sleep(0)  # ensure that the task reaches the first sleep()
            tg.cancel_scope.cancel()

        print("done")

    # Output: "done"
    asyncio.run(main())

The AnyIO version of the second example demonstrates that a task which is scheduled to resume will be able to process
the result of the ``await`` before it gets cancelled::

    import asyncio

    import anyio

    async def receive(f):
        print(await f)
        await asyncio.sleep(1)
        print("The task will be cancelled before this is printed")

    async def main():
        f = asyncio.get_running_loop().create_future()
        async with anyio.create_task_group() as tg:
            tg.start_soon(receive, f)
            await asyncio.sleep(0)  # make sure the task has started
            f.set_result("hello")
            tg.cancel_scope.cancel()

    # Output: "hello"
    asyncio.run(main())

The third example demonstrated that if a newly created task is cancelled, it does not get an opportunity to react to
the cancellation. With AnyIO's task groups, it does::

    import asyncio

    import anyio

    class Resource:
        async def __aenter__(self):
            return self

        async def __aexit__(self, exc_type, exc_val, exc_tb):
            # Here would be the code that cleanly closes the resource
            print("closed")

    async def handle_resource(resource):
        async with resource:
            ...

    async def main():
        async with anyio.create_task_group() as tg:
            tg.start_soon(handle_resource, Resource())
            tg.cancel_scope.cancel()

    # Output: "closed"
    asyncio.run(main())

.. seealso:: :doc:`cancellation`
Design problems with asyncio queues
+++++++++++++++++++++++++++++++++++
While the :class:`asyncio.Queue` class was upgraded in Python 3.13 to support the notion of shutting down, there are
still a number of issues with it:
#. Queues are unbounded by default
#. Queues don't support async iteration
#. Queue shutdown doesn't play nice with multiple producers
The problem with unbounded queues is that careless use of such queues may cause runaway memory use and thus lead to out
of memory errors. This default behavior is unfortunately unfixable due to backwards compatibility reasons.
The second problem is mostly an ergonomics issue. A PR was made to address this, but was
`declined `_.
The third problem manifests itself when multiple producer tasks put items into the same queue. If one producer shuts
down the queue, the others will get unwarranted errors when trying to put more items into the queue. Therefore the
producers either need another means to coordinate the queue shutdown, or they need to be launched in a task group in
such a manner that the host task shuts down the queue after the producer tasks have exited. Either way, the design is
not ideal for multiple producer tasks.
.. _queue_fix:
How does AnyIO fix these problems?
----------------------------------
AnyIO offers an alternative to queues: :ref:`memory object streams `. They were modeled after
Trio's `memory channels`_. When you create a memory object stream, you get a "send" stream and a "receive" stream. The
separation is necessary for the purpose of cloning (explained below). By default, memory object streams have an item
capacity of 0, meaning the stream does not store anything. In other words, a send operation will not complete until
another task shows up to receive the item.
Memory object streams support cloning. This enables each consumer and producer task to close its own clone of the
receive or send stream. Only after all clones have been closed is the respective send or receive memory object stream
considered to be closed.
Unlike :class:`asyncio.Queue`, memory object receive streams support async iteration. The ``async for`` loop then ends
naturally when all send stream clones have been closed. For send streams, attempting to send an item when all receive
stream clones have been closed raises a :exc:`BrokenResourceError`.
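A minimal sketch of the producer/consumer pattern described above::

    import anyio

    async def producer(send_stream) -> None:
        async with send_stream:  # closes this clone of the send stream on exit
            for i in range(3):
                await send_stream.send(i)

    async def main() -> None:
        send, receive = anyio.create_memory_object_stream[int]()
        async with anyio.create_task_group() as tg:
            tg.start_soon(producer, send)
            async with receive:
                async for item in receive:
                    print(item)  # the loop ends once all send stream clones are closed

    anyio.run(main)
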
Memory object streams also provide better debugging facilities via the
:meth:`~.streams.memory.MemoryObjectReceiveStream.statistics` method which can tell you:
* the number of queued items
* the number of open send and receive streams
* how many tasks are waiting to send or receive to/from the stream
.. _memory channels: https://trio.readthedocs.io/en/stable/reference-core.html#using-channels-to-pass-values-between-tasks
Design problems with the streams API
++++++++++++++++++++++++++++++++++++
While asyncio provides a limited set of `stream classes`_, their callback-based design unfortunately shines through in
the API. First of all, unlike with regular sockets, you get separate reader and writer objects instead of the
full-duplex stream you would essentially get from the :mod:`socket` functions. Second, in order to send data to the stream,
you have to first call the synchronous :meth:`~asyncio.StreamWriter.write` method which adds data to the internal
buffer, and then you have to remember to call the coroutine method :meth:`~asyncio.StreamWriter.drain` which then
*actually* causes the data to be written to the underlying socket. Likewise, when you close a stream, you first have to
call :meth:`~asyncio.StreamWriter.close` and *then* await on :meth:`~asyncio.StreamWriter.wait_closed` to make sure the
stream has *actually* closed! To add insult to injury, these classes don't even support the async context manager
protocol so you can't just do ``async with writer: ...``.
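To make the point concrete, here is roughly what that dance looks like with plain asyncio streams (host and port are
placeholders)::

    import asyncio

    async def send_once(host: str, port: int) -> None:
        reader, writer = await asyncio.open_connection(host, port)
        writer.write(b"hello")      # only appends the data to an internal buffer
        await writer.drain()        # actually flushes the buffer to the socket
        writer.close()              # only initiates the close
        await writer.wait_closed()  # actually waits for the stream to close
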
Another issue lies with the :meth:`~asyncio.StreamWriter.get_extra_info` method asyncio provides to get information like
the remote address for socket connections, or the raw socket object:
#. This method only exists in the writer class, not the reader (for whatever reason).
#. It returns a dictionary, so to get the information you want, you'll need to access one of the keys in the returned
dict, based on the documentation.
#. It is not type safe, as Typeshed specifies the return type as ``dict[str, Any]``. Therefore, static type checkers
cannot check the correctness of any access to the returned dict based on either the keys or the value types.
.. _stream classes: https://docs.python.org/3/library/asyncio-stream.html
How does AnyIO fix these problems?
----------------------------------
AnyIO comes with a hierarchy of base stream classes:
* :class:`~.abc.UnreliableObjectStream`, :class:`~.abc.UnreliableObjectReceiveStream` and
:class:`~.abc.UnreliableObjectSendStream`: for transporting objects; no guarantees of reliable or ordered delivery,
just like with UDP sockets
* :class:`~.abc.ObjectStream`, :class:`~.abc.ObjectReceiveStream`, :class:`~.abc.ObjectSendStream`: like the above, but
with added guarantees about reliable and ordered delivery
* :class:`~.abc.ByteStream`, :class:`~.abc.ByteReceiveStream`, :class:`~.abc.ByteSendStream`: for transporting bytes;
may split chunks arbitrarily, just like TCP sockets
* :class:`~.abc.SocketStream`: byte streams backed by actual sockets
These interfaces are then implemented by a number of concrete classes, such as:
* :class:`~.streams.memory.MemoryObjectReceiveStream` and :class:`~.streams.memory.MemoryObjectSendStream`: for
exchanging arbitrary objects between tasks within the same process (see :ref:`this section ` for the
rationale for the sender/receiver split)
* :class:`~.streams.buffered.BufferedByteReceiveStream` and :class:`~.streams.buffered.BufferedByteStream`: for adapting
bytes-oriented object streams into byte streams, and for supporting read operations that require a buffer, such as
needing to read a precise amount of bytes, or reading up to a specific delimiter
* :class:`~.streams.tls.TLSStream`: for using TLS encryption over any arbitrary (bytes-oriented) stream
* :class:`~.streams.text.TextReceiveStream` and :class:`~.streams.text.TextStream`: for turning a bytes-oriented stream
into a unicode string-oriented stream
* :class:`~.streams.file.FileReadStream` and :class:`~.streams.file.FileWriteStream`: for reading from or writing to
files
* :class:`~.streams.stapled.StapledObjectStream` and :class:`~.streams.stapled.StapledByteStream`: for combining
different read and write streams into full-duplex streams
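For example, a :class:`~.streams.buffered.BufferedByteReceiveStream` can wrap any byte receive stream when exact reads
are needed (a sketch; the wire format here is made up)::

    from anyio.abc import ByteReceiveStream
    from anyio.streams.buffered import BufferedByteReceiveStream

    async def read_frame(stream: ByteReceiveStream) -> bytes:
        buffered = BufferedByteReceiveStream(stream)
        # Read a line-delimited header, then a fixed-size payload
        header = await buffered.receive_until(b"\r\n", max_bytes=1024)
        payload = await buffered.receive_exactly(16)
        return header + payload
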
.. important:: In contrast with regular sockets or asyncio streams, AnyIO streams raise :exc:`EndOfStream` instead
   of returning an empty bytes object or ``None`` when there is no more data to be read.
.. seealso:: :doc:`streams`
As a counterpart to :meth:`~asyncio.StreamWriter.get_extra_info`, AnyIO offers a system of typed attributes where
stream classes (and any kind of class, really) can offer such extra information in a type-safe manner. This is
especially useful with stream wrappers such as :class:`~.streams.tls.TLSStream`. Stream wrapper classes like that can
pass through any typed attributes from the wrapped stream while adding their own on top. They can also choose to
override any attributes they like, all the while preserving type safety.
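A short sketch of a typed attribute lookup on a TCP connection (host and port are placeholders)::

    from anyio import connect_tcp
    from anyio.abc import SocketAttribute

    async def show_peer() -> None:
        async with await connect_tcp("example.org", 80) as stream:
            # A typed lookup; static type checkers know the value's exact type
            print(stream.extra(SocketAttribute.remote_address))
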
.. seealso:: :doc:`typedattrs`
Design problems with the thread API
+++++++++++++++++++++++++++++++++++
Asyncio comes with two ways to call blocking code in worker threads, each with its own caveats:
#. :func:`asyncio.to_thread`
#. :meth:`asyncio.loop.run_in_executor`
The first function is the more modern one, and supports :mod:`contextvar ` propagation. However, there is
no way to use it with a thread pool executor other than the default. And due to the design decision of allowing the
pass-through of arbitrary positional and keyword arguments, no such option can ever be added without breaking backwards
compatibility. The second function, on the other hand, allows for explicitly specifying a thread pool to use, but it
doesn't support context variable propagation.
Another inconvenience comes from the inability to synchronously call synchronous functions in the event loop thread from
a worker thread. That is, running a synchronous function in the event loop thread and then returning its return value
from that call. While asyncio provides a way to do this for coroutine functions
(:func:`~asyncio.run_coroutine_threadsafe`), there is no counterpart for synchronous functions. The closest match would
be :meth:`~asyncio.loop.call_soon_threadsafe`, but this function only schedules a callback to be run on the event loop
thread and does not provide any means to retrieve the return value.
How does AnyIO fix these problems?
----------------------------------
AnyIO uses its own thread pooling mechanism, based on :ref:`capacity limiters ` which are similar to
semaphores. To call a function in a worker thread, you would use :func:`.to_thread.run_sync`. This function can be
passed a specific capacity limiter to count against. All worker threads are spawned in a thread pool specific to the
current event loop. More worker threads are spawned as necessary, so long as the capacity limiter allows it, and the
pool is homogeneous: idle threads in it are reused by any later call to :func:`.to_thread.run_sync`, regardless of
which capacity limiter was passed to the call that spawned them.
.. note:: :func:`anyio.to_thread.run_sync` propagates context variables just like :func:`asyncio.to_thread`.
From inside AnyIO worker threads, you can call functions in the event loop thread using :func:`.from_thread.run` and
:func:`.from_thread.run_sync`, for coroutine functions and synchronous functions, respectively. The former is a direct
counterpart to asyncio's :func:`~asyncio.run_coroutine_threadsafe`, but the latter will wait for the function to run and
return its return value, unlike :meth:`~asyncio.loop.call_soon_threadsafe`.
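Putting those pieces together (a sketch; ``blocking_work`` is a made-up example function)::

    import anyio
    from anyio import from_thread, to_thread

    def blocking_work() -> str:
        # Runs in a worker thread; calls a sync function back in the event loop thread
        return from_thread.run_sync(lambda: "hello from the event loop thread")

    async def main() -> None:
        limiter = anyio.CapacityLimiter(4)  # at most 4 concurrent calls via this limiter
        print(await to_thread.run_sync(blocking_work, limiter=limiter))

    anyio.run(main)
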
Design problems with signal handling APIs
+++++++++++++++++++++++++++++++++++++++++
Asyncio only provides facilities to set or remove signal handlers. The :meth:`~asyncio.loop.add_signal_handler` method
will replace any existing handler for that signal, and won't return the previous handler for potential chaining. There
is also no way to get the current handler for a signal.
AnyIO provides an alternate mechanism to handle signals with its :func:`open_signal_receiver` context manager.
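A minimal sketch of its use::

    import signal

    import anyio

    async def main() -> None:
        with anyio.open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
            async for signum in signals:
                print("received signal", signum)
                break

    anyio.run(main)
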
.. seealso:: :doc:`signals`
Missing file I/O and async path support
+++++++++++++++++++++++++++++++++++++++
Asyncio contains no facilities to help with file I/O, forcing you to use :func:`~asyncio.to_thread` or
:meth:`~asyncio.loop.run_in_executor` with every single file operation to prevent blocking the event loop thread.
To overcome this shortcoming, users often turn to libraries such as aiofiles_ and aiopath_ which offer async interfaces
for file and path access. However, AnyIO provides its own set of async file I/O APIs, including an async compatible
counterpart for the :class:`~pathlib.Path` class. Additionally, it should be noted that AnyIO provides
:ref:`file streams ` compatible with its stream class hierarchy.
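A small sketch of the async file APIs (the file name is made up)::

    import anyio

    async def main() -> None:
        path = anyio.Path("example.txt")
        await path.write_text("hello")
        async with await anyio.open_file(path) as f:
            print(await f.read())

    anyio.run(main)
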
.. seealso:: :doc:`fileio`
.. _aiofiles: https://github.com/Tinche/aiofiles
.. _aiopath: https://github.com/alexdelorenzo/aiopath
Features not in asyncio which you might be interested in
++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AnyIO doesn't just offer replacements for asyncio APIs, but provides a bunch of its own conveniences which you may find
helpful.
Built-in pytest plugin
----------------------
AnyIO contains its own pytest_ plugin for running asynchronous tests. It can completely replace pytest-asyncio_ for
testing asynchronous code. It is somewhat simpler to use too, in addition to supporting more event loop implementations
such as Trio_.
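A sketch of what a test module looks like with the plugin (overriding the ``anyio_backend`` fixture is optional)::

    import anyio
    import pytest

    @pytest.fixture
    def anyio_backend():
        return "trio"  # run these tests on Trio instead of the default asyncio

    @pytest.mark.anyio
    async def test_sleep() -> None:
        await anyio.sleep(0)
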
.. seealso:: :doc:`testing`
.. _pytest: https://docs.pytest.org/
.. _pytest-asyncio: https://github.com/pytest-dev/pytest-asyncio
Connectables
------------
To complement its stream class hierarchy, AnyIO offers an abstraction for producing connected streams, either object or
bytes-oriented. This can be very useful when writing network clients, as abstracting out the connection mechanism allows
for a lot of customization, including mocking connections without having to resort to monkey patching.
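As a rough sketch of the idea (the exact constructor arguments and the ``connect()`` method shown here are
assumptions for illustration, not verified signatures)::

    async def request(connectable) -> bytes:
        # Assumed interface: connect() returns a connected byte stream
        async with await connectable.connect() as stream:
            await stream.send(b"ping")
            return await stream.receive()

Production code might hand ``request()`` a TCP connectable, while a test passes any object exposing the same
``connect()`` method.
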
.. seealso:: :ref:`connectables`
Context manager mix-in classes
------------------------------
AnyIO provides mix-in classes for safely implementing context managers which embed other context managers. Typically
this would require implementing ``__aenter__`` and ``__aexit__``, often forcing these classes to store state in the
instance and to deal with exceptions raised in ``__aenter__()``. The context manager mix-ins allow you to replace
these method pairs with a single method where you write your logic just like with
:func:`@asynccontextmanager <contextlib.asynccontextmanager>`, albeit at the cost of sacrificing re-entrancy.
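A sketch, assuming the ``__asynccontextmanager__()`` hook described in the context manager documentation::

    from collections.abc import AsyncIterator
    from contextlib import asynccontextmanager

    from anyio import AsyncContextManagerMixin, create_task_group

    class Service(AsyncContextManagerMixin):
        @asynccontextmanager
        async def __asynccontextmanager__(self) -> AsyncIterator["Service"]:
            async with create_task_group() as tg:
                self.task_group = tg
                yield self
                tg.cancel_scope.cancel()
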
.. seealso:: :doc:`contextmanagers`
.. _Trio: https://github.com/python-trio/trio
anyio-4.11.0/pyproject.toml 0000664 0000000 0000000 00000011276 15064462627 0015653 0 ustar 00root root 0000000 0000000 [build-system]
requires = [
"setuptools >= 77",
"setuptools_scm >= 6.4"
]
build-backend = "setuptools.build_meta"
[project]
name = "anyio"
description = "High-level concurrency and networking framework on top of asyncio or Trio"
readme = "README.rst"
authors = [{name = "Alex Grönholm", email = "alex.gronholm@nextday.fi"}]
license = "MIT"
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Framework :: AnyIO",
"Typing :: Typed",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3.14",
]
requires-python = ">= 3.9"
dependencies = [
"exceptiongroup >= 1.0.2; python_version < '3.11'",
"idna >= 2.8",
"sniffio >= 1.1",
"typing_extensions >= 4.5; python_version < '3.13'",
]
dynamic = ["version"]
[project.urls]
Documentation = "https://anyio.readthedocs.io/en/latest/"
Changelog = "https://anyio.readthedocs.io/en/stable/versionhistory.html"
"Source code" = "https://github.com/agronholm/anyio"
"Issue tracker" = "https://github.com/agronholm/anyio/issues"
[project.optional-dependencies]
trio = ["trio >= 0.31.0"]
[project.entry-points]
pytest11 = {anyio = "anyio.pytest_plugin"}
[dependency-groups]
test = [
"anyio[trio]",
"blockbuster >= 1.5.23",
"coverage[toml] >= 7",
"exceptiongroup >= 1.2.0",
"hypothesis >= 4.0",
"psutil >= 5.9",
"pytest >= 7.0",
"pytest-mock >= 3.14",
"trustme",
"truststore >= 0.9.1; python_version >= '3.10'",
"""\
uvloop >= 0.21; platform_python_implementation == 'CPython' \
and platform_system != 'Windows' \
and python_version < '3.14'\
"""
]
doc = [
"packaging",
"Sphinx ~= 8.2",
"sphinx_rtd_theme",
"sphinx-autodoc-typehints >= 1.2.0",
"sphinx-tabs >= 3.3.1",
]
[tool.setuptools_scm]
version_scheme = "post-release"
local_scheme = "dirty-tag"
[tool.ruff]
src = ["src"]
[tool.ruff.lint]
extend-select = [
"ASYNC", # flake8-async
"B", # flake8-bugbear
"C4", # flake8-comprehensions
"G", # flake8-logging-format
"I", # isort
"ISC", # flake8-implicit-str-concat
"PERF", # flake8-performance
"PGH", # pygrep-hooks
"RUF100", # unused noqa (yesqa)
"T201", # print
"UP", # pyupgrade
"W", # pycodestyle warnings
]
ignore = ["B009", "PERF203"]
[tool.ruff.lint.isort]
"required-imports" = ["from __future__ import annotations"]
[tool.ruff.lint.per-file-ignores]
"tests/test_tempfile.py" = ["ASYNC230"]
[tool.mypy]
python_version = "3.13"
strict = true
disallow_any_generics = false
warn_return_any = false
disallow_untyped_decorators = false
[tool.pytest.ini_options]
addopts = "-rsfE --tb=short --strict-config --strict-markers -p anyio -p pytest_mock -p no:asyncio -p no:trio"
testpaths = ["tests"]
anyio_mode = "auto"
xfail_strict = true
filterwarnings = [
"error",
# Ignore resource warnings due to a CPython/Windows bug (https://bugs.python.org/issue44428)
"ignore:unclosed transport <_ProactorSocketTransport.*:ResourceWarning",
# Workaround for Python 3.9.7 (see https://bugs.python.org/issue45097)
"ignore:The loop argument is deprecated since Python 3\\.8, and scheduled for removal in Python 3\\.10\\.:DeprecationWarning:asyncio",
]
markers = [
"network: marks tests as requiring Internet access",
]
[tool.codespell]
ignore-words-list = "asend,daa,hel"
[tool.coverage.run]
source = ["anyio"]
relative_files = true
[tool.coverage.report]
show_missing = true
exclude_also = [
"if TYPE_CHECKING:",
"@(abc\\.)?abstractmethod",
]
[tool.tox]
env_list = ["pre-commit", "py39", "py310", "py311", "py312", "py313", "py314", "pypy3"]
skip_missing_interpreters = true
requires = ["tox >= 4.22"]
[tool.tox.env_run_base]
depends = ["pre-commit"]
package = "editable"
commands = [["coverage", "run", "-m", "pytest", { replace = "posargs", extend = true }]]
dependency_groups = ["test"]
set_env = { PYTEST_DISABLE_PLUGIN_AUTOLOAD = "1" }
[tool.tox.env.pypy3]
commands = [["pytest", { replace = "posargs", extend = true }]]
[tool.tox.env.pre-commit]
commands = [["pre-commit", "run", "--all-files"]]
depends = []
allowlist_externals = ["pre-commit"]
package = "skip"
[tool.tox.env.pyright]
deps = ["pyright"]
commands = [["pyright", "--ignoreexternal", "--verifytypes", "anyio"]]
[tool.tox.env.docs]
depends = []
dependency_groups = ["doc"]
commands = [["sphinx-build", "-n", "docs", "build/sphinx"]]
anyio-4.11.0/src/ 0000775 0000000 0000000 00000000000 15064462627 0013517 5 ustar 00root root 0000000 0000000 anyio-4.11.0/src/anyio/ 0000775 0000000 0000000 00000000000 15064462627 0014636 5 ustar 00root root 0000000 0000000 anyio-4.11.0/src/anyio/__init__.py 0000664 0000000 0000000 00000013713 15064462627 0016754 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from ._core._contextmanagers import AsyncContextManagerMixin as AsyncContextManagerMixin
from ._core._contextmanagers import ContextManagerMixin as ContextManagerMixin
from ._core._eventloop import current_time as current_time
from ._core._eventloop import get_all_backends as get_all_backends
from ._core._eventloop import get_cancelled_exc_class as get_cancelled_exc_class
from ._core._eventloop import run as run
from ._core._eventloop import sleep as sleep
from ._core._eventloop import sleep_forever as sleep_forever
from ._core._eventloop import sleep_until as sleep_until
from ._core._exceptions import BrokenResourceError as BrokenResourceError
from ._core._exceptions import BrokenWorkerInterpreter as BrokenWorkerInterpreter
from ._core._exceptions import BrokenWorkerProcess as BrokenWorkerProcess
from ._core._exceptions import BusyResourceError as BusyResourceError
from ._core._exceptions import ClosedResourceError as ClosedResourceError
from ._core._exceptions import ConnectionFailed as ConnectionFailed
from ._core._exceptions import DelimiterNotFound as DelimiterNotFound
from ._core._exceptions import EndOfStream as EndOfStream
from ._core._exceptions import IncompleteRead as IncompleteRead
from ._core._exceptions import NoEventLoopError as NoEventLoopError
from ._core._exceptions import RunFinishedError as RunFinishedError
from ._core._exceptions import TypedAttributeLookupError as TypedAttributeLookupError
from ._core._exceptions import WouldBlock as WouldBlock
from ._core._fileio import AsyncFile as AsyncFile
from ._core._fileio import Path as Path
from ._core._fileio import open_file as open_file
from ._core._fileio import wrap_file as wrap_file
from ._core._resources import aclose_forcefully as aclose_forcefully
from ._core._signals import open_signal_receiver as open_signal_receiver
from ._core._sockets import TCPConnectable as TCPConnectable
from ._core._sockets import UNIXConnectable as UNIXConnectable
from ._core._sockets import as_connectable as as_connectable
from ._core._sockets import connect_tcp as connect_tcp
from ._core._sockets import connect_unix as connect_unix
from ._core._sockets import create_connected_udp_socket as create_connected_udp_socket
from ._core._sockets import (
create_connected_unix_datagram_socket as create_connected_unix_datagram_socket,
)
from ._core._sockets import create_tcp_listener as create_tcp_listener
from ._core._sockets import create_udp_socket as create_udp_socket
from ._core._sockets import create_unix_datagram_socket as create_unix_datagram_socket
from ._core._sockets import create_unix_listener as create_unix_listener
from ._core._sockets import getaddrinfo as getaddrinfo
from ._core._sockets import getnameinfo as getnameinfo
from ._core._sockets import notify_closing as notify_closing
from ._core._sockets import wait_readable as wait_readable
from ._core._sockets import wait_socket_readable as wait_socket_readable
from ._core._sockets import wait_socket_writable as wait_socket_writable
from ._core._sockets import wait_writable as wait_writable
from ._core._streams import create_memory_object_stream as create_memory_object_stream
from ._core._subprocesses import open_process as open_process
from ._core._subprocesses import run_process as run_process
from ._core._synchronization import CapacityLimiter as CapacityLimiter
from ._core._synchronization import (
CapacityLimiterStatistics as CapacityLimiterStatistics,
)
from ._core._synchronization import Condition as Condition
from ._core._synchronization import ConditionStatistics as ConditionStatistics
from ._core._synchronization import Event as Event
from ._core._synchronization import EventStatistics as EventStatistics
from ._core._synchronization import Lock as Lock
from ._core._synchronization import LockStatistics as LockStatistics
from ._core._synchronization import ResourceGuard as ResourceGuard
from ._core._synchronization import Semaphore as Semaphore
from ._core._synchronization import SemaphoreStatistics as SemaphoreStatistics
from ._core._tasks import TASK_STATUS_IGNORED as TASK_STATUS_IGNORED
from ._core._tasks import CancelScope as CancelScope
from ._core._tasks import create_task_group as create_task_group
from ._core._tasks import current_effective_deadline as current_effective_deadline
from ._core._tasks import fail_after as fail_after
from ._core._tasks import move_on_after as move_on_after
from ._core._tempfile import NamedTemporaryFile as NamedTemporaryFile
from ._core._tempfile import SpooledTemporaryFile as SpooledTemporaryFile
from ._core._tempfile import TemporaryDirectory as TemporaryDirectory
from ._core._tempfile import TemporaryFile as TemporaryFile
from ._core._tempfile import gettempdir as gettempdir
from ._core._tempfile import gettempdirb as gettempdirb
from ._core._tempfile import mkdtemp as mkdtemp
from ._core._tempfile import mkstemp as mkstemp
from ._core._testing import TaskInfo as TaskInfo
from ._core._testing import get_current_task as get_current_task
from ._core._testing import get_running_tasks as get_running_tasks
from ._core._testing import wait_all_tasks_blocked as wait_all_tasks_blocked
from ._core._typedattr import TypedAttributeProvider as TypedAttributeProvider
from ._core._typedattr import TypedAttributeSet as TypedAttributeSet
from ._core._typedattr import typed_attribute as typed_attribute
# Re-export imports so they look like they live directly in this package
for __value in list(locals().values()):
    if getattr(__value, "__module__", "").startswith("anyio."):
        __value.__module__ = __name__

del __value
def __getattr__(attr: str) -> type[BrokenWorkerInterpreter]:
    """Support deprecated aliases."""
    if attr == "BrokenWorkerIntepreter":
        import warnings

        warnings.warn(
            "The 'BrokenWorkerIntepreter' alias is deprecated, use "
            "'BrokenWorkerInterpreter' instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return BrokenWorkerInterpreter

    raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")
anyio-4.11.0/src/anyio/_backends/ 0000775 0000000 0000000 00000000000 15064462627 0016547 5 ustar 00root root 0000000 0000000 anyio-4.11.0/src/anyio/_backends/__init__.py 0000664 0000000 0000000 00000000000 15064462627 0020646 0 ustar 00root root 0000000 0000000 anyio-4.11.0/src/anyio/_backends/_asyncio.py 0000664 0000000 0000000 00000277404 15064462627 0020743 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import array
import asyncio
import concurrent.futures
import contextvars
import math
import os
import socket
import sys
import threading
import weakref
from asyncio import (
    AbstractEventLoop,
    CancelledError,
    all_tasks,
    create_task,
    current_task,
    get_running_loop,
    sleep,
)
from asyncio.base_events import _run_until_complete_cb # type: ignore[attr-defined]
from collections import OrderedDict, deque
from collections.abc import (
AsyncGenerator,
AsyncIterator,
Awaitable,
Callable,
Collection,
Coroutine,
Iterable,
Sequence,
)
from concurrent.futures import Future
from contextlib import AbstractContextManager, suppress
from contextvars import Context, copy_context
from dataclasses import dataclass
from functools import partial, wraps
from inspect import (
CORO_RUNNING,
CORO_SUSPENDED,
getcoroutinestate,
iscoroutine,
)
from io import IOBase
from os import PathLike
from queue import Queue
from signal import Signals
from socket import AddressFamily, SocketKind
from threading import Thread
from types import CodeType, TracebackType
from typing import (
IO,
TYPE_CHECKING,
Any,
Optional,
TypeVar,
cast,
)
from weakref import WeakKeyDictionary
import sniffio
from .. import (
CapacityLimiterStatistics,
EventStatistics,
LockStatistics,
TaskInfo,
abc,
)
from .._core._eventloop import claim_worker_thread, threadlocals
from .._core._exceptions import (
BrokenResourceError,
BusyResourceError,
ClosedResourceError,
EndOfStream,
RunFinishedError,
WouldBlock,
iterate_exceptions,
)
from .._core._sockets import convert_ipv6_sockaddr
from .._core._streams import create_memory_object_stream
from .._core._synchronization import (
CapacityLimiter as BaseCapacityLimiter,
)
from .._core._synchronization import Event as BaseEvent
from .._core._synchronization import Lock as BaseLock
from .._core._synchronization import (
ResourceGuard,
SemaphoreStatistics,
)
from .._core._synchronization import Semaphore as BaseSemaphore
from .._core._tasks import CancelScope as BaseCancelScope
from ..abc import (
AsyncBackend,
IPSockAddrType,
SocketListener,
UDPPacketType,
UNIXDatagramPacketType,
)
from ..abc._eventloop import StrOrBytesPath
from ..lowlevel import RunVar
from ..streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
if TYPE_CHECKING:
from _typeshed import FileDescriptorLike
else:
FileDescriptorLike = object
if sys.version_info >= (3, 10):
from typing import ParamSpec
else:
from typing_extensions import ParamSpec
if sys.version_info >= (3, 11):
from asyncio import Runner
from typing import TypeVarTuple, Unpack
else:
import contextvars
import enum
import signal
from asyncio import coroutines, events, exceptions, tasks
from exceptiongroup import BaseExceptionGroup
from typing_extensions import TypeVarTuple, Unpack
class _State(enum.Enum):
CREATED = "created"
INITIALIZED = "initialized"
CLOSED = "closed"
class Runner:
# Copied from CPython 3.11
def __init__(
self,
*,
debug: bool | None = None,
loop_factory: Callable[[], AbstractEventLoop] | None = None,
):
self._state = _State.CREATED
self._debug = debug
self._loop_factory = loop_factory
self._loop: AbstractEventLoop | None = None
self._context = None
self._interrupt_count = 0
self._set_event_loop = False
def __enter__(self) -> Runner:
self._lazy_init()
return self
def __exit__(
self,
exc_type: type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> None:
self.close()
def close(self) -> None:
"""Shutdown and close event loop."""
if self._state is not _State.INITIALIZED:
return
try:
loop = self._loop
_cancel_all_tasks(loop)
loop.run_until_complete(loop.shutdown_asyncgens())
if hasattr(loop, "shutdown_default_executor"):
loop.run_until_complete(loop.shutdown_default_executor())
else:
loop.run_until_complete(_shutdown_default_executor(loop))
finally:
if self._set_event_loop:
events.set_event_loop(None)
loop.close()
self._loop = None
self._state = _State.CLOSED
def get_loop(self) -> AbstractEventLoop:
"""Return embedded event loop."""
self._lazy_init()
return self._loop
        def run(
            self, coro: Coroutine[Any, Any, T_Retval], *, context=None
        ) -> T_Retval:
"""Run a coroutine inside the embedded event loop."""
if not coroutines.iscoroutine(coro):
raise ValueError(f"a coroutine was expected, got {coro!r}")
if events._get_running_loop() is not None:
# fail fast with short traceback
raise RuntimeError(
"Runner.run() cannot be called from a running event loop"
)
self._lazy_init()
if context is None:
context = self._context
task = context.run(self._loop.create_task, coro)
if (
threading.current_thread() is threading.main_thread()
and signal.getsignal(signal.SIGINT) is signal.default_int_handler
):
sigint_handler = partial(self._on_sigint, main_task=task)
try:
signal.signal(signal.SIGINT, sigint_handler)
except ValueError:
# `signal.signal` may throw if `threading.main_thread` does
# not support signals (e.g. embedded interpreter with signals
# not registered - see gh-91880)
sigint_handler = None
else:
sigint_handler = None
self._interrupt_count = 0
try:
return self._loop.run_until_complete(task)
except exceptions.CancelledError:
if self._interrupt_count > 0:
uncancel = getattr(task, "uncancel", None)
if uncancel is not None and uncancel() == 0:
raise KeyboardInterrupt # noqa: B904
raise # CancelledError
finally:
if (
sigint_handler is not None
and signal.getsignal(signal.SIGINT) is sigint_handler
):
signal.signal(signal.SIGINT, signal.default_int_handler)
def _lazy_init(self) -> None:
if self._state is _State.CLOSED:
raise RuntimeError("Runner is closed")
if self._state is _State.INITIALIZED:
return
if self._loop_factory is None:
self._loop = events.new_event_loop()
if not self._set_event_loop:
# Call set_event_loop only once to avoid calling
# attach_loop multiple times on child watchers
events.set_event_loop(self._loop)
self._set_event_loop = True
else:
self._loop = self._loop_factory()
if self._debug is not None:
self._loop.set_debug(self._debug)
self._context = contextvars.copy_context()
self._state = _State.INITIALIZED
def _on_sigint(self, signum, frame, main_task: asyncio.Task) -> None:
self._interrupt_count += 1
if self._interrupt_count == 1 and not main_task.done():
main_task.cancel()
# wakeup loop if it is blocked by select() with long timeout
self._loop.call_soon_threadsafe(lambda: None)
return
raise KeyboardInterrupt()
def _cancel_all_tasks(loop: AbstractEventLoop) -> None:
to_cancel = tasks.all_tasks(loop)
if not to_cancel:
return
for task in to_cancel:
task.cancel()
loop.run_until_complete(tasks.gather(*to_cancel, return_exceptions=True))
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
loop.call_exception_handler(
{
"message": "unhandled exception during asyncio.run() shutdown",
"exception": task.exception(),
"task": task,
}
)
async def _shutdown_default_executor(loop: AbstractEventLoop) -> None:
"""Schedule the shutdown of the default executor."""
def _do_shutdown(future: asyncio.futures.Future) -> None:
try:
loop._default_executor.shutdown(wait=True) # type: ignore[attr-defined]
loop.call_soon_threadsafe(future.set_result, None)
except Exception as ex:
loop.call_soon_threadsafe(future.set_exception, ex)
loop._executor_shutdown_called = True
if loop._default_executor is None:
return
future = loop.create_future()
thread = threading.Thread(target=_do_shutdown, args=(future,))
thread.start()
try:
await future
finally:
thread.join()
T_Retval = TypeVar("T_Retval")
T_contra = TypeVar("T_contra", contravariant=True)
PosArgsT = TypeVarTuple("PosArgsT")
P = ParamSpec("P")
_root_task: RunVar[asyncio.Task | None] = RunVar("_root_task")
def find_root_task() -> asyncio.Task:
root_task = _root_task.get(None)
if root_task is not None and not root_task.done():
return root_task
# Look for a task that has been started via run_until_complete()
for task in all_tasks():
if task._callbacks and not task.done():
callbacks = [cb for cb, context in task._callbacks]
for cb in callbacks:
if (
cb is _run_until_complete_cb
or getattr(cb, "__module__", None) == "uvloop.loop"
):
_root_task.set(task)
return task
# Look up the topmost task in the AnyIO task tree, if possible
task = cast(asyncio.Task, current_task())
state = _task_states.get(task)
if state:
cancel_scope = state.cancel_scope
while cancel_scope and cancel_scope._parent_scope is not None:
cancel_scope = cancel_scope._parent_scope
if cancel_scope is not None:
return cast(asyncio.Task, cancel_scope._host_task)
return task
def get_callable_name(func: Callable) -> str:
module = getattr(func, "__module__", None)
qualname = getattr(func, "__qualname__", None)
return ".".join([x for x in (module, qualname) if x])
#
# Event loop
#
_run_vars: WeakKeyDictionary[asyncio.AbstractEventLoop, Any] = WeakKeyDictionary()
def _task_started(task: asyncio.Task) -> bool:
"""Return ``True`` if the task has been started and has not finished."""
# The task coro should never be None here, as we never add finished tasks to the
# task list
coro = task.get_coro()
assert coro is not None
try:
return getcoroutinestate(coro) in (CORO_RUNNING, CORO_SUSPENDED)
except AttributeError:
        # task coro is async_generator_asend (https://bugs.python.org/issue37771)
raise Exception(f"Cannot determine if task {task} has started or not") from None
#
# Timeouts and cancellation
#
def is_anyio_cancellation(exc: CancelledError) -> bool:
# Sometimes third party frameworks catch a CancelledError and raise a new one, so as
# a workaround we have to look at the previous ones in __context__ too for a
# matching cancel message
while True:
if (
exc.args
and isinstance(exc.args[0], str)
and exc.args[0].startswith("Cancelled via cancel scope ")
):
return True
if isinstance(exc.__context__, CancelledError):
exc = exc.__context__
continue
return False
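# Sketch (editor's note) of the situation the __context__ walk above handles: a
# third-party framework catches the scope's CancelledError and raises a fresh one,
# pushing the original (whose args carry the "Cancelled via cancel scope ..."
# message) into __context__. The hex id below is an illustrative placeholder:
#
#     try:
#         raise CancelledError("Cancelled via cancel scope deadbeef")
#     except CancelledError:
#         try:
#             raise CancelledError()  # framework re-raise, no cancel message
#         except CancelledError as exc:
#             assert is_anyio_cancellation(exc)  # found via exc.__context__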
class CancelScope(BaseCancelScope):
def __new__(
cls, *, deadline: float = math.inf, shield: bool = False
) -> CancelScope:
return object.__new__(cls)
def __init__(self, deadline: float = math.inf, shield: bool = False):
self._deadline = deadline
self._shield = shield
self._parent_scope: CancelScope | None = None
self._child_scopes: set[CancelScope] = set()
self._cancel_called = False
self._cancel_reason: str | None = None
self._cancelled_caught = False
self._active = False
self._timeout_handle: asyncio.TimerHandle | None = None
self._cancel_handle: asyncio.Handle | None = None
self._tasks: set[asyncio.Task] = set()
self._host_task: asyncio.Task | None = None
if sys.version_info >= (3, 11):
self._pending_uncancellations: int | None = 0
else:
self._pending_uncancellations = None
def __enter__(self) -> CancelScope:
if self._active:
raise RuntimeError(
"Each CancelScope may only be used for a single 'with' block"
)
self._host_task = host_task = cast(asyncio.Task, current_task())
self._tasks.add(host_task)
try:
task_state = _task_states[host_task]
except KeyError:
task_state = TaskState(None, self)
_task_states[host_task] = task_state
else:
self._parent_scope = task_state.cancel_scope
task_state.cancel_scope = self
if self._parent_scope is not None:
# If using an eager task factory, the parent scope may not even contain
# the host task
self._parent_scope._child_scopes.add(self)
self._parent_scope._tasks.discard(host_task)
self._timeout()
self._active = True
# Start cancelling the host task if the scope was cancelled before entering
if self._cancel_called:
self._deliver_cancellation(self)
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool:
del exc_tb
if not self._active:
raise RuntimeError("This cancel scope is not active")
if current_task() is not self._host_task:
raise RuntimeError(
"Attempted to exit cancel scope in a different task than it was "
"entered in"
)
assert self._host_task is not None
host_task_state = _task_states.get(self._host_task)
if host_task_state is None or host_task_state.cancel_scope is not self:
raise RuntimeError(
"Attempted to exit a cancel scope that isn't the current tasks's "
"current cancel scope"
)
try:
self._active = False
if self._timeout_handle:
self._timeout_handle.cancel()
self._timeout_handle = None
self._tasks.remove(self._host_task)
if self._parent_scope is not None:
self._parent_scope._child_scopes.remove(self)
self._parent_scope._tasks.add(self._host_task)
host_task_state.cancel_scope = self._parent_scope
# Restart the cancellation effort in the closest visible, cancelled parent
# scope if necessary
self._restart_cancellation_in_parent()
# We only swallow the exception iff it was an AnyIO CancelledError, either
# directly as exc_val or inside an exception group and there are no cancelled
# parent cancel scopes visible to us here
if self._cancel_called and not self._parent_cancellation_is_visible_to_us:
# For each level-cancel() call made on the host task, call uncancel()
while self._pending_uncancellations:
self._host_task.uncancel()
self._pending_uncancellations -= 1
# Update cancelled_caught and check for exceptions we must not swallow
cannot_swallow_exc_val = False
if exc_val is not None:
for exc in iterate_exceptions(exc_val):
if isinstance(exc, CancelledError) and is_anyio_cancellation(
exc
):
self._cancelled_caught = True
else:
cannot_swallow_exc_val = True
return self._cancelled_caught and not cannot_swallow_exc_val
else:
if self._pending_uncancellations:
assert self._parent_scope is not None
assert self._parent_scope._pending_uncancellations is not None
self._parent_scope._pending_uncancellations += (
self._pending_uncancellations
)
self._pending_uncancellations = 0
return False
finally:
self._host_task = None
del exc_val
@property
def _effectively_cancelled(self) -> bool:
cancel_scope: CancelScope | None = self
while cancel_scope is not None:
if cancel_scope._cancel_called:
return True
if cancel_scope.shield:
return False
cancel_scope = cancel_scope._parent_scope
return False
@property
def _parent_cancellation_is_visible_to_us(self) -> bool:
return (
self._parent_scope is not None
and not self.shield
and self._parent_scope._effectively_cancelled
)
def _timeout(self) -> None:
if self._deadline != math.inf:
loop = get_running_loop()
if loop.time() >= self._deadline:
self.cancel("deadline exceeded")
else:
self._timeout_handle = loop.call_at(self._deadline, self._timeout)
def _deliver_cancellation(self, origin: CancelScope) -> bool:
"""
Deliver cancellation to directly contained tasks and nested cancel scopes.
Schedule another run at the end if we still have tasks eligible for
cancellation.
:param origin: the cancel scope that originated the cancellation
:return: ``True`` if the delivery needs to be retried on the next cycle
"""
should_retry = False
current = current_task()
for task in self._tasks:
should_retry = True
if task._must_cancel: # type: ignore[attr-defined]
continue
# The task is eligible for cancellation if it has started
if task is not current and (task is self._host_task or _task_started(task)):
waiter = task._fut_waiter # type: ignore[attr-defined]
if not isinstance(waiter, asyncio.Future) or not waiter.done():
task.cancel(origin._cancel_reason)
if (
task is origin._host_task
and origin._pending_uncancellations is not None
):
origin._pending_uncancellations += 1
# Deliver cancellation to child scopes that aren't shielded or running their own
# cancellation callbacks
for scope in self._child_scopes:
if not scope._shield and not scope.cancel_called:
should_retry = scope._deliver_cancellation(origin) or should_retry
# Schedule another callback if there are still tasks left
if origin is self:
if should_retry:
self._cancel_handle = get_running_loop().call_soon(
self._deliver_cancellation, origin
)
else:
self._cancel_handle = None
return should_retry
def _restart_cancellation_in_parent(self) -> None:
"""
Restart the cancellation effort in the closest directly cancelled parent scope.
"""
scope = self._parent_scope
while scope is not None:
if scope._cancel_called:
if scope._cancel_handle is None:
scope._deliver_cancellation(scope)
break
# No point in looking beyond any shielded scope
if scope._shield:
break
scope = scope._parent_scope
def cancel(self, reason: str | None = None) -> None:
if not self._cancel_called:
if self._timeout_handle:
self._timeout_handle.cancel()
self._timeout_handle = None
self._cancel_called = True
self._cancel_reason = f"Cancelled via cancel scope {id(self):x}"
if task := current_task():
self._cancel_reason += f" by {task}"
if reason:
self._cancel_reason += f"; reason: {reason}"
if self._host_task is not None:
self._deliver_cancellation(self)
@property
def deadline(self) -> float:
return self._deadline
@deadline.setter
def deadline(self, value: float) -> None:
self._deadline = float(value)
if self._timeout_handle is not None:
self._timeout_handle.cancel()
self._timeout_handle = None
if self._active and not self._cancel_called:
self._timeout()
@property
def cancel_called(self) -> bool:
return self._cancel_called
@property
def cancelled_caught(self) -> bool:
return self._cancelled_caught
@property
def shield(self) -> bool:
return self._shield
@shield.setter
def shield(self, value: bool) -> None:
if self._shield != value:
self._shield = value
if not value:
self._restart_cancellation_in_parent()
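# Illustrative usage sketch (editor's note, not part of the library), via the public
# API re-exported from the ``anyio`` package rather than this backend class directly:
#
#     import anyio
#
#     async def main() -> None:
#         with anyio.CancelScope(deadline=anyio.current_time() + 1) as scope:
#             await anyio.sleep(10)  # cancelled once the deadline passes
#         assert scope.cancelled_caught
#
#     anyio.run(main)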
#
# Task states
#
class TaskState:
"""
Encapsulates auxiliary task information that cannot be added to the Task instance
itself because there are no guarantees about its implementation.
"""
__slots__ = "parent_id", "cancel_scope", "__weakref__"
def __init__(self, parent_id: int | None, cancel_scope: CancelScope | None):
self.parent_id = parent_id
self.cancel_scope = cancel_scope
_task_states: WeakKeyDictionary[asyncio.Task, TaskState] = WeakKeyDictionary()
#
# Task groups
#
class _AsyncioTaskStatus(abc.TaskStatus):
def __init__(self, future: asyncio.Future, parent_id: int):
self._future = future
self._parent_id = parent_id
def started(self, value: T_contra | None = None) -> None:
try:
self._future.set_result(value)
except asyncio.InvalidStateError:
if not self._future.cancelled():
raise RuntimeError(
"called 'started' twice on the same task status"
) from None
task = cast(asyncio.Task, current_task())
_task_states[task].parent_id = self._parent_id
if sys.version_info >= (3, 12):
_eager_task_factory_code: CodeType | None = asyncio.eager_task_factory.__code__
else:
_eager_task_factory_code = None
class TaskGroup(abc.TaskGroup):
def __init__(self) -> None:
self.cancel_scope: CancelScope = CancelScope()
self._active = False
self._exceptions: list[BaseException] = []
self._tasks: set[asyncio.Task] = set()
self._on_completed_fut: asyncio.Future[None] | None = None
async def __aenter__(self) -> TaskGroup:
self.cancel_scope.__enter__()
self._active = True
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool:
try:
if exc_val is not None:
self.cancel_scope.cancel()
if not isinstance(exc_val, CancelledError):
self._exceptions.append(exc_val)
loop = get_running_loop()
try:
if self._tasks:
with CancelScope() as wait_scope:
while self._tasks:
self._on_completed_fut = loop.create_future()
try:
await self._on_completed_fut
except CancelledError as exc:
# Shield the scope against further cancellation attempts,
# as they're not productive (#695)
wait_scope.shield = True
self.cancel_scope.cancel()
# Set exc_val from the cancellation exception if it was
# previously unset. However, we should not replace a native
                                # cancellation exception with one raised by a cancel scope.
if exc_val is None or (
isinstance(exc_val, CancelledError)
and not is_anyio_cancellation(exc)
):
exc_val = exc
self._on_completed_fut = None
else:
# If there are no child tasks to wait on, run at least one checkpoint
# anyway
await AsyncIOBackend.cancel_shielded_checkpoint()
self._active = False
if self._exceptions:
# The exception that got us here should already have been
# added to self._exceptions so it's ok to break exception
# chaining and avoid adding a "During handling of above..."
# for each nesting level.
raise BaseExceptionGroup(
"unhandled errors in a TaskGroup", self._exceptions
) from None
elif exc_val:
raise exc_val
except BaseException as exc:
if self.cancel_scope.__exit__(type(exc), exc, exc.__traceback__):
return True
raise
return self.cancel_scope.__exit__(exc_type, exc_val, exc_tb)
finally:
del exc_val, exc_tb, self._exceptions
def _spawn(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
args: tuple[Unpack[PosArgsT]],
name: object,
task_status_future: asyncio.Future | None = None,
) -> asyncio.Task:
def task_done(_task: asyncio.Task) -> None:
task_state = _task_states[_task]
assert task_state.cancel_scope is not None
assert _task in task_state.cancel_scope._tasks
task_state.cancel_scope._tasks.remove(_task)
self._tasks.remove(task)
del _task_states[_task]
if self._on_completed_fut is not None and not self._tasks:
try:
self._on_completed_fut.set_result(None)
except asyncio.InvalidStateError:
pass
try:
exc = _task.exception()
except CancelledError as e:
while isinstance(e.__context__, CancelledError):
e = e.__context__
exc = e
if exc is not None:
# The future can only be in the cancelled state if the host task was
# cancelled, so return immediately instead of adding one more
# CancelledError to the exceptions list
if task_status_future is not None and task_status_future.cancelled():
return
if task_status_future is None or task_status_future.done():
if not isinstance(exc, CancelledError):
self._exceptions.append(exc)
if not self.cancel_scope._effectively_cancelled:
self.cancel_scope.cancel()
else:
task_status_future.set_exception(exc)
elif task_status_future is not None and not task_status_future.done():
task_status_future.set_exception(
RuntimeError("Child exited without calling task_status.started()")
)
if not self._active:
raise RuntimeError(
"This task group is not active; no new tasks can be started."
)
kwargs = {}
if task_status_future:
parent_id = id(current_task())
kwargs["task_status"] = _AsyncioTaskStatus(
task_status_future, id(self.cancel_scope._host_task)
)
else:
parent_id = id(self.cancel_scope._host_task)
coro = func(*args, **kwargs)
if not iscoroutine(coro):
prefix = f"{func.__module__}." if hasattr(func, "__module__") else ""
raise TypeError(
f"Expected {prefix}{func.__qualname__}() to return a coroutine, but "
f"the return value ({coro!r}) is not a coroutine object"
)
name = get_callable_name(func) if name is None else str(name)
loop = asyncio.get_running_loop()
if (
(factory := loop.get_task_factory())
and getattr(factory, "__code__", None) is _eager_task_factory_code
and (closure := getattr(factory, "__closure__", None))
):
custom_task_constructor = closure[0].cell_contents
task = custom_task_constructor(coro, loop=loop, name=name)
else:
task = create_task(coro, name=name)
# Make the spawned task inherit the task group's cancel scope
_task_states[task] = TaskState(
parent_id=parent_id, cancel_scope=self.cancel_scope
)
self.cancel_scope._tasks.add(task)
self._tasks.add(task)
task.add_done_callback(task_done)
return task
def start_soon(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
*args: Unpack[PosArgsT],
name: object = None,
) -> None:
self._spawn(func, args, name)
async def start(
self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
) -> Any:
future: asyncio.Future = asyncio.Future()
task = self._spawn(func, args, name, future)
# If the task raises an exception after sending a start value without a switch
# point between, the task group is cancelled and this method never proceeds to
# process the completed future. That's why we have to have a shielded cancel
# scope here.
try:
return await future
except CancelledError:
# Cancel the task and wait for it to exit before returning
task.cancel()
with CancelScope(shield=True), suppress(CancelledError):
await task
raise
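# Sketch (editor's note) of the ``start()`` handshake implemented above: the child
# task must call ``task_status.started()`` (optionally with a value) before the
# parent's ``await tg.start(...)`` returns. Uses only the public ``anyio`` API:
#
#     import anyio
#     from anyio.abc import TaskStatus
#
#     async def server(*, task_status: TaskStatus[int] = anyio.TASK_STATUS_IGNORED):
#         task_status.started(1234)  # hand a value back to the starter
#         await anyio.sleep_forever()
#
#     async def main() -> None:
#         async with anyio.create_task_group() as tg:
#             port = await tg.start(server)
#             assert port == 1234
#             tg.cancel_scope.cancel()
#
#     anyio.run(main)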
#
# Threads
#
_Retval_Queue_Type = tuple[Optional[T_Retval], Optional[BaseException]]
class WorkerThread(Thread):
MAX_IDLE_TIME = 10 # seconds
def __init__(
self,
root_task: asyncio.Task,
workers: set[WorkerThread],
idle_workers: deque[WorkerThread],
):
super().__init__(name="AnyIO worker thread")
self.root_task = root_task
self.workers = workers
self.idle_workers = idle_workers
self.loop = root_task._loop
self.queue: Queue[
tuple[Context, Callable, tuple, asyncio.Future, CancelScope] | None
] = Queue(2)
self.idle_since = AsyncIOBackend.current_time()
self.stopping = False
def _report_result(
self, future: asyncio.Future, result: Any, exc: BaseException | None
) -> None:
self.idle_since = AsyncIOBackend.current_time()
if not self.stopping:
self.idle_workers.append(self)
if not future.cancelled():
if exc is not None:
if isinstance(exc, StopIteration):
new_exc = RuntimeError("coroutine raised StopIteration")
new_exc.__cause__ = exc
exc = new_exc
future.set_exception(exc)
else:
future.set_result(result)
def run(self) -> None:
with claim_worker_thread(AsyncIOBackend, self.loop):
while True:
item = self.queue.get()
if item is None:
# Shutdown command received
return
context, func, args, future, cancel_scope = item
if not future.cancelled():
result = None
exception: BaseException | None = None
threadlocals.current_cancel_scope = cancel_scope
try:
result = context.run(func, *args)
except BaseException as exc:
exception = exc
finally:
del threadlocals.current_cancel_scope
if not self.loop.is_closed():
self.loop.call_soon_threadsafe(
self._report_result, future, result, exception
)
del result, exception
self.queue.task_done()
del item, context, func, args, future, cancel_scope
def stop(self, f: asyncio.Task | None = None) -> None:
self.stopping = True
self.queue.put_nowait(None)
self.workers.discard(self)
try:
self.idle_workers.remove(self)
except ValueError:
pass
_threadpool_idle_workers: RunVar[deque[WorkerThread]] = RunVar(
"_threadpool_idle_workers"
)
_threadpool_workers: RunVar[set[WorkerThread]] = RunVar("_threadpool_workers")
class BlockingPortal(abc.BlockingPortal):
def __new__(cls) -> BlockingPortal:
return object.__new__(cls)
def __init__(self) -> None:
super().__init__()
self._loop = get_running_loop()
def _spawn_task_from_thread(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
args: tuple[Unpack[PosArgsT]],
kwargs: dict[str, Any],
name: object,
future: Future[T_Retval],
) -> None:
AsyncIOBackend.run_sync_from_thread(
partial(self._task_group.start_soon, name=name),
(self._call_func, func, args, kwargs, future),
self._loop,
)
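# Usage sketch (editor's note) for the portal machinery above, through the public
# ``anyio.from_thread`` helpers: the portal lets plain (non-async) threads submit
# work to an event loop thread.
#
#     import anyio
#     from anyio.from_thread import start_blocking_portal
#
#     with start_blocking_portal(backend="asyncio") as portal:
#         # Runs the coroutine function in the portal's event loop thread and
#         # blocks this thread until it completes
#         portal.call(anyio.sleep, 0.1)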
#
# Subprocesses
#
@dataclass(eq=False)
class StreamReaderWrapper(abc.ByteReceiveStream):
_stream: asyncio.StreamReader
async def receive(self, max_bytes: int = 65536) -> bytes:
data = await self._stream.read(max_bytes)
if data:
return data
else:
raise EndOfStream
async def aclose(self) -> None:
self._stream.set_exception(ClosedResourceError())
await AsyncIOBackend.checkpoint()
@dataclass(eq=False)
class StreamWriterWrapper(abc.ByteSendStream):
_stream: asyncio.StreamWriter
async def send(self, item: bytes) -> None:
self._stream.write(item)
await self._stream.drain()
async def aclose(self) -> None:
self._stream.close()
await AsyncIOBackend.checkpoint()
@dataclass(eq=False)
class Process(abc.Process):
_process: asyncio.subprocess.Process
_stdin: StreamWriterWrapper | None
_stdout: StreamReaderWrapper | None
_stderr: StreamReaderWrapper | None
async def aclose(self) -> None:
with CancelScope(shield=True) as scope:
if self._stdin:
await self._stdin.aclose()
if self._stdout:
await self._stdout.aclose()
if self._stderr:
await self._stderr.aclose()
scope.shield = False
try:
await self.wait()
except BaseException:
scope.shield = True
self.kill()
await self.wait()
raise
async def wait(self) -> int:
return await self._process.wait()
def terminate(self) -> None:
self._process.terminate()
def kill(self) -> None:
self._process.kill()
def send_signal(self, signal: int) -> None:
self._process.send_signal(signal)
@property
def pid(self) -> int:
return self._process.pid
@property
def returncode(self) -> int | None:
return self._process.returncode
@property
def stdin(self) -> abc.ByteSendStream | None:
return self._stdin
@property
def stdout(self) -> abc.ByteReceiveStream | None:
return self._stdout
@property
def stderr(self) -> abc.ByteReceiveStream | None:
return self._stderr
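# Sketch (editor's note): the ``Process`` wrapper above typically reaches users via
# the public helpers re-exported from ``anyio``. Assumes a POSIX ``echo`` binary is
# on the path; output capture is ``run_process()``'s default behavior:
#
#     import anyio
#
#     async def main() -> None:
#         result = await anyio.run_process(["echo", "hello"])
#         assert result.returncode == 0
#         assert result.stdout.startswith(b"hello")
#
#     anyio.run(main)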
def _forcibly_shutdown_process_pool_on_exit(
workers: set[Process], _task: object
) -> None:
"""
Forcibly shuts down worker processes belonging to this event loop."""
child_watcher: asyncio.AbstractChildWatcher | None = None
if sys.version_info < (3, 12):
try:
child_watcher = asyncio.get_event_loop_policy().get_child_watcher()
except NotImplementedError:
pass
# Close as much as possible (w/o async/await) to avoid warnings
for process in workers:
if process.returncode is None:
continue
process._stdin._stream._transport.close() # type: ignore[union-attr]
process._stdout._stream._transport.close() # type: ignore[union-attr]
process._stderr._stream._transport.close() # type: ignore[union-attr]
process.kill()
if child_watcher:
child_watcher.remove_child_handler(process.pid)
async def _shutdown_process_pool_on_exit(workers: set[abc.Process]) -> None:
"""
Shuts down worker processes belonging to this event loop.
NOTE: this only works when the event loop was started using asyncio.run() or
anyio.run().
"""
process: abc.Process
try:
await sleep(math.inf)
except asyncio.CancelledError:
for process in workers:
if process.returncode is None:
process.kill()
for process in workers:
await process.aclose()
#
# Sockets and networking
#
class StreamProtocol(asyncio.Protocol):
read_queue: deque[bytes]
read_event: asyncio.Event
write_event: asyncio.Event
exception: Exception | None = None
is_at_eof: bool = False
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self.read_queue = deque()
self.read_event = asyncio.Event()
self.write_event = asyncio.Event()
self.write_event.set()
cast(asyncio.Transport, transport).set_write_buffer_limits(0)
def connection_lost(self, exc: Exception | None) -> None:
if exc:
self.exception = BrokenResourceError()
self.exception.__cause__ = exc
self.read_event.set()
self.write_event.set()
def data_received(self, data: bytes) -> None:
        # ProactorEventLoop sometimes sends bytearray instead of bytes
self.read_queue.append(bytes(data))
self.read_event.set()
def eof_received(self) -> bool | None:
self.is_at_eof = True
self.read_event.set()
return True
def pause_writing(self) -> None:
self.write_event = asyncio.Event()
def resume_writing(self) -> None:
self.write_event.set()
class DatagramProtocol(asyncio.DatagramProtocol):
read_queue: deque[tuple[bytes, IPSockAddrType]]
read_event: asyncio.Event
write_event: asyncio.Event
exception: Exception | None = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self.read_queue = deque(maxlen=100) # arbitrary value
self.read_event = asyncio.Event()
self.write_event = asyncio.Event()
self.write_event.set()
def connection_lost(self, exc: Exception | None) -> None:
self.read_event.set()
self.write_event.set()
def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None:
addr = convert_ipv6_sockaddr(addr)
self.read_queue.append((data, addr))
self.read_event.set()
def error_received(self, exc: Exception) -> None:
self.exception = exc
def pause_writing(self) -> None:
self.write_event.clear()
def resume_writing(self) -> None:
self.write_event.set()
class SocketStream(abc.SocketStream):
def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol):
self._transport = transport
self._protocol = protocol
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
self._closed = False
@property
def _raw_socket(self) -> socket.socket:
return self._transport.get_extra_info("socket")
async def receive(self, max_bytes: int = 65536) -> bytes:
with self._receive_guard:
if (
not self._protocol.read_event.is_set()
and not self._transport.is_closing()
and not self._protocol.is_at_eof
):
self._transport.resume_reading()
await self._protocol.read_event.wait()
self._transport.pause_reading()
else:
await AsyncIOBackend.checkpoint()
try:
chunk = self._protocol.read_queue.popleft()
except IndexError:
if self._closed:
raise ClosedResourceError from None
elif self._protocol.exception:
raise self._protocol.exception from None
else:
raise EndOfStream from None
if len(chunk) > max_bytes:
# Split the oversized chunk
chunk, leftover = chunk[:max_bytes], chunk[max_bytes:]
self._protocol.read_queue.appendleft(leftover)
# If the read queue is empty, clear the flag so that the next call will
# block until data is available
if not self._protocol.read_queue:
self._protocol.read_event.clear()
return chunk
async def send(self, item: bytes) -> None:
with self._send_guard:
await AsyncIOBackend.checkpoint()
if self._closed:
raise ClosedResourceError
elif self._protocol.exception is not None:
raise self._protocol.exception
try:
self._transport.write(item)
except RuntimeError as exc:
if self._transport.is_closing():
raise BrokenResourceError from exc
else:
raise
await self._protocol.write_event.wait()
async def send_eof(self) -> None:
try:
self._transport.write_eof()
except OSError:
pass
async def aclose(self) -> None:
self._closed = True
if not self._transport.is_closing():
try:
self._transport.write_eof()
except OSError:
pass
self._transport.close()
await sleep(0)
self._transport.abort()
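# Usage sketch (editor's note): ``SocketStream`` instances are produced by the
# public connection helpers rather than constructed directly, e.g.:
#
#     import anyio
#
#     async def main() -> None:
#         stream = await anyio.connect_tcp("example.com", 80)
#         async with stream:
#             await stream.send(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
#             response = await stream.receive()
#
#     anyio.run(main)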
class _RawSocketMixin:
_receive_future: asyncio.Future | None = None
_send_future: asyncio.Future | None = None
_closing = False
def __init__(self, raw_socket: socket.socket):
self.__raw_socket = raw_socket
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
@property
def _raw_socket(self) -> socket.socket:
return self.__raw_socket
def _wait_until_readable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
def callback(f: object) -> None:
del self._receive_future
loop.remove_reader(self.__raw_socket)
f = self._receive_future = asyncio.Future()
loop.add_reader(self.__raw_socket, f.set_result, None)
f.add_done_callback(callback)
return f
def _wait_until_writable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
def callback(f: object) -> None:
del self._send_future
loop.remove_writer(self.__raw_socket)
f = self._send_future = asyncio.Future()
loop.add_writer(self.__raw_socket, f.set_result, None)
f.add_done_callback(callback)
return f
async def aclose(self) -> None:
if not self._closing:
self._closing = True
if self.__raw_socket.fileno() != -1:
self.__raw_socket.close()
if self._receive_future:
self._receive_future.set_result(None)
if self._send_future:
self._send_future.set_result(None)
class UNIXSocketStream(_RawSocketMixin, abc.UNIXSocketStream):
async def send_eof(self) -> None:
with self._send_guard:
self._raw_socket.shutdown(socket.SHUT_WR)
async def receive(self, max_bytes: int = 65536) -> bytes:
loop = get_running_loop()
await AsyncIOBackend.checkpoint()
with self._receive_guard:
while True:
try:
data = self._raw_socket.recv(max_bytes)
except BlockingIOError:
await self._wait_until_readable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
if not data:
raise EndOfStream
return data
async def send(self, item: bytes) -> None:
loop = get_running_loop()
await AsyncIOBackend.checkpoint()
with self._send_guard:
view = memoryview(item)
while view:
try:
bytes_sent = self._raw_socket.send(view)
except BlockingIOError:
await self._wait_until_writable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
view = view[bytes_sent:]
async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
if not isinstance(msglen, int) or msglen < 0:
raise ValueError("msglen must be a non-negative integer")
if not isinstance(maxfds, int) or maxfds < 1:
raise ValueError("maxfds must be a positive integer")
loop = get_running_loop()
fds = array.array("i")
await AsyncIOBackend.checkpoint()
with self._receive_guard:
while True:
try:
message, ancdata, flags, addr = self._raw_socket.recvmsg(
msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
)
except BlockingIOError:
await self._wait_until_readable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
if not message and not ancdata:
raise EndOfStream
break
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
raise RuntimeError(
f"Received unexpected ancillary data; message = {message!r}, "
f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
)
fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
return message, list(fds)
async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
if not message:
raise ValueError("message must not be empty")
if not fds:
raise ValueError("fds must not be empty")
loop = get_running_loop()
filenos: list[int] = []
for fd in fds:
if isinstance(fd, int):
filenos.append(fd)
elif isinstance(fd, IOBase):
filenos.append(fd.fileno())
fdarray = array.array("i", filenos)
await AsyncIOBackend.checkpoint()
with self._send_guard:
while True:
try:
# The ignore can be removed after mypy picks up
# https://github.com/python/typeshed/pull/5545
self._raw_socket.sendmsg(
[message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)]
)
break
except BlockingIOError:
await self._wait_until_writable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
class TCPSocketListener(abc.SocketListener):
_accept_scope: CancelScope | None = None
_closed = False
def __init__(self, raw_socket: socket.socket):
self.__raw_socket = raw_socket
self._loop = cast(asyncio.BaseEventLoop, get_running_loop())
self._accept_guard = ResourceGuard("accepting connections from")
@property
def _raw_socket(self) -> socket.socket:
return self.__raw_socket
async def accept(self) -> abc.SocketStream:
if self._closed:
raise ClosedResourceError
with self._accept_guard:
await AsyncIOBackend.checkpoint()
with CancelScope() as self._accept_scope:
try:
client_sock, _addr = await self._loop.sock_accept(self._raw_socket)
except asyncio.CancelledError:
# Workaround for https://bugs.python.org/issue41317
try:
self._loop.remove_reader(self._raw_socket)
except (ValueError, NotImplementedError):
pass
if self._closed:
raise ClosedResourceError from None
raise
finally:
self._accept_scope = None
client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
transport, protocol = await self._loop.connect_accepted_socket(
StreamProtocol, client_sock
)
return SocketStream(transport, protocol)
async def aclose(self) -> None:
if self._closed:
return
self._closed = True
if self._accept_scope:
# Workaround for https://bugs.python.org/issue41317
try:
self._loop.remove_reader(self._raw_socket)
except (ValueError, NotImplementedError):
pass
self._accept_scope.cancel()
await sleep(0)
self._raw_socket.close()
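# Sketch (editor's note) of how this listener is reached through the public API;
# ``create_tcp_listener()`` wraps raw sockets in listeners like the one above:
#
#     import anyio
#     from anyio.abc import SocketStream
#
#     async def handle(client: SocketStream) -> None:
#         async with client:
#             await client.send(b"hello\n")
#
#     async def main() -> None:
#         listener = await anyio.create_tcp_listener(local_port=0)
#         await listener.serve(handle)  # serves until cancelled
#
#     anyio.run(main)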
class UNIXSocketListener(abc.SocketListener):
def __init__(self, raw_socket: socket.socket):
self.__raw_socket = raw_socket
self._loop = get_running_loop()
self._accept_guard = ResourceGuard("accepting connections from")
self._closed = False
async def accept(self) -> abc.SocketStream:
await AsyncIOBackend.checkpoint()
with self._accept_guard:
while True:
try:
client_sock, _ = self.__raw_socket.accept()
client_sock.setblocking(False)
return UNIXSocketStream(client_sock)
except BlockingIOError:
f: asyncio.Future = asyncio.Future()
self._loop.add_reader(self.__raw_socket, f.set_result, None)
f.add_done_callback(
lambda _: self._loop.remove_reader(self.__raw_socket)
)
await f
except OSError as exc:
if self._closed:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
async def aclose(self) -> None:
self._closed = True
self.__raw_socket.close()
@property
def _raw_socket(self) -> socket.socket:
return self.__raw_socket
class UDPSocket(abc.UDPSocket):
def __init__(
self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
):
self._transport = transport
self._protocol = protocol
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
self._closed = False
@property
def _raw_socket(self) -> socket.socket:
return self._transport.get_extra_info("socket")
async def aclose(self) -> None:
if not self._transport.is_closing():
self._closed = True
self._transport.close()
async def receive(self) -> tuple[bytes, IPSockAddrType]:
with self._receive_guard:
await AsyncIOBackend.checkpoint()
# If the buffer is empty, ask for more data
if not self._protocol.read_queue and not self._transport.is_closing():
self._protocol.read_event.clear()
await self._protocol.read_event.wait()
try:
return self._protocol.read_queue.popleft()
except IndexError:
if self._closed:
raise ClosedResourceError from None
else:
raise BrokenResourceError from None
async def send(self, item: UDPPacketType) -> None:
with self._send_guard:
await AsyncIOBackend.checkpoint()
await self._protocol.write_event.wait()
if self._closed:
raise ClosedResourceError
elif self._transport.is_closing():
raise BrokenResourceError
else:
self._transport.sendto(*item)
class ConnectedUDPSocket(abc.ConnectedUDPSocket):
def __init__(
self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
):
self._transport = transport
self._protocol = protocol
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
self._closed = False
@property
def _raw_socket(self) -> socket.socket:
return self._transport.get_extra_info("socket")
async def aclose(self) -> None:
if not self._transport.is_closing():
self._closed = True
self._transport.close()
async def receive(self) -> bytes:
with self._receive_guard:
await AsyncIOBackend.checkpoint()
# If the buffer is empty, ask for more data
if not self._protocol.read_queue and not self._transport.is_closing():
self._protocol.read_event.clear()
await self._protocol.read_event.wait()
try:
packet = self._protocol.read_queue.popleft()
except IndexError:
if self._closed:
raise ClosedResourceError from None
else:
raise BrokenResourceError from None
return packet[0]
async def send(self, item: bytes) -> None:
with self._send_guard:
await AsyncIOBackend.checkpoint()
await self._protocol.write_event.wait()
if self._closed:
raise ClosedResourceError
elif self._transport.is_closing():
raise BrokenResourceError
else:
self._transport.sendto(item)
class UNIXDatagramSocket(_RawSocketMixin, abc.UNIXDatagramSocket):
async def receive(self) -> UNIXDatagramPacketType:
loop = get_running_loop()
await AsyncIOBackend.checkpoint()
with self._receive_guard:
while True:
try:
data = self._raw_socket.recvfrom(65536)
except BlockingIOError:
await self._wait_until_readable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
return data
async def send(self, item: UNIXDatagramPacketType) -> None:
loop = get_running_loop()
await AsyncIOBackend.checkpoint()
with self._send_guard:
while True:
try:
self._raw_socket.sendto(*item)
except BlockingIOError:
await self._wait_until_writable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
return
class ConnectedUNIXDatagramSocket(_RawSocketMixin, abc.ConnectedUNIXDatagramSocket):
async def receive(self) -> bytes:
loop = get_running_loop()
await AsyncIOBackend.checkpoint()
with self._receive_guard:
while True:
try:
data = self._raw_socket.recv(65536)
except BlockingIOError:
await self._wait_until_readable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
return data
async def send(self, item: bytes) -> None:
loop = get_running_loop()
await AsyncIOBackend.checkpoint()
with self._send_guard:
while True:
try:
self._raw_socket.send(item)
except BlockingIOError:
await self._wait_until_writable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
return
_read_events: RunVar[dict[int, asyncio.Future[bool]]] = RunVar("read_events")
_write_events: RunVar[dict[int, asyncio.Future[bool]]] = RunVar("write_events")
#
# Synchronization
#
class Event(BaseEvent):
def __new__(cls) -> Event:
return object.__new__(cls)
def __init__(self) -> None:
self._event = asyncio.Event()
def set(self) -> None:
self._event.set()
def is_set(self) -> bool:
return self._event.is_set()
async def wait(self) -> None:
if self.is_set():
await AsyncIOBackend.checkpoint()
else:
await self._event.wait()
def statistics(self) -> EventStatistics:
return EventStatistics(len(self._event._waiters))
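# Usage sketch (editor's note) for the Event primitive above, via the public API:
#
#     import anyio
#
#     async def waiter(event: anyio.Event) -> None:
#         await event.wait()  # blocks until set() is called
#
#     async def main() -> None:
#         event = anyio.Event()
#         async with anyio.create_task_group() as tg:
#             tg.start_soon(waiter, event)
#             event.set()
#
#     anyio.run(main)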
class Lock(BaseLock):
def __new__(cls, *, fast_acquire: bool = False) -> Lock:
return object.__new__(cls)
def __init__(self, *, fast_acquire: bool = False) -> None:
self._fast_acquire = fast_acquire
self._owner_task: asyncio.Task | None = None
self._waiters: deque[tuple[asyncio.Task, asyncio.Future]] = deque()
async def acquire(self) -> None:
task = cast(asyncio.Task, current_task())
if self._owner_task is None and not self._waiters:
await AsyncIOBackend.checkpoint_if_cancelled()
self._owner_task = task
# Unless on the "fast path", yield control of the event loop so that other
# tasks can run too
if not self._fast_acquire:
try:
await AsyncIOBackend.cancel_shielded_checkpoint()
except CancelledError:
self.release()
raise
return
        if self._owner_task is task:
raise RuntimeError("Attempted to acquire an already held Lock")
fut: asyncio.Future[None] = asyncio.Future()
item = task, fut
self._waiters.append(item)
try:
await fut
except CancelledError:
self._waiters.remove(item)
if self._owner_task is task:
self.release()
raise
self._waiters.remove(item)
def acquire_nowait(self) -> None:
task = cast(asyncio.Task, current_task())
if self._owner_task is None and not self._waiters:
self._owner_task = task
return
if self._owner_task is task:
raise RuntimeError("Attempted to acquire an already held Lock")
raise WouldBlock
def locked(self) -> bool:
return self._owner_task is not None
def release(self) -> None:
        if self._owner_task is not current_task():
raise RuntimeError("The current task is not holding this lock")
for task, fut in self._waiters:
if not fut.cancelled():
self._owner_task = task
fut.set_result(None)
return
self._owner_task = None
def statistics(self) -> LockStatistics:
task_info = AsyncIOTaskInfo(self._owner_task) if self._owner_task else None
return LockStatistics(self.locked(), task_info, len(self._waiters))
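# Usage sketch (editor's note). Note the ``fast_acquire`` flag implemented above:
# when the lock is uncontended, it skips the extra event-loop yield on acquisition:
#
#     import anyio
#
#     async def main() -> None:
#         lock = anyio.Lock(fast_acquire=True)
#         async with lock:
#             ...  # exclusive section
#
#     anyio.run(main)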
class Semaphore(BaseSemaphore):
def __new__(
cls,
initial_value: int,
*,
max_value: int | None = None,
fast_acquire: bool = False,
) -> Semaphore:
return object.__new__(cls)
def __init__(
self,
initial_value: int,
*,
max_value: int | None = None,
fast_acquire: bool = False,
):
super().__init__(initial_value, max_value=max_value)
self._value = initial_value
self._max_value = max_value
self._fast_acquire = fast_acquire
self._waiters: deque[asyncio.Future[None]] = deque()
async def acquire(self) -> None:
if self._value > 0 and not self._waiters:
await AsyncIOBackend.checkpoint_if_cancelled()
self._value -= 1
# Unless on the "fast path", yield control of the event loop so that other
# tasks can run too
if not self._fast_acquire:
try:
await AsyncIOBackend.cancel_shielded_checkpoint()
except CancelledError:
self.release()
raise
return
fut: asyncio.Future[None] = asyncio.Future()
self._waiters.append(fut)
try:
await fut
except CancelledError:
try:
self._waiters.remove(fut)
except ValueError:
self.release()
raise
def acquire_nowait(self) -> None:
if self._value == 0:
raise WouldBlock
self._value -= 1
def release(self) -> None:
if self._max_value is not None and self._value == self._max_value:
raise ValueError("semaphore released too many times")
for fut in self._waiters:
if not fut.cancelled():
fut.set_result(None)
self._waiters.remove(fut)
return
self._value += 1
@property
def value(self) -> int:
return self._value
@property
def max_value(self) -> int | None:
return self._max_value
def statistics(self) -> SemaphoreStatistics:
return SemaphoreStatistics(len(self._waiters))
class CapacityLimiter(BaseCapacityLimiter):
_total_tokens: float = 0
def __new__(cls, total_tokens: float) -> CapacityLimiter:
return object.__new__(cls)
def __init__(self, total_tokens: float):
self._borrowers: set[Any] = set()
self._wait_queue: OrderedDict[Any, asyncio.Event] = OrderedDict()
self.total_tokens = total_tokens
async def __aenter__(self) -> None:
await self.acquire()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.release()
@property
def total_tokens(self) -> float:
return self._total_tokens
@total_tokens.setter
def total_tokens(self, value: float) -> None:
if not isinstance(value, int) and not math.isinf(value):
raise TypeError("total_tokens must be an int or math.inf")
if value < 1:
raise ValueError("total_tokens must be >= 1")
waiters_to_notify = max(value - self._total_tokens, 0)
self._total_tokens = value
# Notify waiting tasks that they have acquired the limiter
while self._wait_queue and waiters_to_notify:
event = self._wait_queue.popitem(last=False)[1]
event.set()
waiters_to_notify -= 1
@property
def borrowed_tokens(self) -> int:
return len(self._borrowers)
@property
def available_tokens(self) -> float:
return self._total_tokens - len(self._borrowers)
def _notify_next_waiter(self) -> None:
"""Notify the next task in line if this limiter has free capacity now."""
if self._wait_queue and len(self._borrowers) < self._total_tokens:
event = self._wait_queue.popitem(last=False)[1]
event.set()
def acquire_nowait(self) -> None:
self.acquire_on_behalf_of_nowait(current_task())
def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
if borrower in self._borrowers:
raise RuntimeError(
"this borrower is already holding one of this CapacityLimiter's tokens"
)
if self._wait_queue or len(self._borrowers) >= self._total_tokens:
raise WouldBlock
self._borrowers.add(borrower)
async def acquire(self) -> None:
return await self.acquire_on_behalf_of(current_task())
async def acquire_on_behalf_of(self, borrower: object) -> None:
await AsyncIOBackend.checkpoint_if_cancelled()
try:
self.acquire_on_behalf_of_nowait(borrower)
except WouldBlock:
event = asyncio.Event()
self._wait_queue[borrower] = event
try:
await event.wait()
except BaseException:
self._wait_queue.pop(borrower, None)
if event.is_set():
self._notify_next_waiter()
raise
self._borrowers.add(borrower)
else:
try:
await AsyncIOBackend.cancel_shielded_checkpoint()
except BaseException:
self.release()
raise
def release(self) -> None:
self.release_on_behalf_of(current_task())
def release_on_behalf_of(self, borrower: object) -> None:
try:
self._borrowers.remove(borrower)
except KeyError:
raise RuntimeError(
"this borrower isn't holding any of this CapacityLimiter's tokens"
) from None
self._notify_next_waiter()
def statistics(self) -> CapacityLimiterStatistics:
return CapacityLimiterStatistics(
self.borrowed_tokens,
self.total_tokens,
tuple(self._borrowers),
len(self._wait_queue),
)
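# Sketch (editor's note): a CapacityLimiter bounding concurrent worker threads via
# the public ``anyio.to_thread`` API; ``partial()`` is needed because ``start_soon``
# only forwards positional arguments:
#
#     import time
#     from functools import partial
#
#     import anyio
#     import anyio.to_thread
#
#     async def main() -> None:
#         limiter = anyio.CapacityLimiter(2)  # at most two borrowed tokens at once
#         async with anyio.create_task_group() as tg:
#             for _ in range(10):
#                 tg.start_soon(
#                     partial(anyio.to_thread.run_sync, time.sleep, 0.1, limiter=limiter)
#                 )
#
#     anyio.run(main)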
_default_thread_limiter: RunVar[CapacityLimiter] = RunVar("_default_thread_limiter")
#
# Operating system signals
#
class _SignalReceiver:
def __init__(self, signals: tuple[Signals, ...]):
self._signals = signals
self._loop = get_running_loop()
self._signal_queue: deque[Signals] = deque()
self._future: asyncio.Future = asyncio.Future()
self._handled_signals: set[Signals] = set()
def _deliver(self, signum: Signals) -> None:
self._signal_queue.append(signum)
if not self._future.done():
self._future.set_result(None)
def __enter__(self) -> _SignalReceiver:
for sig in set(self._signals):
self._loop.add_signal_handler(sig, self._deliver, sig)
self._handled_signals.add(sig)
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
for sig in self._handled_signals:
self._loop.remove_signal_handler(sig)
def __aiter__(self) -> _SignalReceiver:
return self
async def __anext__(self) -> Signals:
await AsyncIOBackend.checkpoint()
if not self._signal_queue:
self._future = asyncio.Future()
await self._future
return self._signal_queue.popleft()
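# Usage sketch (editor's note) for the signal receiver above, via the public API.
# Requires a platform where ``loop.add_signal_handler()`` is supported (i.e. not
# Windows):
#
#     import signal
#
#     import anyio
#
#     async def main() -> None:
#         with anyio.open_signal_receiver(signal.SIGTERM, signal.SIGHUP) as signals:
#             async for signum in signals:
#                 print(f"received signal {signum!r}")
#                 return
#
#     anyio.run(main)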
#
# Testing and debugging
#
class AsyncIOTaskInfo(TaskInfo):
def __init__(self, task: asyncio.Task):
task_state = _task_states.get(task)
if task_state is None:
parent_id = None
else:
parent_id = task_state.parent_id
coro = task.get_coro()
assert coro is not None, "created TaskInfo from a completed Task"
super().__init__(id(task), parent_id, task.get_name(), coro)
self._task = weakref.ref(task)
def has_pending_cancellation(self) -> bool:
if not (task := self._task()):
# If the task isn't around anymore, it won't have a pending cancellation
return False
if task._must_cancel: # type: ignore[attr-defined]
return True
elif (
isinstance(task._fut_waiter, asyncio.Future) # type: ignore[attr-defined]
and task._fut_waiter.cancelled() # type: ignore[attr-defined]
):
return True
if task_state := _task_states.get(task):
if cancel_scope := task_state.cancel_scope:
return cancel_scope._effectively_cancelled
return False
class TestRunner(abc.TestRunner):
_send_stream: MemoryObjectSendStream[tuple[Awaitable[Any], asyncio.Future[Any]]]
def __init__(
self,
*,
debug: bool | None = None,
use_uvloop: bool = False,
loop_factory: Callable[[], AbstractEventLoop] | None = None,
) -> None:
if use_uvloop and loop_factory is None:
import uvloop
loop_factory = uvloop.new_event_loop
self._runner = Runner(debug=debug, loop_factory=loop_factory)
self._exceptions: list[BaseException] = []
self._runner_task: asyncio.Task | None = None
def __enter__(self) -> TestRunner:
self._runner.__enter__()
self.get_loop().set_exception_handler(self._exception_handler)
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self._runner.__exit__(exc_type, exc_val, exc_tb)
def get_loop(self) -> AbstractEventLoop:
return self._runner.get_loop()
def _exception_handler(
self, loop: asyncio.AbstractEventLoop, context: dict[str, Any]
) -> None:
if isinstance(context.get("exception"), Exception):
self._exceptions.append(context["exception"])
else:
loop.default_exception_handler(context)
def _raise_async_exceptions(self) -> None:
# Re-raise any exceptions raised in asynchronous callbacks
if self._exceptions:
exceptions, self._exceptions = self._exceptions, []
if len(exceptions) == 1:
raise exceptions[0]
elif exceptions:
raise BaseExceptionGroup(
"Multiple exceptions occurred in asynchronous callbacks", exceptions
)
async def _run_tests_and_fixtures(
self,
receive_stream: MemoryObjectReceiveStream[
tuple[Awaitable[T_Retval], asyncio.Future[T_Retval]]
],
) -> None:
from _pytest.outcomes import OutcomeException
with receive_stream, self._send_stream:
async for coro, future in receive_stream:
try:
retval = await coro
except CancelledError as exc:
if not future.cancelled():
future.cancel(*exc.args)
raise
except BaseException as exc:
if not future.cancelled():
future.set_exception(exc)
if not isinstance(exc, (Exception, OutcomeException)):
raise
else:
if not future.cancelled():
future.set_result(retval)
async def _call_in_runner_task(
self,
func: Callable[P, Awaitable[T_Retval]],
*args: P.args,
**kwargs: P.kwargs,
) -> T_Retval:
if not self._runner_task:
self._send_stream, receive_stream = create_memory_object_stream[
tuple[Awaitable[Any], asyncio.Future]
](1)
self._runner_task = self.get_loop().create_task(
self._run_tests_and_fixtures(receive_stream)
)
coro = func(*args, **kwargs)
future: asyncio.Future[T_Retval] = self.get_loop().create_future()
self._send_stream.send_nowait((coro, future))
return await future
def run_asyncgen_fixture(
self,
fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
kwargs: dict[str, Any],
) -> Iterable[T_Retval]:
asyncgen = fixture_func(**kwargs)
fixturevalue: T_Retval = self.get_loop().run_until_complete(
self._call_in_runner_task(asyncgen.asend, None)
)
self._raise_async_exceptions()
yield fixturevalue
try:
self.get_loop().run_until_complete(
self._call_in_runner_task(asyncgen.asend, None)
)
except StopAsyncIteration:
self._raise_async_exceptions()
else:
self.get_loop().run_until_complete(asyncgen.aclose())
raise RuntimeError("Async generator fixture did not stop")
def run_fixture(
self,
fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
kwargs: dict[str, Any],
) -> T_Retval:
retval = self.get_loop().run_until_complete(
self._call_in_runner_task(fixture_func, **kwargs)
)
self._raise_async_exceptions()
return retval
def run_test(
self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
) -> None:
try:
self.get_loop().run_until_complete(
self._call_in_runner_task(test_func, **kwargs)
)
except Exception as exc:
self._exceptions.append(exc)
self._raise_async_exceptions()
class AsyncIOBackend(AsyncBackend):
@classmethod
def run(
cls,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
args: tuple[Unpack[PosArgsT]],
kwargs: dict[str, Any],
options: dict[str, Any],
) -> T_Retval:
@wraps(func)
async def wrapper() -> T_Retval:
task = cast(asyncio.Task, current_task())
task.set_name(get_callable_name(func))
_task_states[task] = TaskState(None, None)
try:
return await func(*args)
finally:
del _task_states[task]
debug = options.get("debug", None)
loop_factory = options.get("loop_factory", None)
if loop_factory is None and options.get("use_uvloop", False):
import uvloop
loop_factory = uvloop.new_event_loop
with Runner(debug=debug, loop_factory=loop_factory) as runner:
return runner.run(wrapper())
@classmethod
def current_token(cls) -> object:
return get_running_loop()
@classmethod
def current_time(cls) -> float:
return get_running_loop().time()
@classmethod
def cancelled_exception_class(cls) -> type[BaseException]:
return CancelledError
@classmethod
async def checkpoint(cls) -> None:
await sleep(0)
@classmethod
async def checkpoint_if_cancelled(cls) -> None:
task = current_task()
if task is None:
return
try:
cancel_scope = _task_states[task].cancel_scope
except KeyError:
return
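        # Walk up the chain of cancel scopes; if a non-shielded scope has been
        # cancelled, yield to the event loop so the cancellation gets delivered.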
while cancel_scope:
if cancel_scope.cancel_called:
await sleep(0)
elif cancel_scope.shield:
break
else:
cancel_scope = cancel_scope._parent_scope
@classmethod
async def cancel_shielded_checkpoint(cls) -> None:
with CancelScope(shield=True):
await sleep(0)
@classmethod
async def sleep(cls, delay: float) -> None:
await sleep(delay)
@classmethod
def create_cancel_scope(
cls, *, deadline: float = math.inf, shield: bool = False
) -> CancelScope:
return CancelScope(deadline=deadline, shield=shield)
@classmethod
def current_effective_deadline(cls) -> float:
if (task := current_task()) is None:
return math.inf
try:
cancel_scope = _task_states[task].cancel_scope
except KeyError:
return math.inf
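        # The effective deadline is the earliest deadline among the enclosing
        # cancel scopes, stopping at the first shielded scope; a scope that was
        # already cancelled yields -inf.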
deadline = math.inf
while cancel_scope:
deadline = min(deadline, cancel_scope.deadline)
if cancel_scope._cancel_called:
deadline = -math.inf
break
elif cancel_scope.shield:
break
else:
cancel_scope = cancel_scope._parent_scope
return deadline
@classmethod
def create_task_group(cls) -> abc.TaskGroup:
return TaskGroup()
@classmethod
def create_event(cls) -> abc.Event:
return Event()
@classmethod
def create_lock(cls, *, fast_acquire: bool) -> abc.Lock:
return Lock(fast_acquire=fast_acquire)
@classmethod
def create_semaphore(
cls,
initial_value: int,
*,
max_value: int | None = None,
fast_acquire: bool = False,
) -> abc.Semaphore:
return Semaphore(initial_value, max_value=max_value, fast_acquire=fast_acquire)
@classmethod
def create_capacity_limiter(cls, total_tokens: float) -> abc.CapacityLimiter:
return CapacityLimiter(total_tokens)
@classmethod
async def run_sync_in_worker_thread( # type: ignore[return]
cls,
func: Callable[[Unpack[PosArgsT]], T_Retval],
args: tuple[Unpack[PosArgsT]],
abandon_on_cancel: bool = False,
limiter: abc.CapacityLimiter | None = None,
) -> T_Retval:
await cls.checkpoint()
# If this is the first run in this event loop thread, set up the necessary
# variables
try:
idle_workers = _threadpool_idle_workers.get()
workers = _threadpool_workers.get()
except LookupError:
idle_workers = deque()
workers = set()
_threadpool_idle_workers.set(idle_workers)
_threadpool_workers.set(workers)
async with limiter or cls.current_default_thread_limiter():
with CancelScope(shield=not abandon_on_cancel) as scope:
future = asyncio.Future[T_Retval]()
root_task = find_root_task()
if not idle_workers:
worker = WorkerThread(root_task, workers, idle_workers)
worker.start()
workers.add(worker)
root_task.add_done_callback(
worker.stop, context=contextvars.Context()
)
else:
worker = idle_workers.pop()
# Prune any other workers that have been idle for MAX_IDLE_TIME
# seconds or longer
now = cls.current_time()
while idle_workers:
if (
now - idle_workers[0].idle_since
< WorkerThread.MAX_IDLE_TIME
):
break
expired_worker = idle_workers.popleft()
expired_worker.root_task.remove_done_callback(
expired_worker.stop
)
expired_worker.stop()
context = copy_context()
context.run(sniffio.current_async_library_cvar.set, None)
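                # Choose the scope that the worker's check_cancelled() will
                # test against: the shielding scope itself when abandoning on
                # cancel (or when there is no parent), otherwise the scope
                # enclosing the shield.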
if abandon_on_cancel or scope._parent_scope is None:
worker_scope = scope
else:
worker_scope = scope._parent_scope
worker.queue.put_nowait((context, func, args, future, worker_scope))
return await future
@classmethod
def check_cancelled(cls) -> None:
scope: CancelScope | None = threadlocals.current_cancel_scope
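        # Walk up from the scope assigned to this worker thread: raise if any
        # scope was cancelled, but stop the search at the first shielded scope.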
while scope is not None:
if scope.cancel_called:
raise CancelledError(f"Cancelled by cancel scope {id(scope):x}")
if scope.shield:
return
scope = scope._parent_scope
@classmethod
def run_async_from_thread(
cls,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
args: tuple[Unpack[PosArgsT]],
token: object,
) -> T_Retval:
async def task_wrapper() -> T_Retval:
__tracebackhide__ = True
if scope is not None:
task = cast(asyncio.Task, current_task())
_task_states[task] = TaskState(None, scope)
scope._tasks.add(task)
try:
return await func(*args)
except CancelledError as exc:
raise concurrent.futures.CancelledError(str(exc)) from None
finally:
if scope is not None:
scope._tasks.discard(task)
loop = cast(
"AbstractEventLoop", token or threadlocals.current_token.native_token
)
if loop.is_closed():
raise RunFinishedError
context = copy_context()
context.run(sniffio.current_async_library_cvar.set, "asyncio")
scope = getattr(threadlocals, "current_cancel_scope", None)
f: concurrent.futures.Future[T_Retval] = context.run(
asyncio.run_coroutine_threadsafe, task_wrapper(), loop=loop
)
return f.result()
@classmethod
def run_sync_from_thread(
cls,
func: Callable[[Unpack[PosArgsT]], T_Retval],
args: tuple[Unpack[PosArgsT]],
token: object,
) -> T_Retval:
@wraps(func)
def wrapper() -> None:
try:
sniffio.current_async_library_cvar.set("asyncio")
f.set_result(func(*args))
except BaseException as exc:
f.set_exception(exc)
if not isinstance(exc, Exception):
raise
loop = cast(
"AbstractEventLoop", token or threadlocals.current_token.native_token
)
if loop.is_closed():
raise RunFinishedError
f: concurrent.futures.Future[T_Retval] = Future()
loop.call_soon_threadsafe(wrapper)
return f.result()
@classmethod
def create_blocking_portal(cls) -> abc.BlockingPortal:
return BlockingPortal()
@classmethod
async def open_process(
cls,
command: StrOrBytesPath | Sequence[StrOrBytesPath],
*,
stdin: int | IO[Any] | None,
stdout: int | IO[Any] | None,
stderr: int | IO[Any] | None,
**kwargs: Any,
) -> Process:
await cls.checkpoint()
if isinstance(command, PathLike):
command = os.fspath(command)
if isinstance(command, (str, bytes)):
process = await asyncio.create_subprocess_shell(
command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
**kwargs,
)
else:
process = await asyncio.create_subprocess_exec(
*command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
**kwargs,
)
stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None
stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None
stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None
return Process(process, stdin_stream, stdout_stream, stderr_stream)
@classmethod
def setup_process_pool_exit_at_shutdown(cls, workers: set[abc.Process]) -> None:
create_task(
_shutdown_process_pool_on_exit(workers),
name="AnyIO process pool shutdown task",
)
find_root_task().add_done_callback(
partial(_forcibly_shutdown_process_pool_on_exit, workers) # type:ignore[arg-type]
)
@classmethod
async def connect_tcp(
cls, host: str, port: int, local_address: IPSockAddrType | None = None
) -> abc.SocketStream:
transport, protocol = cast(
tuple[asyncio.Transport, StreamProtocol],
await get_running_loop().create_connection(
StreamProtocol, host, port, local_addr=local_address
),
)
transport.pause_reading()
return SocketStream(transport, protocol)
@classmethod
async def connect_unix(cls, path: str | bytes) -> abc.UNIXSocketStream:
await cls.checkpoint()
loop = get_running_loop()
raw_socket = socket.socket(socket.AF_UNIX)
raw_socket.setblocking(False)
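        # Retry the non-blocking connect in a loop: on BlockingIOError, wait
        # until the socket becomes writable, then try again.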
while True:
try:
raw_socket.connect(path)
except BlockingIOError:
f: asyncio.Future = asyncio.Future()
loop.add_writer(raw_socket, f.set_result, None)
f.add_done_callback(lambda _: loop.remove_writer(raw_socket))
await f
except BaseException:
raw_socket.close()
raise
else:
return UNIXSocketStream(raw_socket)
@classmethod
def create_tcp_listener(cls, sock: socket.socket) -> SocketListener:
return TCPSocketListener(sock)
@classmethod
def create_unix_listener(cls, sock: socket.socket) -> SocketListener:
return UNIXSocketListener(sock)
@classmethod
async def create_udp_socket(
cls,
family: AddressFamily,
local_address: IPSockAddrType | None,
remote_address: IPSockAddrType | None,
reuse_port: bool,
) -> UDPSocket | ConnectedUDPSocket:
transport, protocol = await get_running_loop().create_datagram_endpoint(
DatagramProtocol,
local_addr=local_address,
remote_addr=remote_address,
family=family,
reuse_port=reuse_port,
)
if protocol.exception:
transport.close()
raise protocol.exception
if not remote_address:
return UDPSocket(transport, protocol)
else:
return ConnectedUDPSocket(transport, protocol)
@classmethod
async def create_unix_datagram_socket( # type: ignore[override]
cls, raw_socket: socket.socket, remote_path: str | bytes | None
) -> abc.UNIXDatagramSocket | abc.ConnectedUNIXDatagramSocket:
await cls.checkpoint()
loop = get_running_loop()
if remote_path:
while True:
try:
raw_socket.connect(remote_path)
except BlockingIOError:
f: asyncio.Future = asyncio.Future()
loop.add_writer(raw_socket, f.set_result, None)
f.add_done_callback(lambda _: loop.remove_writer(raw_socket))
await f
except BaseException:
raw_socket.close()
raise
else:
return ConnectedUNIXDatagramSocket(raw_socket)
else:
return UNIXDatagramSocket(raw_socket)
@classmethod
async def getaddrinfo(
cls,
host: bytes | str | None,
port: str | int | None,
*,
family: int | AddressFamily = 0,
type: int | SocketKind = 0,
proto: int = 0,
flags: int = 0,
) -> Sequence[
tuple[
AddressFamily,
SocketKind,
int,
str,
tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes],
]
]:
return await get_running_loop().getaddrinfo(
host, port, family=family, type=type, proto=proto, flags=flags
)
@classmethod
async def getnameinfo(
cls, sockaddr: IPSockAddrType, flags: int = 0
) -> tuple[str, str]:
return await get_running_loop().getnameinfo(sockaddr, flags)
@classmethod
async def wait_readable(cls, obj: FileDescriptorLike) -> None:
try:
read_events = _read_events.get()
except LookupError:
read_events = {}
_read_events.set(read_events)
fd = obj if isinstance(obj, int) else obj.fileno()
if read_events.get(fd):
raise BusyResourceError("reading from")
loop = get_running_loop()
fut: asyncio.Future[bool] = loop.create_future()
def cb() -> None:
try:
del read_events[fd]
except KeyError:
pass
else:
remove_reader(fd)
try:
fut.set_result(True)
except asyncio.InvalidStateError:
pass
try:
loop.add_reader(fd, cb)
except NotImplementedError:
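            # This event loop (e.g. a proactor event loop on Windows) doesn't
            # support add_reader(), so fall back to the shared selector thread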
from anyio._core._asyncio_selector_thread import get_selector
selector = get_selector()
selector.add_reader(fd, cb)
remove_reader = selector.remove_reader
else:
remove_reader = loop.remove_reader
read_events[fd] = fut
try:
success = await fut
finally:
try:
del read_events[fd]
except KeyError:
pass
else:
remove_reader(fd)
if not success:
raise ClosedResourceError
@classmethod
async def wait_writable(cls, obj: FileDescriptorLike) -> None:
try:
write_events = _write_events.get()
except LookupError:
write_events = {}
_write_events.set(write_events)
fd = obj if isinstance(obj, int) else obj.fileno()
if write_events.get(fd):
raise BusyResourceError("writing to")
loop = get_running_loop()
fut: asyncio.Future[bool] = loop.create_future()
def cb() -> None:
try:
del write_events[fd]
except KeyError:
pass
else:
remove_writer(fd)
try:
fut.set_result(True)
except asyncio.InvalidStateError:
pass
try:
loop.add_writer(fd, cb)
except NotImplementedError:
from anyio._core._asyncio_selector_thread import get_selector
selector = get_selector()
selector.add_writer(fd, cb)
remove_writer = selector.remove_writer
else:
remove_writer = loop.remove_writer
write_events[fd] = fut
try:
success = await fut
finally:
try:
del write_events[fd]
except KeyError:
pass
else:
remove_writer(fd)
if not success:
raise ClosedResourceError
@classmethod
def notify_closing(cls, obj: FileDescriptorLike) -> None:
fd = obj if isinstance(obj, int) else obj.fileno()
loop = get_running_loop()
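        # Resolve any futures pending in wait_readable()/wait_writable() with
        # False so that those calls raise ClosedResourceError, and unregister
        # the file descriptor from the loop (or the fallback selector thread)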
try:
write_events = _write_events.get()
except LookupError:
pass
else:
try:
fut = write_events.pop(fd)
except KeyError:
pass
else:
try:
fut.set_result(False)
except asyncio.InvalidStateError:
pass
try:
loop.remove_writer(fd)
except NotImplementedError:
from anyio._core._asyncio_selector_thread import get_selector
get_selector().remove_writer(fd)
try:
read_events = _read_events.get()
except LookupError:
pass
else:
try:
fut = read_events.pop(fd)
except KeyError:
pass
else:
try:
fut.set_result(False)
except asyncio.InvalidStateError:
pass
try:
loop.remove_reader(fd)
except NotImplementedError:
from anyio._core._asyncio_selector_thread import get_selector
get_selector().remove_reader(fd)
@classmethod
async def wrap_listener_socket(cls, sock: socket.socket) -> SocketListener:
return TCPSocketListener(sock)
@classmethod
async def wrap_stream_socket(cls, sock: socket.socket) -> SocketStream:
transport, protocol = await get_running_loop().create_connection(
StreamProtocol, sock=sock
)
return SocketStream(transport, protocol)
@classmethod
async def wrap_unix_stream_socket(cls, sock: socket.socket) -> UNIXSocketStream:
return UNIXSocketStream(sock)
@classmethod
async def wrap_udp_socket(cls, sock: socket.socket) -> UDPSocket:
transport, protocol = await get_running_loop().create_datagram_endpoint(
DatagramProtocol, sock=sock
)
return UDPSocket(transport, protocol)
@classmethod
async def wrap_connected_udp_socket(cls, sock: socket.socket) -> ConnectedUDPSocket:
transport, protocol = await get_running_loop().create_datagram_endpoint(
DatagramProtocol, sock=sock
)
return ConnectedUDPSocket(transport, protocol)
@classmethod
async def wrap_unix_datagram_socket(cls, sock: socket.socket) -> UNIXDatagramSocket:
return UNIXDatagramSocket(sock)
@classmethod
async def wrap_connected_unix_datagram_socket(
cls, sock: socket.socket
) -> ConnectedUNIXDatagramSocket:
return ConnectedUNIXDatagramSocket(sock)
@classmethod
def current_default_thread_limiter(cls) -> CapacityLimiter:
try:
return _default_thread_limiter.get()
except LookupError:
limiter = CapacityLimiter(40)
_default_thread_limiter.set(limiter)
return limiter
@classmethod
def open_signal_receiver(
cls, *signals: Signals
) -> AbstractContextManager[AsyncIterator[Signals]]:
return _SignalReceiver(signals)
@classmethod
def get_current_task(cls) -> TaskInfo:
return AsyncIOTaskInfo(current_task()) # type: ignore[arg-type]
@classmethod
def get_running_tasks(cls) -> Sequence[TaskInfo]:
return [AsyncIOTaskInfo(task) for task in all_tasks() if not task.done()]
@classmethod
async def wait_all_tasks_blocked(cls) -> None:
await cls.checkpoint()
this_task = current_task()
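        # Poll until every other task is blocked on a future that isn't done
        # yet; as long as some task is runnable, sleep briefly and scan again.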
while True:
for task in all_tasks():
if task is this_task:
continue
waiter = task._fut_waiter # type: ignore[attr-defined]
if waiter is None or waiter.done():
await sleep(0.1)
break
else:
return
@classmethod
def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
return TestRunner(**options)
backend_class = AsyncIOBackend
anyio-4.11.0/src/anyio/_backends/_trio.py
from __future__ import annotations
import array
import math
import os
import socket
import sys
import types
import weakref
from collections.abc import (
AsyncGenerator,
AsyncIterator,
Awaitable,
Callable,
Collection,
Coroutine,
Iterable,
Sequence,
)
from concurrent.futures import Future
from contextlib import AbstractContextManager
from dataclasses import dataclass
from functools import partial
from io import IOBase
from os import PathLike
from signal import Signals
from socket import AddressFamily, SocketKind
from types import TracebackType
from typing import (
IO,
TYPE_CHECKING,
Any,
Generic,
NoReturn,
TypeVar,
cast,
overload,
)
import trio.from_thread
import trio.lowlevel
from outcome import Error, Outcome, Value
from trio.lowlevel import (
current_root_task,
current_task,
notify_closing,
wait_readable,
wait_writable,
)
from trio.socket import SocketType as TrioSocketType
from trio.to_thread import run_sync
from .. import (
CapacityLimiterStatistics,
EventStatistics,
LockStatistics,
RunFinishedError,
TaskInfo,
WouldBlock,
abc,
)
from .._core._eventloop import claim_worker_thread
from .._core._exceptions import (
BrokenResourceError,
BusyResourceError,
ClosedResourceError,
EndOfStream,
)
from .._core._sockets import convert_ipv6_sockaddr
from .._core._streams import create_memory_object_stream
from .._core._synchronization import (
CapacityLimiter as BaseCapacityLimiter,
)
from .._core._synchronization import Event as BaseEvent
from .._core._synchronization import Lock as BaseLock
from .._core._synchronization import (
ResourceGuard,
SemaphoreStatistics,
)
from .._core._synchronization import Semaphore as BaseSemaphore
from .._core._tasks import CancelScope as BaseCancelScope
from ..abc import IPSockAddrType, UDPPacketType, UNIXDatagramPacketType
from ..abc._eventloop import AsyncBackend, StrOrBytesPath
from ..streams.memory import MemoryObjectSendStream
if TYPE_CHECKING:
from _typeshed import FileDescriptorLike
if sys.version_info >= (3, 10):
from typing import ParamSpec
else:
from typing_extensions import ParamSpec
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from exceptiongroup import BaseExceptionGroup
from typing_extensions import TypeVarTuple, Unpack
T = TypeVar("T")
T_Retval = TypeVar("T_Retval")
T_SockAddr = TypeVar("T_SockAddr", str, IPSockAddrType)
PosArgsT = TypeVarTuple("PosArgsT")
P = ParamSpec("P")
#
# Event loop
#
RunVar = trio.lowlevel.RunVar
#
# Timeouts and cancellation
#
class CancelScope(BaseCancelScope):
def __new__(
cls, original: trio.CancelScope | None = None, **kwargs: object
) -> CancelScope:
return object.__new__(cls)
def __init__(self, original: trio.CancelScope | None = None, **kwargs: Any) -> None:
self.__original = original or trio.CancelScope(**kwargs)
def __enter__(self) -> CancelScope:
self.__original.__enter__()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool:
return self.__original.__exit__(exc_type, exc_val, exc_tb)
def cancel(self, reason: str | None = None) -> None:
self.__original.cancel(reason)
@property
def deadline(self) -> float:
return self.__original.deadline
@deadline.setter
def deadline(self, value: float) -> None:
self.__original.deadline = value
@property
def cancel_called(self) -> bool:
return self.__original.cancel_called
@property
def cancelled_caught(self) -> bool:
return self.__original.cancelled_caught
@property
def shield(self) -> bool:
return self.__original.shield
@shield.setter
def shield(self, value: bool) -> None:
self.__original.shield = value
#
# Task groups
#
class TaskGroup(abc.TaskGroup):
def __init__(self) -> None:
self._active = False
self._nursery_manager = trio.open_nursery(strict_exception_groups=True)
self.cancel_scope = None # type: ignore[assignment]
async def __aenter__(self) -> TaskGroup:
self._active = True
self._nursery = await self._nursery_manager.__aenter__()
self.cancel_scope = CancelScope(self._nursery.cancel_scope)
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool:
try:
            # The nursery manager's __aexit__() actually returns bool, but
            # trio.open_nursery() isn't annotated that way, hence the ignore
return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb) # type: ignore[return-value]
except BaseExceptionGroup as exc:
if not exc.split(trio.Cancelled)[1]:
raise trio.Cancelled._create() from exc
raise
finally:
del exc_val, exc_tb
self._active = False
def start_soon(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
*args: Unpack[PosArgsT],
name: object = None,
) -> None:
if not self._active:
raise RuntimeError(
"This task group is not active; no new tasks can be started."
)
self._nursery.start_soon(func, *args, name=name)
async def start(
self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
) -> Any:
if not self._active:
raise RuntimeError(
"This task group is not active; no new tasks can be started."
)
return await self._nursery.start(func, *args, name=name)
#
# Threads
#
class BlockingPortal(abc.BlockingPortal):
def __new__(cls) -> BlockingPortal:
return object.__new__(cls)
def __init__(self) -> None:
super().__init__()
self._token = trio.lowlevel.current_trio_token()
def _spawn_task_from_thread(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
args: tuple[Unpack[PosArgsT]],
kwargs: dict[str, Any],
name: object,
future: Future[T_Retval],
) -> None:
trio.from_thread.run_sync(
partial(self._task_group.start_soon, name=name),
self._call_func,
func,
args,
kwargs,
future,
trio_token=self._token,
)
#
# Subprocesses
#
@dataclass(eq=False)
class ReceiveStreamWrapper(abc.ByteReceiveStream):
_stream: trio.abc.ReceiveStream
async def receive(self, max_bytes: int | None = None) -> bytes:
try:
data = await self._stream.receive_some(max_bytes)
except trio.ClosedResourceError as exc:
raise ClosedResourceError from exc.__cause__
except trio.BrokenResourceError as exc:
raise BrokenResourceError from exc.__cause__
if data:
return bytes(data)
else:
raise EndOfStream
async def aclose(self) -> None:
await self._stream.aclose()
@dataclass(eq=False)
class SendStreamWrapper(abc.ByteSendStream):
_stream: trio.abc.SendStream
async def send(self, item: bytes) -> None:
try:
await self._stream.send_all(item)
except trio.ClosedResourceError as exc:
raise ClosedResourceError from exc.__cause__
except trio.BrokenResourceError as exc:
raise BrokenResourceError from exc.__cause__
async def aclose(self) -> None:
await self._stream.aclose()
@dataclass(eq=False)
class Process(abc.Process):
_process: trio.Process
_stdin: abc.ByteSendStream | None
_stdout: abc.ByteReceiveStream | None
_stderr: abc.ByteReceiveStream | None
async def aclose(self) -> None:
with CancelScope(shield=True):
if self._stdin:
await self._stdin.aclose()
if self._stdout:
await self._stdout.aclose()
if self._stderr:
await self._stderr.aclose()
try:
await self.wait()
except BaseException:
self.kill()
with CancelScope(shield=True):
await self.wait()
raise
async def wait(self) -> int:
return await self._process.wait()
def terminate(self) -> None:
self._process.terminate()
def kill(self) -> None:
self._process.kill()
def send_signal(self, signal: Signals) -> None:
self._process.send_signal(signal)
@property
def pid(self) -> int:
return self._process.pid
@property
def returncode(self) -> int | None:
return self._process.returncode
@property
def stdin(self) -> abc.ByteSendStream | None:
return self._stdin
@property
def stdout(self) -> abc.ByteReceiveStream | None:
return self._stdout
@property
def stderr(self) -> abc.ByteReceiveStream | None:
return self._stderr
class _ProcessPoolShutdownInstrument(trio.abc.Instrument):
def after_run(self) -> None:
super().after_run()
current_default_worker_process_limiter: trio.lowlevel.RunVar = RunVar(
"current_default_worker_process_limiter"
)
async def _shutdown_process_pool(workers: set[abc.Process]) -> None:
try:
await trio.sleep(math.inf)
except trio.Cancelled:
for process in workers:
if process.returncode is None:
process.kill()
with CancelScope(shield=True):
for process in workers:
await process.aclose()
#
# Sockets and networking
#
class _TrioSocketMixin(Generic[T_SockAddr]):
def __init__(self, trio_socket: TrioSocketType) -> None:
self._trio_socket = trio_socket
self._closed = False
def _check_closed(self) -> None:
if self._closed:
raise ClosedResourceError
if self._trio_socket.fileno() < 0:
raise BrokenResourceError
@property
def _raw_socket(self) -> socket.socket:
return self._trio_socket._sock # type: ignore[attr-defined]
async def aclose(self) -> None:
if self._trio_socket.fileno() >= 0:
self._closed = True
self._trio_socket.close()
def _convert_socket_error(self, exc: BaseException) -> NoReturn:
if isinstance(exc, trio.ClosedResourceError):
raise ClosedResourceError from exc
elif self._trio_socket.fileno() < 0 and self._closed:
raise ClosedResourceError from None
elif isinstance(exc, OSError):
raise BrokenResourceError from exc
else:
raise exc
class SocketStream(_TrioSocketMixin, abc.SocketStream):
def __init__(self, trio_socket: TrioSocketType) -> None:
super().__init__(trio_socket)
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
async def receive(self, max_bytes: int = 65536) -> bytes:
with self._receive_guard:
try:
data = await self._trio_socket.recv(max_bytes)
except BaseException as exc:
self._convert_socket_error(exc)
if data:
return data
else:
raise EndOfStream
async def send(self, item: bytes) -> None:
with self._send_guard:
view = memoryview(item)
while view:
try:
bytes_sent = await self._trio_socket.send(view)
except BaseException as exc:
self._convert_socket_error(exc)
view = view[bytes_sent:]
async def send_eof(self) -> None:
self._trio_socket.shutdown(socket.SHUT_WR)
class UNIXSocketStream(SocketStream, abc.UNIXSocketStream):
async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
if not isinstance(msglen, int) or msglen < 0:
raise ValueError("msglen must be a non-negative integer")
if not isinstance(maxfds, int) or maxfds < 1:
raise ValueError("maxfds must be a positive integer")
fds = array.array("i")
await trio.lowlevel.checkpoint()
with self._receive_guard:
while True:
try:
message, ancdata, flags, addr = await self._trio_socket.recvmsg(
msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
)
except BaseException as exc:
self._convert_socket_error(exc)
else:
if not message and not ancdata:
raise EndOfStream
break
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
raise RuntimeError(
f"Received unexpected ancillary data; message = {message!r}, "
f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
)
fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
return message, list(fds)
async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
if not message:
raise ValueError("message must not be empty")
if not fds:
raise ValueError("fds must not be empty")
filenos: list[int] = []
for fd in fds:
if isinstance(fd, int):
filenos.append(fd)
elif isinstance(fd, IOBase):
filenos.append(fd.fileno())
fdarray = array.array("i", filenos)
await trio.lowlevel.checkpoint()
with self._send_guard:
while True:
try:
await self._trio_socket.sendmsg(
[message],
[
(
socket.SOL_SOCKET,
socket.SCM_RIGHTS,
fdarray,
)
],
)
break
except BaseException as exc:
self._convert_socket_error(exc)
class TCPSocketListener(_TrioSocketMixin, abc.SocketListener):
def __init__(self, raw_socket: socket.socket):
super().__init__(trio.socket.from_stdlib_socket(raw_socket))
self._accept_guard = ResourceGuard("accepting connections from")
async def accept(self) -> SocketStream:
with self._accept_guard:
try:
trio_socket, _addr = await self._trio_socket.accept()
except BaseException as exc:
self._convert_socket_error(exc)
trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return SocketStream(trio_socket)
class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener):
def __init__(self, raw_socket: socket.socket):
super().__init__(trio.socket.from_stdlib_socket(raw_socket))
self._accept_guard = ResourceGuard("accepting connections from")
async def accept(self) -> UNIXSocketStream:
with self._accept_guard:
try:
trio_socket, _addr = await self._trio_socket.accept()
except BaseException as exc:
self._convert_socket_error(exc)
return UNIXSocketStream(trio_socket)
class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket):
def __init__(self, trio_socket: TrioSocketType) -> None:
super().__init__(trio_socket)
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
async def receive(self) -> tuple[bytes, IPSockAddrType]:
with self._receive_guard:
try:
data, addr = await self._trio_socket.recvfrom(65536)
return data, convert_ipv6_sockaddr(addr)
except BaseException as exc:
self._convert_socket_error(exc)
async def send(self, item: UDPPacketType) -> None:
with self._send_guard:
try:
await self._trio_socket.sendto(*item)
except BaseException as exc:
self._convert_socket_error(exc)
class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket):
def __init__(self, trio_socket: TrioSocketType) -> None:
super().__init__(trio_socket)
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
async def receive(self) -> bytes:
with self._receive_guard:
try:
return await self._trio_socket.recv(65536)
except BaseException as exc:
self._convert_socket_error(exc)
async def send(self, item: bytes) -> None:
with self._send_guard:
try:
await self._trio_socket.send(item)
except BaseException as exc:
self._convert_socket_error(exc)
class UNIXDatagramSocket(_TrioSocketMixin[str], abc.UNIXDatagramSocket):
def __init__(self, trio_socket: TrioSocketType) -> None:
super().__init__(trio_socket)
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
async def receive(self) -> UNIXDatagramPacketType:
with self._receive_guard:
try:
data, addr = await self._trio_socket.recvfrom(65536)
return data, addr
except BaseException as exc:
self._convert_socket_error(exc)
async def send(self, item: UNIXDatagramPacketType) -> None:
with self._send_guard:
try:
await self._trio_socket.sendto(*item)
except BaseException as exc:
self._convert_socket_error(exc)
class ConnectedUNIXDatagramSocket(
_TrioSocketMixin[str], abc.ConnectedUNIXDatagramSocket
):
def __init__(self, trio_socket: TrioSocketType) -> None:
super().__init__(trio_socket)
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
async def receive(self) -> bytes:
with self._receive_guard:
try:
return await self._trio_socket.recv(65536)
except BaseException as exc:
self._convert_socket_error(exc)
async def send(self, item: bytes) -> None:
with self._send_guard:
try:
await self._trio_socket.send(item)
except BaseException as exc:
self._convert_socket_error(exc)
#
# Synchronization
#
class Event(BaseEvent):
def __new__(cls) -> Event:
return object.__new__(cls)
def __init__(self) -> None:
self.__original = trio.Event()
def is_set(self) -> bool:
return self.__original.is_set()
async def wait(self) -> None:
return await self.__original.wait()
def statistics(self) -> EventStatistics:
orig_statistics = self.__original.statistics()
return EventStatistics(tasks_waiting=orig_statistics.tasks_waiting)
def set(self) -> None:
self.__original.set()
class Lock(BaseLock):
def __new__(cls, *, fast_acquire: bool = False) -> Lock:
return object.__new__(cls)
def __init__(self, *, fast_acquire: bool = False) -> None:
self._fast_acquire = fast_acquire
self.__original = trio.Lock()
@staticmethod
def _convert_runtime_error_msg(exc: RuntimeError) -> None:
if exc.args == ("attempt to re-acquire an already held Lock",):
exc.args = ("Attempted to acquire an already held Lock",)
async def acquire(self) -> None:
if not self._fast_acquire:
try:
await self.__original.acquire()
except RuntimeError as exc:
self._convert_runtime_error_msg(exc)
raise
return
# This is the "fast path" where we don't let other tasks run
await trio.lowlevel.checkpoint_if_cancelled()
try:
self.__original.acquire_nowait()
except trio.WouldBlock:
await self.__original._lot.park()
except RuntimeError as exc:
self._convert_runtime_error_msg(exc)
raise
def acquire_nowait(self) -> None:
try:
self.__original.acquire_nowait()
except trio.WouldBlock:
raise WouldBlock from None
except RuntimeError as exc:
self._convert_runtime_error_msg(exc)
raise
def locked(self) -> bool:
return self.__original.locked()
def release(self) -> None:
self.__original.release()
def statistics(self) -> LockStatistics:
orig_statistics = self.__original.statistics()
owner = TrioTaskInfo(orig_statistics.owner) if orig_statistics.owner else None
return LockStatistics(
orig_statistics.locked, owner, orig_statistics.tasks_waiting
)
class Semaphore(BaseSemaphore):
def __new__(
cls,
initial_value: int,
*,
max_value: int | None = None,
fast_acquire: bool = False,
) -> Semaphore:
return object.__new__(cls)
def __init__(
self,
initial_value: int,
*,
max_value: int | None = None,
fast_acquire: bool = False,
) -> None:
super().__init__(initial_value, max_value=max_value, fast_acquire=fast_acquire)
self.__original = trio.Semaphore(initial_value, max_value=max_value)
async def acquire(self) -> None:
if not self._fast_acquire:
await self.__original.acquire()
return
# This is the "fast path" where we don't let other tasks run
await trio.lowlevel.checkpoint_if_cancelled()
try:
self.__original.acquire_nowait()
except trio.WouldBlock:
await self.__original._lot.park()
def acquire_nowait(self) -> None:
try:
self.__original.acquire_nowait()
except trio.WouldBlock:
raise WouldBlock from None
@property
def max_value(self) -> int | None:
return self.__original.max_value
@property
def value(self) -> int:
return self.__original.value
def release(self) -> None:
self.__original.release()
def statistics(self) -> SemaphoreStatistics:
orig_statistics = self.__original.statistics()
return SemaphoreStatistics(orig_statistics.tasks_waiting)
class CapacityLimiter(BaseCapacityLimiter):
def __new__(
cls,
total_tokens: float | None = None,
*,
original: trio.CapacityLimiter | None = None,
) -> CapacityLimiter:
return object.__new__(cls)
def __init__(
self,
total_tokens: float | None = None,
*,
original: trio.CapacityLimiter | None = None,
) -> None:
if original is not None:
self.__original = original
else:
assert total_tokens is not None
self.__original = trio.CapacityLimiter(total_tokens)
async def __aenter__(self) -> None:
return await self.__original.__aenter__()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
await self.__original.__aexit__(exc_type, exc_val, exc_tb)
@property
def total_tokens(self) -> float:
return self.__original.total_tokens
@total_tokens.setter
def total_tokens(self, value: float) -> None:
self.__original.total_tokens = value
@property
def borrowed_tokens(self) -> int:
return self.__original.borrowed_tokens
@property
def available_tokens(self) -> float:
return self.__original.available_tokens
def acquire_nowait(self) -> None:
self.__original.acquire_nowait()
def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
self.__original.acquire_on_behalf_of_nowait(borrower)
async def acquire(self) -> None:
await self.__original.acquire()
async def acquire_on_behalf_of(self, borrower: object) -> None:
await self.__original.acquire_on_behalf_of(borrower)
def release(self) -> None:
return self.__original.release()
def release_on_behalf_of(self, borrower: object) -> None:
return self.__original.release_on_behalf_of(borrower)
def statistics(self) -> CapacityLimiterStatistics:
orig = self.__original.statistics()
return CapacityLimiterStatistics(
borrowed_tokens=orig.borrowed_tokens,
total_tokens=orig.total_tokens,
borrowers=tuple(orig.borrowers),
tasks_waiting=orig.tasks_waiting,
)
_capacity_limiter_wrapper: trio.lowlevel.RunVar = RunVar("_capacity_limiter_wrapper")
#
# Signal handling
#
class _SignalReceiver:
_iterator: AsyncIterator[int]
def __init__(self, signals: tuple[Signals, ...]):
self._signals = signals
def __enter__(self) -> _SignalReceiver:
self._cm = trio.open_signal_receiver(*self._signals)
self._iterator = self._cm.__enter__()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
return self._cm.__exit__(exc_type, exc_val, exc_tb)
def __aiter__(self) -> _SignalReceiver:
return self
async def __anext__(self) -> Signals:
signum = await self._iterator.__anext__()
return Signals(signum)
#
# Testing and debugging
#
class TestRunner(abc.TestRunner):
def __init__(self, **options: Any) -> None:
from queue import Queue
self._call_queue: Queue[Callable[[], object]] = Queue()
self._send_stream: MemoryObjectSendStream | None = None
self._options = options
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: types.TracebackType | None,
) -> None:
if self._send_stream:
self._send_stream.close()
while self._send_stream is not None:
self._call_queue.get()()
async def _run_tests_and_fixtures(self) -> None:
self._send_stream, receive_stream = create_memory_object_stream(1)
with receive_stream:
async for coro, outcome_holder in receive_stream:
try:
retval = await coro
except BaseException as exc:
outcome_holder.append(Error(exc))
else:
outcome_holder.append(Value(retval))
def _main_task_finished(self, outcome: object) -> None:
self._send_stream = None
def _call_in_runner_task(
self,
func: Callable[P, Awaitable[T_Retval]],
*args: P.args,
**kwargs: P.kwargs,
) -> T_Retval:
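        # Trio runs as a guest in this thread: start_guest_run() delivers the
        # event loop's callbacks through self._call_queue, which is drained
        # here until the submitted call produces an outcome.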
if self._send_stream is None:
trio.lowlevel.start_guest_run(
self._run_tests_and_fixtures,
run_sync_soon_threadsafe=self._call_queue.put,
done_callback=self._main_task_finished,
**self._options,
)
while self._send_stream is None:
self._call_queue.get()()
outcome_holder: list[Outcome] = []
self._send_stream.send_nowait((func(*args, **kwargs), outcome_holder))
while not outcome_holder:
self._call_queue.get()()
return outcome_holder[0].unwrap()
def run_asyncgen_fixture(
self,
fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
kwargs: dict[str, Any],
) -> Iterable[T_Retval]:
asyncgen = fixture_func(**kwargs)
fixturevalue: T_Retval = self._call_in_runner_task(asyncgen.asend, None)
yield fixturevalue
try:
self._call_in_runner_task(asyncgen.asend, None)
except StopAsyncIteration:
pass
else:
self._call_in_runner_task(asyncgen.aclose)
raise RuntimeError("Async generator fixture did not stop")
def run_fixture(
self,
fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
kwargs: dict[str, Any],
) -> T_Retval:
return self._call_in_runner_task(fixture_func, **kwargs)
def run_test(
self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
) -> None:
self._call_in_runner_task(test_func, **kwargs)
class TrioTaskInfo(TaskInfo):
def __init__(self, task: trio.lowlevel.Task):
parent_id = None
if task.parent_nursery and task.parent_nursery.parent_task:
parent_id = id(task.parent_nursery.parent_task)
super().__init__(id(task), parent_id, task.name, task.coro)
self._task = weakref.proxy(task)
def has_pending_cancellation(self) -> bool:
try:
return self._task._cancel_status.effectively_cancelled
except ReferenceError:
# If the task is no longer around, it surely doesn't have a cancellation
# pending
return False
class TrioBackend(AsyncBackend):
@classmethod
def run(
cls,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
args: tuple[Unpack[PosArgsT]],
kwargs: dict[str, Any],
options: dict[str, Any],
) -> T_Retval:
return trio.run(func, *args)
@classmethod
def current_token(cls) -> object:
return trio.lowlevel.current_trio_token()
@classmethod
def current_time(cls) -> float:
return trio.current_time()
@classmethod
def cancelled_exception_class(cls) -> type[BaseException]:
return trio.Cancelled
@classmethod
async def checkpoint(cls) -> None:
await trio.lowlevel.checkpoint()
@classmethod
async def checkpoint_if_cancelled(cls) -> None:
await trio.lowlevel.checkpoint_if_cancelled()
@classmethod
async def cancel_shielded_checkpoint(cls) -> None:
await trio.lowlevel.cancel_shielded_checkpoint()
@classmethod
async def sleep(cls, delay: float) -> None:
await trio.sleep(delay)
@classmethod
def create_cancel_scope(
cls, *, deadline: float = math.inf, shield: bool = False
) -> abc.CancelScope:
return CancelScope(deadline=deadline, shield=shield)
@classmethod
def current_effective_deadline(cls) -> float:
return trio.current_effective_deadline()
@classmethod
def create_task_group(cls) -> abc.TaskGroup:
return TaskGroup()
@classmethod
def create_event(cls) -> abc.Event:
return Event()
@classmethod
def create_lock(cls, *, fast_acquire: bool) -> Lock:
return Lock(fast_acquire=fast_acquire)
@classmethod
def create_semaphore(
cls,
initial_value: int,
*,
max_value: int | None = None,
fast_acquire: bool = False,
) -> abc.Semaphore:
return Semaphore(initial_value, max_value=max_value, fast_acquire=fast_acquire)
@classmethod
def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
return CapacityLimiter(total_tokens)
@classmethod
async def run_sync_in_worker_thread(
cls,
func: Callable[[Unpack[PosArgsT]], T_Retval],
args: tuple[Unpack[PosArgsT]],
abandon_on_cancel: bool = False,
limiter: abc.CapacityLimiter | None = None,
) -> T_Retval:
def wrapper() -> T_Retval:
with claim_worker_thread(TrioBackend, token):
return func(*args)
token = TrioBackend.current_token()
return await run_sync(
wrapper,
abandon_on_cancel=abandon_on_cancel,
limiter=cast(trio.CapacityLimiter, limiter),
)
@classmethod
def check_cancelled(cls) -> None:
trio.from_thread.check_cancelled()
@classmethod
def run_async_from_thread(
cls,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
args: tuple[Unpack[PosArgsT]],
token: object,
) -> T_Retval:
trio_token = cast("trio.lowlevel.TrioToken | None", token)
try:
return trio.from_thread.run(func, *args, trio_token=trio_token)
except trio.RunFinishedError:
raise RunFinishedError from None
@classmethod
def run_sync_from_thread(
cls,
func: Callable[[Unpack[PosArgsT]], T_Retval],
args: tuple[Unpack[PosArgsT]],
token: object,
) -> T_Retval:
trio_token = cast("trio.lowlevel.TrioToken | None", token)
try:
return trio.from_thread.run_sync(func, *args, trio_token=trio_token)
except trio.RunFinishedError:
raise RunFinishedError from None
@classmethod
def create_blocking_portal(cls) -> abc.BlockingPortal:
return BlockingPortal()
@classmethod
async def open_process(
cls,
command: StrOrBytesPath | Sequence[StrOrBytesPath],
*,
stdin: int | IO[Any] | None,
stdout: int | IO[Any] | None,
stderr: int | IO[Any] | None,
**kwargs: Any,
) -> Process:
def convert_item(item: StrOrBytesPath) -> str:
str_or_bytes = os.fspath(item)
if isinstance(str_or_bytes, str):
return str_or_bytes
else:
return os.fsdecode(str_or_bytes)
if isinstance(command, (str, bytes, PathLike)):
process = await trio.lowlevel.open_process(
convert_item(command),
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=True,
**kwargs,
)
else:
process = await trio.lowlevel.open_process(
[convert_item(item) for item in command],
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=False,
**kwargs,
)
stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None
stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None
stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None
return Process(process, stdin_stream, stdout_stream, stderr_stream)
@classmethod
def setup_process_pool_exit_at_shutdown(cls, workers: set[abc.Process]) -> None:
trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers)
@classmethod
async def connect_tcp(
cls, host: str, port: int, local_address: IPSockAddrType | None = None
) -> SocketStream:
family = socket.AF_INET6 if ":" in host else socket.AF_INET
trio_socket = trio.socket.socket(family)
trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if local_address:
await trio_socket.bind(local_address)
try:
await trio_socket.connect((host, port))
except BaseException:
trio_socket.close()
raise
return SocketStream(trio_socket)
@classmethod
async def connect_unix(cls, path: str | bytes) -> abc.UNIXSocketStream:
trio_socket = trio.socket.socket(socket.AF_UNIX)
try:
await trio_socket.connect(path)
except BaseException:
trio_socket.close()
raise
return UNIXSocketStream(trio_socket)
@classmethod
def create_tcp_listener(cls, sock: socket.socket) -> abc.SocketListener:
return TCPSocketListener(sock)
@classmethod
def create_unix_listener(cls, sock: socket.socket) -> abc.SocketListener:
return UNIXSocketListener(sock)
@classmethod
async def create_udp_socket(
cls,
family: socket.AddressFamily,
local_address: IPSockAddrType | None,
remote_address: IPSockAddrType | None,
reuse_port: bool,
) -> UDPSocket | ConnectedUDPSocket:
trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM)
if reuse_port:
trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if local_address:
await trio_socket.bind(local_address)
if remote_address:
await trio_socket.connect(remote_address)
return ConnectedUDPSocket(trio_socket)
else:
return UDPSocket(trio_socket)
@classmethod
@overload
async def create_unix_datagram_socket(
cls, raw_socket: socket.socket, remote_path: None
) -> abc.UNIXDatagramSocket: ...
@classmethod
@overload
async def create_unix_datagram_socket(
cls, raw_socket: socket.socket, remote_path: str | bytes
) -> abc.ConnectedUNIXDatagramSocket: ...
@classmethod
async def create_unix_datagram_socket(
cls, raw_socket: socket.socket, remote_path: str | bytes | None
) -> abc.UNIXDatagramSocket | abc.ConnectedUNIXDatagramSocket:
trio_socket = trio.socket.from_stdlib_socket(raw_socket)
if remote_path:
await trio_socket.connect(remote_path)
return ConnectedUNIXDatagramSocket(trio_socket)
else:
return UNIXDatagramSocket(trio_socket)
@classmethod
async def getaddrinfo(
cls,
host: bytes | str | None,
port: str | int | None,
*,
family: int | AddressFamily = 0,
type: int | SocketKind = 0,
proto: int = 0,
flags: int = 0,
) -> Sequence[
tuple[
AddressFamily,
SocketKind,
int,
str,
tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes],
]
]:
return await trio.socket.getaddrinfo(host, port, family, type, proto, flags)
@classmethod
async def getnameinfo(
cls, sockaddr: IPSockAddrType, flags: int = 0
) -> tuple[str, str]:
return await trio.socket.getnameinfo(sockaddr, flags)
@classmethod
async def wait_readable(cls, obj: FileDescriptorLike) -> None:
try:
await wait_readable(obj)
except trio.ClosedResourceError as exc:
raise ClosedResourceError().with_traceback(exc.__traceback__) from None
except trio.BusyResourceError:
raise BusyResourceError("reading from") from None
@classmethod
async def wait_writable(cls, obj: FileDescriptorLike) -> None:
try:
await wait_writable(obj)
except trio.ClosedResourceError as exc:
raise ClosedResourceError().with_traceback(exc.__traceback__) from None
except trio.BusyResourceError:
raise BusyResourceError("writing to") from None
@classmethod
def notify_closing(cls, obj: FileDescriptorLike) -> None:
notify_closing(obj)
@classmethod
async def wrap_listener_socket(cls, sock: socket.socket) -> abc.SocketListener:
return TCPSocketListener(sock)
@classmethod
async def wrap_stream_socket(cls, sock: socket.socket) -> SocketStream:
trio_sock = trio.socket.from_stdlib_socket(sock)
return SocketStream(trio_sock)
@classmethod
async def wrap_unix_stream_socket(cls, sock: socket.socket) -> UNIXSocketStream:
trio_sock = trio.socket.from_stdlib_socket(sock)
return UNIXSocketStream(trio_sock)
@classmethod
async def wrap_udp_socket(cls, sock: socket.socket) -> UDPSocket:
trio_sock = trio.socket.from_stdlib_socket(sock)
return UDPSocket(trio_sock)
@classmethod
async def wrap_connected_udp_socket(cls, sock: socket.socket) -> ConnectedUDPSocket:
trio_sock = trio.socket.from_stdlib_socket(sock)
return ConnectedUDPSocket(trio_sock)
@classmethod
async def wrap_unix_datagram_socket(cls, sock: socket.socket) -> UNIXDatagramSocket:
trio_sock = trio.socket.from_stdlib_socket(sock)
return UNIXDatagramSocket(trio_sock)
@classmethod
async def wrap_connected_unix_datagram_socket(
cls, sock: socket.socket
) -> ConnectedUNIXDatagramSocket:
trio_sock = trio.socket.from_stdlib_socket(sock)
return ConnectedUNIXDatagramSocket(trio_sock)
@classmethod
def current_default_thread_limiter(cls) -> CapacityLimiter:
try:
return _capacity_limiter_wrapper.get()
except LookupError:
limiter = CapacityLimiter(
original=trio.to_thread.current_default_thread_limiter()
)
_capacity_limiter_wrapper.set(limiter)
return limiter
@classmethod
def open_signal_receiver(
cls, *signals: Signals
) -> AbstractContextManager[AsyncIterator[Signals]]:
return _SignalReceiver(signals)
@classmethod
def get_current_task(cls) -> TaskInfo:
task = current_task()
return TrioTaskInfo(task)
@classmethod
def get_running_tasks(cls) -> Sequence[TaskInfo]:
root_task = current_root_task()
assert root_task
task_infos = [TrioTaskInfo(root_task)]
nurseries = root_task.child_nurseries
while nurseries:
new_nurseries: list[trio.Nursery] = []
for nursery in nurseries:
for task in nursery.child_tasks:
task_infos.append(TrioTaskInfo(task))
new_nurseries.extend(task.child_nurseries)
nurseries = new_nurseries
return task_infos
@classmethod
async def wait_all_tasks_blocked(cls) -> None:
from trio.testing import wait_all_tasks_blocked
await wait_all_tasks_blocked()
@classmethod
def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
return TestRunner(**options)
backend_class = TrioBackend
anyio-4.11.0/src/anyio/_core/__init__.py
anyio-4.11.0/src/anyio/_core/_asyncio_selector_thread.py
from __future__ import annotations
import asyncio
import socket
import threading
from collections.abc import Callable
from selectors import EVENT_READ, EVENT_WRITE, DefaultSelector
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from _typeshed import FileDescriptorLike
_selector_lock = threading.Lock()
_selector: Selector | None = None
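# A selector run in a dedicated thread, used as a fallback for event loops
# (such as the Windows proactor event loop) that don't implement
# add_reader()/add_writer().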
class Selector:
def __init__(self) -> None:
self._thread = threading.Thread(target=self.run, name="AnyIO socket selector")
self._selector = DefaultSelector()
self._send, self._receive = socket.socketpair()
self._send.setblocking(False)
self._receive.setblocking(False)
# This somewhat reduces the amount of memory wasted queueing up data
# for wakeups. With these settings, maximum number of 1-byte sends
# before getting BlockingIOError:
# Linux 4.8: 6
# macOS (darwin 15.5): 1
# Windows 10: 525347
        # Windows, you're weird. (And on Windows, setting SO_SNDBUF to 0 makes send
# blocking, even on non-blocking sockets, so don't do that.)
self._receive.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)
self._send.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
# On Windows this is a TCP socket so this might matter. On other
        # platforms this fails because AF_UNIX sockets aren't actually TCP.
try:
self._send.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except OSError:
pass
self._selector.register(self._receive, EVENT_READ)
self._closed = False
def start(self) -> None:
self._thread.start()
threading._register_atexit(self._stop) # type: ignore[attr-defined]
def _stop(self) -> None:
global _selector
self._closed = True
self._notify_self()
self._send.close()
self._thread.join()
self._selector.unregister(self._receive)
self._receive.close()
self._selector.close()
_selector = None
assert not self._selector.get_map(), (
"selector still has registered file descriptors after shutdown"
)
def _notify_self(self) -> None:
try:
self._send.send(b"\x00")
except BlockingIOError:
pass
def add_reader(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:
loop = asyncio.get_running_loop()
try:
key = self._selector.get_key(fd)
except KeyError:
self._selector.register(fd, EVENT_READ, {EVENT_READ: (loop, callback)})
else:
if EVENT_READ in key.data:
raise ValueError(
"this file descriptor is already registered for reading"
)
key.data[EVENT_READ] = loop, callback
self._selector.modify(fd, key.events | EVENT_READ, key.data)
self._notify_self()
def add_writer(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:
loop = asyncio.get_running_loop()
try:
key = self._selector.get_key(fd)
except KeyError:
self._selector.register(fd, EVENT_WRITE, {EVENT_WRITE: (loop, callback)})
else:
if EVENT_WRITE in key.data:
raise ValueError(
"this file descriptor is already registered for writing"
)
key.data[EVENT_WRITE] = loop, callback
self._selector.modify(fd, key.events | EVENT_WRITE, key.data)
self._notify_self()
def remove_reader(self, fd: FileDescriptorLike) -> bool:
try:
key = self._selector.get_key(fd)
except KeyError:
return False
if new_events := key.events ^ EVENT_READ:
del key.data[EVENT_READ]
self._selector.modify(fd, new_events, key.data)
else:
self._selector.unregister(fd)
return True
def remove_writer(self, fd: FileDescriptorLike) -> bool:
try:
key = self._selector.get_key(fd)
except KeyError:
return False
if new_events := key.events ^ EVENT_WRITE:
del key.data[EVENT_WRITE]
self._selector.modify(fd, new_events, key.data)
else:
self._selector.unregister(fd)
return True
def run(self) -> None:
while not self._closed:
for key, events in self._selector.select():
if key.fileobj is self._receive:
try:
while self._receive.recv(4096):
pass
except BlockingIOError:
pass
continue
if events & EVENT_READ:
loop, callback = key.data[EVENT_READ]
self.remove_reader(key.fd)
try:
loop.call_soon_threadsafe(callback)
except RuntimeError:
pass # the loop was already closed
if events & EVENT_WRITE:
loop, callback = key.data[EVENT_WRITE]
self.remove_writer(key.fd)
try:
loop.call_soon_threadsafe(callback)
except RuntimeError:
pass # the loop was already closed
def get_selector() -> Selector:
global _selector
with _selector_lock:
if _selector is None:
_selector = Selector()
_selector.start()
return _selector
anyio-4.11.0/src/anyio/_core/_contextmanagers.py
from __future__ import annotations
from abc import abstractmethod
from contextlib import AbstractAsyncContextManager, AbstractContextManager
from inspect import isasyncgen, iscoroutine, isgenerator
from types import TracebackType
from typing import Protocol, TypeVar, cast, final
_T_co = TypeVar("_T_co", covariant=True)
_ExitT_co = TypeVar("_ExitT_co", covariant=True, bound="bool | None")
class _SupportsCtxMgr(Protocol[_T_co, _ExitT_co]):
def __contextmanager__(self) -> AbstractContextManager[_T_co, _ExitT_co]: ...
class _SupportsAsyncCtxMgr(Protocol[_T_co, _ExitT_co]):
def __asynccontextmanager__(
self,
) -> AbstractAsyncContextManager[_T_co, _ExitT_co]: ...
class ContextManagerMixin:
"""
Mixin class providing context manager functionality via a generator-based
implementation.
This class allows you to implement a context manager via :meth:`__contextmanager__`
    which should return a context manager. The mechanics are meant to mirror
    those of :func:`@contextmanager <contextlib.contextmanager>`.
    .. note:: Classes using this mix-in are not reentrant as context managers,
        meaning that once an instance has been entered, it can't be re-entered
        before first being exited.
.. seealso:: :doc:`contextmanagers`
"""
__cm: AbstractContextManager[object, bool | None] | None = None
@final
def __enter__(self: _SupportsCtxMgr[_T_co, bool | None]) -> _T_co:
# Needed for mypy to assume self still has the __cm member
assert isinstance(self, ContextManagerMixin)
if self.__cm is not None:
raise RuntimeError(
f"this {self.__class__.__qualname__} has already been entered"
)
cm = self.__contextmanager__()
if not isinstance(cm, AbstractContextManager):
if isgenerator(cm):
raise TypeError(
"__contextmanager__() returned a generator object instead of "
"a context manager. Did you forget to add the @contextmanager "
"decorator?"
)
raise TypeError(
f"__contextmanager__() did not return a context manager object, "
f"but {cm.__class__!r}"
)
if cm is self:
raise TypeError(
f"{self.__class__.__qualname__}.__contextmanager__() returned "
f"self. Did you forget to add the @contextmanager decorator and a "
f"'yield' statement?"
)
value = cm.__enter__()
self.__cm = cm
return value
@final
def __exit__(
self: _SupportsCtxMgr[object, _ExitT_co],
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> _ExitT_co:
# Needed for mypy to assume self still has the __cm member
assert isinstance(self, ContextManagerMixin)
if self.__cm is None:
raise RuntimeError(
f"this {self.__class__.__qualname__} has not been entered yet"
)
# Prevent circular references
cm = self.__cm
del self.__cm
return cast(_ExitT_co, cm.__exit__(exc_type, exc_val, exc_tb))
@abstractmethod
def __contextmanager__(self) -> AbstractContextManager[object, bool | None]:
"""
Implement your context manager logic here.
This method **must** be decorated with
        :func:`@contextmanager <contextlib.contextmanager>`.
.. note:: Remember that the ``yield`` will raise any exception raised in the
enclosed context block, so use a ``finally:`` block to clean up resources!
:return: a context manager object
"""
class AsyncContextManagerMixin:
"""
Mixin class providing async context manager functionality via a generator-based
implementation.
    This class allows you to implement an async context manager via
    :meth:`__asynccontextmanager__`. The mechanics are meant to mirror those of
    :func:`@asynccontextmanager <contextlib.asynccontextmanager>`.
.. note:: Classes using this mix-in are not reentrant as context managers, meaning
that once an instance has been entered, it can't be re-entered until it has been
exited first.
.. seealso:: :doc:`contextmanagers`
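Example (a minimal sketch; ``Connection`` and its ``open()``/``close()`` methods
are illustrative)::

    from contextlib import asynccontextmanager

    class Connection(AsyncContextManagerMixin):
        @asynccontextmanager
        async def __asynccontextmanager__(self):
            await self.open()  # illustrative setup
            try:
                yield self
            finally:
                await self.close()  # runs even if the block raised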
"""
__cm: AbstractAsyncContextManager[object, bool | None] | None = None
@final
async def __aenter__(self: _SupportsAsyncCtxMgr[_T_co, bool | None]) -> _T_co:
# Needed for mypy to assume self still has the __cm member
assert isinstance(self, AsyncContextManagerMixin)
if self.__cm is not None:
raise RuntimeError(
f"this {self.__class__.__qualname__} has already been entered"
)
cm = self.__asynccontextmanager__()
if not isinstance(cm, AbstractAsyncContextManager):
if isasyncgen(cm):
raise TypeError(
"__asynccontextmanager__() returned an async generator instead of "
"an async context manager. Did you forget to add the "
"@asynccontextmanager decorator?"
)
elif iscoroutine(cm):
cm.close()
raise TypeError(
"__asynccontextmanager__() returned a coroutine object instead of "
"an async context manager. Did you forget to add the "
"@asynccontextmanager decorator and a 'yield' statement?"
)
raise TypeError(
f"__asynccontextmanager__() did not return an async context manager, "
f"but {cm.__class__!r}"
)
if cm is self:
raise TypeError(
f"{self.__class__.__qualname__}.__asynccontextmanager__() returned "
f"self. Did you forget to add the @asynccontextmanager decorator and a "
f"'yield' statement?"
)
value = await cm.__aenter__()
self.__cm = cm
return value
@final
async def __aexit__(
self: _SupportsAsyncCtxMgr[object, _ExitT_co],
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> _ExitT_co:
assert isinstance(self, AsyncContextManagerMixin)
if self.__cm is None:
raise RuntimeError(
f"this {self.__class__.__qualname__} has not been entered yet"
)
# Prevent circular references
cm = self.__cm
del self.__cm
return cast(_ExitT_co, await cm.__aexit__(exc_type, exc_val, exc_tb))
@abstractmethod
def __asynccontextmanager__(
self,
) -> AbstractAsyncContextManager[object, bool | None]:
"""
Implement your async context manager logic here.
This method **must** be decorated with
:func:`@asynccontextmanager <contextlib.asynccontextmanager>`.
.. note:: Remember that the ``yield`` will raise any exception raised in the
enclosed context block, so use a ``finally:`` block to clean up resources!
:return: an async context manager object
"""
anyio-4.11.0/src/anyio/_core/_eventloop.py 0000664 0000000 0000000 00000011073 15064462627 0020453 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import math
import sys
import threading
from collections.abc import Awaitable, Callable, Generator
from contextlib import contextmanager
from importlib import import_module
from typing import TYPE_CHECKING, Any, TypeVar
import sniffio
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
if TYPE_CHECKING:
from ..abc import AsyncBackend
# This must be updated when new backends are introduced
BACKENDS = "asyncio", "trio"
T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
threadlocals = threading.local()
loaded_backends: dict[str, type[AsyncBackend]] = {}
def run(
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
*args: Unpack[PosArgsT],
backend: str = "asyncio",
backend_options: dict[str, Any] | None = None,
) -> T_Retval:
"""
Run the given coroutine function in an asynchronous event loop.
The current thread must not already be running an event loop.
:param func: a coroutine function
:param args: positional arguments to ``func``
:param backend: name of the asynchronous event loop implementation – currently
either ``asyncio`` or ``trio``
:param backend_options: keyword arguments to call the backend ``run()``
implementation with (documented :ref:`here `)
:return: the return value of the coroutine function
:raises RuntimeError: if an asynchronous event loop is already running in this
thread
:raises LookupError: if the named backend is not found
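Example (a minimal sketch)::

    import anyio

    async def main() -> None:
        await anyio.sleep(1)

    anyio.run(main)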
"""
try:
asynclib_name = sniffio.current_async_library()
except sniffio.AsyncLibraryNotFoundError:
pass
else:
raise RuntimeError(f"Already running {asynclib_name} in this thread")
try:
async_backend = get_async_backend(backend)
except ImportError as exc:
raise LookupError(f"No such backend: {backend}") from exc
token = None
if sniffio.current_async_library_cvar.get(None) is None:
# Since we're in control of the event loop, we can cache the name of the async
# library
token = sniffio.current_async_library_cvar.set(backend)
try:
backend_options = backend_options or {}
return async_backend.run(func, args, {}, backend_options)
finally:
if token:
sniffio.current_async_library_cvar.reset(token)
async def sleep(delay: float) -> None:
"""
Pause the current task for the specified duration.
:param delay: the duration, in seconds
"""
return await get_async_backend().sleep(delay)
async def sleep_forever() -> None:
"""
Pause the current task until it's cancelled.
This is a shortcut for ``sleep(math.inf)``.
.. versionadded:: 3.1
"""
await sleep(math.inf)
async def sleep_until(deadline: float) -> None:
"""
Pause the current task until the given time.
:param deadline: the absolute time to wake up at (according to the internal
monotonic clock of the event loop)
.. versionadded:: 3.1
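Example (wake up roughly two seconds from now)::

    await sleep_until(current_time() + 2)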
"""
now = current_time()
await sleep(max(deadline - now, 0))
def current_time() -> float:
"""
Return the current value of the event loop's internal clock.
:return: the clock value (seconds)
"""
return get_async_backend().current_time()
def get_all_backends() -> tuple[str, ...]:
"""Return a tuple of the names of all built-in backends."""
return BACKENDS
def get_cancelled_exc_class() -> type[BaseException]:
"""Return the current async library's cancellation exception class."""
return get_async_backend().cancelled_exception_class()
#
# Private API
#
@contextmanager
def claim_worker_thread(
backend_class: type[AsyncBackend], token: object
) -> Generator[Any, None, None]:
from ..lowlevel import EventLoopToken
threadlocals.current_token = EventLoopToken(backend_class, token)
try:
yield
finally:
del threadlocals.current_token
def get_async_backend(asynclib_name: str | None = None) -> type[AsyncBackend]:
if asynclib_name is None:
asynclib_name = sniffio.current_async_library()
# We use our own dict instead of sys.modules to get the already imported back-end
# class because the appropriate modules in sys.modules could potentially be only
# partially initialized
try:
return loaded_backends[asynclib_name]
except KeyError:
module = import_module(f"anyio._backends._{asynclib_name}")
loaded_backends[asynclib_name] = module.backend_class
return module.backend_class
anyio-4.11.0/src/anyio/_core/_exceptions.py 0000664 0000000 0000000 00000010241 15064462627 0020615 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import sys
from collections.abc import Generator
from textwrap import dedent
from typing import Any
if sys.version_info < (3, 11):
from exceptiongroup import BaseExceptionGroup
class BrokenResourceError(Exception):
"""
Raised when trying to use a resource that has been rendered unusable due to external
causes (e.g. a send stream whose peer has disconnected).
"""
class BrokenWorkerProcess(Exception):
"""
Raised by :meth:`~anyio.to_process.run_sync` if the worker process terminates abruptly or
otherwise misbehaves.
"""
class BrokenWorkerInterpreter(Exception):
"""
Raised by :meth:`~anyio.to_interpreter.run_sync` if an unexpected exception is
raised in the subinterpreter.
"""
def __init__(self, excinfo: Any):
# This was adapted from concurrent.futures.interpreter.ExecutionFailed
msg = excinfo.formatted
if not msg:
if excinfo.type and excinfo.msg:
msg = f"{excinfo.type.__name__}: {excinfo.msg}"
else:
msg = excinfo.type.__name__ or excinfo.msg
super().__init__(msg)
self.excinfo = excinfo
def __str__(self) -> str:
try:
formatted = self.excinfo.errdisplay
except Exception:
return super().__str__()
else:
return dedent(
f"""
{super().__str__()}
Uncaught in the interpreter:
{formatted}
""".strip()
)
class BusyResourceError(Exception):
"""
Raised when two tasks are trying to read from or write to the same resource
concurrently.
"""
def __init__(self, action: str):
super().__init__(f"Another task is already {action} this resource")
class ClosedResourceError(Exception):
"""Raised when trying to use a resource that has been closed."""
class ConnectionFailed(OSError):
"""
Raised when a connection attempt fails.
.. note:: This class inherits from :exc:`OSError` for backwards compatibility.
"""
def iterate_exceptions(
exception: BaseException,
) -> Generator[BaseException, None, None]:
if isinstance(exception, BaseExceptionGroup):
for exc in exception.exceptions:
yield from iterate_exceptions(exc)
else:
yield exception
class DelimiterNotFound(Exception):
"""
Raised during
:meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
maximum number of bytes has been read without the delimiter being found.
"""
def __init__(self, max_bytes: int) -> None:
super().__init__(
f"The delimiter was not found among the first {max_bytes} bytes"
)
class EndOfStream(Exception):
"""
Raised when trying to read from a stream that has been closed from the other end.
"""
class IncompleteRead(Exception):
"""
Raised during
:meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or
:meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
connection is closed before the requested amount of bytes has been read.
"""
def __init__(self) -> None:
super().__init__(
"The stream was closed before the read operation could be completed"
)
class TypedAttributeLookupError(LookupError):
"""
Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute
is not found and no default value has been given.
"""
class WouldBlock(Exception):
"""Raised by ``X_nowait`` functions if ``X()`` would block."""
class NoEventLoopError(RuntimeError):
"""
Raised by :func:`.from_thread.run` and :func:`.from_thread.run_sync` when not
called from an AnyIO worker thread and no ``token`` was passed.
"""
class RunFinishedError(RuntimeError):
"""
Raised by :func:`.from_thread.run` and :func:`.from_thread.run_sync` if the event
loop associated with the explicitly passed token has already finished.
"""
def __init__(self) -> None:
super().__init__(
"The event loop associated with the given token has already finished"
)
anyio-4.11.0/src/anyio/_core/_fileio.py 0000664 0000000 0000000 00000055444 15064462627 0017721 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import os
import pathlib
import sys
from collections.abc import (
AsyncIterator,
Callable,
Iterable,
Iterator,
Sequence,
)
from dataclasses import dataclass
from functools import partial
from os import PathLike
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
ClassVar,
Final,
Generic,
overload,
)
from .. import to_thread
from ..abc import AsyncResource
if TYPE_CHECKING:
from types import ModuleType
from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
else:
ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object
class AsyncFile(AsyncResource, Generic[AnyStr]):
"""
An asynchronous file object.
This class wraps a standard file object and provides async friendly versions of the
following blocking methods (where available on the original file object):
* read
* read1
* readline
* readlines
* readinto
* readinto1
* write
* writelines
* truncate
* seek
* tell
* flush
All other methods are directly passed through.
This class supports the asynchronous context manager protocol which closes the
underlying file at the end of the context block.
This class also supports asynchronous iteration::
async with await open_file(...) as f:
async for line in f:
print(line)
"""
def __init__(self, fp: IO[AnyStr]) -> None:
self._fp: Any = fp
def __getattr__(self, name: str) -> object:
return getattr(self._fp, name)
@property
def wrapped(self) -> IO[AnyStr]:
"""The wrapped file object."""
return self._fp
async def __aiter__(self) -> AsyncIterator[AnyStr]:
while True:
line = await self.readline()
if line:
yield line
else:
break
async def aclose(self) -> None:
return await to_thread.run_sync(self._fp.close)
async def read(self, size: int = -1) -> AnyStr:
return await to_thread.run_sync(self._fp.read, size)
async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes:
return await to_thread.run_sync(self._fp.read1, size)
async def readline(self) -> AnyStr:
return await to_thread.run_sync(self._fp.readline)
async def readlines(self) -> list[AnyStr]:
return await to_thread.run_sync(self._fp.readlines)
async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
return await to_thread.run_sync(self._fp.readinto, b)
async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
return await to_thread.run_sync(self._fp.readinto1, b)
@overload
async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int: ...
@overload
async def write(self: AsyncFile[str], b: str) -> int: ...
async def write(self, b: ReadableBuffer | str) -> int:
return await to_thread.run_sync(self._fp.write, b)
@overload
async def writelines(
self: AsyncFile[bytes], lines: Iterable[ReadableBuffer]
) -> None: ...
@overload
async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None: ...
async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None:
return await to_thread.run_sync(self._fp.writelines, lines)
async def truncate(self, size: int | None = None) -> int:
return await to_thread.run_sync(self._fp.truncate, size)
async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int:
return await to_thread.run_sync(self._fp.seek, offset, whence)
async def tell(self) -> int:
return await to_thread.run_sync(self._fp.tell)
async def flush(self) -> None:
return await to_thread.run_sync(self._fp.flush)
@overload
async def open_file(
file: str | PathLike[str] | int,
mode: OpenBinaryMode,
buffering: int = ...,
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
closefd: bool = ...,
opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[bytes]: ...
@overload
async def open_file(
file: str | PathLike[str] | int,
mode: OpenTextMode = ...,
buffering: int = ...,
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
closefd: bool = ...,
opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[str]: ...
async def open_file(
file: str | PathLike[str] | int,
mode: str = "r",
buffering: int = -1,
encoding: str | None = None,
errors: str | None = None,
newline: str | None = None,
closefd: bool = True,
opener: Callable[[str, int], int] | None = None,
) -> AsyncFile[Any]:
"""
Open a file asynchronously.
The arguments are exactly the same as for the builtin :func:`open`.
:return: an asynchronous file object
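Example (the file name is illustrative)::

    async with await open_file("data.txt") as f:
        contents = await f.read()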
"""
fp = await to_thread.run_sync(
open, file, mode, buffering, encoding, errors, newline, closefd, opener
)
return AsyncFile(fp)
def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]:
"""
Wrap an existing file as an asynchronous file.
:param file: an existing file-like object
:return: an asynchronous file object
"""
return AsyncFile(file)
@dataclass(eq=False)
class _PathIterator(AsyncIterator["Path"]):
iterator: Iterator[PathLike[str]]
async def __anext__(self) -> Path:
nextval = await to_thread.run_sync(
next, self.iterator, None, abandon_on_cancel=True
)
if nextval is None:
raise StopAsyncIteration from None
return Path(nextval)
class Path:
"""
An asynchronous version of :class:`pathlib.Path`.
This class cannot be substituted for :class:`pathlib.Path` or
:class:`pathlib.PurePath`, but it is compatible with the :class:`os.PathLike`
interface.
It implements the Python 3.10 version of the :class:`pathlib.Path` interface, except for
the deprecated :meth:`~pathlib.Path.link_to` method.
Some methods may be unavailable or have limited functionality, based on the Python
version:
* :meth:`~pathlib.Path.copy` (available on Python 3.14 or later)
* :meth:`~pathlib.Path.copy_into` (available on Python 3.14 or later)
* :meth:`~pathlib.Path.from_uri` (available on Python 3.13 or later)
* :meth:`~pathlib.PurePath.full_match` (available on Python 3.13 or later)
* :attr:`~pathlib.Path.info` (available on Python 3.14 or later)
* :meth:`~pathlib.Path.is_junction` (available on Python 3.12 or later)
* :meth:`~pathlib.PurePath.match` (the ``case_sensitive`` parameter is only
available on Python 3.13 or later)
* :meth:`~pathlib.Path.move` (available on Python 3.14 or later)
* :meth:`~pathlib.Path.move_into` (available on Python 3.14 or later)
* :meth:`~pathlib.PurePath.relative_to` (the ``walk_up`` parameter is only available
on Python 3.12 or later)
* :meth:`~pathlib.Path.walk` (available on Python 3.12 or later)
Any methods that do disk I/O need to be awaited on. These methods are:
* :meth:`~pathlib.Path.absolute`
* :meth:`~pathlib.Path.chmod`
* :meth:`~pathlib.Path.cwd`
* :meth:`~pathlib.Path.exists`
* :meth:`~pathlib.Path.expanduser`
* :meth:`~pathlib.Path.group`
* :meth:`~pathlib.Path.hardlink_to`
* :meth:`~pathlib.Path.home`
* :meth:`~pathlib.Path.is_block_device`
* :meth:`~pathlib.Path.is_char_device`
* :meth:`~pathlib.Path.is_dir`
* :meth:`~pathlib.Path.is_fifo`
* :meth:`~pathlib.Path.is_file`
* :meth:`~pathlib.Path.is_junction`
* :meth:`~pathlib.Path.is_mount`
* :meth:`~pathlib.Path.is_socket`
* :meth:`~pathlib.Path.is_symlink`
* :meth:`~pathlib.Path.lchmod`
* :meth:`~pathlib.Path.lstat`
* :meth:`~pathlib.Path.mkdir`
* :meth:`~pathlib.Path.open`
* :meth:`~pathlib.Path.owner`
* :meth:`~pathlib.Path.read_bytes`
* :meth:`~pathlib.Path.read_text`
* :meth:`~pathlib.Path.readlink`
* :meth:`~pathlib.Path.rename`
* :meth:`~pathlib.Path.replace`
* :meth:`~pathlib.Path.resolve`
* :meth:`~pathlib.Path.rmdir`
* :meth:`~pathlib.Path.samefile`
* :meth:`~pathlib.Path.stat`
* :meth:`~pathlib.Path.symlink_to`
* :meth:`~pathlib.Path.touch`
* :meth:`~pathlib.Path.unlink`
* :meth:`~pathlib.Path.walk`
* :meth:`~pathlib.Path.write_bytes`
* :meth:`~pathlib.Path.write_text`
Additionally, the following methods return an async iterator yielding
:class:`~.Path` objects:
* :meth:`~pathlib.Path.glob`
* :meth:`~pathlib.Path.iterdir`
* :meth:`~pathlib.Path.rglob`
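Example (a minimal sketch; the path is illustrative)::

    path = Path("/etc/hosts")
    if await path.is_file():
        contents = await path.read_text()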
"""
__slots__ = "_path", "__weakref__"
__weakref__: Any
def __init__(self, *args: str | PathLike[str]) -> None:
self._path: Final[pathlib.Path] = pathlib.Path(*args)
def __fspath__(self) -> str:
return self._path.__fspath__()
def __str__(self) -> str:
return self._path.__str__()
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.as_posix()!r})"
def __bytes__(self) -> bytes:
return self._path.__bytes__()
def __hash__(self) -> int:
return self._path.__hash__()
def __eq__(self, other: object) -> bool:
target = other._path if isinstance(other, Path) else other
return self._path.__eq__(target)
def __lt__(self, other: pathlib.PurePath | Path) -> bool:
target = other._path if isinstance(other, Path) else other
return self._path.__lt__(target)
def __le__(self, other: pathlib.PurePath | Path) -> bool:
target = other._path if isinstance(other, Path) else other
return self._path.__le__(target)
def __gt__(self, other: pathlib.PurePath | Path) -> bool:
target = other._path if isinstance(other, Path) else other
return self._path.__gt__(target)
def __ge__(self, other: pathlib.PurePath | Path) -> bool:
target = other._path if isinstance(other, Path) else other
return self._path.__ge__(target)
def __truediv__(self, other: str | PathLike[str]) -> Path:
return Path(self._path / other)
def __rtruediv__(self, other: str | PathLike[str]) -> Path:
return Path(other) / self
@property
def parts(self) -> tuple[str, ...]:
return self._path.parts
@property
def drive(self) -> str:
return self._path.drive
@property
def root(self) -> str:
return self._path.root
@property
def anchor(self) -> str:
return self._path.anchor
@property
def parents(self) -> Sequence[Path]:
return tuple(Path(p) for p in self._path.parents)
@property
def parent(self) -> Path:
return Path(self._path.parent)
@property
def name(self) -> str:
return self._path.name
@property
def suffix(self) -> str:
return self._path.suffix
@property
def suffixes(self) -> list[str]:
return self._path.suffixes
@property
def stem(self) -> str:
return self._path.stem
async def absolute(self) -> Path:
path = await to_thread.run_sync(self._path.absolute)
return Path(path)
def as_posix(self) -> str:
return self._path.as_posix()
def as_uri(self) -> str:
return self._path.as_uri()
if sys.version_info >= (3, 13):
parser: ClassVar[ModuleType] = pathlib.Path.parser
@classmethod
def from_uri(cls, uri: str) -> Path:
return Path(pathlib.Path.from_uri(uri))
def full_match(
self, path_pattern: str, *, case_sensitive: bool | None = None
) -> bool:
return self._path.full_match(path_pattern, case_sensitive=case_sensitive)
def match(
self, path_pattern: str, *, case_sensitive: bool | None = None
) -> bool:
return self._path.match(path_pattern, case_sensitive=case_sensitive)
else:
def match(self, path_pattern: str) -> bool:
return self._path.match(path_pattern)
if sys.version_info >= (3, 14):
@property
def info(self) -> Any: # TODO: add return type annotation when Typeshed gets it
return self._path.info
async def copy(
self,
target: str | os.PathLike[str],
*,
follow_symlinks: bool = True,
preserve_metadata: bool = False,
) -> Path:
func = partial(
self._path.copy,
follow_symlinks=follow_symlinks,
preserve_metadata=preserve_metadata,
)
return Path(await to_thread.run_sync(func, pathlib.Path(target)))
async def copy_into(
self,
target_dir: str | os.PathLike[str],
*,
follow_symlinks: bool = True,
preserve_metadata: bool = False,
) -> Path:
func = partial(
self._path.copy_into,
follow_symlinks=follow_symlinks,
preserve_metadata=preserve_metadata,
)
return Path(await to_thread.run_sync(func, pathlib.Path(target_dir)))
async def move(self, target: str | os.PathLike[str]) -> Path:
# Upstream does not handle anyio.Path properly as a PathLike
target = pathlib.Path(target)
return Path(await to_thread.run_sync(self._path.move, target))
async def move_into(
self,
target_dir: str | os.PathLike[str],
) -> Path:
return Path(await to_thread.run_sync(self._path.move_into, target_dir))
def is_relative_to(self, other: str | PathLike[str]) -> bool:
try:
self.relative_to(other)
return True
except ValueError:
return False
async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None:
func = partial(os.chmod, follow_symlinks=follow_symlinks)
return await to_thread.run_sync(func, self._path, mode)
@classmethod
async def cwd(cls) -> Path:
path = await to_thread.run_sync(pathlib.Path.cwd)
return cls(path)
async def exists(self) -> bool:
return await to_thread.run_sync(self._path.exists, abandon_on_cancel=True)
async def expanduser(self) -> Path:
return Path(
await to_thread.run_sync(self._path.expanduser, abandon_on_cancel=True)
)
def glob(self, pattern: str) -> AsyncIterator[Path]:
gen = self._path.glob(pattern)
return _PathIterator(gen)
async def group(self) -> str:
return await to_thread.run_sync(self._path.group, abandon_on_cancel=True)
async def hardlink_to(
self, target: str | bytes | PathLike[str] | PathLike[bytes]
) -> None:
if isinstance(target, Path):
target = target._path
await to_thread.run_sync(os.link, target, self)
@classmethod
async def home(cls) -> Path:
home_path = await to_thread.run_sync(pathlib.Path.home)
return cls(home_path)
def is_absolute(self) -> bool:
return self._path.is_absolute()
async def is_block_device(self) -> bool:
return await to_thread.run_sync(
self._path.is_block_device, abandon_on_cancel=True
)
async def is_char_device(self) -> bool:
return await to_thread.run_sync(
self._path.is_char_device, abandon_on_cancel=True
)
async def is_dir(self) -> bool:
return await to_thread.run_sync(self._path.is_dir, abandon_on_cancel=True)
async def is_fifo(self) -> bool:
return await to_thread.run_sync(self._path.is_fifo, abandon_on_cancel=True)
async def is_file(self) -> bool:
return await to_thread.run_sync(self._path.is_file, abandon_on_cancel=True)
if sys.version_info >= (3, 12):
async def is_junction(self) -> bool:
return await to_thread.run_sync(self._path.is_junction)
async def is_mount(self) -> bool:
return await to_thread.run_sync(
os.path.ismount, self._path, abandon_on_cancel=True
)
def is_reserved(self) -> bool:
return self._path.is_reserved()
async def is_socket(self) -> bool:
return await to_thread.run_sync(self._path.is_socket, abandon_on_cancel=True)
async def is_symlink(self) -> bool:
return await to_thread.run_sync(self._path.is_symlink, abandon_on_cancel=True)
async def iterdir(self) -> AsyncIterator[Path]:
gen = (
self._path.iterdir()
if sys.version_info < (3, 13)
else await to_thread.run_sync(self._path.iterdir, abandon_on_cancel=True)
)
async for path in _PathIterator(gen):
yield path
def joinpath(self, *args: str | PathLike[str]) -> Path:
return Path(self._path.joinpath(*args))
async def lchmod(self, mode: int) -> None:
await to_thread.run_sync(self._path.lchmod, mode)
async def lstat(self) -> os.stat_result:
return await to_thread.run_sync(self._path.lstat, abandon_on_cancel=True)
async def mkdir(
self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False
) -> None:
await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok)
@overload
async def open(
self,
mode: OpenBinaryMode,
buffering: int = ...,
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
) -> AsyncFile[bytes]: ...
@overload
async def open(
self,
mode: OpenTextMode = ...,
buffering: int = ...,
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
) -> AsyncFile[str]: ...
async def open(
self,
mode: str = "r",
buffering: int = -1,
encoding: str | None = None,
errors: str | None = None,
newline: str | None = None,
) -> AsyncFile[Any]:
fp = await to_thread.run_sync(
self._path.open, mode, buffering, encoding, errors, newline
)
return AsyncFile(fp)
async def owner(self) -> str:
return await to_thread.run_sync(self._path.owner, abandon_on_cancel=True)
async def read_bytes(self) -> bytes:
return await to_thread.run_sync(self._path.read_bytes)
async def read_text(
self, encoding: str | None = None, errors: str | None = None
) -> str:
return await to_thread.run_sync(self._path.read_text, encoding, errors)
if sys.version_info >= (3, 12):
def relative_to(
self, *other: str | PathLike[str], walk_up: bool = False
) -> Path:
# relative_to() should work with any PathLike but it doesn't
others = [pathlib.Path(other) for other in other]
return Path(self._path.relative_to(*others, walk_up=walk_up))
else:
def relative_to(self, *other: str | PathLike[str]) -> Path:
return Path(self._path.relative_to(*other))
async def readlink(self) -> Path:
target = await to_thread.run_sync(os.readlink, self._path)
return Path(target)
async def rename(self, target: str | pathlib.PurePath | Path) -> Path:
if isinstance(target, Path):
target = target._path
await to_thread.run_sync(self._path.rename, target)
return Path(target)
async def replace(self, target: str | pathlib.PurePath | Path) -> Path:
if isinstance(target, Path):
target = target._path
await to_thread.run_sync(self._path.replace, target)
return Path(target)
async def resolve(self, strict: bool = False) -> Path:
func = partial(self._path.resolve, strict=strict)
return Path(await to_thread.run_sync(func, abandon_on_cancel=True))
def rglob(self, pattern: str) -> AsyncIterator[Path]:
gen = self._path.rglob(pattern)
return _PathIterator(gen)
async def rmdir(self) -> None:
await to_thread.run_sync(self._path.rmdir)
async def samefile(self, other_path: str | PathLike[str]) -> bool:
if isinstance(other_path, Path):
other_path = other_path._path
return await to_thread.run_sync(
self._path.samefile, other_path, abandon_on_cancel=True
)
async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result:
func = partial(os.stat, follow_symlinks=follow_symlinks)
return await to_thread.run_sync(func, self._path, abandon_on_cancel=True)
async def symlink_to(
self,
target: str | bytes | PathLike[str] | PathLike[bytes],
target_is_directory: bool = False,
) -> None:
if isinstance(target, Path):
target = target._path
await to_thread.run_sync(self._path.symlink_to, target, target_is_directory)
async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:
await to_thread.run_sync(self._path.touch, mode, exist_ok)
async def unlink(self, missing_ok: bool = False) -> None:
try:
await to_thread.run_sync(self._path.unlink)
except FileNotFoundError:
if not missing_ok:
raise
if sys.version_info >= (3, 12):
async def walk(
self,
top_down: bool = True,
on_error: Callable[[OSError], object] | None = None,
follow_symlinks: bool = False,
) -> AsyncIterator[tuple[Path, list[str], list[str]]]:
def get_next_value() -> tuple[pathlib.Path, list[str], list[str]] | None:
try:
return next(gen)
except StopIteration:
return None
gen = self._path.walk(top_down, on_error, follow_symlinks)
while True:
value = await to_thread.run_sync(get_next_value)
if value is None:
return
root, dirs, paths = value
yield Path(root), dirs, paths
def with_name(self, name: str) -> Path:
return Path(self._path.with_name(name))
def with_stem(self, stem: str) -> Path:
return Path(self._path.with_name(stem + self._path.suffix))
def with_suffix(self, suffix: str) -> Path:
return Path(self._path.with_suffix(suffix))
def with_segments(self, *pathsegments: str | PathLike[str]) -> Path:
return Path(*pathsegments)
async def write_bytes(self, data: bytes) -> int:
return await to_thread.run_sync(self._path.write_bytes, data)
async def write_text(
self,
data: str,
encoding: str | None = None,
errors: str | None = None,
newline: str | None = None,
) -> int:
# Path.write_text() does not support the "newline" parameter before Python 3.10
def sync_write_text() -> int:
with self._path.open(
"w", encoding=encoding, errors=errors, newline=newline
) as fp:
return fp.write(data)
return await to_thread.run_sync(sync_write_text)
PathLike.register(Path)
anyio-4.11.0/src/anyio/_core/_resources.py 0000664 0000000 0000000 00000000663 15064462627 0020455 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from ..abc import AsyncResource
from ._tasks import CancelScope
async def aclose_forcefully(resource: AsyncResource) -> None:
"""
Close an asynchronous resource in a cancelled scope.
Doing this closes the resource without waiting on anything.
:param resource: the resource to close
"""
with CancelScope() as scope:
scope.cancel()
await resource.aclose()
anyio-4.11.0/src/anyio/_core/_signals.py 0000664 0000000 0000000 00000001611 15064462627 0020075 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from collections.abc import AsyncIterator
from contextlib import AbstractContextManager
from signal import Signals
from ._eventloop import get_async_backend
def open_signal_receiver(
*signals: Signals,
) -> AbstractContextManager[AsyncIterator[Signals]]:
"""
Start receiving operating system signals.
:param signals: signals to receive (e.g. ``signal.SIGINT``)
:return: an asynchronous context manager for an asynchronous iterator which yields
signal numbers
.. warning:: Windows does not support signals natively so it is best to avoid
relying on this in cross-platform applications.
.. warning:: On asyncio, this permanently replaces any previous signal handler for
the given signals, as set via :meth:`~asyncio.loop.add_signal_handler`.
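Example (a minimal sketch)::

    import signal

    with open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
        async for signum in signals:
            print("received signal", signum)
            break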
"""
return get_async_backend().open_signal_receiver(*signals)
anyio-4.11.0/src/anyio/_core/_sockets.py 0000664 0000000 0000000 00000103007 15064462627 0020112 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import errno
import os
import socket
import ssl
import stat
import sys
from collections.abc import Awaitable
from dataclasses import dataclass
from ipaddress import IPv4Address, IPv6Address, ip_address
from os import PathLike, chmod
from socket import AddressFamily, SocketKind
from typing import TYPE_CHECKING, Any, Literal, cast, overload
from .. import ConnectionFailed, to_thread
from ..abc import (
ByteStreamConnectable,
ConnectedUDPSocket,
ConnectedUNIXDatagramSocket,
IPAddressType,
IPSockAddrType,
SocketListener,
SocketStream,
UDPSocket,
UNIXDatagramSocket,
UNIXSocketStream,
)
from ..streams.stapled import MultiListener
from ..streams.tls import TLSConnectable, TLSStream
from ._eventloop import get_async_backend
from ._resources import aclose_forcefully
from ._synchronization import Event
from ._tasks import create_task_group, move_on_after
if TYPE_CHECKING:
from _typeshed import FileDescriptorLike
else:
FileDescriptorLike = object
if sys.version_info < (3, 11):
from exceptiongroup import ExceptionGroup
if sys.version_info >= (3, 12):
from typing import override
else:
from typing_extensions import override
if sys.version_info < (3, 13):
from typing_extensions import deprecated
else:
from warnings import deprecated
IPPROTO_IPV6 = getattr(socket, "IPPROTO_IPV6", 41) # https://bugs.python.org/issue29515
AnyIPAddressFamily = Literal[
AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
]
IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6]
# tls_hostname given
@overload
async def connect_tcp(
remote_host: IPAddressType,
remote_port: int,
*,
local_host: IPAddressType | None = ...,
ssl_context: ssl.SSLContext | None = ...,
tls_standard_compatible: bool = ...,
tls_hostname: str,
happy_eyeballs_delay: float = ...,
) -> TLSStream: ...
# ssl_context given
@overload
async def connect_tcp(
remote_host: IPAddressType,
remote_port: int,
*,
local_host: IPAddressType | None = ...,
ssl_context: ssl.SSLContext,
tls_standard_compatible: bool = ...,
tls_hostname: str | None = ...,
happy_eyeballs_delay: float = ...,
) -> TLSStream: ...
# tls=True
@overload
async def connect_tcp(
remote_host: IPAddressType,
remote_port: int,
*,
local_host: IPAddressType | None = ...,
tls: Literal[True],
ssl_context: ssl.SSLContext | None = ...,
tls_standard_compatible: bool = ...,
tls_hostname: str | None = ...,
happy_eyeballs_delay: float = ...,
) -> TLSStream: ...
# tls=False
@overload
async def connect_tcp(
remote_host: IPAddressType,
remote_port: int,
*,
local_host: IPAddressType | None = ...,
tls: Literal[False],
ssl_context: ssl.SSLContext | None = ...,
tls_standard_compatible: bool = ...,
tls_hostname: str | None = ...,
happy_eyeballs_delay: float = ...,
) -> SocketStream: ...
# No TLS arguments
@overload
async def connect_tcp(
remote_host: IPAddressType,
remote_port: int,
*,
local_host: IPAddressType | None = ...,
happy_eyeballs_delay: float = ...,
) -> SocketStream: ...
async def connect_tcp(
remote_host: IPAddressType,
remote_port: int,
*,
local_host: IPAddressType | None = None,
tls: bool = False,
ssl_context: ssl.SSLContext | None = None,
tls_standard_compatible: bool = True,
tls_hostname: str | None = None,
happy_eyeballs_delay: float = 0.25,
) -> SocketStream | TLSStream:
"""
Connect to a host using the TCP protocol.
This function implements the stateless version of the Happy Eyeballs algorithm (RFC
6555). If ``remote_host`` is a host name that resolves to multiple IP addresses,
each one is tried until one connection attempt succeeds. If the first attempt does
not connect within 250 milliseconds, a second attempt is started using the next
address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if
available) is tried first.
When the connection has been established, a TLS handshake will be done if either
``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.
:param remote_host: the IP address or host name to connect to
:param remote_port: port on the target host to connect to
:param local_host: the interface address or name to bind the socket to before
connecting
:param tls: ``True`` to do a TLS handshake with the connected stream and return a
:class:`~anyio.streams.tls.TLSStream` instead
:param ssl_context: the SSL context object to use (if omitted, a default context is
created)
:param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake
before closing the stream and requires that the server does this as well.
Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
Some protocols, such as HTTP, require this option to be ``False``.
See :meth:`~ssl.SSLContext.wrap_socket` for details.
:param tls_hostname: host name to check the server certificate against (defaults to
the value of ``remote_host``)
:param happy_eyeballs_delay: delay (in seconds) before starting the next connection
attempt
:return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
:raises ConnectionFailed: if the connection fails
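Example (host and port are illustrative)::

    async with await connect_tcp("example.org", 80) as stream:
        await stream.send(b"HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n")
        response = await stream.receive()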
"""
# Placed here due to https://github.com/python/mypy/issues/7057
connected_stream: SocketStream | None = None
async def try_connect(remote_host: str, event: Event) -> None:
nonlocal connected_stream
try:
stream = await asynclib.connect_tcp(remote_host, remote_port, local_address)
except OSError as exc:
oserrors.append(exc)
return
else:
if connected_stream is None:
connected_stream = stream
tg.cancel_scope.cancel()
else:
await stream.aclose()
finally:
event.set()
asynclib = get_async_backend()
local_address: IPSockAddrType | None = None
family = socket.AF_UNSPEC
if local_host:
gai_res = await getaddrinfo(str(local_host), None)
family, *_, local_address = gai_res[0]
target_host = str(remote_host)
try:
addr_obj = ip_address(remote_host)
except ValueError:
addr_obj = None
if addr_obj is not None:
if isinstance(addr_obj, IPv6Address):
target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
else:
target_addrs = [(socket.AF_INET, addr_obj.compressed)]
else:
# getaddrinfo() will raise an exception if name resolution fails
gai_res = await getaddrinfo(
target_host, remote_port, family=family, type=socket.SOCK_STREAM
)
# Organize the list so that the first address is an IPv6 address (if available)
# and the second one is an IPv4 address. The rest can be in whatever order.
v6_found = v4_found = False
target_addrs = []
for af, *_, sa in gai_res:
if af == socket.AF_INET6 and not v6_found:
v6_found = True
target_addrs.insert(0, (af, sa[0]))
elif af == socket.AF_INET and not v4_found and v6_found:
v4_found = True
target_addrs.insert(1, (af, sa[0]))
else:
target_addrs.append((af, sa[0]))
oserrors: list[OSError] = []
try:
async with create_task_group() as tg:
for _af, addr in target_addrs:
event = Event()
tg.start_soon(try_connect, addr, event)
with move_on_after(happy_eyeballs_delay):
await event.wait()
if connected_stream is None:
cause = (
oserrors[0]
if len(oserrors) == 1
else ExceptionGroup("multiple connection attempts failed", oserrors)
)
raise OSError("All connection attempts failed") from cause
finally:
oserrors.clear()
if tls or tls_hostname or ssl_context:
try:
return await TLSStream.wrap(
connected_stream,
server_side=False,
hostname=tls_hostname or str(remote_host),
ssl_context=ssl_context,
standard_compatible=tls_standard_compatible,
)
except BaseException:
await aclose_forcefully(connected_stream)
raise
return connected_stream
async def connect_unix(path: str | bytes | PathLike[Any]) -> UNIXSocketStream:
"""
Connect to the given UNIX socket.
Not available on Windows.
:param path: path to the socket
:return: a socket stream object
:raises ConnectionFailed: if the connection fails
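Example (the socket path is illustrative)::

    async with await connect_unix("/run/example.sock") as stream:
        await stream.send(b"hello")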
"""
path = os.fspath(path)
return await get_async_backend().connect_unix(path)
async def create_tcp_listener(
*,
local_host: IPAddressType | None = None,
local_port: int = 0,
family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC,
backlog: int = 65536,
reuse_port: bool = False,
) -> MultiListener[SocketStream]:
"""
Create a TCP socket listener.
:param local_port: port number to listen on
:param local_host: IP address of the interface to listen on. If omitted, listen on
all IPv4 and IPv6 interfaces. To listen on all interfaces on a specific address
family, use ``0.0.0.0`` for IPv4 or ``::`` for IPv6.
:param family: address family (used if ``local_host`` was omitted)
:param backlog: maximum number of queued incoming connections (up to a maximum of
2**16, or 65536)
:param reuse_port: ``True`` to allow multiple sockets to bind to the same
address/port (not supported on Windows)
:return: a multi-listener object containing one or more socket listeners
:raises OSError: if there's an error creating a socket, or binding to one or more
interfaces failed
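Example (a minimal sketch; ``handle`` is an illustrative handler)::

    async def handle(stream):
        async with stream:
            await stream.send(b"hello\n")

    listener = await create_tcp_listener(local_port=8000)
    await listener.serve(handle)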
"""
asynclib = get_async_backend()
backlog = min(backlog, 65536)
local_host = str(local_host) if local_host is not None else None
def setup_raw_socket(
fam: AddressFamily,
bind_addr: tuple[str, int] | tuple[str, int, int, int],
*,
v6only: bool = True,
) -> socket.socket:
sock = socket.socket(fam)
try:
sock.setblocking(False)
if fam == AddressFamily.AF_INET6:
sock.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, v6only)
# For Windows, enable exclusive address use. For others, enable address
# reuse.
if sys.platform == "win32":
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if reuse_port:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Workaround for #554
if fam == socket.AF_INET6 and "%" in bind_addr[0]:
addr, scope_id = bind_addr[0].split("%", 1)
bind_addr = (addr, bind_addr[1], 0, int(scope_id))
sock.bind(bind_addr)
sock.listen(backlog)
except BaseException:
sock.close()
raise
return sock
# We pass type=0 on non-Windows platforms as a workaround for a uvloop bug
# where we don't get the correct scope ID for IPv6 link-local addresses when passing
# type=socket.SOCK_STREAM to getaddrinfo():
# https://github.com/MagicStack/uvloop/issues/539
gai_res = await getaddrinfo(
local_host,
local_port,
family=family,
type=socket.SOCK_STREAM if sys.platform == "win32" else 0,
flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
)
# The set comprehension is here to work around a glibc bug:
# https://sourceware.org/bugzilla/show_bug.cgi?id=14969
sockaddrs = sorted({res for res in gai_res if res[1] == SocketKind.SOCK_STREAM})
# Special case for dual-stack binding on the "any" interface
if (
local_host is None
and family == AddressFamily.AF_UNSPEC
and socket.has_dualstack_ipv6()
and any(fam == AddressFamily.AF_INET6 for fam, *_ in gai_res)
):
raw_socket = setup_raw_socket(
AddressFamily.AF_INET6, ("::", local_port), v6only=False
)
listener = asynclib.create_tcp_listener(raw_socket)
return MultiListener([listener])
errors: list[OSError] = []
try:
for _ in range(len(sockaddrs)):
listeners: list[SocketListener] = []
bound_ephemeral_port = local_port
try:
for fam, *_, sockaddr in sockaddrs:
sockaddr = sockaddr[0], bound_ephemeral_port, *sockaddr[2:]
raw_socket = setup_raw_socket(fam, sockaddr)
# Store the assigned port if an ephemeral port was requested, so
# we'll bind to the same port on all interfaces
if local_port == 0 and len(gai_res) > 1:
bound_ephemeral_port = raw_socket.getsockname()[1]
listeners.append(asynclib.create_tcp_listener(raw_socket))
except BaseException as exc:
for listener in listeners:
await listener.aclose()
# If an ephemeral port was requested but binding the assigned port
# failed for another interface, rotate the address list and try again
if (
isinstance(exc, OSError)
and exc.errno == errno.EADDRINUSE
and local_port == 0
and bound_ephemeral_port
):
errors.append(exc)
sockaddrs.append(sockaddrs.pop(0))
continue
raise
return MultiListener(listeners)
raise OSError(
f"Could not create {len(sockaddrs)} listeners with a consistent port"
) from ExceptionGroup("Several bind attempts failed", errors)
finally:
del errors # Prevent reference cycles
async def create_unix_listener(
path: str | bytes | PathLike[Any],
*,
mode: int | None = None,
backlog: int = 65536,
) -> SocketListener:
"""
Create a UNIX socket listener.
Not available on Windows.
:param path: path of the socket
:param mode: permissions to set on the socket
:param backlog: maximum number of queued incoming connections (up to a maximum of
2**16, or 65536)
:return: a listener object
.. versionchanged:: 3.0
If a socket already exists on the file system in the given path, it will be
removed first.
"""
backlog = min(backlog, 65536)
raw_socket = await setup_unix_local_socket(path, mode, socket.SOCK_STREAM)
try:
raw_socket.listen(backlog)
return get_async_backend().create_unix_listener(raw_socket)
except BaseException:
raw_socket.close()
raise
async def create_udp_socket(
family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
*,
local_host: IPAddressType | None = None,
local_port: int = 0,
reuse_port: bool = False,
) -> UDPSocket:
"""
Create a UDP socket.
If ``local_port`` has been given, the socket will be bound to this port on the
local machine, making this socket suitable for providing UDP-based services.
:param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
determined from ``local_host`` if omitted
:param local_host: IP address or host name of the local interface to bind to
:param local_port: local port to bind to
:param reuse_port: ``True`` to allow multiple sockets to bind to the same
address/port (not supported on Windows)
:return: a UDP socket
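Example (a minimal sketch; host and port are illustrative)::

    async with await create_udp_socket(
        local_host="127.0.0.1", local_port=9999
    ) as udp:
        async for packet, (host, port) in udp:
            await udp.sendto(b"ack", host, port)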
"""
if family is AddressFamily.AF_UNSPEC and not local_host:
raise ValueError('Either "family" or "local_host" must be given')
if local_host:
gai_res = await getaddrinfo(
str(local_host),
local_port,
family=family,
type=socket.SOCK_DGRAM,
flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
)
family = cast(AnyIPAddressFamily, gai_res[0][0])
local_address = gai_res[0][-1]
elif family is AddressFamily.AF_INET6:
local_address = ("::", 0)
else:
local_address = ("0.0.0.0", 0)
sock = await get_async_backend().create_udp_socket(
family, local_address, None, reuse_port
)
return cast(UDPSocket, sock)
async def create_connected_udp_socket(
remote_host: IPAddressType,
remote_port: int,
*,
family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
local_host: IPAddressType | None = None,
local_port: int = 0,
reuse_port: bool = False,
) -> ConnectedUDPSocket:
"""
Create a connected UDP socket.
Connected UDP sockets can only communicate with the specified remote host/port, and
any packets sent from other sources are dropped.
:param remote_host: remote host to set as the default target
:param remote_port: port on the remote host to set as the default target
:param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
determined from ``local_host`` or ``remote_host`` if omitted
:param local_host: IP address or host name of the local interface to bind to
:param local_port: local port to bind to
:param reuse_port: ``True`` to allow multiple sockets to bind to the same
address/port (not supported on Windows)
:return: a connected UDP socket
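Example (host and port are illustrative)::

    async with await create_connected_udp_socket("example.org", 9999) as udp:
        await udp.send(b"ping")
        response = await udp.receive()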
"""
local_address = None
if local_host:
gai_res = await getaddrinfo(
str(local_host),
local_port,
family=family,
type=socket.SOCK_DGRAM,
flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
)
family = cast(AnyIPAddressFamily, gai_res[0][0])
local_address = gai_res[0][-1]
gai_res = await getaddrinfo(
str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM
)
family = cast(AnyIPAddressFamily, gai_res[0][0])
remote_address = gai_res[0][-1]
sock = await get_async_backend().create_udp_socket(
family, local_address, remote_address, reuse_port
)
return cast(ConnectedUDPSocket, sock)
async def create_unix_datagram_socket(
*,
local_path: None | str | bytes | PathLike[Any] = None,
local_mode: int | None = None,
) -> UNIXDatagramSocket:
"""
Create a UNIX datagram socket.
Not available on Windows.
If ``local_path`` has been given, the socket will be bound to this path, making this
socket suitable for receiving datagrams from other processes. Other processes can
send datagrams to this socket only if ``local_path`` is set.
If a socket already exists on the file system at ``local_path``, it will be
removed first.
:param local_path: the path to bind to
:param local_mode: permissions to set on the local socket
:return: a UNIX datagram socket
"""
raw_socket = await setup_unix_local_socket(
local_path, local_mode, socket.SOCK_DGRAM
)
return await get_async_backend().create_unix_datagram_socket(raw_socket, None)
async def create_connected_unix_datagram_socket(
remote_path: str | bytes | PathLike[Any],
*,
local_path: None | str | bytes | PathLike[Any] = None,
local_mode: int | None = None,
) -> ConnectedUNIXDatagramSocket:
"""
Create a connected UNIX datagram socket.
Connected datagram sockets can only communicate with the specified remote path.
If ``local_path`` has been given, the socket will be bound to this path, making
this socket suitable for receiving datagrams from other processes. Other processes
can send datagrams to this socket only if ``local_path`` is set.
If a socket already exists on the file system at ``local_path``, it will be
removed first.
:param remote_path: the path to set as the default target
:param local_path: the path to bind to
:param local_mode: permissions to set on the local socket
:return: a connected UNIX datagram socket
"""
remote_path = os.fspath(remote_path)
raw_socket = await setup_unix_local_socket(
local_path, local_mode, socket.SOCK_DGRAM
)
return await get_async_backend().create_unix_datagram_socket(
raw_socket, remote_path
)
async def getaddrinfo(
host: bytes | str | None,
port: str | int | None,
*,
family: int | AddressFamily = 0,
type: int | SocketKind = 0,
proto: int = 0,
flags: int = 0,
) -> list[tuple[AddressFamily, SocketKind, int, str, tuple[str, int]]]:
"""
Look up a numeric IP address given a host name.
Internationalized domain names are translated according to the (non-transitional)
IDNA 2008 standard.
.. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of
(host, port), unlike what :func:`socket.getaddrinfo` does.
:param host: host name
:param port: port number
:param family: socket family (``AF_INET``, ...)
:param type: socket type (``SOCK_STREAM``, ...)
:param proto: protocol number
:param flags: flags to pass to upstream ``getaddrinfo()``
:return: list of tuples containing (family, type, proto, canonname, sockaddr)
.. seealso:: :func:`socket.getaddrinfo`
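Example (the host name is illustrative)::

    results = await getaddrinfo("example.org", 80, type=socket.SOCK_STREAM)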
"""
# Handle unicode hostnames
if isinstance(host, str):
try:
encoded_host: bytes | None = host.encode("ascii")
except UnicodeEncodeError:
import idna
encoded_host = idna.encode(host, uts46=True)
else:
encoded_host = host
gai_res = await get_async_backend().getaddrinfo(
encoded_host, port, family=family, type=type, proto=proto, flags=flags
)
return [
(family, type, proto, canonname, convert_ipv6_sockaddr(sockaddr))
for family, type, proto, canonname, sockaddr in gai_res
# filter out IPv6 results when IPv6 is disabled
if not isinstance(sockaddr[0], int)
]
def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[tuple[str, str]]:
"""
Look up the host name of an IP address.
:param sockaddr: socket address (e.g. (ipaddress, port) for IPv4)
:param flags: flags to pass to upstream ``getnameinfo()``
:return: a tuple of (host name, service name)
.. seealso:: :func:`socket.getnameinfo`
"""
return get_async_backend().getnameinfo(sockaddr, flags)
@deprecated("This function is deprecated; use `wait_readable` instead")
def wait_socket_readable(sock: socket.socket) -> Awaitable[None]:
"""
.. deprecated:: 4.7.0
Use :func:`wait_readable` instead.
Wait until the given socket has data to be read.
.. warning:: Only use this on raw sockets that have not been wrapped by any higher
level constructs like socket streams!
:param sock: a socket object
:raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
socket to become readable
:raises ~anyio.BusyResourceError: if another task is already waiting for the socket
to become readable
"""
return get_async_backend().wait_readable(sock.fileno())
@deprecated("This function is deprecated; use `wait_writable` instead")
def wait_socket_writable(sock: socket.socket) -> Awaitable[None]:
"""
.. deprecated:: 4.7.0
Use :func:`wait_writable` instead.
Wait until the given socket can be written to.
This does **NOT** work on Windows when using the asyncio backend with a proactor
event loop (default on py3.8+).
.. warning:: Only use this on raw sockets that have not been wrapped by any higher
level constructs like socket streams!
:param sock: a socket object
:raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
socket to become writable
:raises ~anyio.BusyResourceError: if another task is already waiting for the socket
to become writable
"""
return get_async_backend().wait_writable(sock.fileno())
def wait_readable(obj: FileDescriptorLike) -> Awaitable[None]:
"""
Wait until the given object has data to be read.
On Unix systems, ``obj`` must either be an integer file descriptor, or else an
object with a ``.fileno()`` method which returns an integer file descriptor. Any
kind of file descriptor can be passed, though the exact semantics will depend on
your kernel. For example, this probably won't do anything useful for on-disk files.
On Windows systems, ``obj`` must either be an integer ``SOCKET`` handle, or else an
object with a ``.fileno()`` method which returns an integer ``SOCKET`` handle. File
descriptors aren't supported, and neither are handles that refer to anything besides
a ``SOCKET``.
On backends where this functionality is not natively provided (asyncio
``ProactorEventLoop`` on Windows), it is provided using a separate selector thread
which is set to shut down when the interpreter shuts down.
.. warning:: Don't use this on raw sockets that have been wrapped by any higher
level constructs like socket streams!
:param obj: an object with a ``.fileno()`` method or an integer handle
:raises ~anyio.ClosedResourceError: if the object was closed while waiting for the
object to become readable
:raises ~anyio.BusyResourceError: if another task is already waiting for the object
to become readable
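Example (a minimal sketch; ``sock`` is an already connected raw socket)::

    await wait_readable(sock)
    data = sock.recv(4096)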
"""
return get_async_backend().wait_readable(obj)
def wait_writable(obj: FileDescriptorLike) -> Awaitable[None]:
"""
Wait until the given object can be written to.
:param obj: an object with a ``.fileno()`` method or an integer handle
:raises ~anyio.ClosedResourceError: if the object was closed while waiting for the
object to become writable
:raises ~anyio.BusyResourceError: if another task is already waiting for the object
to become writable
.. seealso:: See the documentation of :func:`wait_readable` for the definition of
``obj`` and notes on backend compatibility.
.. warning:: Don't use this on raw sockets that have been wrapped by any higher
level constructs like socket streams!
"""
return get_async_backend().wait_writable(obj)
def notify_closing(obj: FileDescriptorLike) -> None:
"""
Call this before closing a file descriptor (on Unix) or socket (on
Windows). This will cause any `wait_readable` or `wait_writable`
calls on the given object to immediately wake up and raise
`~anyio.ClosedResourceError`.
This doesn't actually close the object – you still have to do that
yourself afterwards. Also, you want to be careful to make sure no
new tasks start waiting on the object in between when you call this
and when it's actually closed. So to close something properly, you
usually want to do these steps in order:
1. Explicitly mark the object as closed, so that any new attempts
to use it will abort before they start.
2. Call `notify_closing` to wake up any already-existing users.
3. Actually close the object.
It's also possible to do them in a different order if that's more
convenient, *but only if* you make sure not to have any checkpoints in
between the steps. This way they all happen in a single atomic
step, so other tasks won't be able to tell what order they happened
in anyway.
:param obj: an object with a ``.fileno()`` method or an integer handle
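Example (a minimal sketch; ``sock`` and the ``closed`` flag are illustrative)::

    closed = True          # 1. mark the object as closed
    notify_closing(sock)   # 2. wake up any tasks waiting on it
    sock.close()           # 3. actually close the object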
"""
get_async_backend().notify_closing(obj)
#
# Private API
#
def convert_ipv6_sockaddr(
sockaddr: tuple[str, int, int, int] | tuple[str, int],
) -> tuple[str, int]:
"""
Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format.
If the scope ID is nonzero, it is added to the address, separated with ``%``.
Otherwise the flow id and scope id are simply cut off from the tuple.
Any other kinds of socket addresses are returned as-is.
:param sockaddr: the result of :meth:`~socket.socket.getsockname`
:return: the converted socket address
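Example::

    convert_ipv6_sockaddr(("fe80::1", 0, 0, 3))       # -> ("fe80::1%3", 0)
    convert_ipv6_sockaddr(("2001:db8::1", 80, 0, 0))  # -> ("2001:db8::1", 80)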
"""
# This is more complicated than it should be because of MyPy
if isinstance(sockaddr, tuple) and len(sockaddr) == 4:
host, port, flowinfo, scope_id = sockaddr
if scope_id:
# PyPy (as of v7.3.11) leaves the interface name in the result, so
# we discard it and only get the scope ID from the end
# (https://foss.heptapod.net/pypy/pypy/-/issues/3938)
host = host.split("%")[0]
# Add scope_id to the address
return f"{host}%{scope_id}", port
else:
return host, port
else:
return sockaddr
async def setup_unix_local_socket(
path: None | str | bytes | PathLike[Any],
mode: int | None,
socktype: int,
) -> socket.socket:
"""
Create a UNIX local socket object, deleting the socket at the given path if it
exists.
Not available on Windows.
:param path: path of the socket
:param mode: permissions to set on the socket
:param socktype: socket.SOCK_STREAM or socket.SOCK_DGRAM
"""
path_str: str | None
if path is not None:
path_str = os.fsdecode(path)
# Linux abstract namespace sockets aren't backed by a concrete file so skip stat call
if not path_str.startswith("\0"):
# Copied from pathlib...
try:
stat_result = os.stat(path)
except OSError as e:
if e.errno not in (
errno.ENOENT,
errno.ENOTDIR,
errno.EBADF,
errno.ELOOP,
):
raise
else:
if stat.S_ISSOCK(stat_result.st_mode):
os.unlink(path)
else:
path_str = None
raw_socket = socket.socket(socket.AF_UNIX, socktype)
raw_socket.setblocking(False)
if path_str is not None:
try:
await to_thread.run_sync(raw_socket.bind, path_str, abandon_on_cancel=True)
if mode is not None:
await to_thread.run_sync(chmod, path_str, mode, abandon_on_cancel=True)
except BaseException:
raw_socket.close()
raise
return raw_socket
@dataclass
class TCPConnectable(ByteStreamConnectable):
"""
Connects to a TCP server at the given host and port.
:param host: host name or IP address of the server
:param port: TCP port number of the server
"""
host: str | IPv4Address | IPv6Address
port: int
def __post_init__(self) -> None:
if self.port < 1 or self.port > 65535:
raise ValueError("TCP port number out of range")
@override
async def connect(self) -> SocketStream:
try:
return await connect_tcp(self.host, self.port)
except OSError as exc:
raise ConnectionFailed(
f"error connecting to {self.host}:{self.port}: {exc}"
) from exc
@dataclass
class UNIXConnectable(ByteStreamConnectable):
"""
Connects to a UNIX domain socket at the given path.
:param path: the file system path of the socket
"""
path: str | bytes | PathLike[str] | PathLike[bytes]
@override
async def connect(self) -> UNIXSocketStream:
try:
return await connect_unix(self.path)
except OSError as exc:
raise ConnectionFailed(f"error connecting to {self.path!r}: {exc}") from exc
def as_connectable(
remote: ByteStreamConnectable
| tuple[str | IPv4Address | IPv6Address, int]
| str
| bytes
| PathLike[str],
/,
*,
tls: bool = False,
ssl_context: ssl.SSLContext | None = None,
tls_hostname: str | None = None,
tls_standard_compatible: bool = True,
) -> ByteStreamConnectable:
"""
Return a byte stream connectable from the given object.
    If a byte stream connectable is given, it is returned unchanged.
If a tuple of (host, port) is given, a TCP connectable is returned.
If a string or bytes path is given, a UNIX connectable is returned.
If ``tls=True``, the connectable will be wrapped in a
:class:`~.streams.tls.TLSConnectable`.
:param remote: a connectable, a tuple of (host, port) or a path to a UNIX socket
    :param tls: if ``True``, wrap the plaintext connectable in a
        :class:`~.streams.tls.TLSConnectable` using the provided TLS settings
:param ssl_context: if ``tls=True``, the SSLContext object to use (if not provided,
a secure default will be created)
:param tls_hostname: if ``tls=True``, host name of the server to use for checking
the server certificate (defaults to the host portion of the address for TCP
connectables)
:param tls_standard_compatible: if ``False`` and ``tls=True``, makes the TLS stream
skip the closing handshake when closing the connection, so it won't raise an
exception if the server does the same
"""
connectable: TCPConnectable | UNIXConnectable | TLSConnectable
if isinstance(remote, ByteStreamConnectable):
return remote
elif isinstance(remote, tuple) and len(remote) == 2:
connectable = TCPConnectable(*remote)
elif isinstance(remote, (str, bytes, PathLike)):
connectable = UNIXConnectable(remote)
else:
raise TypeError(f"cannot convert {remote!r} to a connectable")
if tls:
if not tls_hostname and isinstance(connectable, TCPConnectable):
tls_hostname = str(connectable.host)
connectable = TLSConnectable(
connectable,
ssl_context=ssl_context,
hostname=tls_hostname,
standard_compatible=tls_standard_compatible,
)
return connectable
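# Usage sketch of the conversion rules above (addresses and paths are examples):
#
#     as_connectable(("10.0.0.1", 6379))              # -> TCPConnectable
#     as_connectable("/run/app.sock")                 # -> UNIXConnectable
#     as_connectable(("example.com", 443), tls=True)  # -> TLSConnectable wrapping
#                                                     #    a TCPConnectable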
anyio-4.11.0/src/anyio/_core/_streams.py 0000664 0000000 0000000 00000003414 15064462627 0020116 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import math
from typing import TypeVar
from warnings import warn
from ..streams.memory import (
MemoryObjectReceiveStream,
MemoryObjectSendStream,
MemoryObjectStreamState,
)
T_Item = TypeVar("T_Item")
class create_memory_object_stream(
tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]],
):
"""
Create a memory object stream.
The stream's item type can be annotated like
:func:`create_memory_object_stream[T_Item]`.
:param max_buffer_size: number of items held in the buffer until ``send()`` starts
blocking
:param item_type: old way of marking the streams with the right generic type for
static typing (does nothing on AnyIO 4)
.. deprecated:: 4.0
Use ``create_memory_object_stream[YourItemType](...)`` instead.
:return: a tuple of (send stream, receive stream)
"""
def __new__( # type: ignore[misc]
cls, max_buffer_size: float = 0, item_type: object = None
) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:
if max_buffer_size != math.inf and not isinstance(max_buffer_size, int):
raise ValueError("max_buffer_size must be either an integer or math.inf")
if max_buffer_size < 0:
raise ValueError("max_buffer_size cannot be negative")
if item_type is not None:
warn(
"The item_type argument has been deprecated in AnyIO 4.0. "
"Use create_memory_object_stream[YourItemType](...) instead.",
DeprecationWarning,
stacklevel=2,
)
state = MemoryObjectStreamState[T_Item](max_buffer_size)
return (MemoryObjectSendStream(state), MemoryObjectReceiveStream(state))
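# Usage sketch (illustrative; assumes a running AnyIO event loop):
#
#     send_stream, receive_stream = create_memory_object_stream[int](max_buffer_size=10)
#     await send_stream.send(42)             # blocks once 10 items are buffered
#     item = await receive_stream.receive()  # -> 42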
anyio-4.11.0/src/anyio/_core/_subprocesses.py 0000664 0000000 0000000 00000017557 15064462627 0021175 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import sys
from collections.abc import AsyncIterable, Iterable, Mapping, Sequence
from io import BytesIO
from os import PathLike
from subprocess import PIPE, CalledProcessError, CompletedProcess
from typing import IO, Any, Union, cast
from ..abc import Process
from ._eventloop import get_async_backend
from ._tasks import create_task_group
if sys.version_info >= (3, 10):
from typing import TypeAlias
else:
from typing_extensions import TypeAlias
StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]
async def run_process(
command: StrOrBytesPath | Sequence[StrOrBytesPath],
*,
input: bytes | None = None,
stdin: int | IO[Any] | None = None,
stdout: int | IO[Any] | None = PIPE,
stderr: int | IO[Any] | None = PIPE,
check: bool = True,
cwd: StrOrBytesPath | None = None,
env: Mapping[str, str] | None = None,
startupinfo: Any = None,
creationflags: int = 0,
start_new_session: bool = False,
pass_fds: Sequence[int] = (),
user: str | int | None = None,
group: str | int | None = None,
extra_groups: Iterable[str | int] | None = None,
umask: int = -1,
) -> CompletedProcess[bytes]:
"""
Run an external command in a subprocess and wait until it completes.
.. seealso:: :func:`subprocess.run`
:param command: either a string to pass to the shell, or an iterable of strings
containing the executable name or path and its arguments
:param input: bytes passed to the standard input of the subprocess
    :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        a file-like object, or ``None``; ``input`` overrides this
    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        a file-like object, or ``None``
    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        :data:`subprocess.STDOUT`, a file-like object, or ``None``
:param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the
process terminates with a return code other than 0
:param cwd: If not ``None``, change the working directory to this before running the
command
:param env: if not ``None``, this mapping replaces the inherited environment
variables from the parent process
:param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
to specify process startup parameters (Windows only)
:param creationflags: flags that can be used to control the creation of the
subprocess (see :class:`subprocess.Popen` for the specifics)
    :param start_new_session: if ``True``, the ``setsid()`` system call will be made
        in the child process prior to the execution of the subprocess (POSIX only)
:param pass_fds: sequence of file descriptors to keep open between the parent and
child processes. (POSIX only)
:param user: effective user to run the process as (Python >= 3.9, POSIX only)
:param group: effective group to run the process as (Python >= 3.9, POSIX only)
:param extra_groups: supplementary groups to set in the subprocess (Python >= 3.9,
POSIX only)
:param umask: if not negative, this umask is applied in the child process before
running the given command (Python >= 3.9, POSIX only)
:return: an object representing the completed process
:raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process
exits with a nonzero return code
"""
async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None:
buffer = BytesIO()
async for chunk in stream:
buffer.write(chunk)
stream_contents[index] = buffer.getvalue()
if stdin is not None and input is not None:
raise ValueError("only one of stdin and input is allowed")
async with await open_process(
command,
stdin=PIPE if input else stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd,
env=env,
startupinfo=startupinfo,
creationflags=creationflags,
start_new_session=start_new_session,
pass_fds=pass_fds,
user=user,
group=group,
extra_groups=extra_groups,
umask=umask,
) as process:
stream_contents: list[bytes | None] = [None, None]
async with create_task_group() as tg:
if process.stdout:
tg.start_soon(drain_stream, process.stdout, 0)
if process.stderr:
tg.start_soon(drain_stream, process.stderr, 1)
if process.stdin and input:
await process.stdin.send(input)
await process.stdin.aclose()
await process.wait()
output, errors = stream_contents
if check and process.returncode != 0:
raise CalledProcessError(cast(int, process.returncode), command, output, errors)
return CompletedProcess(command, cast(int, process.returncode), output, errors)
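# Usage sketch (illustrative; assumes a POSIX ``echo`` binary is available):
#
#     result = await run_process(["echo", "hello"])
#     assert result.returncode == 0
#     assert result.stdout == b"hello\n"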
async def open_process(
command: StrOrBytesPath | Sequence[StrOrBytesPath],
*,
stdin: int | IO[Any] | None = PIPE,
stdout: int | IO[Any] | None = PIPE,
stderr: int | IO[Any] | None = PIPE,
cwd: StrOrBytesPath | None = None,
env: Mapping[str, str] | None = None,
startupinfo: Any = None,
creationflags: int = 0,
start_new_session: bool = False,
pass_fds: Sequence[int] = (),
user: str | int | None = None,
group: str | int | None = None,
extra_groups: Iterable[str | int] | None = None,
umask: int = -1,
) -> Process:
"""
Start an external command in a subprocess.
.. seealso:: :class:`subprocess.Popen`
:param command: either a string to pass to the shell, or an iterable of strings
containing the executable name or path and its arguments
:param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a
file-like object, or ``None``
:param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
a file-like object, or ``None``
:param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
:data:`subprocess.STDOUT`, a file-like object, or ``None``
:param cwd: If not ``None``, the working directory is changed before executing
:param env: If env is not ``None``, it must be a mapping that defines the
environment variables for the new process
:param creationflags: flags that can be used to control the creation of the
subprocess (see :class:`subprocess.Popen` for the specifics)
:param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
to specify process startup parameters (Windows only)
    :param start_new_session: if ``True``, the ``setsid()`` system call will be made
        in the child process prior to the execution of the subprocess (POSIX only)
:param pass_fds: sequence of file descriptors to keep open between the parent and
child processes. (POSIX only)
:param user: effective user to run the process as (POSIX only)
:param group: effective group to run the process as (POSIX only)
:param extra_groups: supplementary groups to set in the subprocess (POSIX only)
:param umask: if not negative, this umask is applied in the child process before
running the given command (POSIX only)
:return: an asynchronous process object
"""
kwargs: dict[str, Any] = {}
if user is not None:
kwargs["user"] = user
if group is not None:
kwargs["group"] = group
if extra_groups is not None:
kwargs["extra_groups"] = group
if umask >= 0:
kwargs["umask"] = umask
return await get_async_backend().open_process(
command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd,
env=env,
startupinfo=startupinfo,
creationflags=creationflags,
start_new_session=start_new_session,
pass_fds=pass_fds,
**kwargs,
)
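# Usage sketch (illustrative; assumes a POSIX ``cat`` binary is available):
#
#     async with await open_process(["cat"]) as process:
#         await process.stdin.send(b"hello")
#         await process.stdin.aclose()
#         output = await process.stdout.receive()  # some or all of b"hello"
#         await process.wait()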
anyio-4.11.0/src/anyio/_core/_synchronization.py 0000664 0000000 0000000 00000050572 15064462627 0021710 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import math
from collections import deque
from collections.abc import Callable
from dataclasses import dataclass
from types import TracebackType
from typing import TypeVar
from sniffio import AsyncLibraryNotFoundError
from ..lowlevel import checkpoint_if_cancelled
from ._eventloop import get_async_backend
from ._exceptions import BusyResourceError
from ._tasks import CancelScope
from ._testing import TaskInfo, get_current_task
T = TypeVar("T")
@dataclass(frozen=True)
class EventStatistics:
"""
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
"""
tasks_waiting: int
@dataclass(frozen=True)
class CapacityLimiterStatistics:
"""
:ivar int borrowed_tokens: number of tokens currently borrowed by tasks
:ivar float total_tokens: total number of available tokens
:ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from
this limiter
:ivar int tasks_waiting: number of tasks waiting on
:meth:`~.CapacityLimiter.acquire` or
:meth:`~.CapacityLimiter.acquire_on_behalf_of`
"""
borrowed_tokens: int
total_tokens: float
borrowers: tuple[object, ...]
tasks_waiting: int
@dataclass(frozen=True)
class LockStatistics:
"""
:ivar bool locked: flag indicating if this lock is locked or not
:ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the
lock is not held by any task)
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
"""
locked: bool
owner: TaskInfo | None
tasks_waiting: int
@dataclass(frozen=True)
class ConditionStatistics:
"""
:ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
:ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying
:class:`~.Lock`
"""
tasks_waiting: int
lock_statistics: LockStatistics
@dataclass(frozen=True)
class SemaphoreStatistics:
"""
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`
"""
tasks_waiting: int
class Event:
def __new__(cls) -> Event:
try:
return get_async_backend().create_event()
except AsyncLibraryNotFoundError:
return EventAdapter()
def set(self) -> None:
"""Set the flag, notifying all listeners."""
raise NotImplementedError
def is_set(self) -> bool:
"""Return ``True`` if the flag is set, ``False`` if not."""
raise NotImplementedError
async def wait(self) -> None:
"""
Wait until the flag has been set.
If the flag has already been set when this method is called, it returns
immediately.
"""
raise NotImplementedError
def statistics(self) -> EventStatistics:
"""Return statistics about the current state of this event."""
raise NotImplementedError
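# Usage sketch (illustrative):
#
#     from anyio import Event, create_task_group
#
#     async def waiter(event: Event) -> None:
#         await event.wait()  # returns once set() has been called
#
#     async def main() -> None:
#         event = Event()
#         async with create_task_group() as tg:
#             tg.start_soon(waiter, event)
#             event.set()  # wakes all waiters; an event cannot be unset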
class EventAdapter(Event):
_internal_event: Event | None = None
_is_set: bool = False
def __new__(cls) -> EventAdapter:
return object.__new__(cls)
@property
def _event(self) -> Event:
if self._internal_event is None:
self._internal_event = get_async_backend().create_event()
if self._is_set:
self._internal_event.set()
return self._internal_event
def set(self) -> None:
if self._internal_event is None:
self._is_set = True
else:
self._event.set()
def is_set(self) -> bool:
if self._internal_event is None:
return self._is_set
return self._internal_event.is_set()
async def wait(self) -> None:
await self._event.wait()
def statistics(self) -> EventStatistics:
if self._internal_event is None:
return EventStatistics(tasks_waiting=0)
return self._internal_event.statistics()
class Lock:
def __new__(cls, *, fast_acquire: bool = False) -> Lock:
try:
return get_async_backend().create_lock(fast_acquire=fast_acquire)
except AsyncLibraryNotFoundError:
return LockAdapter(fast_acquire=fast_acquire)
async def __aenter__(self) -> None:
await self.acquire()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.release()
async def acquire(self) -> None:
"""Acquire the lock."""
raise NotImplementedError
def acquire_nowait(self) -> None:
"""
Acquire the lock, without blocking.
:raises ~anyio.WouldBlock: if the operation would block
"""
raise NotImplementedError
def release(self) -> None:
"""Release the lock."""
raise NotImplementedError
def locked(self) -> bool:
"""Return True if the lock is currently held."""
raise NotImplementedError
def statistics(self) -> LockStatistics:
"""
Return statistics about the current state of this lock.
.. versionadded:: 3.0
"""
raise NotImplementedError
class LockAdapter(Lock):
_internal_lock: Lock | None = None
def __new__(cls, *, fast_acquire: bool = False) -> LockAdapter:
return object.__new__(cls)
def __init__(self, *, fast_acquire: bool = False):
self._fast_acquire = fast_acquire
@property
def _lock(self) -> Lock:
if self._internal_lock is None:
self._internal_lock = get_async_backend().create_lock(
fast_acquire=self._fast_acquire
)
return self._internal_lock
async def __aenter__(self) -> None:
await self._lock.acquire()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
if self._internal_lock is not None:
self._internal_lock.release()
async def acquire(self) -> None:
"""Acquire the lock."""
await self._lock.acquire()
def acquire_nowait(self) -> None:
"""
Acquire the lock, without blocking.
:raises ~anyio.WouldBlock: if the operation would block
"""
self._lock.acquire_nowait()
def release(self) -> None:
"""Release the lock."""
self._lock.release()
def locked(self) -> bool:
"""Return True if the lock is currently held."""
return self._lock.locked()
def statistics(self) -> LockStatistics:
"""
Return statistics about the current state of this lock.
.. versionadded:: 3.0
"""
if self._internal_lock is None:
return LockStatistics(False, None, 0)
return self._internal_lock.statistics()
class Condition:
_owner_task: TaskInfo | None = None
def __init__(self, lock: Lock | None = None):
self._lock = lock or Lock()
self._waiters: deque[Event] = deque()
async def __aenter__(self) -> None:
await self.acquire()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.release()
def _check_acquired(self) -> None:
if self._owner_task != get_current_task():
raise RuntimeError("The current task is not holding the underlying lock")
async def acquire(self) -> None:
"""Acquire the underlying lock."""
await self._lock.acquire()
self._owner_task = get_current_task()
def acquire_nowait(self) -> None:
"""
Acquire the underlying lock, without blocking.
:raises ~anyio.WouldBlock: if the operation would block
"""
self._lock.acquire_nowait()
self._owner_task = get_current_task()
def release(self) -> None:
"""Release the underlying lock."""
self._lock.release()
def locked(self) -> bool:
"""Return True if the lock is set."""
return self._lock.locked()
def notify(self, n: int = 1) -> None:
"""Notify exactly n listeners."""
self._check_acquired()
for _ in range(n):
try:
event = self._waiters.popleft()
except IndexError:
break
event.set()
def notify_all(self) -> None:
"""Notify all the listeners."""
self._check_acquired()
for event in self._waiters:
event.set()
self._waiters.clear()
async def wait(self) -> None:
"""Wait for a notification."""
await checkpoint_if_cancelled()
self._check_acquired()
event = Event()
self._waiters.append(event)
self.release()
try:
await event.wait()
except BaseException:
if not event.is_set():
self._waiters.remove(event)
raise
finally:
with CancelScope(shield=True):
await self.acquire()
async def wait_for(self, predicate: Callable[[], T]) -> T:
"""
Wait until a predicate becomes true.
:param predicate: a callable that returns a truthy value when the condition is
met
:return: the result of the predicate
.. versionadded:: 4.11.0
"""
while not (result := predicate()):
await self.wait()
return result
def statistics(self) -> ConditionStatistics:
"""
Return statistics about the current state of this condition.
.. versionadded:: 3.0
"""
return ConditionStatistics(len(self._waiters), self._lock.statistics())
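# Usage sketch for wait_for() (illustrative; ``items`` is a hypothetical shared list
# guarded by the condition):
#
#     condition = Condition()
#     items: list[int] = []
#
#     async def consumer() -> None:
#         async with condition:
#             await condition.wait_for(lambda: len(items) > 0)
#             item = items.pop(0)
#
#     async def producer() -> None:
#         async with condition:
#             items.append(42)
#             condition.notify()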
class Semaphore:
def __new__(
cls,
initial_value: int,
*,
max_value: int | None = None,
fast_acquire: bool = False,
) -> Semaphore:
try:
return get_async_backend().create_semaphore(
initial_value, max_value=max_value, fast_acquire=fast_acquire
)
except AsyncLibraryNotFoundError:
return SemaphoreAdapter(initial_value, max_value=max_value)
def __init__(
self,
initial_value: int,
*,
max_value: int | None = None,
fast_acquire: bool = False,
):
if not isinstance(initial_value, int):
raise TypeError("initial_value must be an integer")
if initial_value < 0:
raise ValueError("initial_value must be >= 0")
if max_value is not None:
if not isinstance(max_value, int):
raise TypeError("max_value must be an integer or None")
if max_value < initial_value:
raise ValueError(
"max_value must be equal to or higher than initial_value"
)
self._fast_acquire = fast_acquire
async def __aenter__(self) -> Semaphore:
await self.acquire()
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.release()
async def acquire(self) -> None:
"""Decrement the semaphore value, blocking if necessary."""
raise NotImplementedError
def acquire_nowait(self) -> None:
"""
        Acquire the semaphore, without blocking.
:raises ~anyio.WouldBlock: if the operation would block
"""
raise NotImplementedError
def release(self) -> None:
"""Increment the semaphore value."""
raise NotImplementedError
@property
def value(self) -> int:
"""The current value of the semaphore."""
raise NotImplementedError
@property
def max_value(self) -> int | None:
"""The maximum value of the semaphore."""
raise NotImplementedError
def statistics(self) -> SemaphoreStatistics:
"""
Return statistics about the current state of this semaphore.
.. versionadded:: 3.0
"""
raise NotImplementedError
class SemaphoreAdapter(Semaphore):
_internal_semaphore: Semaphore | None = None
def __new__(
cls,
initial_value: int,
*,
max_value: int | None = None,
fast_acquire: bool = False,
) -> SemaphoreAdapter:
return object.__new__(cls)
def __init__(
self,
initial_value: int,
*,
max_value: int | None = None,
fast_acquire: bool = False,
) -> None:
super().__init__(initial_value, max_value=max_value, fast_acquire=fast_acquire)
self._initial_value = initial_value
self._max_value = max_value
@property
def _semaphore(self) -> Semaphore:
if self._internal_semaphore is None:
self._internal_semaphore = get_async_backend().create_semaphore(
self._initial_value, max_value=self._max_value
)
return self._internal_semaphore
async def acquire(self) -> None:
await self._semaphore.acquire()
def acquire_nowait(self) -> None:
self._semaphore.acquire_nowait()
def release(self) -> None:
self._semaphore.release()
@property
def value(self) -> int:
if self._internal_semaphore is None:
return self._initial_value
return self._semaphore.value
@property
def max_value(self) -> int | None:
return self._max_value
def statistics(self) -> SemaphoreStatistics:
if self._internal_semaphore is None:
return SemaphoreStatistics(tasks_waiting=0)
return self._semaphore.statistics()
class CapacityLimiter:
def __new__(cls, total_tokens: float) -> CapacityLimiter:
try:
return get_async_backend().create_capacity_limiter(total_tokens)
except AsyncLibraryNotFoundError:
return CapacityLimiterAdapter(total_tokens)
async def __aenter__(self) -> None:
raise NotImplementedError
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
raise NotImplementedError
@property
def total_tokens(self) -> float:
"""
The total number of tokens available for borrowing.
        This is a read-write property. If the total number of tokens is increased, a
        corresponding number of tasks waiting on this limiter will be granted their
        tokens.
.. versionchanged:: 3.0
The property is now writable.
"""
raise NotImplementedError
@total_tokens.setter
def total_tokens(self, value: float) -> None:
raise NotImplementedError
@property
def borrowed_tokens(self) -> int:
"""The number of tokens that have currently been borrowed."""
raise NotImplementedError
@property
def available_tokens(self) -> float:
"""The number of tokens currently available to be borrowed"""
raise NotImplementedError
def acquire_nowait(self) -> None:
"""
Acquire a token for the current task without waiting for one to become
available.
:raises ~anyio.WouldBlock: if there are no tokens available for borrowing
"""
raise NotImplementedError
def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
"""
Acquire a token without waiting for one to become available.
:param borrower: the entity borrowing a token
:raises ~anyio.WouldBlock: if there are no tokens available for borrowing
"""
raise NotImplementedError
async def acquire(self) -> None:
"""
Acquire a token for the current task, waiting if necessary for one to become
available.
"""
raise NotImplementedError
async def acquire_on_behalf_of(self, borrower: object) -> None:
"""
Acquire a token, waiting if necessary for one to become available.
:param borrower: the entity borrowing a token
"""
raise NotImplementedError
def release(self) -> None:
"""
Release the token held by the current task.
:raises RuntimeError: if the current task has not borrowed a token from this
limiter.
"""
raise NotImplementedError
def release_on_behalf_of(self, borrower: object) -> None:
"""
Release the token held by the given borrower.
:raises RuntimeError: if the borrower has not borrowed a token from this
limiter.
"""
raise NotImplementedError
def statistics(self) -> CapacityLimiterStatistics:
"""
Return statistics about the current state of this limiter.
.. versionadded:: 3.0
"""
raise NotImplementedError
class CapacityLimiterAdapter(CapacityLimiter):
_internal_limiter: CapacityLimiter | None = None
def __new__(cls, total_tokens: float) -> CapacityLimiterAdapter:
return object.__new__(cls)
def __init__(self, total_tokens: float) -> None:
self.total_tokens = total_tokens
@property
def _limiter(self) -> CapacityLimiter:
if self._internal_limiter is None:
self._internal_limiter = get_async_backend().create_capacity_limiter(
self._total_tokens
)
return self._internal_limiter
async def __aenter__(self) -> None:
await self._limiter.__aenter__()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
return await self._limiter.__aexit__(exc_type, exc_val, exc_tb)
@property
def total_tokens(self) -> float:
if self._internal_limiter is None:
return self._total_tokens
return self._internal_limiter.total_tokens
@total_tokens.setter
def total_tokens(self, value: float) -> None:
if not isinstance(value, int) and value is not math.inf:
raise TypeError("total_tokens must be an int or math.inf")
elif value < 1:
raise ValueError("total_tokens must be >= 1")
if self._internal_limiter is None:
self._total_tokens = value
return
self._limiter.total_tokens = value
@property
def borrowed_tokens(self) -> int:
if self._internal_limiter is None:
return 0
return self._internal_limiter.borrowed_tokens
@property
def available_tokens(self) -> float:
if self._internal_limiter is None:
return self._total_tokens
return self._internal_limiter.available_tokens
def acquire_nowait(self) -> None:
self._limiter.acquire_nowait()
def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
self._limiter.acquire_on_behalf_of_nowait(borrower)
async def acquire(self) -> None:
await self._limiter.acquire()
async def acquire_on_behalf_of(self, borrower: object) -> None:
await self._limiter.acquire_on_behalf_of(borrower)
def release(self) -> None:
self._limiter.release()
def release_on_behalf_of(self, borrower: object) -> None:
self._limiter.release_on_behalf_of(borrower)
def statistics(self) -> CapacityLimiterStatistics:
if self._internal_limiter is None:
return CapacityLimiterStatistics(
borrowed_tokens=0,
total_tokens=self.total_tokens,
borrowers=(),
tasks_waiting=0,
)
return self._internal_limiter.statistics()
class ResourceGuard:
"""
A context manager for ensuring that a resource is only used by a single task at a
time.
    Entering this context manager while a previous entry has not yet exited will
    trigger :exc:`BusyResourceError`.
:param action: the action to guard against (visible in the :exc:`BusyResourceError`
when triggered, e.g. "Another task is already {action} this resource")
.. versionadded:: 4.1
"""
__slots__ = "action", "_guarded"
def __init__(self, action: str = "using"):
self.action: str = action
self._guarded = False
def __enter__(self) -> None:
if self._guarded:
raise BusyResourceError(self.action)
self._guarded = True
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self._guarded = False
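# Usage sketch (illustrative; mirrors the pattern used by AnyIO's own streams):
#
#     class SingleReaderStream:
#         def __init__(self) -> None:
#             self._receive_guard = ResourceGuard("reading from")
#
#         async def receive(self) -> bytes:
#             with self._receive_guard:  # concurrent entry raises BusyResourceError
#                 ...  # perform the actual read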
anyio-4.11.0/src/anyio/_core/_tasks.py 0000664 0000000 0000000 00000011420 15064462627 0017561 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import math
from collections.abc import Generator
from contextlib import contextmanager
from types import TracebackType
from ..abc._tasks import TaskGroup, TaskStatus
from ._eventloop import get_async_backend
class _IgnoredTaskStatus(TaskStatus[object]):
def started(self, value: object = None) -> None:
pass
TASK_STATUS_IGNORED = _IgnoredTaskStatus()
class CancelScope:
"""
Wraps a unit of work that can be made separately cancellable.
:param deadline: The time (clock value) when this scope is cancelled automatically
:param shield: ``True`` to shield the cancel scope from external cancellation
"""
def __new__(
cls, *, deadline: float = math.inf, shield: bool = False
) -> CancelScope:
return get_async_backend().create_cancel_scope(shield=shield, deadline=deadline)
def cancel(self, reason: str | None = None) -> None:
"""
Cancel this scope immediately.
:param reason: a message describing the reason for the cancellation
"""
raise NotImplementedError
@property
def deadline(self) -> float:
"""
The time (clock value) when this scope is cancelled automatically.
Will be ``float('inf')`` if no timeout has been set.
"""
raise NotImplementedError
@deadline.setter
def deadline(self, value: float) -> None:
raise NotImplementedError
@property
def cancel_called(self) -> bool:
"""``True`` if :meth:`cancel` has been called."""
raise NotImplementedError
@property
def cancelled_caught(self) -> bool:
"""
        ``True`` if this scope suppressed a cancellation exception that it itself
        raised.
This is typically used to check if any work was interrupted, or to see if the
scope was cancelled due to its deadline being reached. The value will, however,
only be ``True`` if the cancellation was triggered by the scope itself (and not
an outer scope).
"""
raise NotImplementedError
@property
def shield(self) -> bool:
"""
``True`` if this scope is shielded from external cancellation.
While a scope is shielded, it will not receive cancellations from outside.
"""
raise NotImplementedError
@shield.setter
def shield(self, value: bool) -> None:
raise NotImplementedError
def __enter__(self) -> CancelScope:
raise NotImplementedError
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool:
raise NotImplementedError
@contextmanager
def fail_after(
delay: float | None, shield: bool = False
) -> Generator[CancelScope, None, None]:
"""
    Create a context manager which raises a :class:`TimeoutError` if the wrapped block
    does not finish in time.
:param delay: maximum allowed time (in seconds) before raising the exception, or
``None`` to disable the timeout
:param shield: ``True`` to shield the cancel scope from external cancellation
:return: a context manager that yields a cancel scope
:rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.CancelScope`\\]
"""
current_time = get_async_backend().current_time
deadline = (current_time() + delay) if delay is not None else math.inf
with get_async_backend().create_cancel_scope(
deadline=deadline, shield=shield
) as cancel_scope:
yield cancel_scope
if cancel_scope.cancelled_caught and current_time() >= cancel_scope.deadline:
raise TimeoutError
def move_on_after(delay: float | None, shield: bool = False) -> CancelScope:
"""
Create a cancel scope with a deadline that expires after the given delay.
:param delay: maximum allowed time (in seconds) before exiting the context block, or
``None`` to disable the timeout
:param shield: ``True`` to shield the cancel scope from external cancellation
:return: a cancel scope
"""
deadline = (
(get_async_backend().current_time() + delay) if delay is not None else math.inf
)
return get_async_backend().create_cancel_scope(deadline=deadline, shield=shield)
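# Usage sketch contrasting the two timeout helpers above (``do_work()`` is a
# hypothetical coroutine):
#
#     try:
#         with fail_after(5):
#             await do_work()
#     except TimeoutError:
#         ...  # the block did not finish within 5 seconds
#
#     with move_on_after(5) as scope:
#         await do_work()
#     if scope.cancelled_caught:
#         ...  # the deadline expired, but no exception was raised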
def current_effective_deadline() -> float:
"""
Return the nearest deadline among all the cancel scopes effective for the current
task.
:return: a clock value from the event loop's internal clock (or ``float('inf')`` if
there is no deadline in effect, or ``float('-inf')`` if the current scope has
been cancelled)
:rtype: float
"""
return get_async_backend().current_effective_deadline()
def create_task_group() -> TaskGroup:
"""
Create a task group.
:return: a task group
"""
return get_async_backend().create_task_group()
anyio-4.11.0/src/anyio/_core/_tempfile.py 0000664 0000000 0000000 00000046361 15064462627 0020255 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import os
import sys
import tempfile
from collections.abc import Iterable
from io import BytesIO, TextIOWrapper
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
AnyStr,
Generic,
overload,
)
from .. import to_thread
from .._core._fileio import AsyncFile
from ..lowlevel import checkpoint_if_cancelled
if TYPE_CHECKING:
from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
class TemporaryFile(Generic[AnyStr]):
"""
An asynchronous temporary file that is automatically created and cleaned up.
This class provides an asynchronous context manager interface to a temporary file.
    The file is created using Python's standard :func:`tempfile.TemporaryFile` function
    in a background thread, and is wrapped as an asynchronous file using
    :class:`AsyncFile`.
:param mode: The mode in which the file is opened. Defaults to "w+b".
:param buffering: The buffering policy (-1 means the default buffering).
:param encoding: The encoding used to decode or encode the file. Only applicable in
text mode.
:param newline: Controls how universal newlines mode works (only applicable in text
mode).
:param suffix: The suffix for the temporary file name.
:param prefix: The prefix for the temporary file name.
:param dir: The directory in which the temporary file is created.
:param errors: The error handling scheme used for encoding/decoding errors.
"""
_async_file: AsyncFile[AnyStr]
@overload
def __init__(
self: TemporaryFile[bytes],
mode: OpenBinaryMode = ...,
buffering: int = ...,
encoding: str | None = ...,
newline: str | None = ...,
suffix: str | None = ...,
prefix: str | None = ...,
dir: str | None = ...,
*,
errors: str | None = ...,
): ...
@overload
def __init__(
self: TemporaryFile[str],
mode: OpenTextMode,
buffering: int = ...,
encoding: str | None = ...,
newline: str | None = ...,
suffix: str | None = ...,
prefix: str | None = ...,
dir: str | None = ...,
*,
errors: str | None = ...,
): ...
def __init__(
self,
mode: OpenTextMode | OpenBinaryMode = "w+b",
buffering: int = -1,
encoding: str | None = None,
newline: str | None = None,
suffix: str | None = None,
prefix: str | None = None,
dir: str | None = None,
*,
errors: str | None = None,
) -> None:
self.mode = mode
self.buffering = buffering
self.encoding = encoding
self.newline = newline
self.suffix: str | None = suffix
self.prefix: str | None = prefix
self.dir: str | None = dir
self.errors = errors
async def __aenter__(self) -> AsyncFile[AnyStr]:
fp = await to_thread.run_sync(
lambda: tempfile.TemporaryFile(
self.mode,
self.buffering,
self.encoding,
self.newline,
self.suffix,
self.prefix,
self.dir,
errors=self.errors,
)
)
self._async_file = AsyncFile(fp)
return self._async_file
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
await self._async_file.aclose()
class NamedTemporaryFile(Generic[AnyStr]):
"""
An asynchronous named temporary file that is automatically created and cleaned up.
This class provides an asynchronous context manager for a temporary file with a
visible name in the file system. It uses Python's standard
:func:`~tempfile.NamedTemporaryFile` function and wraps the file object with
:class:`AsyncFile` for asynchronous operations.
:param mode: The mode in which the file is opened. Defaults to "w+b".
:param buffering: The buffering policy (-1 means the default buffering).
:param encoding: The encoding used to decode or encode the file. Only applicable in
text mode.
:param newline: Controls how universal newlines mode works (only applicable in text
mode).
:param suffix: The suffix for the temporary file name.
:param prefix: The prefix for the temporary file name.
:param dir: The directory in which the temporary file is created.
:param delete: Whether to delete the file when it is closed.
:param errors: The error handling scheme used for encoding/decoding errors.
:param delete_on_close: (Python 3.12+) Whether to delete the file on close.
"""
_async_file: AsyncFile[AnyStr]
@overload
def __init__(
self: NamedTemporaryFile[bytes],
mode: OpenBinaryMode = ...,
buffering: int = ...,
encoding: str | None = ...,
newline: str | None = ...,
suffix: str | None = ...,
prefix: str | None = ...,
dir: str | None = ...,
delete: bool = ...,
*,
errors: str | None = ...,
delete_on_close: bool = ...,
): ...
@overload
def __init__(
self: NamedTemporaryFile[str],
mode: OpenTextMode,
buffering: int = ...,
encoding: str | None = ...,
newline: str | None = ...,
suffix: str | None = ...,
prefix: str | None = ...,
dir: str | None = ...,
delete: bool = ...,
*,
errors: str | None = ...,
delete_on_close: bool = ...,
): ...
def __init__(
self,
mode: OpenBinaryMode | OpenTextMode = "w+b",
buffering: int = -1,
encoding: str | None = None,
newline: str | None = None,
suffix: str | None = None,
prefix: str | None = None,
dir: str | None = None,
delete: bool = True,
*,
errors: str | None = None,
delete_on_close: bool = True,
) -> None:
self._params: dict[str, Any] = {
"mode": mode,
"buffering": buffering,
"encoding": encoding,
"newline": newline,
"suffix": suffix,
"prefix": prefix,
"dir": dir,
"delete": delete,
"errors": errors,
}
if sys.version_info >= (3, 12):
self._params["delete_on_close"] = delete_on_close
async def __aenter__(self) -> AsyncFile[AnyStr]:
fp = await to_thread.run_sync(
lambda: tempfile.NamedTemporaryFile(**self._params)
)
self._async_file = AsyncFile(fp)
return self._async_file
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
await self._async_file.aclose()
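# Usage sketch (illustrative; ``wrapped`` exposes the underlying file object):
#
#     async with NamedTemporaryFile(suffix=".log") as f:
#         await f.write(b"hello")
#         path = f.wrapped.name  # the visible path of the temporary file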
class SpooledTemporaryFile(AsyncFile[AnyStr]):
"""
An asynchronous spooled temporary file that starts in memory and is spooled to disk.
This class provides an asynchronous interface to a spooled temporary file, much like
Python's standard :class:`~tempfile.SpooledTemporaryFile`. It supports asynchronous
write operations and provides a method to force a rollover to disk.
:param max_size: Maximum size in bytes before the file is rolled over to disk.
:param mode: The mode in which the file is opened. Defaults to "w+b".
:param buffering: The buffering policy (-1 means the default buffering).
:param encoding: The encoding used to decode or encode the file (text mode only).
:param newline: Controls how universal newlines mode works (text mode only).
:param suffix: The suffix for the temporary file name.
:param prefix: The prefix for the temporary file name.
:param dir: The directory in which the temporary file is created.
:param errors: The error handling scheme used for encoding/decoding errors.
"""
_rolled: bool = False
@overload
def __init__(
self: SpooledTemporaryFile[bytes],
max_size: int = ...,
mode: OpenBinaryMode = ...,
buffering: int = ...,
encoding: str | None = ...,
newline: str | None = ...,
suffix: str | None = ...,
prefix: str | None = ...,
dir: str | None = ...,
*,
errors: str | None = ...,
): ...
@overload
def __init__(
self: SpooledTemporaryFile[str],
max_size: int = ...,
mode: OpenTextMode = ...,
buffering: int = ...,
encoding: str | None = ...,
newline: str | None = ...,
suffix: str | None = ...,
prefix: str | None = ...,
dir: str | None = ...,
*,
errors: str | None = ...,
): ...
def __init__(
self,
max_size: int = 0,
mode: OpenBinaryMode | OpenTextMode = "w+b",
buffering: int = -1,
encoding: str | None = None,
newline: str | None = None,
suffix: str | None = None,
prefix: str | None = None,
dir: str | None = None,
*,
errors: str | None = None,
) -> None:
self._tempfile_params: dict[str, Any] = {
"mode": mode,
"buffering": buffering,
"encoding": encoding,
"newline": newline,
"suffix": suffix,
"prefix": prefix,
"dir": dir,
"errors": errors,
}
self._max_size = max_size
if "b" in mode:
super().__init__(BytesIO()) # type: ignore[arg-type]
else:
super().__init__(
TextIOWrapper( # type: ignore[arg-type]
BytesIO(),
encoding=encoding,
errors=errors,
newline=newline,
write_through=True,
)
)
async def aclose(self) -> None:
if not self._rolled:
self._fp.close()
return
await super().aclose()
async def _check(self) -> None:
if self._rolled or self._fp.tell() <= self._max_size:
return
await self.rollover()
async def rollover(self) -> None:
if self._rolled:
return
self._rolled = True
buffer = self._fp
buffer.seek(0)
self._fp = await to_thread.run_sync(
lambda: tempfile.TemporaryFile(**self._tempfile_params)
)
await self.write(buffer.read())
buffer.close()
@property
def closed(self) -> bool:
return self._fp.closed
async def read(self, size: int = -1) -> AnyStr:
if not self._rolled:
await checkpoint_if_cancelled()
return self._fp.read(size)
return await super().read(size) # type: ignore[return-value]
async def read1(self: SpooledTemporaryFile[bytes], size: int = -1) -> bytes:
if not self._rolled:
await checkpoint_if_cancelled()
return self._fp.read1(size)
return await super().read1(size)
async def readline(self) -> AnyStr:
if not self._rolled:
await checkpoint_if_cancelled()
return self._fp.readline()
return await super().readline() # type: ignore[return-value]
async def readlines(self) -> list[AnyStr]:
if not self._rolled:
await checkpoint_if_cancelled()
return self._fp.readlines()
return await super().readlines() # type: ignore[return-value]
    async def readinto(self: SpooledTemporaryFile[bytes], b: WriteableBuffer) -> int:
        if not self._rolled:
            await checkpoint_if_cancelled()
            return self._fp.readinto(b)
        return await super().readinto(b)
    async def readinto1(self: SpooledTemporaryFile[bytes], b: WriteableBuffer) -> int:
        if not self._rolled:
            await checkpoint_if_cancelled()
            return self._fp.readinto1(b)
        return await super().readinto1(b)
async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int:
if not self._rolled:
await checkpoint_if_cancelled()
return self._fp.seek(offset, whence)
return await super().seek(offset, whence)
async def tell(self) -> int:
if not self._rolled:
await checkpoint_if_cancelled()
return self._fp.tell()
return await super().tell()
async def truncate(self, size: int | None = None) -> int:
if not self._rolled:
await checkpoint_if_cancelled()
return self._fp.truncate(size)
return await super().truncate(size)
@overload
async def write(self: SpooledTemporaryFile[bytes], b: ReadableBuffer) -> int: ...
@overload
async def write(self: SpooledTemporaryFile[str], b: str) -> int: ...
async def write(self, b: ReadableBuffer | str) -> int:
"""
Asynchronously write data to the spooled temporary file.
If the file has not yet been rolled over, the data is written synchronously,
and a rollover is triggered if the size exceeds the maximum size.
        :param b: The data to write.
:return: The number of bytes written.
:raises RuntimeError: If the underlying file is not initialized.
"""
if not self._rolled:
await checkpoint_if_cancelled()
result = self._fp.write(b)
await self._check()
return result
return await super().write(b) # type: ignore[misc]
@overload
async def writelines(
self: SpooledTemporaryFile[bytes], lines: Iterable[ReadableBuffer]
) -> None: ...
@overload
async def writelines(
self: SpooledTemporaryFile[str], lines: Iterable[str]
) -> None: ...
async def writelines(self, lines: Iterable[str] | Iterable[ReadableBuffer]) -> None:
"""
Asynchronously write a list of lines to the spooled temporary file.
If the file has not yet been rolled over, the lines are written synchronously,
and a rollover is triggered if the size exceeds the maximum size.
:param lines: An iterable of lines to write.
:raises RuntimeError: If the underlying file is not initialized.
"""
if not self._rolled:
await checkpoint_if_cancelled()
result = self._fp.writelines(lines)
await self._check()
return result
return await super().writelines(lines) # type: ignore[misc]
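# Usage sketch (illustrative):
#
#     async with SpooledTemporaryFile(max_size=1024) as f:
#         await f.write(b"x" * 2048)  # exceeds max_size, so the file rolls over to disk
#         await f.seek(0)
#         data = await f.read()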
class TemporaryDirectory(Generic[AnyStr]):
"""
An asynchronous temporary directory that is created and cleaned up automatically.
This class provides an asynchronous context manager for creating a temporary
directory. It wraps Python's standard :class:`~tempfile.TemporaryDirectory` to
perform directory creation and cleanup operations in a background thread.
:param suffix: Suffix to be added to the temporary directory name.
:param prefix: Prefix to be added to the temporary directory name.
:param dir: The parent directory where the temporary directory is created.
:param ignore_cleanup_errors: Whether to ignore errors during cleanup
(Python 3.10+).
:param delete: Whether to delete the directory upon closing (Python 3.12+).
"""
def __init__(
self,
suffix: AnyStr | None = None,
prefix: AnyStr | None = None,
dir: AnyStr | None = None,
*,
ignore_cleanup_errors: bool = False,
delete: bool = True,
) -> None:
self.suffix: AnyStr | None = suffix
self.prefix: AnyStr | None = prefix
self.dir: AnyStr | None = dir
self.ignore_cleanup_errors = ignore_cleanup_errors
self.delete = delete
self._tempdir: tempfile.TemporaryDirectory | None = None
async def __aenter__(self) -> str:
params: dict[str, Any] = {
"suffix": self.suffix,
"prefix": self.prefix,
"dir": self.dir,
}
if sys.version_info >= (3, 10):
params["ignore_cleanup_errors"] = self.ignore_cleanup_errors
if sys.version_info >= (3, 12):
params["delete"] = self.delete
self._tempdir = await to_thread.run_sync(
lambda: tempfile.TemporaryDirectory(**params)
)
return await to_thread.run_sync(self._tempdir.__enter__)
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
if self._tempdir is not None:
await to_thread.run_sync(
self._tempdir.__exit__, exc_type, exc_value, traceback
)
async def cleanup(self) -> None:
if self._tempdir is not None:
await to_thread.run_sync(self._tempdir.cleanup)
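# Usage sketch (illustrative):
#
#     async with TemporaryDirectory(prefix="anyio-") as path:
#         ...  # ``path`` is the directory name as a string
#     # the directory and its contents are removed on exit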
@overload
async def mkstemp(
suffix: str | None = None,
prefix: str | None = None,
dir: str | None = None,
text: bool = False,
) -> tuple[int, str]: ...
@overload
async def mkstemp(
suffix: bytes | None = None,
prefix: bytes | None = None,
dir: bytes | None = None,
text: bool = False,
) -> tuple[int, bytes]: ...
async def mkstemp(
suffix: AnyStr | None = None,
prefix: AnyStr | None = None,
dir: AnyStr | None = None,
text: bool = False,
) -> tuple[int, str | bytes]:
"""
Asynchronously create a temporary file and return an OS-level handle and the file
name.
    This function wraps :func:`tempfile.mkstemp` and executes it in a background thread.
:param suffix: Suffix to be added to the file name.
:param prefix: Prefix to be added to the file name.
:param dir: Directory in which the temporary file is created.
:param text: Whether the file is opened in text mode.
:return: A tuple containing the file descriptor and the file name.
"""
return await to_thread.run_sync(tempfile.mkstemp, suffix, prefix, dir, text)
@overload
async def mkdtemp(
suffix: str | None = None,
prefix: str | None = None,
dir: str | None = None,
) -> str: ...
@overload
async def mkdtemp(
suffix: bytes | None = None,
prefix: bytes | None = None,
dir: bytes | None = None,
) -> bytes: ...
async def mkdtemp(
suffix: AnyStr | None = None,
prefix: AnyStr | None = None,
dir: AnyStr | None = None,
) -> str | bytes:
"""
Asynchronously create a temporary directory and return its path.
    This function wraps :func:`tempfile.mkdtemp` and executes it in a background thread.
:param suffix: Suffix to be added to the directory name.
:param prefix: Prefix to be added to the directory name.
:param dir: Parent directory where the temporary directory is created.
:return: The path of the created temporary directory.
"""
return await to_thread.run_sync(tempfile.mkdtemp, suffix, prefix, dir)
async def gettempdir() -> str:
"""
Asynchronously return the name of the directory used for temporary files.
    This function wraps :func:`tempfile.gettempdir` and executes it in a background
    thread.
:return: The path of the temporary directory as a string.
"""
return await to_thread.run_sync(tempfile.gettempdir)
async def gettempdirb() -> bytes:
"""
Asynchronously return the name of the directory used for temporary files in bytes.
    This function wraps :func:`tempfile.gettempdirb` and executes it in a background
    thread.
:return: The path of the temporary directory as bytes.
"""
return await to_thread.run_sync(tempfile.gettempdirb)
anyio-4.11.0/src/anyio/_core/_testing.py 0000664 0000000 0000000 00000004106 15064462627 0020114 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from collections.abc import Awaitable, Generator
from typing import Any, cast
from ._eventloop import get_async_backend
class TaskInfo:
"""
Represents an asynchronous task.
:ivar int id: the unique identifier of the task
:ivar parent_id: the identifier of the parent task, if any
:vartype parent_id: Optional[int]
:ivar str name: the description of the task (if any)
:ivar ~collections.abc.Coroutine coro: the coroutine object of the task
"""
__slots__ = "_name", "id", "parent_id", "name", "coro"
def __init__(
self,
id: int,
parent_id: int | None,
name: str | None,
coro: Generator[Any, Any, Any] | Awaitable[Any],
):
func = get_current_task
self._name = f"{func.__module__}.{func.__qualname__}"
self.id: int = id
self.parent_id: int | None = parent_id
self.name: str | None = name
self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro
def __eq__(self, other: object) -> bool:
if isinstance(other, TaskInfo):
return self.id == other.id
return NotImplemented
def __hash__(self) -> int:
return hash(self.id)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})"
def has_pending_cancellation(self) -> bool:
"""
Return ``True`` if the task has a cancellation pending, ``False`` otherwise.
"""
return False
def get_current_task() -> TaskInfo:
"""
Return the current task.
:return: a representation of the current task
"""
return get_async_backend().get_current_task()
def get_running_tasks() -> list[TaskInfo]:
"""
Return a list of running tasks in the current event loop.
:return: a list of task info objects
"""
return cast("list[TaskInfo]", get_async_backend().get_running_tasks())
async def wait_all_tasks_blocked() -> None:
"""Wait until all other tasks are waiting for something."""
await get_async_backend().wait_all_tasks_blocked()
anyio-4.11.0/src/anyio/_core/_typedattr.py 0000664 0000000 0000000 00000004714 15064462627 0020464 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from collections.abc import Callable, Mapping
from typing import Any, TypeVar, final, overload
from ._exceptions import TypedAttributeLookupError
T_Attr = TypeVar("T_Attr")
T_Default = TypeVar("T_Default")
undefined = object()
def typed_attribute() -> Any:
"""Return a unique object, used to mark typed attributes."""
return object()
class TypedAttributeSet:
"""
Superclass for typed attribute collections.
Checks that every public attribute of every subclass has a type annotation.
"""
def __init_subclass__(cls) -> None:
annotations: dict[str, Any] = getattr(cls, "__annotations__", {})
for attrname in dir(cls):
if not attrname.startswith("_") and attrname not in annotations:
raise TypeError(
f"Attribute {attrname!r} is missing its type annotation"
)
super().__init_subclass__()
class TypedAttributeProvider:
"""Base class for classes that wish to provide typed extra attributes."""
@property
def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]:
"""
A mapping of the extra attributes to callables that return the corresponding
values.
If the provider wraps another provider, the attributes from that wrapper should
also be included in the returned mapping (but the wrapper may override the
callables from the wrapped instance).
"""
return {}
@overload
def extra(self, attribute: T_Attr) -> T_Attr: ...
@overload
def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default: ...
@final
def extra(self, attribute: Any, default: object = undefined) -> object:
"""
extra(attribute, default=undefined)
Return the value of the given typed extra attribute.
:param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to
look for
:param default: the value that should be returned if no value is found for the
attribute
:raises ~anyio.TypedAttributeLookupError: if the search failed and no default
value was given
"""
try:
getter = self.extra_attributes[attribute]
except KeyError:
if default is undefined:
raise TypedAttributeLookupError("Attribute not found") from None
else:
return default
return getter()
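# Usage sketch of the typed attribute machinery above (class names are examples):
#
#     class ExampleAttributes(TypedAttributeSet):
#         remote_host: str = typed_attribute()
#
#     class ExampleProvider(TypedAttributeProvider):
#         @property
#         def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
#             return {ExampleAttributes.remote_host: lambda: "example.com"}
#
#     provider = ExampleProvider()
#     provider.extra(ExampleAttributes.remote_host)    # -> "example.com"
#     provider.extra(typed_attribute(), default=None)  # -> None (attribute not found)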
anyio-4.11.0/src/anyio/abc/ 0000775 0000000 0000000 00000000000 15064462627 0015363 5 ustar 00root root 0000000 0000000 anyio-4.11.0/src/anyio/abc/__init__.py 0000664 0000000 0000000 00000005465 15064462627 0017506 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from ._eventloop import AsyncBackend as AsyncBackend
from ._resources import AsyncResource as AsyncResource
from ._sockets import ConnectedUDPSocket as ConnectedUDPSocket
from ._sockets import ConnectedUNIXDatagramSocket as ConnectedUNIXDatagramSocket
from ._sockets import IPAddressType as IPAddressType
from ._sockets import IPSockAddrType as IPSockAddrType
from ._sockets import SocketAttribute as SocketAttribute
from ._sockets import SocketListener as SocketListener
from ._sockets import SocketStream as SocketStream
from ._sockets import UDPPacketType as UDPPacketType
from ._sockets import UDPSocket as UDPSocket
from ._sockets import UNIXDatagramPacketType as UNIXDatagramPacketType
from ._sockets import UNIXDatagramSocket as UNIXDatagramSocket
from ._sockets import UNIXSocketStream as UNIXSocketStream
from ._streams import AnyByteReceiveStream as AnyByteReceiveStream
from ._streams import AnyByteSendStream as AnyByteSendStream
from ._streams import AnyByteStream as AnyByteStream
from ._streams import AnyByteStreamConnectable as AnyByteStreamConnectable
from ._streams import AnyUnreliableByteReceiveStream as AnyUnreliableByteReceiveStream
from ._streams import AnyUnreliableByteSendStream as AnyUnreliableByteSendStream
from ._streams import AnyUnreliableByteStream as AnyUnreliableByteStream
from ._streams import ByteReceiveStream as ByteReceiveStream
from ._streams import ByteSendStream as ByteSendStream
from ._streams import ByteStream as ByteStream
from ._streams import ByteStreamConnectable as ByteStreamConnectable
from ._streams import Listener as Listener
from ._streams import ObjectReceiveStream as ObjectReceiveStream
from ._streams import ObjectSendStream as ObjectSendStream
from ._streams import ObjectStream as ObjectStream
from ._streams import ObjectStreamConnectable as ObjectStreamConnectable
from ._streams import UnreliableObjectReceiveStream as UnreliableObjectReceiveStream
from ._streams import UnreliableObjectSendStream as UnreliableObjectSendStream
from ._streams import UnreliableObjectStream as UnreliableObjectStream
from ._subprocesses import Process as Process
from ._tasks import TaskGroup as TaskGroup
from ._tasks import TaskStatus as TaskStatus
from ._testing import TestRunner as TestRunner
# Re-exported here, for backwards compatibility
# isort: off
from .._core._synchronization import (
CapacityLimiter as CapacityLimiter,
Condition as Condition,
Event as Event,
Lock as Lock,
Semaphore as Semaphore,
)
from .._core._tasks import CancelScope as CancelScope
from ..from_thread import BlockingPortal as BlockingPortal
# Re-export imports so they look like they live directly in this package
for __value in list(locals().values()):
if getattr(__value, "__module__", "").startswith("anyio.abc."):
__value.__module__ = __name__
del __value
anyio-4.11.0/src/anyio/abc/_eventloop.py 0000664 0000000 0000000 00000025224 15064462627 0020114 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import math
import sys
from abc import ABCMeta, abstractmethod
from collections.abc import AsyncIterator, Awaitable, Callable, Sequence
from contextlib import AbstractContextManager
from os import PathLike
from signal import Signals
from socket import AddressFamily, SocketKind, socket
from typing import (
IO,
TYPE_CHECKING,
Any,
TypeVar,
Union,
overload,
)
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
if sys.version_info >= (3, 10):
from typing import TypeAlias
else:
from typing_extensions import TypeAlias
if TYPE_CHECKING:
from _typeshed import FileDescriptorLike
from .._core._synchronization import CapacityLimiter, Event, Lock, Semaphore
from .._core._tasks import CancelScope
from .._core._testing import TaskInfo
from ..from_thread import BlockingPortal
from ._sockets import (
ConnectedUDPSocket,
ConnectedUNIXDatagramSocket,
IPSockAddrType,
SocketListener,
SocketStream,
UDPSocket,
UNIXDatagramSocket,
UNIXSocketStream,
)
from ._subprocesses import Process
from ._tasks import TaskGroup
from ._testing import TestRunner
T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]
class AsyncBackend(metaclass=ABCMeta):
@classmethod
@abstractmethod
def run(
cls,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
args: tuple[Unpack[PosArgsT]],
kwargs: dict[str, Any],
options: dict[str, Any],
) -> T_Retval:
"""
Run the given coroutine function in an asynchronous event loop.
        The current thread must not already be running an event loop.
:param func: a coroutine function
:param args: positional arguments to ``func``
        :param kwargs: keyword arguments to ``func``
:param options: keyword arguments to call the backend ``run()`` implementation
with
:return: the return value of the coroutine function
"""
@classmethod
@abstractmethod
def current_token(cls) -> object:
"""
Return an object that allows other threads to run code inside the event loop.
:return: a token object, specific to the event loop running in the current
thread
"""
@classmethod
@abstractmethod
def current_time(cls) -> float:
"""
Return the current value of the event loop's internal clock.
:return: the clock value (seconds)
"""
@classmethod
@abstractmethod
def cancelled_exception_class(cls) -> type[BaseException]:
"""Return the exception class that is raised in a task if it's cancelled."""
@classmethod
@abstractmethod
async def checkpoint(cls) -> None:
"""
Check if the task has been cancelled, and allow rescheduling of other tasks.
This is effectively the same as running :meth:`checkpoint_if_cancelled` and then
:meth:`cancel_shielded_checkpoint`.
"""
@classmethod
async def checkpoint_if_cancelled(cls) -> None:
"""
        Check if the enclosing cancel scope has been cancelled.
        This will check if the task has been cancelled, but will not allow other tasks
        to be scheduled if it hasn't been.
"""
if cls.current_effective_deadline() == -math.inf:
await cls.checkpoint()
@classmethod
async def cancel_shielded_checkpoint(cls) -> None:
"""
Allow the rescheduling of other tasks.
This will give other tasks the opportunity to run, but without checking if the
current task group has been cancelled, unlike with :meth:`checkpoint`.
"""
with cls.create_cancel_scope(shield=True):
await cls.sleep(0)
@classmethod
@abstractmethod
async def sleep(cls, delay: float) -> None:
"""
Pause the current task for the specified duration.
:param delay: the duration, in seconds
"""
@classmethod
@abstractmethod
def create_cancel_scope(
cls, *, deadline: float = math.inf, shield: bool = False
) -> CancelScope:
pass
@classmethod
@abstractmethod
def current_effective_deadline(cls) -> float:
"""
Return the nearest deadline among all the cancel scopes effective for the
current task.
:return:
- a clock value from the event loop's internal clock
- ``inf`` if there is no deadline in effect
- ``-inf`` if the current scope has been cancelled
:rtype: float
"""
@classmethod
@abstractmethod
def create_task_group(cls) -> TaskGroup:
pass
@classmethod
@abstractmethod
def create_event(cls) -> Event:
pass
@classmethod
@abstractmethod
def create_lock(cls, *, fast_acquire: bool) -> Lock:
pass
@classmethod
@abstractmethod
def create_semaphore(
cls,
initial_value: int,
*,
max_value: int | None = None,
fast_acquire: bool = False,
) -> Semaphore:
pass
@classmethod
@abstractmethod
def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
pass
@classmethod
@abstractmethod
async def run_sync_in_worker_thread(
cls,
func: Callable[[Unpack[PosArgsT]], T_Retval],
args: tuple[Unpack[PosArgsT]],
abandon_on_cancel: bool = False,
limiter: CapacityLimiter | None = None,
) -> T_Retval:
pass
@classmethod
@abstractmethod
def check_cancelled(cls) -> None:
pass
@classmethod
@abstractmethod
def run_async_from_thread(
cls,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
args: tuple[Unpack[PosArgsT]],
token: object,
) -> T_Retval:
pass
@classmethod
@abstractmethod
def run_sync_from_thread(
cls,
func: Callable[[Unpack[PosArgsT]], T_Retval],
args: tuple[Unpack[PosArgsT]],
token: object,
) -> T_Retval:
pass
@classmethod
@abstractmethod
def create_blocking_portal(cls) -> BlockingPortal:
pass
@classmethod
@abstractmethod
async def open_process(
cls,
command: StrOrBytesPath | Sequence[StrOrBytesPath],
*,
stdin: int | IO[Any] | None,
stdout: int | IO[Any] | None,
stderr: int | IO[Any] | None,
**kwargs: Any,
) -> Process:
pass
@classmethod
@abstractmethod
def setup_process_pool_exit_at_shutdown(cls, workers: set[Process]) -> None:
pass
@classmethod
@abstractmethod
async def connect_tcp(
cls, host: str, port: int, local_address: IPSockAddrType | None = None
) -> SocketStream:
pass
@classmethod
@abstractmethod
async def connect_unix(cls, path: str | bytes) -> UNIXSocketStream:
pass
@classmethod
@abstractmethod
def create_tcp_listener(cls, sock: socket) -> SocketListener:
pass
@classmethod
@abstractmethod
def create_unix_listener(cls, sock: socket) -> SocketListener:
pass
@classmethod
@abstractmethod
async def create_udp_socket(
cls,
family: AddressFamily,
local_address: IPSockAddrType | None,
remote_address: IPSockAddrType | None,
reuse_port: bool,
) -> UDPSocket | ConnectedUDPSocket:
pass
@classmethod
@overload
async def create_unix_datagram_socket(
cls, raw_socket: socket, remote_path: None
) -> UNIXDatagramSocket: ...
@classmethod
@overload
async def create_unix_datagram_socket(
cls, raw_socket: socket, remote_path: str | bytes
) -> ConnectedUNIXDatagramSocket: ...
@classmethod
@abstractmethod
async def create_unix_datagram_socket(
cls, raw_socket: socket, remote_path: str | bytes | None
) -> UNIXDatagramSocket | ConnectedUNIXDatagramSocket:
pass
@classmethod
@abstractmethod
async def getaddrinfo(
cls,
host: bytes | str | None,
port: str | int | None,
*,
family: int | AddressFamily = 0,
type: int | SocketKind = 0,
proto: int = 0,
flags: int = 0,
) -> Sequence[
tuple[
AddressFamily,
SocketKind,
int,
str,
tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes],
]
]:
pass
@classmethod
@abstractmethod
async def getnameinfo(
cls, sockaddr: IPSockAddrType, flags: int = 0
) -> tuple[str, str]:
pass
@classmethod
@abstractmethod
async def wait_readable(cls, obj: FileDescriptorLike) -> None:
pass
@classmethod
@abstractmethod
async def wait_writable(cls, obj: FileDescriptorLike) -> None:
pass
@classmethod
@abstractmethod
def notify_closing(cls, obj: FileDescriptorLike) -> None:
pass
@classmethod
@abstractmethod
async def wrap_listener_socket(cls, sock: socket) -> SocketListener:
pass
@classmethod
@abstractmethod
async def wrap_stream_socket(cls, sock: socket) -> SocketStream:
pass
@classmethod
@abstractmethod
async def wrap_unix_stream_socket(cls, sock: socket) -> UNIXSocketStream:
pass
@classmethod
@abstractmethod
async def wrap_udp_socket(cls, sock: socket) -> UDPSocket:
pass
@classmethod
@abstractmethod
async def wrap_connected_udp_socket(cls, sock: socket) -> ConnectedUDPSocket:
pass
@classmethod
@abstractmethod
async def wrap_unix_datagram_socket(cls, sock: socket) -> UNIXDatagramSocket:
pass
@classmethod
@abstractmethod
async def wrap_connected_unix_datagram_socket(
cls, sock: socket
) -> ConnectedUNIXDatagramSocket:
pass
@classmethod
@abstractmethod
def current_default_thread_limiter(cls) -> CapacityLimiter:
pass
@classmethod
@abstractmethod
def open_signal_receiver(
cls, *signals: Signals
) -> AbstractContextManager[AsyncIterator[Signals]]:
pass
@classmethod
@abstractmethod
def get_current_task(cls) -> TaskInfo:
pass
@classmethod
@abstractmethod
def get_running_tasks(cls) -> Sequence[TaskInfo]:
pass
@classmethod
@abstractmethod
async def wait_all_tasks_blocked(cls) -> None:
pass
@classmethod
@abstractmethod
def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
pass
anyio-4.11.0/src/anyio/abc/_resources.py 0000664 0000000 0000000 00000001417 15064462627 0020111 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from abc import ABCMeta, abstractmethod
from types import TracebackType
from typing import TypeVar
T = TypeVar("T")
class AsyncResource(metaclass=ABCMeta):
"""
Abstract base class for all closeable asynchronous resources.
Works as an asynchronous context manager which returns the instance itself on enter,
and calls :meth:`aclose` on exit.
"""
__slots__ = ()
async def __aenter__(self: T) -> T:
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
await self.aclose()
@abstractmethod
async def aclose(self) -> None:
"""Close the resource."""
anyio-4.11.0/src/anyio/abc/_sockets.py 0000664 0000000 0000000 00000031712 15064462627 0017553 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import errno
import socket
import sys
from abc import abstractmethod
from collections.abc import Callable, Collection, Mapping
from contextlib import AsyncExitStack
from io import IOBase
from ipaddress import IPv4Address, IPv6Address
from socket import AddressFamily
from typing import Any, TypeVar, Union
from .._core._eventloop import get_async_backend
from .._core._typedattr import (
TypedAttributeProvider,
TypedAttributeSet,
typed_attribute,
)
from ._streams import ByteStream, Listener, UnreliableObjectStream
from ._tasks import TaskGroup
if sys.version_info >= (3, 10):
from typing import TypeAlias
else:
from typing_extensions import TypeAlias
IPAddressType: TypeAlias = Union[str, IPv4Address, IPv6Address]
IPSockAddrType: TypeAlias = tuple[str, int]
SockAddrType: TypeAlias = Union[IPSockAddrType, str]
UDPPacketType: TypeAlias = tuple[bytes, IPSockAddrType]
UNIXDatagramPacketType: TypeAlias = tuple[bytes, str]
T_Retval = TypeVar("T_Retval")
def _validate_socket(
sock_or_fd: socket.socket | int,
sock_type: socket.SocketKind,
addr_family: socket.AddressFamily = socket.AF_UNSPEC,
*,
require_connected: bool = False,
require_bound: bool = False,
) -> socket.socket:
if isinstance(sock_or_fd, int):
try:
sock = socket.socket(fileno=sock_or_fd)
except OSError as exc:
if exc.errno == errno.ENOTSOCK:
raise ValueError(
"the file descriptor does not refer to a socket"
) from exc
elif require_connected:
raise ValueError("the socket must be connected") from exc
elif require_bound:
raise ValueError("the socket must be bound to a local address") from exc
else:
raise
elif isinstance(sock_or_fd, socket.socket):
sock = sock_or_fd
else:
raise TypeError(
f"expected an int or socket, got {type(sock_or_fd).__qualname__} instead"
)
try:
if require_connected:
try:
sock.getpeername()
except OSError as exc:
raise ValueError("the socket must be connected") from exc
if require_bound:
try:
if sock.family in (socket.AF_INET, socket.AF_INET6):
bound_addr = sock.getsockname()[1]
else:
bound_addr = sock.getsockname()
except OSError:
bound_addr = None
if not bound_addr:
raise ValueError("the socket must be bound to a local address")
if addr_family != socket.AF_UNSPEC and sock.family != addr_family:
raise ValueError(
f"address family mismatch: expected {addr_family.name}, got "
f"{sock.family.name}"
)
if sock.type != sock_type:
raise ValueError(
f"socket type mismatch: expected {sock_type.name}, got {sock.type.name}"
)
except BaseException:
# Avoid ResourceWarning from the locally constructed socket object
if isinstance(sock_or_fd, int):
sock.detach()
raise
sock.setblocking(False)
return sock
class SocketAttribute(TypedAttributeSet):
"""
.. attribute:: family
:type: socket.AddressFamily
the address family of the underlying socket
.. attribute:: local_address
:type: tuple[str, int] | str
the local address the underlying socket is connected to
.. attribute:: local_port
:type: int
for IP based sockets, the local port the underlying socket is bound to
.. attribute:: raw_socket
:type: socket.socket
the underlying stdlib socket object
.. attribute:: remote_address
:type: tuple[str, int] | str
the remote address the underlying socket is connected to
.. attribute:: remote_port
:type: int
for IP based sockets, the remote port the underlying socket is connected to
"""
family: AddressFamily = typed_attribute()
local_address: SockAddrType = typed_attribute()
local_port: int = typed_attribute()
raw_socket: socket.socket = typed_attribute()
remote_address: SockAddrType = typed_attribute()
remote_port: int = typed_attribute()
class _SocketProvider(TypedAttributeProvider):
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
from .._core._sockets import convert_ipv6_sockaddr as convert
attributes: dict[Any, Callable[[], Any]] = {
SocketAttribute.family: lambda: self._raw_socket.family,
SocketAttribute.local_address: lambda: convert(
self._raw_socket.getsockname()
),
SocketAttribute.raw_socket: lambda: self._raw_socket,
}
try:
peername: tuple[str, int] | None = convert(self._raw_socket.getpeername())
except OSError:
peername = None
# Provide the remote address for connected sockets
if peername is not None:
attributes[SocketAttribute.remote_address] = lambda: peername
# Provide local and remote ports for IP based sockets
if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6):
attributes[SocketAttribute.local_port] = (
lambda: self._raw_socket.getsockname()[1]
)
if peername is not None:
remote_port = peername[1]
attributes[SocketAttribute.remote_port] = lambda: remote_port
return attributes
@property
@abstractmethod
def _raw_socket(self) -> socket.socket:
pass
class SocketStream(ByteStream, _SocketProvider):
"""
Transports bytes over a socket.
Supports all relevant extra attributes from :class:`~SocketAttribute`.
"""
@classmethod
async def from_socket(cls, sock_or_fd: socket.socket | int) -> SocketStream:
"""
Wrap an existing socket object or file descriptor as a socket stream.
The newly created socket wrapper takes ownership of the socket being passed in.
The existing socket must already be connected.
:param sock_or_fd: a socket object or file descriptor
:return: a socket stream
"""
sock = _validate_socket(sock_or_fd, socket.SOCK_STREAM, require_connected=True)
return await get_async_backend().wrap_stream_socket(sock)
class UNIXSocketStream(SocketStream):
@classmethod
async def from_socket(cls, sock_or_fd: socket.socket | int) -> UNIXSocketStream:
"""
Wrap an existing socket object or file descriptor as a UNIX socket stream.
The newly created socket wrapper takes ownership of the socket being passed in.
The existing socket must already be connected.
:param sock_or_fd: a socket object or file descriptor
:return: a UNIX socket stream
"""
sock = _validate_socket(
sock_or_fd, socket.SOCK_STREAM, socket.AF_UNIX, require_connected=True
)
return await get_async_backend().wrap_unix_stream_socket(sock)
@abstractmethod
async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
"""
Send file descriptors along with a message to the peer.
:param message: a non-empty bytestring
:param fds: a collection of files (either numeric file descriptors or open file
or socket objects)
"""
@abstractmethod
async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
"""
Receive file descriptors along with a message from the peer.
:param msglen: length of the message to expect from the peer
:param maxfds: maximum number of file descriptors to expect from the peer
:return: a tuple of (message, file descriptors)
"""
class SocketListener(Listener[SocketStream], _SocketProvider):
"""
Listens to incoming socket connections.
Supports all relevant extra attributes from :class:`~SocketAttribute`.
"""
@classmethod
async def from_socket(
cls,
sock_or_fd: socket.socket | int,
) -> SocketListener:
"""
Wrap an existing socket object or file descriptor as a socket listener.
The newly created listener takes ownership of the socket being passed in.
:param sock_or_fd: a socket object or file descriptor
:return: a socket listener
"""
sock = _validate_socket(sock_or_fd, socket.SOCK_STREAM, require_bound=True)
return await get_async_backend().wrap_listener_socket(sock)
@abstractmethod
async def accept(self) -> SocketStream:
"""Accept an incoming connection."""
async def serve(
self,
handler: Callable[[SocketStream], Any],
task_group: TaskGroup | None = None,
) -> None:
from .. import create_task_group
async with AsyncExitStack() as stack:
if task_group is None:
task_group = await stack.enter_async_context(create_task_group())
while True:
stream = await self.accept()
task_group.start_soon(handler, stream)
class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider):
"""
Represents an unconnected UDP socket.
Supports all relevant extra attributes from :class:`~SocketAttribute`.
"""
@classmethod
async def from_socket(cls, sock_or_fd: socket.socket | int) -> UDPSocket:
"""
Wrap an existing socket object or file descriptor as a UDP socket.
The newly created socket wrapper takes ownership of the socket being passed in.
The existing socket must be bound to a local address.
:param sock_or_fd: a socket object or file descriptor
:return: a UDP socket
"""
sock = _validate_socket(sock_or_fd, socket.SOCK_DGRAM, require_bound=True)
return await get_async_backend().wrap_udp_socket(sock)
async def sendto(self, data: bytes, host: str, port: int) -> None:
"""
Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, (host, port))).
"""
return await self.send((data, (host, port)))
class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider):
"""
    Represents a connected UDP socket.
Supports all relevant extra attributes from :class:`~SocketAttribute`.
"""
@classmethod
async def from_socket(cls, sock_or_fd: socket.socket | int) -> ConnectedUDPSocket:
"""
Wrap an existing socket object or file descriptor as a connected UDP socket.
The newly created socket wrapper takes ownership of the socket being passed in.
The existing socket must already be connected.
:param sock_or_fd: a socket object or file descriptor
:return: a connected UDP socket
"""
sock = _validate_socket(
sock_or_fd,
socket.SOCK_DGRAM,
require_connected=True,
)
return await get_async_backend().wrap_connected_udp_socket(sock)
class UNIXDatagramSocket(
UnreliableObjectStream[UNIXDatagramPacketType], _SocketProvider
):
"""
Represents an unconnected Unix datagram socket.
Supports all relevant extra attributes from :class:`~SocketAttribute`.
"""
@classmethod
async def from_socket(
cls,
sock_or_fd: socket.socket | int,
) -> UNIXDatagramSocket:
"""
Wrap an existing socket object or file descriptor as a UNIX datagram
socket.
The newly created socket wrapper takes ownership of the socket being passed in.
:param sock_or_fd: a socket object or file descriptor
:return: a UNIX datagram socket
"""
sock = _validate_socket(sock_or_fd, socket.SOCK_DGRAM, socket.AF_UNIX)
return await get_async_backend().wrap_unix_datagram_socket(sock)
async def sendto(self, data: bytes, path: str) -> None:
"""Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, path))."""
return await self.send((data, path))
class ConnectedUNIXDatagramSocket(UnreliableObjectStream[bytes], _SocketProvider):
"""
Represents a connected Unix datagram socket.
Supports all relevant extra attributes from :class:`~SocketAttribute`.
"""
@classmethod
async def from_socket(
cls,
sock_or_fd: socket.socket | int,
) -> ConnectedUNIXDatagramSocket:
"""
Wrap an existing socket object or file descriptor as a connected UNIX datagram
socket.
The newly created socket wrapper takes ownership of the socket being passed in.
The existing socket must already be connected.
:param sock_or_fd: a socket object or file descriptor
:return: a connected UNIX datagram socket
"""
sock = _validate_socket(
sock_or_fd, socket.SOCK_DGRAM, socket.AF_UNIX, require_connected=True
)
return await get_async_backend().wrap_connected_unix_datagram_socket(sock)
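# Illustrative sketch (not part of the library): adopting a pre-bound stdlib
# UDP socket via from_socket(). Validation requires the socket to be bound;
# the wrapper then takes ownership and switches it to non-blocking mode.
async def _demo_from_socket() -> None:
    raw = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    raw.bind(("127.0.0.1", 0))  # an unbound socket would raise ValueError
    async with await UDPSocket.from_socket(raw) as udp:
        host, port = udp.extra(SocketAttribute.local_address)
        await udp.sendto(b"ping", host, port)  # send a datagram to ourselves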
anyio-4.11.0/src/anyio/abc/_streams.py 0000664 0000000 0000000 00000016730 15064462627 0017561 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import sys
from abc import ABCMeta, abstractmethod
from collections.abc import Callable
from typing import Any, Generic, TypeVar, Union
from .._core._exceptions import EndOfStream
from .._core._typedattr import TypedAttributeProvider
from ._resources import AsyncResource
from ._tasks import TaskGroup
if sys.version_info >= (3, 10):
from typing import TypeAlias
else:
from typing_extensions import TypeAlias
T_Item = TypeVar("T_Item")
T_co = TypeVar("T_co", covariant=True)
T_contra = TypeVar("T_contra", contravariant=True)
class UnreliableObjectReceiveStream(
Generic[T_co], AsyncResource, TypedAttributeProvider
):
"""
An interface for receiving objects.
This interface makes no guarantees that the received messages arrive in the order in
which they were sent, or that no messages are missed.
Asynchronously iterating over objects of this type will yield objects matching the
given type parameter.
"""
def __aiter__(self) -> UnreliableObjectReceiveStream[T_co]:
return self
async def __anext__(self) -> T_co:
try:
return await self.receive()
except EndOfStream:
raise StopAsyncIteration from None
@abstractmethod
async def receive(self) -> T_co:
"""
Receive the next item.
:raises ~anyio.ClosedResourceError: if the receive stream has been explicitly
closed
:raises ~anyio.EndOfStream: if this stream has been closed from the other end
:raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
due to external causes
"""
class UnreliableObjectSendStream(
Generic[T_contra], AsyncResource, TypedAttributeProvider
):
"""
An interface for sending objects.
This interface makes no guarantees that the messages sent will reach the
recipient(s) in the same order in which they were sent, or at all.
"""
@abstractmethod
async def send(self, item: T_contra) -> None:
"""
Send an item to the peer(s).
:param item: the item to send
:raises ~anyio.ClosedResourceError: if the send stream has been explicitly
closed
:raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
due to external causes
"""
class UnreliableObjectStream(
UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item]
):
"""
A bidirectional message stream which does not guarantee the order or reliability of
message delivery.
"""
class ObjectReceiveStream(UnreliableObjectReceiveStream[T_co]):
"""
A receive message stream which guarantees that messages are received in the same
order in which they were sent, and that no messages are missed.
"""
class ObjectSendStream(UnreliableObjectSendStream[T_contra]):
"""
A send message stream which guarantees that messages are delivered in the same order
in which they were sent, without missing any messages in the middle.
"""
class ObjectStream(
ObjectReceiveStream[T_Item],
ObjectSendStream[T_Item],
UnreliableObjectStream[T_Item],
):
"""
A bidirectional message stream which guarantees the order and reliability of message
delivery.
"""
@abstractmethod
async def send_eof(self) -> None:
"""
Send an end-of-file indication to the peer.
You should not try to send any further data to this stream after calling this
method. This method is idempotent (does nothing on successive calls).
"""
class ByteReceiveStream(AsyncResource, TypedAttributeProvider):
"""
An interface for receiving bytes from a single peer.
Iterating this byte stream will yield a byte string of arbitrary length, but no more
than 65536 bytes.
"""
def __aiter__(self) -> ByteReceiveStream:
return self
async def __anext__(self) -> bytes:
try:
return await self.receive()
except EndOfStream:
raise StopAsyncIteration from None
@abstractmethod
async def receive(self, max_bytes: int = 65536) -> bytes:
"""
Receive at most ``max_bytes`` bytes from the peer.
.. note:: Implementers of this interface should not return an empty
:class:`bytes` object, and users should ignore them.
:param max_bytes: maximum number of bytes to receive
:return: the received bytes
:raises ~anyio.EndOfStream: if this stream has been closed from the other end
"""
class ByteSendStream(AsyncResource, TypedAttributeProvider):
"""An interface for sending bytes to a single peer."""
@abstractmethod
async def send(self, item: bytes) -> None:
"""
Send the given bytes to the peer.
:param item: the bytes to send
"""
class ByteStream(ByteReceiveStream, ByteSendStream):
"""A bidirectional byte stream."""
@abstractmethod
async def send_eof(self) -> None:
"""
Send an end-of-file indication to the peer.
You should not try to send any further data to this stream after calling this
method. This method is idempotent (does nothing on successive calls).
"""
#: Type alias for all unreliable bytes-oriented receive streams.
AnyUnreliableByteReceiveStream: TypeAlias = Union[
UnreliableObjectReceiveStream[bytes], ByteReceiveStream
]
#: Type alias for all unreliable bytes-oriented send streams.
AnyUnreliableByteSendStream: TypeAlias = Union[
UnreliableObjectSendStream[bytes], ByteSendStream
]
#: Type alias for all unreliable bytes-oriented streams.
AnyUnreliableByteStream: TypeAlias = Union[UnreliableObjectStream[bytes], ByteStream]
#: Type alias for all bytes-oriented receive streams.
AnyByteReceiveStream: TypeAlias = Union[ObjectReceiveStream[bytes], ByteReceiveStream]
#: Type alias for all bytes-oriented send streams.
AnyByteSendStream: TypeAlias = Union[ObjectSendStream[bytes], ByteSendStream]
#: Type alias for all bytes-oriented streams.
AnyByteStream: TypeAlias = Union[ObjectStream[bytes], ByteStream]
class Listener(Generic[T_co], AsyncResource, TypedAttributeProvider):
"""An interface for objects that let you accept incoming connections."""
@abstractmethod
async def serve(
self, handler: Callable[[T_co], Any], task_group: TaskGroup | None = None
) -> None:
"""
Accept incoming connections as they come in and start tasks to handle them.
:param handler: a callable that will be used to handle each accepted connection
:param task_group: the task group that will be used to start tasks for handling
each accepted connection (if omitted, an ad-hoc task group will be created)
"""
class ObjectStreamConnectable(Generic[T_co], metaclass=ABCMeta):
@abstractmethod
async def connect(self) -> ObjectStream[T_co]:
"""
Connect to the remote endpoint.
:return: an object stream connected to the remote end
:raises ConnectionFailed: if the connection fails
"""
class ByteStreamConnectable(metaclass=ABCMeta):
@abstractmethod
async def connect(self) -> ByteStream:
"""
Connect to the remote endpoint.
:return: a bytestream connected to the remote end
:raises ConnectionFailed: if the connection fails
"""
#: Type alias for all connectables returning bytestreams or bytes-oriented object streams
AnyByteStreamConnectable: TypeAlias = Union[
ObjectStreamConnectable[bytes], ByteStreamConnectable
]
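# Illustrative sketch (not part of the library): a minimal ObjectReceiveStream
# backed by a list. Async iteration ends cleanly when receive() raises
# EndOfStream. _ListReceiveStream is a hypothetical name.
class _ListReceiveStream(ObjectReceiveStream[T_Item]):
    def __init__(self, items: list[T_Item]) -> None:
        self._items = items
    async def receive(self) -> T_Item:
        if not self._items:
            raise EndOfStream
        return self._items.pop(0)
    async def aclose(self) -> None:
        self._items.clear()
async def _demo_receive_stream() -> None:
    received = [item async for item in _ListReceiveStream([1, 2, 3])]
    assert received == [1, 2, 3]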
anyio-4.11.0/src/anyio/abc/_subprocesses.py 0000664 0000000 0000000 00000004023 15064462627 0020613 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from abc import abstractmethod
from signal import Signals
from ._resources import AsyncResource
from ._streams import ByteReceiveStream, ByteSendStream
class Process(AsyncResource):
"""An asynchronous version of :class:`subprocess.Popen`."""
@abstractmethod
async def wait(self) -> int:
"""
Wait until the process exits.
:return: the exit code of the process
"""
@abstractmethod
def terminate(self) -> None:
"""
Terminates the process, gracefully if possible.
On Windows, this calls ``TerminateProcess()``.
On POSIX systems, this sends ``SIGTERM`` to the process.
.. seealso:: :meth:`subprocess.Popen.terminate`
"""
@abstractmethod
def kill(self) -> None:
"""
Kills the process.
On Windows, this calls ``TerminateProcess()``.
On POSIX systems, this sends ``SIGKILL`` to the process.
.. seealso:: :meth:`subprocess.Popen.kill`
"""
@abstractmethod
def send_signal(self, signal: Signals) -> None:
"""
Send a signal to the subprocess.
.. seealso:: :meth:`subprocess.Popen.send_signal`
:param signal: the signal number (e.g. :data:`signal.SIGHUP`)
"""
@property
@abstractmethod
def pid(self) -> int:
"""The process ID of the process."""
@property
@abstractmethod
def returncode(self) -> int | None:
"""
The return code of the process. If the process has not yet terminated, this will
be ``None``.
"""
@property
@abstractmethod
def stdin(self) -> ByteSendStream | None:
"""The stream for the standard input of the process."""
@property
@abstractmethod
def stdout(self) -> ByteReceiveStream | None:
"""The stream for the standard output of the process."""
@property
@abstractmethod
def stderr(self) -> ByteReceiveStream | None:
"""The stream for the standard error output of the process."""
anyio-4.11.0/src/anyio/abc/_tasks.py 0000664 0000000 0000000 00000007211 15064462627 0017222 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import sys
from abc import ABCMeta, abstractmethod
from collections.abc import Awaitable, Callable
from types import TracebackType
from typing import TYPE_CHECKING, Any, Protocol, overload
if sys.version_info >= (3, 13):
from typing import TypeVar
else:
from typing_extensions import TypeVar
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
if TYPE_CHECKING:
from .._core._tasks import CancelScope
T_Retval = TypeVar("T_Retval")
T_contra = TypeVar("T_contra", contravariant=True, default=None)
PosArgsT = TypeVarTuple("PosArgsT")
class TaskStatus(Protocol[T_contra]):
@overload
def started(self: TaskStatus[None]) -> None: ...
@overload
def started(self, value: T_contra) -> None: ...
def started(self, value: T_contra | None = None) -> None:
"""
Signal that the task has started.
:param value: object passed back to the starter of the task
"""
class TaskGroup(metaclass=ABCMeta):
"""
Groups several asynchronous tasks together.
:ivar cancel_scope: the cancel scope inherited by all child tasks
:vartype cancel_scope: CancelScope
.. note:: On asyncio, support for eager task factories is considered to be
**experimental**. In particular, they don't follow the usual semantics of new
tasks being scheduled on the next iteration of the event loop, and may thus
cause unexpected behavior in code that wasn't written with such semantics in
mind.
"""
cancel_scope: CancelScope
@abstractmethod
def start_soon(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
*args: Unpack[PosArgsT],
name: object = None,
) -> None:
"""
Start a new task in this task group.
:param func: a coroutine function
:param args: positional arguments to call the function with
:param name: name of the task, for the purposes of introspection and debugging
.. versionadded:: 3.0
"""
@abstractmethod
async def start(
self,
func: Callable[..., Awaitable[Any]],
*args: object,
name: object = None,
) -> Any:
"""
Start a new task and wait until it signals for readiness.
The target callable must accept a keyword argument ``task_status`` (of type
:class:`TaskStatus`). Awaiting on this method will return whatever was passed to
``task_status.started()`` (``None`` by default).
.. note:: The :class:`TaskStatus` class is generic, and the type argument should
indicate the type of the value that will be passed to
``task_status.started()``.
:param func: a coroutine function that accepts the ``task_status`` keyword
argument
:param args: positional arguments to call the function with
:param name: an optional name for the task, for introspection and debugging
:return: the value passed to ``task_status.started()``
:raises RuntimeError: if the task finishes without calling
``task_status.started()``
.. seealso:: :ref:`start_initialize`
.. versionadded:: 3.0
"""
@abstractmethod
async def __aenter__(self) -> TaskGroup:
"""Enter the task group context and allow starting new tasks."""
@abstractmethod
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool:
"""Exit the task group context waiting for all tasks to finish."""
anyio-4.11.0/src/anyio/abc/_testing.py 0000664 0000000 0000000 00000003435 15064462627 0017556 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import types
from abc import ABCMeta, abstractmethod
from collections.abc import AsyncGenerator, Callable, Coroutine, Iterable
from typing import Any, TypeVar
_T = TypeVar("_T")
class TestRunner(metaclass=ABCMeta):
"""
Encapsulates a running event loop. Every call made through this object will use the
same event loop.
"""
def __enter__(self) -> TestRunner:
return self
@abstractmethod
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: types.TracebackType | None,
) -> bool | None: ...
@abstractmethod
def run_asyncgen_fixture(
self,
fixture_func: Callable[..., AsyncGenerator[_T, Any]],
kwargs: dict[str, Any],
) -> Iterable[_T]:
"""
Run an async generator fixture.
:param fixture_func: the fixture function
:param kwargs: keyword arguments to call the fixture function with
:return: an iterator yielding the value yielded from the async generator
"""
@abstractmethod
def run_fixture(
self,
fixture_func: Callable[..., Coroutine[Any, Any, _T]],
kwargs: dict[str, Any],
) -> _T:
"""
Run an async fixture.
:param fixture_func: the fixture function
:param kwargs: keyword arguments to call the fixture function with
:return: the return value of the fixture function
"""
@abstractmethod
def run_test(
self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
) -> None:
"""
Run an async test function.
:param test_func: the test function
:param kwargs: keyword arguments to call the test function with
"""
anyio-4.11.0/src/anyio/from_thread.py 0000664 0000000 0000000 00000044757 15064462627 0017523 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import sys
from collections.abc import Awaitable, Callable, Generator
from concurrent.futures import Future
from contextlib import (
AbstractAsyncContextManager,
AbstractContextManager,
contextmanager,
)
from dataclasses import dataclass, field
from inspect import isawaitable
from threading import Lock, Thread, current_thread, get_ident
from types import TracebackType
from typing import (
Any,
Generic,
TypeVar,
cast,
overload,
)
from ._core._eventloop import (
get_async_backend,
get_cancelled_exc_class,
threadlocals,
)
from ._core._eventloop import run as run_eventloop
from ._core._exceptions import NoEventLoopError
from ._core._synchronization import Event
from ._core._tasks import CancelScope, create_task_group
from .abc._tasks import TaskStatus
from .lowlevel import EventLoopToken
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
T_Retval = TypeVar("T_Retval")
T_co = TypeVar("T_co", covariant=True)
PosArgsT = TypeVarTuple("PosArgsT")
def _token_or_error(token: EventLoopToken | None) -> EventLoopToken:
if token is not None:
return token
try:
return threadlocals.current_token
except AttributeError:
raise NoEventLoopError(
"Not running inside an AnyIO worker thread, and no event loop token was "
"provided"
) from None
def run(
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
*args: Unpack[PosArgsT],
token: EventLoopToken | None = None,
) -> T_Retval:
"""
Call a coroutine function from a worker thread.
:param func: a coroutine function
:param args: positional arguments for the callable
:param token: an event loop token to use to get back to the event loop thread
(required if calling this function from outside an AnyIO worker thread)
:return: the return value of the coroutine function
    :raises NoEventLoopError: if no token was provided and called from outside an
        AnyIO worker thread
:raises RunFinishedError: if the event loop tied to ``token`` is no longer running
.. versionchanged:: 4.11.0
Added the ``token`` parameter.
"""
explicit_token = token is not None
token = _token_or_error(token)
return token.backend_class.run_async_from_thread(
func, args, token=token.native_token if explicit_token else None
)
def run_sync(
func: Callable[[Unpack[PosArgsT]], T_Retval],
*args: Unpack[PosArgsT],
token: EventLoopToken | None = None,
) -> T_Retval:
"""
Call a function in the event loop thread from a worker thread.
:param func: a callable
:param args: positional arguments for the callable
:param token: an event loop token to use to get back to the event loop thread
(required if calling this function from outside an AnyIO worker thread)
:return: the return value of the callable
    :raises NoEventLoopError: if no token was provided and called from outside an
        AnyIO worker thread
:raises RunFinishedError: if the event loop tied to ``token`` is no longer running
.. versionchanged:: 4.11.0
Added the ``token`` parameter.
"""
explicit_token = token is not None
token = _token_or_error(token)
return token.backend_class.run_sync_from_thread(
func, args, token=token.native_token if explicit_token else None
)
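# Illustrative sketch (not part of the library): a worker thread spawned via
# to_thread.run_sync() calls back into the event loop with run()/run_sync().
async def _demo_from_thread() -> int:
    from . import sleep, to_thread
    def _blocking() -> int:
        run(sleep, 0.01)  # await a coroutine in the event loop thread
        return run_sync(len, "abc")  # call a plain function there
    return await to_thread.run_sync(_blocking)  # returns 3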
class _BlockingAsyncContextManager(Generic[T_co], AbstractContextManager):
_enter_future: Future[T_co]
_exit_future: Future[bool | None]
_exit_event: Event
_exit_exc_info: tuple[
type[BaseException] | None, BaseException | None, TracebackType | None
] = (None, None, None)
def __init__(
self, async_cm: AbstractAsyncContextManager[T_co], portal: BlockingPortal
):
self._async_cm = async_cm
self._portal = portal
async def run_async_cm(self) -> bool | None:
try:
self._exit_event = Event()
value = await self._async_cm.__aenter__()
except BaseException as exc:
self._enter_future.set_exception(exc)
raise
else:
self._enter_future.set_result(value)
try:
# Wait for the sync context manager to exit.
# This next statement can raise `get_cancelled_exc_class()` if
# something went wrong in a task group in this async context
# manager.
await self._exit_event.wait()
finally:
# In case of cancellation, it could be that we end up here before
# `_BlockingAsyncContextManager.__exit__` is called, and an
# `_exit_exc_info` has been set.
result = await self._async_cm.__aexit__(*self._exit_exc_info)
return result
def __enter__(self) -> T_co:
self._enter_future = Future()
self._exit_future = self._portal.start_task_soon(self.run_async_cm)
return self._enter_future.result()
def __exit__(
self,
__exc_type: type[BaseException] | None,
__exc_value: BaseException | None,
__traceback: TracebackType | None,
) -> bool | None:
self._exit_exc_info = __exc_type, __exc_value, __traceback
self._portal.call(self._exit_event.set)
return self._exit_future.result()
class _BlockingPortalTaskStatus(TaskStatus):
def __init__(self, future: Future):
self._future = future
def started(self, value: object = None) -> None:
self._future.set_result(value)
class BlockingPortal:
"""An object that lets external threads run code in an asynchronous event loop."""
def __new__(cls) -> BlockingPortal:
return get_async_backend().create_blocking_portal()
def __init__(self) -> None:
self._event_loop_thread_id: int | None = get_ident()
self._stop_event = Event()
self._task_group = create_task_group()
self._cancelled_exc_class = get_cancelled_exc_class()
async def __aenter__(self) -> BlockingPortal:
await self._task_group.__aenter__()
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool:
await self.stop()
return await self._task_group.__aexit__(exc_type, exc_val, exc_tb)
def _check_running(self) -> None:
if self._event_loop_thread_id is None:
raise RuntimeError("This portal is not running")
if self._event_loop_thread_id == get_ident():
raise RuntimeError(
"This method cannot be called from the event loop thread"
)
async def sleep_until_stopped(self) -> None:
"""Sleep until :meth:`stop` is called."""
await self._stop_event.wait()
async def stop(self, cancel_remaining: bool = False) -> None:
"""
Signal the portal to shut down.
This marks the portal as no longer accepting new calls and exits from
:meth:`sleep_until_stopped`.
:param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False``
to let them finish before returning
"""
self._event_loop_thread_id = None
self._stop_event.set()
if cancel_remaining:
self._task_group.cancel_scope.cancel("the blocking portal is shutting down")
async def _call_func(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
args: tuple[Unpack[PosArgsT]],
kwargs: dict[str, Any],
future: Future[T_Retval],
) -> None:
def callback(f: Future[T_Retval]) -> None:
if f.cancelled() and self._event_loop_thread_id not in (
None,
get_ident(),
):
self.call(scope.cancel, "the future was cancelled")
try:
retval_or_awaitable = func(*args, **kwargs)
if isawaitable(retval_or_awaitable):
with CancelScope() as scope:
if future.cancelled():
scope.cancel("the future was cancelled")
else:
future.add_done_callback(callback)
retval = await retval_or_awaitable
else:
retval = retval_or_awaitable
except self._cancelled_exc_class:
future.cancel()
future.set_running_or_notify_cancel()
except BaseException as exc:
if not future.cancelled():
future.set_exception(exc)
# Let base exceptions fall through
if not isinstance(exc, Exception):
raise
else:
if not future.cancelled():
future.set_result(retval)
finally:
scope = None # type: ignore[assignment]
def _spawn_task_from_thread(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
args: tuple[Unpack[PosArgsT]],
kwargs: dict[str, Any],
name: object,
future: Future[T_Retval],
) -> None:
"""
Spawn a new task using the given callable.
Implementers must ensure that the future is resolved when the task finishes.
:param func: a callable
:param args: positional arguments to be passed to the callable
:param kwargs: keyword arguments to be passed to the callable
:param name: name of the task (will be coerced to a string if not ``None``)
:param future: a future that will resolve to the return value of the callable,
or the exception raised during its execution
"""
raise NotImplementedError
@overload
def call(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
*args: Unpack[PosArgsT],
) -> T_Retval: ...
@overload
def call(
self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
) -> T_Retval: ...
def call(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
*args: Unpack[PosArgsT],
) -> T_Retval:
"""
Call the given function in the event loop thread.
If the callable returns a coroutine object, it is awaited on.
:param func: any callable
:raises RuntimeError: if the portal is not running or if this method is called
from within the event loop thread
"""
return cast(T_Retval, self.start_task_soon(func, *args).result())
@overload
def start_task_soon(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
*args: Unpack[PosArgsT],
name: object = None,
) -> Future[T_Retval]: ...
@overload
def start_task_soon(
self,
func: Callable[[Unpack[PosArgsT]], T_Retval],
*args: Unpack[PosArgsT],
name: object = None,
) -> Future[T_Retval]: ...
def start_task_soon(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
*args: Unpack[PosArgsT],
name: object = None,
) -> Future[T_Retval]:
"""
Start a task in the portal's task group.
The task will be run inside a cancel scope which can be cancelled by cancelling
the returned future.
:param func: the target function
:param args: positional arguments passed to ``func``
:param name: name of the task (will be coerced to a string if not ``None``)
:return: a future that resolves with the return value of the callable if the
task completes successfully, or with the exception raised in the task
:raises RuntimeError: if the portal is not running or if this method is called
from within the event loop thread
:rtype: concurrent.futures.Future[T_Retval]
.. versionadded:: 3.0
"""
self._check_running()
f: Future[T_Retval] = Future()
self._spawn_task_from_thread(func, args, {}, name, f)
return f
def start_task(
self,
func: Callable[..., Awaitable[T_Retval]],
*args: object,
name: object = None,
) -> tuple[Future[T_Retval], Any]:
"""
Start a task in the portal's task group and wait until it signals for readiness.
This method works the same way as :meth:`.abc.TaskGroup.start`.
:param func: the target function
:param args: positional arguments passed to ``func``
:param name: name of the task (will be coerced to a string if not ``None``)
:return: a tuple of (future, task_status_value) where the ``task_status_value``
is the value passed to ``task_status.started()`` from within the target
function
:rtype: tuple[concurrent.futures.Future[T_Retval], Any]
.. versionadded:: 3.0
"""
def task_done(future: Future[T_Retval]) -> None:
if not task_status_future.done():
if future.cancelled():
task_status_future.cancel()
elif future.exception():
task_status_future.set_exception(future.exception())
else:
exc = RuntimeError(
"Task exited without calling task_status.started()"
)
task_status_future.set_exception(exc)
self._check_running()
task_status_future: Future = Future()
task_status = _BlockingPortalTaskStatus(task_status_future)
f: Future = Future()
f.add_done_callback(task_done)
self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f)
return f, task_status_future.result()
def wrap_async_context_manager(
self, cm: AbstractAsyncContextManager[T_co]
) -> AbstractContextManager[T_co]:
"""
Wrap an async context manager as a synchronous context manager via this portal.
Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping
in the middle until the synchronous context manager exits.
:param cm: an asynchronous context manager
:return: a synchronous context manager
.. versionadded:: 2.1
"""
return _BlockingAsyncContextManager(cm, self)
@dataclass
class BlockingPortalProvider:
"""
A manager for a blocking portal. Used as a context manager. The first thread to
enter this context manager causes a blocking portal to be started with the specific
parameters, and the last thread to exit causes the portal to be shut down. Thus,
there will be exactly one blocking portal running in this context as long as at
least one thread has entered this context manager.
The parameters are the same as for :func:`~anyio.run`.
:param backend: name of the backend
:param backend_options: backend options
.. versionadded:: 4.4
"""
backend: str = "asyncio"
backend_options: dict[str, Any] | None = None
_lock: Lock = field(init=False, default_factory=Lock)
_leases: int = field(init=False, default=0)
_portal: BlockingPortal = field(init=False)
_portal_cm: AbstractContextManager[BlockingPortal] | None = field(
init=False, default=None
)
def __enter__(self) -> BlockingPortal:
with self._lock:
if self._portal_cm is None:
self._portal_cm = start_blocking_portal(
self.backend, self.backend_options
)
self._portal = self._portal_cm.__enter__()
self._leases += 1
return self._portal
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
portal_cm: AbstractContextManager[BlockingPortal] | None = None
with self._lock:
assert self._portal_cm
assert self._leases > 0
self._leases -= 1
if not self._leases:
portal_cm = self._portal_cm
self._portal_cm = None
del self._portal
if portal_cm:
portal_cm.__exit__(None, None, None)
@contextmanager
def start_blocking_portal(
backend: str = "asyncio",
backend_options: dict[str, Any] | None = None,
*,
name: str | None = None,
) -> Generator[BlockingPortal, Any, None]:
"""
Start a new event loop in a new thread and run a blocking portal in its main task.
The parameters are the same as for :func:`~anyio.run`.
:param backend: name of the backend
:param backend_options: backend options
:param name: name of the thread
:return: a context manager that yields a blocking portal
.. versionchanged:: 3.0
Usage as a context manager is now required.
"""
async def run_portal() -> None:
async with BlockingPortal() as portal_:
if name is None:
current_thread().name = f"{backend}-portal-{id(portal_):x}"
future.set_result(portal_)
await portal_.sleep_until_stopped()
def run_blocking_portal() -> None:
if future.set_running_or_notify_cancel():
try:
run_eventloop(
run_portal, backend=backend, backend_options=backend_options
)
except BaseException as exc:
if not future.done():
future.set_exception(exc)
future: Future[BlockingPortal] = Future()
thread = Thread(target=run_blocking_portal, daemon=True, name=name)
thread.start()
try:
cancel_remaining_tasks = False
portal = future.result()
try:
yield portal
except BaseException:
cancel_remaining_tasks = True
raise
finally:
try:
portal.call(portal.stop, cancel_remaining_tasks)
except RuntimeError:
pass
finally:
thread.join()
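# Illustrative sketch (not part of the library): synchronous code driving an
# event loop running in another thread through a portal.
def _demo_blocking_portal() -> None:
    from . import sleep
    with start_blocking_portal() as portal:
        portal.call(sleep, 0.01)  # blocks until the coroutine finishes
        future = portal.start_task_soon(sleep, 0.01)
        future.result()  # wait for the spawned task to complete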
def check_cancelled() -> None:
"""
    Check if the cancel scope of the host task running the current worker thread has
    been cancelled.
If the host task's current cancel scope has indeed been cancelled, the
backend-specific cancellation exception will be raised.
    :raises NoEventLoopError: if the current thread was not spawned by
        :func:`.to_thread.run_sync`
"""
try:
token: EventLoopToken = threadlocals.current_token
except AttributeError:
raise NoEventLoopError(
"This function can only be called inside an AnyIO worker thread"
) from None
token.backend_class.check_cancelled()
anyio-4.11.0/src/anyio/lowlevel.py 0000664 0000000 0000000 00000010465 15064462627 0017047 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import enum
from dataclasses import dataclass
from typing import Any, Generic, Literal, TypeVar, final, overload
from weakref import WeakKeyDictionary
from ._core._eventloop import get_async_backend
from .abc import AsyncBackend
T = TypeVar("T")
D = TypeVar("D")
async def checkpoint() -> None:
"""
Check for cancellation and allow the scheduler to switch to another task.
Equivalent to (but more efficient than)::
await checkpoint_if_cancelled()
await cancel_shielded_checkpoint()
.. versionadded:: 3.0
"""
await get_async_backend().checkpoint()
async def checkpoint_if_cancelled() -> None:
"""
Enter a checkpoint if the enclosing cancel scope has been cancelled.
This does not allow the scheduler to switch to a different task.
.. versionadded:: 3.0
"""
await get_async_backend().checkpoint_if_cancelled()
async def cancel_shielded_checkpoint() -> None:
"""
Allow the scheduler to switch to another task but without checking for cancellation.
Equivalent to (but potentially more efficient than)::
with CancelScope(shield=True):
await checkpoint()
.. versionadded:: 3.0
"""
await get_async_backend().cancel_shielded_checkpoint()
@final
@dataclass(frozen=True, repr=False)
class EventLoopToken:
"""
An opaque object that holds a reference to an event loop.
.. versionadded:: 4.11.0
"""
backend_class: type[AsyncBackend]
native_token: object
def current_token() -> EventLoopToken:
"""
Return a token object that can be used to call code in the current event loop from
another thread.
.. versionadded:: 4.11.0
"""
backend_class = get_async_backend()
raw_token = backend_class.current_token()
return EventLoopToken(backend_class, raw_token)
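# Illustrative sketch (not part of the library): capture a token in the event
# loop thread, then use it from a foreign thread to schedule a call back into
# the loop.
async def _demo_event_loop_token() -> None:
    from threading import Thread
    from . import Event, from_thread
    token = current_token()
    event = Event()
    thread = Thread(
        target=from_thread.run_sync, args=(event.set,), kwargs={"token": token}
    )
    thread.start()
    await event.wait()  # set from the foreign thread via the token
    thread.join()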
_run_vars: WeakKeyDictionary[object, dict[RunVar[Any], Any]] = WeakKeyDictionary()
class _NoValueSet(enum.Enum):
NO_VALUE_SET = enum.auto()
class RunvarToken(Generic[T]):
__slots__ = "_var", "_value", "_redeemed"
def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]):
self._var = var
self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value
self._redeemed = False
class RunVar(Generic[T]):
"""
Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop.
"""
__slots__ = "_name", "_default"
NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET
def __init__(
self, name: str, default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
):
self._name = name
self._default = default
@property
def _current_vars(self) -> dict[RunVar[T], T]:
native_token = current_token().native_token
try:
return _run_vars[native_token]
except KeyError:
run_vars = _run_vars[native_token] = {}
return run_vars
@overload
def get(self, default: D) -> T | D: ...
@overload
def get(self) -> T: ...
def get(
self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
) -> T | D:
try:
return self._current_vars[self]
except KeyError:
if default is not RunVar.NO_VALUE_SET:
return default
elif self._default is not RunVar.NO_VALUE_SET:
return self._default
raise LookupError(
f'Run variable "{self._name}" has no value and no default set'
)
def set(self, value: T) -> RunvarToken[T]:
current_vars = self._current_vars
token = RunvarToken(self, current_vars.get(self, RunVar.NO_VALUE_SET))
current_vars[self] = value
return token
def reset(self, token: RunvarToken[T]) -> None:
if token._var is not self:
raise ValueError("This token does not belong to this RunVar")
if token._redeemed:
raise ValueError("This token has already been used")
if token._value is _NoValueSet.NO_VALUE_SET:
try:
del self._current_vars[self]
except KeyError:
pass
else:
self._current_vars[self] = token._value
token._redeemed = True
def __repr__(self) -> str:
return f""
anyio-4.11.0/src/anyio/py.typed 0000664 0000000 0000000 00000000000 15064462627 0016323 0 ustar 00root root 0000000 0000000 anyio-4.11.0/src/anyio/pytest_plugin.py 0000664 0000000 0000000 00000023734 15064462627 0020127 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import socket
import sys
from collections.abc import Callable, Generator, Iterator
from contextlib import ExitStack, contextmanager
from inspect import isasyncgenfunction, iscoroutinefunction, ismethod
from typing import Any, cast
import pytest
import sniffio
from _pytest.fixtures import SubRequest
from _pytest.outcomes import Exit
from ._core._eventloop import get_all_backends, get_async_backend
from ._core._exceptions import iterate_exceptions
from .abc import TestRunner
if sys.version_info < (3, 11):
from exceptiongroup import ExceptionGroup
_current_runner: TestRunner | None = None
_runner_stack: ExitStack | None = None
_runner_leases = 0
def extract_backend_and_options(backend: object) -> tuple[str, dict[str, Any]]:
if isinstance(backend, str):
return backend, {}
elif isinstance(backend, tuple) and len(backend) == 2:
if isinstance(backend[0], str) and isinstance(backend[1], dict):
return cast(tuple[str, dict[str, Any]], backend)
raise TypeError("anyio_backend must be either a string or tuple of (string, dict)")
@contextmanager
def get_runner(
backend_name: str, backend_options: dict[str, Any]
) -> Iterator[TestRunner]:
global _current_runner, _runner_leases, _runner_stack
if _current_runner is None:
asynclib = get_async_backend(backend_name)
_runner_stack = ExitStack()
if sniffio.current_async_library_cvar.get(None) is None:
# Since we're in control of the event loop, we can cache the name of the
# async library
token = sniffio.current_async_library_cvar.set(backend_name)
_runner_stack.callback(sniffio.current_async_library_cvar.reset, token)
backend_options = backend_options or {}
_current_runner = _runner_stack.enter_context(
asynclib.create_test_runner(backend_options)
)
_runner_leases += 1
try:
yield _current_runner
finally:
_runner_leases -= 1
if not _runner_leases:
assert _runner_stack is not None
_runner_stack.close()
_runner_stack = _current_runner = None
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addini(
"anyio_mode",
default="strict",
help='AnyIO plugin mode (either "strict" or "auto")',
type="string",
)
def pytest_configure(config: pytest.Config) -> None:
config.addinivalue_line(
"markers",
"anyio: mark the (coroutine function) test to be run asynchronously via anyio.",
)
if (
config.getini("anyio_mode") == "auto"
and config.pluginmanager.has_plugin("asyncio")
and config.getini("asyncio_mode") == "auto"
):
config.issue_config_time_warning(
pytest.PytestConfigWarning(
"AnyIO auto mode has been enabled together with pytest-asyncio auto "
"mode. This may cause unexpected behavior."
),
1,
)
@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(fixturedef: Any, request: Any) -> Generator[Any]:
def wrapper(anyio_backend: Any, request: SubRequest, **kwargs: Any) -> Any:
# Rebind any fixture methods to the request instance
if (
request.instance
and ismethod(func)
and type(func.__self__) is type(request.instance)
):
local_func = func.__func__.__get__(request.instance)
else:
local_func = func
backend_name, backend_options = extract_backend_and_options(anyio_backend)
if has_backend_arg:
kwargs["anyio_backend"] = anyio_backend
if has_request_arg:
kwargs["request"] = request
with get_runner(backend_name, backend_options) as runner:
if isasyncgenfunction(local_func):
yield from runner.run_asyncgen_fixture(local_func, kwargs)
else:
yield runner.run_fixture(local_func, kwargs)
# Only apply this to coroutine functions and async generator functions in requests
# that involve the anyio_backend fixture
func = fixturedef.func
if isasyncgenfunction(func) or iscoroutinefunction(func):
if "anyio_backend" in request.fixturenames:
fixturedef.func = wrapper
original_argname = fixturedef.argnames
if not (has_backend_arg := "anyio_backend" in fixturedef.argnames):
fixturedef.argnames += ("anyio_backend",)
if not (has_request_arg := "request" in fixturedef.argnames):
fixturedef.argnames += ("request",)
try:
return (yield)
finally:
fixturedef.func = func
fixturedef.argnames = original_argname
return (yield)
@pytest.hookimpl(tryfirst=True)
def pytest_pycollect_makeitem(
collector: pytest.Module | pytest.Class, name: str, obj: object
) -> None:
if collector.istestfunction(obj, name):
inner_func = obj.hypothesis.inner_test if hasattr(obj, "hypothesis") else obj
if iscoroutinefunction(inner_func):
anyio_auto_mode = collector.config.getini("anyio_mode") == "auto"
marker = collector.get_closest_marker("anyio")
own_markers = getattr(obj, "pytestmark", ())
if (
anyio_auto_mode
or marker
or any(marker.name == "anyio" for marker in own_markers)
):
pytest.mark.usefixtures("anyio_backend")(obj)
@pytest.hookimpl(tryfirst=True)
def pytest_pyfunc_call(pyfuncitem: Any) -> bool | None:
def run_with_hypothesis(**kwargs: Any) -> None:
with get_runner(backend_name, backend_options) as runner:
runner.run_test(original_func, kwargs)
backend = pyfuncitem.funcargs.get("anyio_backend")
if backend:
backend_name, backend_options = extract_backend_and_options(backend)
if hasattr(pyfuncitem.obj, "hypothesis"):
# Wrap the inner test function unless it's already wrapped
original_func = pyfuncitem.obj.hypothesis.inner_test
if original_func.__qualname__ != run_with_hypothesis.__qualname__:
if iscoroutinefunction(original_func):
pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis
return None
if iscoroutinefunction(pyfuncitem.obj):
funcargs = pyfuncitem.funcargs
testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
with get_runner(backend_name, backend_options) as runner:
try:
runner.run_test(pyfuncitem.obj, testargs)
except ExceptionGroup as excgrp:
for exc in iterate_exceptions(excgrp):
if isinstance(exc, (Exit, KeyboardInterrupt, SystemExit)):
raise exc from excgrp
raise
return True
return None
@pytest.fixture(scope="module", params=get_all_backends())
def anyio_backend(request: Any) -> Any:
return request.param
@pytest.fixture
def anyio_backend_name(anyio_backend: Any) -> str:
if isinstance(anyio_backend, str):
return anyio_backend
else:
return anyio_backend[0]
@pytest.fixture
def anyio_backend_options(anyio_backend: Any) -> dict[str, Any]:
if isinstance(anyio_backend, str):
return {}
else:
return anyio_backend[1]
class FreePortFactory:
"""
    Manages port generation based on the specified socket kind, ensuring no
    duplicate ports are generated.
This class provides functionality for generating available free ports on the
system. It is initialized with a specific socket kind and can generate ports
for given address families while avoiding reuse of previously generated ports.
Users should not instantiate this class directly, but use the
    ``free_tcp_port_factory`` and ``free_udp_port_factory`` fixtures instead. For
    simple use cases, ``free_tcp_port`` and ``free_udp_port`` can be used instead.
"""
def __init__(self, kind: socket.SocketKind) -> None:
self._kind = kind
self._generated = set[int]()
@property
def kind(self) -> socket.SocketKind:
"""
The type of socket connection (e.g., :data:`~socket.SOCK_STREAM` or
:data:`~socket.SOCK_DGRAM`) used to bind for checking port availability
"""
return self._kind
def __call__(self, family: socket.AddressFamily | None = None) -> int:
"""
Return an unbound port for the given address family.
:param family: if omitted, both IPv4 and IPv6 addresses will be tried
:return: a port number
"""
if family is not None:
families = [family]
else:
families = [socket.AF_INET]
if socket.has_ipv6:
families.append(socket.AF_INET6)
while True:
port = 0
with ExitStack() as stack:
for family in families:
sock = stack.enter_context(socket.socket(family, self._kind))
addr = "::1" if family == socket.AF_INET6 else "127.0.0.1"
try:
sock.bind((addr, port))
except OSError:
break
if not port:
port = sock.getsockname()[1]
else:
if port not in self._generated:
self._generated.add(port)
return port
@pytest.fixture(scope="session")
def free_tcp_port_factory() -> FreePortFactory:
return FreePortFactory(socket.SOCK_STREAM)
@pytest.fixture(scope="session")
def free_udp_port_factory() -> FreePortFactory:
return FreePortFactory(socket.SOCK_DGRAM)
@pytest.fixture
def free_tcp_port(free_tcp_port_factory: Callable[[], int]) -> int:
return free_tcp_port_factory()
@pytest.fixture
def free_udp_port(free_udp_port_factory: Callable[[], int]) -> int:
return free_udp_port_factory()
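# Usage sketch of the plugin and fixtures above in a test module (the test
# body and the backend override are illustrative):
#
#     import anyio
#     import pytest
#
#     @pytest.fixture
#     def anyio_backend():
#         return "asyncio"  # narrow the default backend parametrization
#
#     @pytest.mark.anyio
#     async def test_free_port(free_tcp_port: int) -> None:
#         listener = await anyio.create_tcp_listener(local_port=free_tcp_port)
#         await listener.aclose()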
anyio-4.11.0/src/anyio/streams/__init__.py
anyio-4.11.0/src/anyio/streams/buffered.py
from __future__ import annotations
import sys
from collections.abc import Callable, Iterable, Mapping
from dataclasses import dataclass, field
from typing import Any, SupportsIndex
from .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead
from ..abc import (
AnyByteReceiveStream,
AnyByteStream,
AnyByteStreamConnectable,
ByteReceiveStream,
ByteStream,
ByteStreamConnectable,
)
if sys.version_info >= (3, 12):
from typing import override
else:
from typing_extensions import override
@dataclass(eq=False)
class BufferedByteReceiveStream(ByteReceiveStream):
"""
Wraps any bytes-based receive stream and uses a buffer to provide sophisticated
receiving capabilities in the form of a byte stream.
"""
receive_stream: AnyByteReceiveStream
_buffer: bytearray = field(init=False, default_factory=bytearray)
_closed: bool = field(init=False, default=False)
async def aclose(self) -> None:
await self.receive_stream.aclose()
self._closed = True
@property
def buffer(self) -> bytes:
"""The bytes currently in the buffer."""
return bytes(self._buffer)
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
return self.receive_stream.extra_attributes
def feed_data(self, data: Iterable[SupportsIndex], /) -> None:
"""
Append data directly into the buffer.
Any data in the buffer will be consumed by receive operations before receiving
anything from the wrapped stream.
:param data: the data to append to the buffer (can be bytes or anything else
that supports ``__index__()``)
"""
self._buffer.extend(data)
async def receive(self, max_bytes: int = 65536) -> bytes:
if self._closed:
raise ClosedResourceError
if self._buffer:
chunk = bytes(self._buffer[:max_bytes])
del self._buffer[:max_bytes]
return chunk
elif isinstance(self.receive_stream, ByteReceiveStream):
return await self.receive_stream.receive(max_bytes)
else:
# With a bytes-oriented object stream, we need to handle any surplus bytes
# we get from the receive() call
chunk = await self.receive_stream.receive()
if len(chunk) > max_bytes:
# Save the surplus bytes in the buffer
self._buffer.extend(chunk[max_bytes:])
return chunk[:max_bytes]
else:
return chunk
async def receive_exactly(self, nbytes: int) -> bytes:
"""
Read exactly the given amount of bytes from the stream.
:param nbytes: the number of bytes to read
:return: the bytes read
:raises ~anyio.IncompleteRead: if the stream was closed before the requested
amount of bytes could be read from the stream
"""
while True:
remaining = nbytes - len(self._buffer)
if remaining <= 0:
retval = self._buffer[:nbytes]
del self._buffer[:nbytes]
return bytes(retval)
try:
if isinstance(self.receive_stream, ByteReceiveStream):
chunk = await self.receive_stream.receive(remaining)
else:
chunk = await self.receive_stream.receive()
except EndOfStream as exc:
raise IncompleteRead from exc
self._buffer.extend(chunk)
async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes:
"""
Read from the stream until the delimiter is found or max_bytes have been read.
:param delimiter: the marker to look for in the stream
:param max_bytes: maximum number of bytes that will be read before raising
:exc:`~anyio.DelimiterNotFound`
:return: the bytes read (not including the delimiter)
:raises ~anyio.IncompleteRead: if the stream was closed before the delimiter
was found
:raises ~anyio.DelimiterNotFound: if the delimiter is not found within the
bytes read up to the maximum allowed
"""
delimiter_size = len(delimiter)
offset = 0
while True:
# Check if the delimiter can be found in the current buffer
index = self._buffer.find(delimiter, offset)
if index >= 0:
found = self._buffer[:index]
                del self._buffer[: index + len(delimiter)]
return bytes(found)
# Check if the buffer is already at or over the limit
if len(self._buffer) >= max_bytes:
raise DelimiterNotFound(max_bytes)
# Read more data into the buffer from the socket
try:
data = await self.receive_stream.receive()
except EndOfStream as exc:
raise IncompleteRead from exc
# Move the offset forward and add the new data to the buffer
offset = max(len(self._buffer) - delimiter_size + 1, 0)
self._buffer.extend(data)
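# Usage sketch (the line-oriented protocol is illustrative): wrap any
# byte-oriented receive stream to get buffered framing helpers.
#
#     from anyio import connect_tcp
#     from anyio.streams.buffered import BufferedByteReceiveStream
#
#     async def read_banner(host: str, port: int) -> bytes:
#         stream = await connect_tcp(host, port)
#         buffered = BufferedByteReceiveStream(stream)
#         return await buffered.receive_until(b"\r\n", max_bytes=4096)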
class BufferedByteStream(BufferedByteReceiveStream, ByteStream):
"""
A full-duplex variant of :class:`BufferedByteReceiveStream`. All writes are passed
through to the wrapped stream as-is.
"""
def __init__(self, stream: AnyByteStream):
"""
:param stream: the stream to be wrapped
"""
super().__init__(stream)
self._stream = stream
@override
async def send_eof(self) -> None:
await self._stream.send_eof()
@override
async def send(self, item: bytes) -> None:
await self._stream.send(item)
class BufferedConnectable(ByteStreamConnectable):
def __init__(self, connectable: AnyByteStreamConnectable):
"""
:param connectable: the connectable to wrap
"""
self.connectable = connectable
@override
async def connect(self) -> BufferedByteStream:
stream = await self.connectable.connect()
return BufferedByteStream(stream)
anyio-4.11.0/src/anyio/streams/file.py
from __future__ import annotations
from collections.abc import Callable, Mapping
from io import SEEK_SET, UnsupportedOperation
from os import PathLike
from pathlib import Path
from typing import Any, BinaryIO, cast
from .. import (
BrokenResourceError,
ClosedResourceError,
EndOfStream,
TypedAttributeSet,
to_thread,
typed_attribute,
)
from ..abc import ByteReceiveStream, ByteSendStream
class FileStreamAttribute(TypedAttributeSet):
#: the open file descriptor
file: BinaryIO = typed_attribute()
#: the path of the file on the file system, if available (file must be a real file)
path: Path = typed_attribute()
#: the file number, if available (file must be a real file or a TTY)
fileno: int = typed_attribute()
class _BaseFileStream:
def __init__(self, file: BinaryIO):
self._file = file
async def aclose(self) -> None:
await to_thread.run_sync(self._file.close)
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
attributes: dict[Any, Callable[[], Any]] = {
FileStreamAttribute.file: lambda: self._file,
}
if hasattr(self._file, "name"):
attributes[FileStreamAttribute.path] = lambda: Path(self._file.name)
try:
self._file.fileno()
except UnsupportedOperation:
pass
else:
attributes[FileStreamAttribute.fileno] = lambda: self._file.fileno()
return attributes
class FileReadStream(_BaseFileStream, ByteReceiveStream):
"""
A byte stream that reads from a file in the file system.
:param file: a file that has been opened for reading in binary mode
.. versionadded:: 3.0
"""
@classmethod
async def from_path(cls, path: str | PathLike[str]) -> FileReadStream:
"""
Create a file read stream by opening the given file.
:param path: path of the file to read from
"""
file = await to_thread.run_sync(Path(path).open, "rb")
return cls(cast(BinaryIO, file))
async def receive(self, max_bytes: int = 65536) -> bytes:
try:
data = await to_thread.run_sync(self._file.read, max_bytes)
except ValueError:
raise ClosedResourceError from None
except OSError as exc:
raise BrokenResourceError from exc
if data:
return data
else:
raise EndOfStream
async def seek(self, position: int, whence: int = SEEK_SET) -> int:
"""
Seek the file to the given position.
.. seealso:: :meth:`io.IOBase.seek`
.. note:: Not all file descriptors are seekable.
:param position: position to seek the file to
:param whence: controls how ``position`` is interpreted
:return: the new absolute position
:raises OSError: if the file is not seekable
"""
return await to_thread.run_sync(self._file.seek, position, whence)
async def tell(self) -> int:
"""
Return the current stream position.
.. note:: Not all file descriptors are seekable.
:return: the current absolute position
:raises OSError: if the file is not seekable
"""
return await to_thread.run_sync(self._file.tell)
class FileWriteStream(_BaseFileStream, ByteSendStream):
"""
A byte stream that writes to a file in the file system.
:param file: a file that has been opened for writing in binary mode
.. versionadded:: 3.0
"""
@classmethod
async def from_path(
cls, path: str | PathLike[str], append: bool = False
) -> FileWriteStream:
"""
Create a file write stream by opening the given file for writing.
:param path: path of the file to write to
:param append: if ``True``, open the file for appending; if ``False``, any
existing file at the given path will be truncated
"""
mode = "ab" if append else "wb"
file = await to_thread.run_sync(Path(path).open, mode)
return cls(cast(BinaryIO, file))
async def send(self, item: bytes) -> None:
try:
await to_thread.run_sync(self._file.write, item)
except ValueError:
raise ClosedResourceError from None
except OSError as exc:
raise BrokenResourceError from exc
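# Usage sketch: copy a file through the stream API (paths are illustrative).
# Both stream classes are async context managers and receive streams support
# async iteration.
#
#     from anyio.streams.file import FileReadStream, FileWriteStream
#
#     async def copy_file(src: str, dst: str) -> None:
#         async with await FileReadStream.from_path(src) as source:
#             async with await FileWriteStream.from_path(dst) as sink:
#                 async for chunk in source:
#                     await sink.send(chunk)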
anyio-4.11.0/src/anyio/streams/memory.py
from __future__ import annotations
import warnings
from collections import OrderedDict, deque
from dataclasses import dataclass, field
from types import TracebackType
from typing import Generic, NamedTuple, TypeVar
from .. import (
BrokenResourceError,
ClosedResourceError,
EndOfStream,
WouldBlock,
)
from .._core._testing import TaskInfo, get_current_task
from ..abc import Event, ObjectReceiveStream, ObjectSendStream
from ..lowlevel import checkpoint
T_Item = TypeVar("T_Item")
T_co = TypeVar("T_co", covariant=True)
T_contra = TypeVar("T_contra", contravariant=True)
class MemoryObjectStreamStatistics(NamedTuple):
current_buffer_used: int #: number of items stored in the buffer
#: maximum number of items that can be stored on this stream (or :data:`math.inf`)
max_buffer_size: float
open_send_streams: int #: number of unclosed clones of the send stream
open_receive_streams: int #: number of unclosed clones of the receive stream
#: number of tasks blocked on :meth:`MemoryObjectSendStream.send`
tasks_waiting_send: int
#: number of tasks blocked on :meth:`MemoryObjectReceiveStream.receive`
tasks_waiting_receive: int
@dataclass(eq=False)
class MemoryObjectItemReceiver(Generic[T_Item]):
task_info: TaskInfo = field(init=False, default_factory=get_current_task)
item: T_Item = field(init=False)
def __repr__(self) -> str:
        # When item is not defined, the default __repr__ would raise:
# AttributeError: 'MemoryObjectItemReceiver' object has no attribute 'item'
item = getattr(self, "item", None)
return f"{self.__class__.__name__}(task_info={self.task_info}, item={item!r})"
@dataclass(eq=False)
class MemoryObjectStreamState(Generic[T_Item]):
max_buffer_size: float = field()
buffer: deque[T_Item] = field(init=False, default_factory=deque)
open_send_channels: int = field(init=False, default=0)
open_receive_channels: int = field(init=False, default=0)
waiting_receivers: OrderedDict[Event, MemoryObjectItemReceiver[T_Item]] = field(
init=False, default_factory=OrderedDict
)
waiting_senders: OrderedDict[Event, T_Item] = field(
init=False, default_factory=OrderedDict
)
def statistics(self) -> MemoryObjectStreamStatistics:
return MemoryObjectStreamStatistics(
len(self.buffer),
self.max_buffer_size,
self.open_send_channels,
self.open_receive_channels,
len(self.waiting_senders),
len(self.waiting_receivers),
)
@dataclass(eq=False)
class MemoryObjectReceiveStream(Generic[T_co], ObjectReceiveStream[T_co]):
_state: MemoryObjectStreamState[T_co]
_closed: bool = field(init=False, default=False)
def __post_init__(self) -> None:
self._state.open_receive_channels += 1
def receive_nowait(self) -> T_co:
"""
Receive the next item if it can be done without waiting.
:return: the received item
:raises ~anyio.ClosedResourceError: if this send stream has been closed
:raises ~anyio.EndOfStream: if the buffer is empty and this stream has been
closed from the sending end
:raises ~anyio.WouldBlock: if there are no items in the buffer and no tasks
waiting to send
"""
if self._closed:
raise ClosedResourceError
if self._state.waiting_senders:
# Get the item from the next sender
send_event, item = self._state.waiting_senders.popitem(last=False)
self._state.buffer.append(item)
send_event.set()
if self._state.buffer:
return self._state.buffer.popleft()
elif not self._state.open_send_channels:
raise EndOfStream
raise WouldBlock
async def receive(self) -> T_co:
await checkpoint()
try:
return self.receive_nowait()
except WouldBlock:
# Add ourselves in the queue
receive_event = Event()
receiver = MemoryObjectItemReceiver[T_co]()
self._state.waiting_receivers[receive_event] = receiver
try:
await receive_event.wait()
finally:
self._state.waiting_receivers.pop(receive_event, None)
try:
return receiver.item
except AttributeError:
raise EndOfStream from None
def clone(self) -> MemoryObjectReceiveStream[T_co]:
"""
Create a clone of this receive stream.
Each clone can be closed separately. Only when all clones have been closed will
the receiving end of the memory stream be considered closed by the sending ends.
:return: the cloned stream
"""
if self._closed:
raise ClosedResourceError
return MemoryObjectReceiveStream(_state=self._state)
def close(self) -> None:
"""
Close the stream.
This works the exact same way as :meth:`aclose`, but is provided as a special
case for the benefit of synchronous callbacks.
"""
if not self._closed:
self._closed = True
self._state.open_receive_channels -= 1
if self._state.open_receive_channels == 0:
send_events = list(self._state.waiting_senders.keys())
for event in send_events:
event.set()
async def aclose(self) -> None:
self.close()
def statistics(self) -> MemoryObjectStreamStatistics:
"""
Return statistics about the current state of this stream.
.. versionadded:: 3.0
"""
return self._state.statistics()
def __enter__(self) -> MemoryObjectReceiveStream[T_co]:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.close()
def __del__(self) -> None:
if not self._closed:
warnings.warn(
f"Unclosed <{self.__class__.__name__} at {id(self):x}>",
ResourceWarning,
stacklevel=1,
source=self,
)
@dataclass(eq=False)
class MemoryObjectSendStream(Generic[T_contra], ObjectSendStream[T_contra]):
_state: MemoryObjectStreamState[T_contra]
_closed: bool = field(init=False, default=False)
def __post_init__(self) -> None:
self._state.open_send_channels += 1
def send_nowait(self, item: T_contra) -> None:
"""
Send an item immediately if it can be done without waiting.
:param item: the item to send
:raises ~anyio.ClosedResourceError: if this send stream has been closed
:raises ~anyio.BrokenResourceError: if the stream has been closed from the
receiving end
:raises ~anyio.WouldBlock: if the buffer is full and there are no tasks waiting
to receive
"""
if self._closed:
raise ClosedResourceError
if not self._state.open_receive_channels:
raise BrokenResourceError
while self._state.waiting_receivers:
receive_event, receiver = self._state.waiting_receivers.popitem(last=False)
if not receiver.task_info.has_pending_cancellation():
receiver.item = item
receive_event.set()
return
if len(self._state.buffer) < self._state.max_buffer_size:
self._state.buffer.append(item)
else:
raise WouldBlock
async def send(self, item: T_contra) -> None:
"""
Send an item to the stream.
If the buffer is full, this method blocks until there is again room in the
buffer or the item can be sent directly to a receiver.
:param item: the item to send
:raises ~anyio.ClosedResourceError: if this send stream has been closed
:raises ~anyio.BrokenResourceError: if the stream has been closed from the
receiving end
"""
await checkpoint()
try:
self.send_nowait(item)
except WouldBlock:
# Wait until there's someone on the receiving end
send_event = Event()
self._state.waiting_senders[send_event] = item
try:
await send_event.wait()
except BaseException:
self._state.waiting_senders.pop(send_event, None)
raise
if send_event in self._state.waiting_senders:
del self._state.waiting_senders[send_event]
raise BrokenResourceError from None
def clone(self) -> MemoryObjectSendStream[T_contra]:
"""
Create a clone of this send stream.
Each clone can be closed separately. Only when all clones have been closed will
the sending end of the memory stream be considered closed by the receiving ends.
:return: the cloned stream
"""
if self._closed:
raise ClosedResourceError
return MemoryObjectSendStream(_state=self._state)
def close(self) -> None:
"""
Close the stream.
This works the exact same way as :meth:`aclose`, but is provided as a special
case for the benefit of synchronous callbacks.
"""
if not self._closed:
self._closed = True
self._state.open_send_channels -= 1
if self._state.open_send_channels == 0:
receive_events = list(self._state.waiting_receivers.keys())
self._state.waiting_receivers.clear()
for event in receive_events:
event.set()
async def aclose(self) -> None:
self.close()
def statistics(self) -> MemoryObjectStreamStatistics:
"""
Return statistics about the current state of this stream.
.. versionadded:: 3.0
"""
return self._state.statistics()
def __enter__(self) -> MemoryObjectSendStream[T_contra]:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.close()
def __del__(self) -> None:
if not self._closed:
warnings.warn(
f"Unclosed <{self.__class__.__name__} at {id(self):x}>",
ResourceWarning,
stacklevel=1,
source=self,
)
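# Usage sketch: these stream halves are normally created in pairs via
# anyio.create_memory_object_stream(); closing the send side signals
# EndOfStream to receivers, which ends async iteration.
#
#     import anyio
#
#     async def main() -> None:
#         send, receive = anyio.create_memory_object_stream[int](max_buffer_size=3)
#         async with send, receive:
#             for i in range(3):
#                 send.send_nowait(i)
#             send.close()
#             async for item in receive:
#                 print(item)  # prints 0, 1, 2
#
#     anyio.run(main)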
anyio-4.11.0/src/anyio/streams/stapled.py
from __future__ import annotations
from collections.abc import Callable, Mapping, Sequence
from dataclasses import dataclass
from typing import Any, Generic, TypeVar
from ..abc import (
ByteReceiveStream,
ByteSendStream,
ByteStream,
Listener,
ObjectReceiveStream,
ObjectSendStream,
ObjectStream,
TaskGroup,
)
T_Item = TypeVar("T_Item")
T_Stream = TypeVar("T_Stream")
@dataclass(eq=False)
class StapledByteStream(ByteStream):
"""
Combines two byte streams into a single, bidirectional byte stream.
Extra attributes will be provided from both streams, with the receive stream
providing the values in case of a conflict.
:param ByteSendStream send_stream: the sending byte stream
:param ByteReceiveStream receive_stream: the receiving byte stream
"""
send_stream: ByteSendStream
receive_stream: ByteReceiveStream
async def receive(self, max_bytes: int = 65536) -> bytes:
return await self.receive_stream.receive(max_bytes)
async def send(self, item: bytes) -> None:
await self.send_stream.send(item)
async def send_eof(self) -> None:
await self.send_stream.aclose()
async def aclose(self) -> None:
await self.send_stream.aclose()
await self.receive_stream.aclose()
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
return {
**self.send_stream.extra_attributes,
**self.receive_stream.extra_attributes,
}
@dataclass(eq=False)
class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]):
"""
Combines two object streams into a single, bidirectional object stream.
Extra attributes will be provided from both streams, with the receive stream
providing the values in case of a conflict.
:param ObjectSendStream send_stream: the sending object stream
:param ObjectReceiveStream receive_stream: the receiving object stream
"""
send_stream: ObjectSendStream[T_Item]
receive_stream: ObjectReceiveStream[T_Item]
async def receive(self) -> T_Item:
return await self.receive_stream.receive()
async def send(self, item: T_Item) -> None:
await self.send_stream.send(item)
async def send_eof(self) -> None:
await self.send_stream.aclose()
async def aclose(self) -> None:
await self.send_stream.aclose()
await self.receive_stream.aclose()
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
return {
**self.send_stream.extra_attributes,
**self.receive_stream.extra_attributes,
}
@dataclass(eq=False)
class MultiListener(Generic[T_Stream], Listener[T_Stream]):
"""
Combines multiple listeners into one, serving connections from all of them at once.
Any MultiListeners in the given collection of listeners will have their listeners
moved into this one.
Extra attributes are provided from each listener, with each successive listener
overriding any conflicting attributes from the previous one.
:param listeners: listeners to serve
:type listeners: Sequence[Listener[T_Stream]]
"""
listeners: Sequence[Listener[T_Stream]]
def __post_init__(self) -> None:
listeners: list[Listener[T_Stream]] = []
for listener in self.listeners:
if isinstance(listener, MultiListener):
listeners.extend(listener.listeners)
del listener.listeners[:] # type: ignore[attr-defined]
else:
listeners.append(listener)
self.listeners = listeners
async def serve(
self, handler: Callable[[T_Stream], Any], task_group: TaskGroup | None = None
) -> None:
from .. import create_task_group
async with create_task_group() as tg:
for listener in self.listeners:
tg.start_soon(listener.serve, handler, task_group)
async def aclose(self) -> None:
for listener in self.listeners:
await listener.aclose()
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
attributes: dict = {}
for listener in self.listeners:
attributes.update(listener.extra_attributes)
return attributes
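# Usage sketch: staple a memory stream pair into a single bidirectional
# object stream, handy as an in-process loopback for tests.
#
#     import anyio
#     from anyio.streams.stapled import StapledObjectStream
#
#     async def main() -> None:
#         send, receive = anyio.create_memory_object_stream[bytes](1)
#         stapled = StapledObjectStream(send, receive)
#         await stapled.send(b"ping")
#         assert await stapled.receive() == b"ping"
#         await stapled.aclose()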
anyio-4.11.0/src/anyio/streams/text.py
from __future__ import annotations
import codecs
import sys
from collections.abc import Callable, Mapping
from dataclasses import InitVar, dataclass, field
from typing import Any
from ..abc import (
AnyByteReceiveStream,
AnyByteSendStream,
AnyByteStream,
AnyByteStreamConnectable,
ObjectReceiveStream,
ObjectSendStream,
ObjectStream,
ObjectStreamConnectable,
)
if sys.version_info >= (3, 12):
from typing import override
else:
from typing_extensions import override
@dataclass(eq=False)
class TextReceiveStream(ObjectReceiveStream[str]):
"""
Stream wrapper that decodes bytes to strings using the given encoding.
Decoding is done using :class:`~codecs.IncrementalDecoder` which returns any
completely received unicode characters as soon as they come in.
:param transport_stream: any bytes-based receive stream
:param encoding: character encoding to use for decoding bytes to strings (defaults
to ``utf-8``)
:param errors: handling scheme for decoding errors (defaults to ``strict``; see the
`codecs module documentation`_ for a comprehensive list of options)
.. _codecs module documentation:
https://docs.python.org/3/library/codecs.html#codec-objects
"""
transport_stream: AnyByteReceiveStream
encoding: InitVar[str] = "utf-8"
errors: InitVar[str] = "strict"
_decoder: codecs.IncrementalDecoder = field(init=False)
def __post_init__(self, encoding: str, errors: str) -> None:
decoder_class = codecs.getincrementaldecoder(encoding)
self._decoder = decoder_class(errors=errors)
async def receive(self) -> str:
while True:
chunk = await self.transport_stream.receive()
decoded = self._decoder.decode(chunk)
if decoded:
return decoded
async def aclose(self) -> None:
await self.transport_stream.aclose()
self._decoder.reset()
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
return self.transport_stream.extra_attributes
@dataclass(eq=False)
class TextSendStream(ObjectSendStream[str]):
"""
Sends strings to the wrapped stream as bytes using the given encoding.
:param AnyByteSendStream transport_stream: any bytes-based send stream
:param str encoding: character encoding to use for encoding strings to bytes
(defaults to ``utf-8``)
:param str errors: handling scheme for encoding errors (defaults to ``strict``; see
the `codecs module documentation`_ for a comprehensive list of options)
.. _codecs module documentation:
https://docs.python.org/3/library/codecs.html#codec-objects
"""
transport_stream: AnyByteSendStream
encoding: InitVar[str] = "utf-8"
errors: str = "strict"
_encoder: Callable[..., tuple[bytes, int]] = field(init=False)
def __post_init__(self, encoding: str) -> None:
self._encoder = codecs.getencoder(encoding)
async def send(self, item: str) -> None:
encoded = self._encoder(item, self.errors)[0]
await self.transport_stream.send(encoded)
async def aclose(self) -> None:
await self.transport_stream.aclose()
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
return self.transport_stream.extra_attributes
@dataclass(eq=False)
class TextStream(ObjectStream[str]):
"""
A bidirectional stream that decodes bytes to strings on receive and encodes strings
to bytes on send.
Extra attributes will be provided from both streams, with the receive stream
providing the values in case of a conflict.
:param AnyByteStream transport_stream: any bytes-based stream
:param str encoding: character encoding to use for encoding/decoding strings to/from
bytes (defaults to ``utf-8``)
:param str errors: handling scheme for encoding errors (defaults to ``strict``; see
the `codecs module documentation`_ for a comprehensive list of options)
.. _codecs module documentation:
https://docs.python.org/3/library/codecs.html#codec-objects
"""
transport_stream: AnyByteStream
encoding: InitVar[str] = "utf-8"
errors: InitVar[str] = "strict"
_receive_stream: TextReceiveStream = field(init=False)
_send_stream: TextSendStream = field(init=False)
def __post_init__(self, encoding: str, errors: str) -> None:
self._receive_stream = TextReceiveStream(
self.transport_stream, encoding=encoding, errors=errors
)
self._send_stream = TextSendStream(
self.transport_stream, encoding=encoding, errors=errors
)
async def receive(self) -> str:
return await self._receive_stream.receive()
async def send(self, item: str) -> None:
await self._send_stream.send(item)
async def send_eof(self) -> None:
await self.transport_stream.send_eof()
async def aclose(self) -> None:
await self._send_stream.aclose()
await self._receive_stream.aclose()
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
return {
**self._send_stream.extra_attributes,
**self._receive_stream.extra_attributes,
}
class TextConnectable(ObjectStreamConnectable[str]):
def __init__(self, connectable: AnyByteStreamConnectable):
"""
:param connectable: the bytestream endpoint to wrap
"""
self.connectable = connectable
@override
async def connect(self) -> TextStream:
stream = await self.connectable.connect()
return TextStream(stream)
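# Usage sketch (the connection setup and message are illustrative): give any
# byte stream a str-based interface.
#
#     from anyio import connect_tcp
#     from anyio.streams.text import TextStream
#
#     async def chat(host: str, port: int) -> str:
#         text_stream = TextStream(await connect_tcp(host, port))
#         await text_stream.send("hello\n")
#         return await text_stream.receive()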
anyio-4.11.0/src/anyio/streams/tls.py
from __future__ import annotations
import logging
import re
import ssl
import sys
from collections.abc import Callable, Mapping
from dataclasses import dataclass
from functools import wraps
from ssl import SSLContext
from typing import Any, TypeVar
from .. import (
BrokenResourceError,
EndOfStream,
aclose_forcefully,
get_cancelled_exc_class,
to_thread,
)
from .._core._typedattr import TypedAttributeSet, typed_attribute
from ..abc import (
AnyByteStream,
AnyByteStreamConnectable,
ByteStream,
ByteStreamConnectable,
Listener,
TaskGroup,
)
if sys.version_info >= (3, 10):
from typing import TypeAlias
else:
from typing_extensions import TypeAlias
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
if sys.version_info >= (3, 12):
from typing import override
else:
from typing_extensions import override
T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
_PCTRTT: TypeAlias = tuple[tuple[str, str], ...]
_PCTRTTT: TypeAlias = tuple[_PCTRTT, ...]
class TLSAttribute(TypedAttributeSet):
"""Contains Transport Layer Security related attributes."""
#: the selected ALPN protocol
alpn_protocol: str | None = typed_attribute()
#: the channel binding for type ``tls-unique``
channel_binding_tls_unique: bytes = typed_attribute()
#: the selected cipher
cipher: tuple[str, str, int] = typed_attribute()
#: the peer certificate in dictionary form (see :meth:`ssl.SSLSocket.getpeercert`
    #: for more information)
peer_certificate: None | (dict[str, str | _PCTRTTT | _PCTRTT]) = typed_attribute()
#: the peer certificate in binary form
peer_certificate_binary: bytes | None = typed_attribute()
#: ``True`` if this is the server side of the connection
server_side: bool = typed_attribute()
#: ciphers shared by the client during the TLS handshake (``None`` if this is the
#: client side)
shared_ciphers: list[tuple[str, str, int]] | None = typed_attribute()
#: the :class:`~ssl.SSLObject` used for encryption
ssl_object: ssl.SSLObject = typed_attribute()
#: ``True`` if this stream does (and expects) a closing TLS handshake when the
#: stream is being closed
standard_compatible: bool = typed_attribute()
#: the TLS protocol version (e.g. ``TLSv1.2``)
tls_version: str = typed_attribute()
@dataclass(eq=False)
class TLSStream(ByteStream):
"""
A stream wrapper that encrypts all sent data and decrypts received data.
This class has no public initializer; use :meth:`wrap` instead.
All extra attributes from :class:`~TLSAttribute` are supported.
:var AnyByteStream transport_stream: the wrapped stream
"""
transport_stream: AnyByteStream
standard_compatible: bool
_ssl_object: ssl.SSLObject
_read_bio: ssl.MemoryBIO
_write_bio: ssl.MemoryBIO
@classmethod
async def wrap(
cls,
transport_stream: AnyByteStream,
*,
server_side: bool | None = None,
hostname: str | None = None,
ssl_context: ssl.SSLContext | None = None,
standard_compatible: bool = True,
) -> TLSStream:
"""
Wrap an existing stream with Transport Layer Security.
This performs a TLS handshake with the peer.
:param transport_stream: a bytes-transporting stream to wrap
:param server_side: ``True`` if this is the server side of the connection,
``False`` if this is the client side (if omitted, will be set to ``False``
            if ``hostname`` has been provided, ``True`` otherwise). Used only to create
a default context when an explicit context has not been provided.
:param hostname: host name of the peer (if host name checking is desired)
:param ssl_context: the SSLContext object to use (if not provided, a secure
default will be created)
:param standard_compatible: if ``False``, skip the closing handshake when
closing the connection, and don't raise an exception if the peer does the
same
:raises ~ssl.SSLError: if the TLS handshake fails
"""
if server_side is None:
server_side = not hostname
if not ssl_context:
purpose = (
ssl.Purpose.CLIENT_AUTH if server_side else ssl.Purpose.SERVER_AUTH
)
ssl_context = ssl.create_default_context(purpose)
# Re-enable detection of unexpected EOFs if it was disabled by Python
if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"):
ssl_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF
bio_in = ssl.MemoryBIO()
bio_out = ssl.MemoryBIO()
# External SSLContext implementations may do blocking I/O in wrap_bio(),
# but the standard library implementation won't
if type(ssl_context) is ssl.SSLContext:
ssl_object = ssl_context.wrap_bio(
bio_in, bio_out, server_side=server_side, server_hostname=hostname
)
else:
ssl_object = await to_thread.run_sync(
ssl_context.wrap_bio,
bio_in,
bio_out,
server_side,
hostname,
None,
)
wrapper = cls(
transport_stream=transport_stream,
standard_compatible=standard_compatible,
_ssl_object=ssl_object,
_read_bio=bio_in,
_write_bio=bio_out,
)
await wrapper._call_sslobject_method(ssl_object.do_handshake)
return wrapper
async def _call_sslobject_method(
self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
) -> T_Retval:
while True:
try:
result = func(*args)
except ssl.SSLWantReadError:
try:
# Flush any pending writes first
if self._write_bio.pending:
await self.transport_stream.send(self._write_bio.read())
data = await self.transport_stream.receive()
except EndOfStream:
self._read_bio.write_eof()
except OSError as exc:
self._read_bio.write_eof()
self._write_bio.write_eof()
raise BrokenResourceError from exc
else:
self._read_bio.write(data)
except ssl.SSLWantWriteError:
await self.transport_stream.send(self._write_bio.read())
except ssl.SSLSyscallError as exc:
self._read_bio.write_eof()
self._write_bio.write_eof()
raise BrokenResourceError from exc
except ssl.SSLError as exc:
self._read_bio.write_eof()
self._write_bio.write_eof()
if isinstance(exc, ssl.SSLEOFError) or (
exc.strerror and "UNEXPECTED_EOF_WHILE_READING" in exc.strerror
):
if self.standard_compatible:
raise BrokenResourceError from exc
else:
raise EndOfStream from None
raise
else:
# Flush any pending writes first
if self._write_bio.pending:
await self.transport_stream.send(self._write_bio.read())
return result
async def unwrap(self) -> tuple[AnyByteStream, bytes]:
"""
Does the TLS closing handshake.
:return: a tuple of (wrapped byte stream, bytes left in the read buffer)
"""
await self._call_sslobject_method(self._ssl_object.unwrap)
self._read_bio.write_eof()
self._write_bio.write_eof()
return self.transport_stream, self._read_bio.read()
async def aclose(self) -> None:
if self.standard_compatible:
try:
await self.unwrap()
except BaseException:
await aclose_forcefully(self.transport_stream)
raise
await self.transport_stream.aclose()
async def receive(self, max_bytes: int = 65536) -> bytes:
data = await self._call_sslobject_method(self._ssl_object.read, max_bytes)
if not data:
raise EndOfStream
return data
async def send(self, item: bytes) -> None:
await self._call_sslobject_method(self._ssl_object.write, item)
async def send_eof(self) -> None:
tls_version = self.extra(TLSAttribute.tls_version)
match = re.match(r"TLSv(\d+)(?:\.(\d+))?", tls_version)
if match:
major, minor = int(match.group(1)), int(match.group(2) or 0)
if (major, minor) < (1, 3):
raise NotImplementedError(
f"send_eof() requires at least TLSv1.3; current "
f"session uses {tls_version}"
)
raise NotImplementedError(
"send_eof() has not yet been implemented for TLS streams"
)
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
return {
**self.transport_stream.extra_attributes,
TLSAttribute.alpn_protocol: self._ssl_object.selected_alpn_protocol,
TLSAttribute.channel_binding_tls_unique: (
self._ssl_object.get_channel_binding
),
TLSAttribute.cipher: self._ssl_object.cipher,
TLSAttribute.peer_certificate: lambda: self._ssl_object.getpeercert(False),
TLSAttribute.peer_certificate_binary: lambda: self._ssl_object.getpeercert(
True
),
TLSAttribute.server_side: lambda: self._ssl_object.server_side,
TLSAttribute.shared_ciphers: lambda: self._ssl_object.shared_ciphers()
if self._ssl_object.server_side
else None,
TLSAttribute.standard_compatible: lambda: self.standard_compatible,
TLSAttribute.ssl_object: lambda: self._ssl_object,
TLSAttribute.tls_version: self._ssl_object.version,
}
@dataclass(eq=False)
class TLSListener(Listener[TLSStream]):
"""
A convenience listener that wraps another listener and auto-negotiates a TLS session
on every accepted connection.
If the TLS handshake times out or raises an exception,
:meth:`handle_handshake_error` is called to do whatever post-mortem processing is
deemed necessary.
Supports only the :attr:`~TLSAttribute.standard_compatible` extra attribute.
:param Listener listener: the listener to wrap
:param ssl_context: the SSL context object
:param standard_compatible: a flag passed through to :meth:`TLSStream.wrap`
:param handshake_timeout: time limit for the TLS handshake
(passed to :func:`~anyio.fail_after`)
"""
listener: Listener[Any]
ssl_context: ssl.SSLContext
standard_compatible: bool = True
handshake_timeout: float = 30
@staticmethod
async def handle_handshake_error(exc: BaseException, stream: AnyByteStream) -> None:
"""
Handle an exception raised during the TLS handshake.
This method does 3 things:
#. Forcefully closes the original stream
#. Logs the exception (unless it was a cancellation exception) using the
``anyio.streams.tls`` logger
#. Reraises the exception if it was a base exception or a cancellation exception
:param exc: the exception
:param stream: the original stream
"""
await aclose_forcefully(stream)
# Log all except cancellation exceptions
if not isinstance(exc, get_cancelled_exc_class()):
# CPython (as of 3.11.5) returns incorrect `sys.exc_info()` here when using
# any asyncio implementation, so we explicitly pass the exception to log
# (https://github.com/python/cpython/issues/108668). Trio does not have this
# issue because it works around the CPython bug.
logging.getLogger(__name__).exception(
"Error during TLS handshake", exc_info=exc
)
# Only reraise base exceptions and cancellation exceptions
if not isinstance(exc, Exception) or isinstance(exc, get_cancelled_exc_class()):
raise
async def serve(
self,
handler: Callable[[TLSStream], Any],
task_group: TaskGroup | None = None,
) -> None:
@wraps(handler)
async def handler_wrapper(stream: AnyByteStream) -> None:
from .. import fail_after
try:
with fail_after(self.handshake_timeout):
wrapped_stream = await TLSStream.wrap(
stream,
ssl_context=self.ssl_context,
standard_compatible=self.standard_compatible,
)
except BaseException as exc:
await self.handle_handshake_error(exc, stream)
else:
await handler(wrapped_stream)
await self.listener.serve(handler_wrapper, task_group)
async def aclose(self) -> None:
await self.listener.aclose()
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
return {
TLSAttribute.standard_compatible: lambda: self.standard_compatible,
}
class TLSConnectable(ByteStreamConnectable):
"""
Wraps another connectable and does TLS negotiation after a successful connection.
:param connectable: the connectable to wrap
:param hostname: host name of the server (if host name checking is desired)
:param ssl_context: the SSLContext object to use (if not provided, a secure default
will be created)
:param standard_compatible: if ``False``, skip the closing handshake when closing
the connection, and don't raise an exception if the server does the same
"""
def __init__(
self,
connectable: AnyByteStreamConnectable,
*,
hostname: str | None = None,
ssl_context: ssl.SSLContext | None = None,
standard_compatible: bool = True,
) -> None:
self.connectable = connectable
self.ssl_context: SSLContext = ssl_context or ssl.create_default_context(
ssl.Purpose.SERVER_AUTH
)
if not isinstance(self.ssl_context, ssl.SSLContext):
raise TypeError(
"ssl_context must be an instance of ssl.SSLContext, not "
f"{type(self.ssl_context).__name__}"
)
self.hostname = hostname
self.standard_compatible = standard_compatible
@override
async def connect(self) -> TLSStream:
stream = await self.connectable.connect()
try:
return await TLSStream.wrap(
stream,
hostname=self.hostname,
ssl_context=self.ssl_context,
standard_compatible=self.standard_compatible,
)
except BaseException:
await aclose_forcefully(stream)
raise
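# Usage sketch (host and payload are illustrative): upgrade an established
# TCP stream to TLS using a default client context.
#
#     import anyio
#     from anyio.streams.tls import TLSStream
#
#     async def tls_banner(host: str, port: int = 443) -> bytes:
#         tcp = await anyio.connect_tcp(host, port)
#         tls = await TLSStream.wrap(tcp, hostname=host)
#         try:
#             await tls.send(b"HEAD / HTTP/1.0\r\n\r\n")
#             return await tls.receive()
#         finally:
#             await tls.aclose()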
anyio-4.11.0/src/anyio/to_interpreter.py
from __future__ import annotations
import atexit
import os
import sys
from collections import deque
from collections.abc import Callable
from typing import Any, Final, TypeVar
from . import current_time, to_thread
from ._core._exceptions import BrokenWorkerInterpreter
from ._core._synchronization import CapacityLimiter
from .lowlevel import RunVar
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
if sys.version_info >= (3, 14):
from concurrent.interpreters import ExecutionFailed, create
def _interp_call(func: Callable[..., Any], args: tuple[Any, ...]):
try:
retval = func(*args)
except BaseException as exc:
return exc, True
else:
return retval, False
class Worker:
last_used: float = 0
def __init__(self) -> None:
self._interpreter = create()
def destroy(self) -> None:
self._interpreter.close()
def call(
self,
func: Callable[..., T_Retval],
args: tuple[Any, ...],
) -> T_Retval:
try:
res, is_exception = self._interpreter.call(_interp_call, func, args)
except ExecutionFailed as exc:
raise BrokenWorkerInterpreter(exc.excinfo) from exc
if is_exception:
raise res
return res
elif sys.version_info >= (3, 13):
import _interpqueues
import _interpreters
UNBOUND: Final = 2 # I have no clue how this works, but it was used in the stdlib
FMT_UNPICKLED: Final = 0
FMT_PICKLED: Final = 1
QUEUE_PICKLE_ARGS: Final = (FMT_PICKLED, UNBOUND)
QUEUE_UNPICKLE_ARGS: Final = (FMT_UNPICKLED, UNBOUND)
_run_func = compile(
"""
import _interpqueues
from _interpreters import NotShareableError
from pickle import loads, dumps, HIGHEST_PROTOCOL
QUEUE_PICKLE_ARGS = (1, 2)
QUEUE_UNPICKLE_ARGS = (0, 2)
item = _interpqueues.get(queue_id)[0]
try:
func, args = loads(item)
retval = func(*args)
except BaseException as exc:
is_exception = True
retval = exc
else:
is_exception = False
try:
_interpqueues.put(queue_id, (retval, is_exception), *QUEUE_UNPICKLE_ARGS)
except NotShareableError:
retval = dumps(retval, HIGHEST_PROTOCOL)
_interpqueues.put(queue_id, (retval, is_exception), *QUEUE_PICKLE_ARGS)
""",
"",
"exec",
)
class Worker:
last_used: float = 0
def __init__(self) -> None:
self._interpreter_id = _interpreters.create()
self._queue_id = _interpqueues.create(1, *QUEUE_UNPICKLE_ARGS)
_interpreters.set___main___attrs(
self._interpreter_id, {"queue_id": self._queue_id}
)
def destroy(self) -> None:
_interpqueues.destroy(self._queue_id)
_interpreters.destroy(self._interpreter_id)
def call(
self,
func: Callable[..., T_Retval],
args: tuple[Any, ...],
) -> T_Retval:
import pickle
item = pickle.dumps((func, args), pickle.HIGHEST_PROTOCOL)
_interpqueues.put(self._queue_id, item, *QUEUE_PICKLE_ARGS)
exc_info = _interpreters.exec(self._interpreter_id, _run_func)
if exc_info:
raise BrokenWorkerInterpreter(exc_info)
res = _interpqueues.get(self._queue_id)
(res, is_exception), fmt = res[:2]
if fmt == FMT_PICKLED:
res = pickle.loads(res)
if is_exception:
raise res
return res
else:
class Worker:
last_used: float = 0
def __init__(self) -> None:
raise RuntimeError("subinterpreters require at least Python 3.13")
def call(
self,
func: Callable[..., T_Retval],
args: tuple[Any, ...],
) -> T_Retval:
raise NotImplementedError
def destroy(self) -> None:
pass
DEFAULT_CPU_COUNT: Final = 8 # this is just an arbitrarily selected value
MAX_WORKER_IDLE_TIME = (
30 # seconds a subinterpreter can be idle before becoming eligible for pruning
)
T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
_idle_workers = RunVar[deque[Worker]]("_available_workers")
_default_interpreter_limiter = RunVar[CapacityLimiter]("_default_interpreter_limiter")
def _stop_workers(workers: deque[Worker]) -> None:
for worker in workers:
worker.destroy()
workers.clear()
async def run_sync(
func: Callable[[Unpack[PosArgsT]], T_Retval],
*args: Unpack[PosArgsT],
limiter: CapacityLimiter | None = None,
) -> T_Retval:
"""
Call the given function with the given arguments in a subinterpreter.
.. warning:: On Python 3.13, the :mod:`concurrent.interpreters` module was not yet
available, so the code path for that Python version relies on an undocumented,
private API. As such, it is recommended to not rely on this function for anything
mission-critical on Python 3.13.
:param func: a callable
:param args: the positional arguments for the callable
:param limiter: capacity limiter to use to limit the total number of subinterpreters
running (if omitted, the default limiter is used)
:return: the result of the call
:raises BrokenWorkerInterpreter: if there's an internal error in a subinterpreter
"""
if limiter is None:
limiter = current_default_interpreter_limiter()
try:
idle_workers = _idle_workers.get()
except LookupError:
idle_workers = deque()
_idle_workers.set(idle_workers)
atexit.register(_stop_workers, idle_workers)
async with limiter:
try:
worker = idle_workers.pop()
except IndexError:
worker = Worker()
try:
return await to_thread.run_sync(
worker.call,
func,
args,
limiter=limiter,
)
finally:
# Prune workers that have been idle for too long
now = current_time()
while idle_workers:
if now - idle_workers[0].last_used <= MAX_WORKER_IDLE_TIME:
break
await to_thread.run_sync(idle_workers.popleft().destroy, limiter=limiter)
worker.last_used = current_time()
idle_workers.append(worker)
def current_default_interpreter_limiter() -> CapacityLimiter:
"""
Return the capacity limiter used by default to limit the number of concurrently
running subinterpreters.
Defaults to the number of CPU cores.
:return: a capacity limiter object
"""
try:
return _default_interpreter_limiter.get()
except LookupError:
limiter = CapacityLimiter(os.cpu_count() or DEFAULT_CPU_COUNT)
_default_interpreter_limiter.set(limiter)
return limiter
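# Usage sketch (the workload is illustrative): offload a CPU-bound, picklable,
# module-level callable to a subinterpreter so it doesn't block the event
# loop.
#
#     import anyio
#     from anyio import to_interpreter
#
#     def fib(n: int) -> int:
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     async def main() -> None:
#         print(await to_interpreter.run_sync(fib, 28))
#
#     anyio.run(main)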
anyio-4.11.0/src/anyio/to_process.py
from __future__ import annotations
import os
import pickle
import subprocess
import sys
from collections import deque
from collections.abc import Callable
from importlib.util import module_from_spec, spec_from_file_location
from typing import TypeVar, cast
from ._core._eventloop import current_time, get_async_backend, get_cancelled_exc_class
from ._core._exceptions import BrokenWorkerProcess
from ._core._subprocesses import open_process
from ._core._synchronization import CapacityLimiter
from ._core._tasks import CancelScope, fail_after
from .abc import ByteReceiveStream, ByteSendStream, Process
from .lowlevel import RunVar, checkpoint_if_cancelled
from .streams.buffered import BufferedByteReceiveStream
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
WORKER_MAX_IDLE_TIME = 300 # 5 minutes
T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
_process_pool_workers: RunVar[set[Process]] = RunVar("_process_pool_workers")
_process_pool_idle_workers: RunVar[deque[tuple[Process, float]]] = RunVar(
"_process_pool_idle_workers"
)
_default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter")
async def run_sync( # type: ignore[return]
func: Callable[[Unpack[PosArgsT]], T_Retval],
*args: Unpack[PosArgsT],
cancellable: bool = False,
limiter: CapacityLimiter | None = None,
) -> T_Retval:
"""
Call the given function with the given arguments in a worker process.
If the ``cancellable`` option is enabled and the task waiting for its completion is
cancelled, the worker process running it will be abruptly terminated using SIGKILL
(or ``terminateProcess()`` on Windows).
:param func: a callable
:param args: positional arguments for the callable
:param cancellable: ``True`` to allow cancellation of the operation while it's
running
:param limiter: capacity limiter to use to limit the total amount of processes
running (if omitted, the default limiter is used)
:return: an awaitable that yields the return value of the function.
"""
async def send_raw_command(pickled_cmd: bytes) -> object:
try:
await stdin.send(pickled_cmd)
response = await buffered.receive_until(b"\n", 50)
status, length = response.split(b" ")
if status not in (b"RETURN", b"EXCEPTION"):
raise RuntimeError(
f"Worker process returned unexpected response: {response!r}"
)
pickled_response = await buffered.receive_exactly(int(length))
except BaseException as exc:
workers.discard(process)
try:
process.kill()
with CancelScope(shield=True):
await process.aclose()
except ProcessLookupError:
pass
if isinstance(exc, get_cancelled_exc_class()):
raise
else:
raise BrokenWorkerProcess from exc
retval = pickle.loads(pickled_response)
if status == b"EXCEPTION":
assert isinstance(retval, BaseException)
raise retval
else:
return retval
# First pickle the request before trying to reserve a worker process
await checkpoint_if_cancelled()
request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL)
# If this is the first run in this event loop thread, set up the necessary variables
try:
workers = _process_pool_workers.get()
idle_workers = _process_pool_idle_workers.get()
except LookupError:
workers = set()
idle_workers = deque()
_process_pool_workers.set(workers)
_process_pool_idle_workers.set(idle_workers)
get_async_backend().setup_process_pool_exit_at_shutdown(workers)
async with limiter or current_default_process_limiter():
# Pop processes from the pool (starting from the most recently used) until we
# find one that hasn't exited yet
process: Process
while idle_workers:
process, idle_since = idle_workers.pop()
if process.returncode is None:
stdin = cast(ByteSendStream, process.stdin)
buffered = BufferedByteReceiveStream(
cast(ByteReceiveStream, process.stdout)
)
# Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME
# seconds or longer
now = current_time()
killed_processes: list[Process] = []
while idle_workers:
if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME:
break
process_to_kill, idle_since = idle_workers.popleft()
process_to_kill.kill()
workers.remove(process_to_kill)
killed_processes.append(process_to_kill)
with CancelScope(shield=True):
for killed_process in killed_processes:
await killed_process.aclose()
break
workers.remove(process)
else:
command = [sys.executable, "-u", "-m", __name__]
process = await open_process(
command, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
try:
stdin = cast(ByteSendStream, process.stdin)
buffered = BufferedByteReceiveStream(
cast(ByteReceiveStream, process.stdout)
)
with fail_after(20):
message = await buffered.receive(6)
if message != b"READY\n":
raise BrokenWorkerProcess(
f"Worker process returned unexpected response: {message!r}"
)
main_module_path = getattr(sys.modules["__main__"], "__file__", None)
pickled = pickle.dumps(
("init", sys.path, main_module_path),
protocol=pickle.HIGHEST_PROTOCOL,
)
await send_raw_command(pickled)
except (BrokenWorkerProcess, get_cancelled_exc_class()):
raise
except BaseException as exc:
process.kill()
raise BrokenWorkerProcess(
"Error during worker process initialization"
) from exc
workers.add(process)
with CancelScope(shield=not cancellable):
try:
return cast(T_Retval, await send_raw_command(request))
finally:
if process in workers:
idle_workers.append((process, current_time()))
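# Usage sketch (the workload is illustrative): the callable and its arguments
# must be picklable, and the entry point should be guarded with __main__
# because the worker re-imports the main module (as __mp_main__; see
# process_worker() below).
#
#     import anyio
#     from anyio import to_process
#
#     def crunch(data: bytes) -> int:
#         return sum(data)
#
#     async def main() -> None:
#         print(await to_process.run_sync(crunch, b"\x01" * 1000))
#
#     if __name__ == "__main__":
#         anyio.run(main)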
def current_default_process_limiter() -> CapacityLimiter:
"""
Return the capacity limiter that is used by default to limit the number of worker
processes.
:return: a capacity limiter object
"""
try:
return _default_process_limiter.get()
except LookupError:
limiter = CapacityLimiter(os.cpu_count() or 2)
_default_process_limiter.set(limiter)
return limiter
def process_worker() -> None:
# Redirect standard streams to os.devnull so that user code won't interfere with the
# parent-worker communication
stdin = sys.stdin
stdout = sys.stdout
sys.stdin = open(os.devnull)
sys.stdout = open(os.devnull, "w")
stdout.buffer.write(b"READY\n")
while True:
retval = exception = None
try:
command, *args = pickle.load(stdin.buffer)
except EOFError:
return
except BaseException as exc:
exception = exc
else:
if command == "run":
func, args = args
try:
retval = func(*args)
except BaseException as exc:
exception = exc
elif command == "init":
main_module_path: str | None
sys.path, main_module_path = args
del sys.modules["__main__"]
if main_module_path and os.path.isfile(main_module_path):
# Load the parent's main module but as __mp_main__ instead of
# __main__ (like multiprocessing does) to avoid infinite recursion
try:
spec = spec_from_file_location("__mp_main__", main_module_path)
if spec and spec.loader:
main = module_from_spec(spec)
spec.loader.exec_module(main)
sys.modules["__main__"] = main
except BaseException as exc:
exception = exc
try:
if exception is not None:
status = b"EXCEPTION"
pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL)
else:
status = b"RETURN"
pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL)
except BaseException as exc:
exception = exc
status = b"EXCEPTION"
pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL)
stdout.buffer.write(b"%s %d\n" % (status, len(pickled)))
stdout.buffer.write(pickled)
# Respect SIGTERM
if isinstance(exception, SystemExit):
raise exception
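# Summary of the parent <-> worker wire protocol implemented above (descriptive
# comment, derived from the code itself):
# * worker -> parent, once at startup: the literal line b"READY\n"
# * parent -> worker: raw pickle frames, either ("init", sys.path, main_module_path)
#   or ("run", func, args); pickle framing is self-delimiting, so no length prefix
#   is needed in this direction
# * worker -> parent, per request: a b"<STATUS> <length>\n" header line, where STATUS
#   is RETURN or EXCEPTION, followed by exactly <length> bytes of pickled payload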
if __name__ == "__main__":
process_worker()
anyio-4.11.0/src/anyio/to_thread.py
from __future__ import annotations
import sys
from collections.abc import Callable
from typing import TypeVar
from warnings import warn
from ._core._eventloop import get_async_backend
from .abc import CapacityLimiter
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
async def run_sync(
func: Callable[[Unpack[PosArgsT]], T_Retval],
*args: Unpack[PosArgsT],
abandon_on_cancel: bool = False,
cancellable: bool | None = None,
limiter: CapacityLimiter | None = None,
) -> T_Retval:
"""
Call the given function with the given arguments in a worker thread.
    If the ``abandon_on_cancel`` option is enabled and the task waiting for its
    completion is cancelled, the thread will still run its course but its return value
    (or any raised exception) will be ignored.
    :param func: a callable
    :param args: positional arguments for the callable
    :param abandon_on_cancel: ``True`` to abandon the thread (leaving it to run
        unchecked on its own) if the host task is cancelled, ``False`` to ignore
        cancellations in the host task until the operation has completed in the worker
        thread
:param cancellable: deprecated alias of ``abandon_on_cancel``; will override
``abandon_on_cancel`` if both parameters are passed
:param limiter: capacity limiter to use to limit the total amount of threads running
(if omitted, the default limiter is used)
:return: an awaitable that yields the return value of the function.
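
    Illustrative usage sketch (``read_file`` is a made-up name for this example)::

        import anyio
        from anyio import to_thread

        def read_file(path: str) -> bytes:
            with open(path, "rb") as f:
                return f.read()

        async def main() -> None:
            # the blocking read happens in a worker thread, keeping the event loop free
            data = await to_thread.run_sync(read_file, "/etc/hosts")
            print(len(data))

        anyio.run(main)
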
"""
if cancellable is not None:
abandon_on_cancel = cancellable
warn(
"The `cancellable=` keyword argument to `anyio.to_thread.run_sync` is "
"deprecated since AnyIO 4.1.0; use `abandon_on_cancel=` instead",
DeprecationWarning,
stacklevel=2,
)
return await get_async_backend().run_sync_in_worker_thread(
func, args, abandon_on_cancel=abandon_on_cancel, limiter=limiter
)
def current_default_thread_limiter() -> CapacityLimiter:
"""
Return the capacity limiter that is used by default to limit the number of
concurrent threads.
:return: a capacity limiter object
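
    Illustrative example (a sketch; 40 is the documented default token count)::

        from anyio import to_thread

        # allow up to 80 concurrent worker threads instead of the default 40
        to_thread.current_default_thread_limiter().total_tokens = 80
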
"""
return get_async_backend().current_default_thread_limiter()
anyio-4.11.0/tests/
anyio-4.11.0/tests/__init__.py
anyio-4.11.0/tests/conftest.py
from __future__ import annotations
import asyncio
import ssl
import sys
from collections.abc import Generator, Iterator
from ssl import SSLContext
from typing import TYPE_CHECKING, Any
from unittest.mock import Mock
import pytest
import trustme
from _pytest.fixtures import SubRequest
from trustme import CA
if TYPE_CHECKING:
from blockbuster import BlockBuster
uvloop_marks = []
try:
import uvloop
except ImportError:
uvloop_marks.append(pytest.mark.skip(reason="uvloop not available"))
uvloop = Mock()
else:
if hasattr(asyncio.AbstractEventLoop, "shutdown_default_executor") and not hasattr(
uvloop.loop.Loop, "shutdown_default_executor"
):
uvloop_marks.append(
pytest.mark.skip(reason="uvloop is missing shutdown_default_executor()")
)
pytest_plugins = ["pytester"]
asyncio_params = [
pytest.param(("asyncio", {"debug": True}), id="asyncio"),
pytest.param(
("asyncio", {"debug": True, "loop_factory": uvloop.new_event_loop}),
marks=uvloop_marks,
id="asyncio+uvloop",
),
]
if sys.version_info >= (3, 12):
def eager_task_loop_factory() -> asyncio.AbstractEventLoop:
loop = asyncio.new_event_loop()
loop.set_task_factory(asyncio.eager_task_factory)
return loop
asyncio_params.append(
pytest.param(
("asyncio", {"debug": True, "loop_factory": eager_task_loop_factory}),
id="asyncio+eager",
),
)
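# The autouse fixture below uses the third-party "blockbuster" package (if installed),
# which makes a test fail when a known blocking call (file I/O, socket.accept, ...) is
# made in the event loop thread; the can_block_in() calls whitelist spots where AnyIO
# deliberately performs short blocking operations.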
@pytest.fixture(autouse=True)
def blockbuster() -> Iterator[BlockBuster | None]:
try:
from blockbuster import blockbuster_ctx
except ImportError:
yield None
return
with blockbuster_ctx(
"anyio", excluded_modules=["anyio.pytest_plugin", "anyio._backends._asyncio"]
) as bb:
bb.functions["socket.socket.accept"].can_block_in(
"anyio/_core/_asyncio_selector_thread.py", {"get_selector"}
)
for func in ["os.stat", "os.unlink"]:
bb.functions[func].can_block_in(
"anyio/_core/_sockets.py", "setup_unix_local_socket"
)
yield bb
@pytest.fixture
def deactivate_blockbuster(blockbuster: BlockBuster | None) -> None:
if blockbuster is not None:
blockbuster.deactivate()
@pytest.fixture(params=[*asyncio_params, pytest.param("trio")])
def anyio_backend(request: SubRequest) -> tuple[str, dict[str, Any]]:
return request.param
@pytest.fixture(scope="session")
def ca() -> CA:
return trustme.CA()
@pytest.fixture(scope="session")
def server_context(ca: CA) -> SSLContext:
server_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"):
server_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF
ca.issue_cert("localhost").configure_cert(server_context)
return server_context
@pytest.fixture(scope="session")
def client_context(ca: CA) -> SSLContext:
client_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"):
client_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF
ca.configure_trust(client_context)
return client_context
@pytest.fixture
def asyncio_event_loop() -> Generator[asyncio.AbstractEventLoop, None, None]:
if sys.version_info >= (3, 13):
loop = asyncio.EventLoop()
else:
loop = asyncio.new_event_loop()
if sys.version_info < (3, 10):
asyncio.set_event_loop(loop)
yield loop
if sys.version_info < (3, 10):
asyncio.set_event_loop(None)
loop.close()
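# no_other_refs() returns the baseline objects expected to show up as referrers in
# reference-leak tests; the expectation varies by Python version because frames,
# generators and coroutines participate in reference tracking differently across
# versions (descriptive comment; the exact semantics are in the branches below).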
if sys.version_info >= (3, 14):
def no_other_refs() -> list[object]:
return [sys._getframe(1).f_generator]
elif sys.version_info >= (3, 11):
def no_other_refs() -> list[object]:
return []
else:
def no_other_refs() -> list[object]:
return [sys._getframe(1)]
anyio-4.11.0/tests/streams/
anyio-4.11.0/tests/streams/__init__.py
anyio-4.11.0/tests/streams/test_buffered.py
from __future__ import annotations
import pytest
from anyio import (
ClosedResourceError,
EndOfStream,
IncompleteRead,
create_memory_object_stream,
)
from anyio.abc import ObjectStream, ObjectStreamConnectable
from anyio.streams.buffered import (
BufferedByteReceiveStream,
BufferedByteStream,
BufferedConnectable,
)
from anyio.streams.stapled import StapledObjectStream
async def test_receive_exactly() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](2)
buffered_stream = BufferedByteReceiveStream(receive_stream)
await send_stream.send(b"abcd")
await send_stream.send(b"efgh")
result = await buffered_stream.receive_exactly(8)
assert result == b"abcdefgh"
assert isinstance(result, bytes)
send_stream.close()
receive_stream.close()
async def test_receive_exactly_incomplete() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](1)
buffered_stream = BufferedByteReceiveStream(receive_stream)
await send_stream.send(b"abcd")
await send_stream.aclose()
with pytest.raises(IncompleteRead):
await buffered_stream.receive_exactly(8)
send_stream.close()
receive_stream.close()
async def test_receive_until() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](2)
buffered_stream = BufferedByteReceiveStream(receive_stream)
await send_stream.send(b"abcd")
await send_stream.send(b"efgh")
result = await buffered_stream.receive_until(b"de", 10)
assert result == b"abc"
assert isinstance(result, bytes)
result = await buffered_stream.receive_until(b"h", 10)
assert result == b"fg"
assert isinstance(result, bytes)
send_stream.close()
receive_stream.close()
async def test_receive_until_incomplete() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](1)
buffered_stream = BufferedByteReceiveStream(receive_stream)
await send_stream.send(b"abcd")
await send_stream.aclose()
with pytest.raises(IncompleteRead):
assert await buffered_stream.receive_until(b"de", 10)
assert buffered_stream.buffer == b"abcd"
send_stream.close()
receive_stream.close()
async def test_buffered_stream() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](1)
buffered_stream = BufferedByteStream(
StapledObjectStream(send_stream, receive_stream)
)
await send_stream.send(b"abcd")
assert await buffered_stream.receive_exactly(2) == b"ab"
assert await buffered_stream.receive_exactly(2) == b"cd"
# send_eof() should close only the sending end
await buffered_stream.send_eof()
pytest.raises(ClosedResourceError, send_stream.send_nowait, b"abc")
pytest.raises(EndOfStream, receive_stream.receive_nowait)
# aclose() closes the receive stream too
await buffered_stream.aclose()
pytest.raises(ClosedResourceError, receive_stream.receive_nowait)
async def test_buffered_connectable() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](1)
memory_stream = StapledObjectStream(send_stream, receive_stream)
class MemoryObjectConnectable(ObjectStreamConnectable[bytes]):
async def connect(self) -> ObjectStream[bytes]:
return memory_stream
connectable = BufferedConnectable(MemoryObjectConnectable())
async with await connectable.connect() as stream:
assert isinstance(stream, BufferedByteStream)
await stream.send(b"abcd")
assert await stream.receive_exactly(2) == b"ab"
assert await stream.receive_exactly(2) == b"cd"
async def test_feed_data() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](1)
buffered_stream = BufferedByteStream(
StapledObjectStream(send_stream, receive_stream)
)
send_stream.send_nowait(b"abcd")
    # The buffered stream has not read the data sent through the memory stream yet, so
    # the directly fed data (b"xxx", then b"foo") comes out of the buffer first,
    # regardless of the order in which the data was supplied
buffered_stream.feed_data(b"xxx")
buffered_stream.feed_data(b"foo")
assert await buffered_stream.receive_exactly(10) == b"xxxfooabcd"
await buffered_stream.aclose()
anyio-4.11.0/tests/streams/test_file.py
from __future__ import annotations
from pathlib import Path
import pytest
from _pytest.fixtures import SubRequest
from _pytest.tmpdir import TempPathFactory
from anyio import ClosedResourceError, EndOfStream
from anyio.abc import ByteReceiveStream
from anyio.streams.file import FileReadStream, FileStreamAttribute, FileWriteStream
class TestFileReadStream:
@pytest.fixture(scope="class")
def file_path(self, tmp_path_factory: TempPathFactory) -> Path:
path = tmp_path_factory.mktemp("filestream") / "data.txt"
path.write_text("Hello")
return path
@pytest.fixture(params=[False, True], ids=["str", "path"])
def file_path_or_str(self, request: SubRequest, file_path: Path) -> Path | str:
return file_path if request.param else str(file_path)
async def _run_filestream_test(self, stream: ByteReceiveStream) -> None:
assert await stream.receive(3) == b"Hel"
assert await stream.receive(3) == b"lo"
with pytest.raises(EndOfStream):
await stream.receive(1)
async def test_read_file_as_path(self, file_path_or_str: Path | str) -> None:
async with await FileReadStream.from_path(file_path_or_str) as stream:
await self._run_filestream_test(stream)
async def test_read_file(self, file_path: Path) -> None:
with file_path.open("rb") as file:
async with FileReadStream(file) as stream:
await self._run_filestream_test(stream)
async def test_read_after_close(self, file_path: Path) -> None:
async with await FileReadStream.from_path(file_path) as stream:
pass
with pytest.raises(ClosedResourceError):
await stream.receive()
async def test_seek(self, file_path: Path) -> None:
with file_path.open("rb") as file:
async with FileReadStream(file) as stream:
await stream.seek(2)
assert await stream.tell() == 2
data = await stream.receive()
assert data == b"llo"
assert await stream.tell() == 5
async def test_extra_attributes(self, file_path: Path) -> None:
async with await FileReadStream.from_path(file_path) as stream:
path = stream.extra(FileStreamAttribute.path)
assert path == file_path
fileno = stream.extra(FileStreamAttribute.fileno)
assert fileno > 2
file = stream.extra(FileStreamAttribute.file)
assert file.fileno() == fileno
class TestFileWriteStream:
@pytest.fixture
def file_path(self, tmp_path: Path) -> Path:
return tmp_path / "written_data.txt"
async def test_write_file(self, file_path: Path) -> None:
async with await FileWriteStream.from_path(file_path) as stream:
await stream.send(b"Hel")
await stream.send(b"lo")
assert file_path.read_text() == "Hello"
async def test_append_file(self, file_path: Path) -> None:
file_path.write_text("Hello")
async with await FileWriteStream.from_path(file_path, True) as stream:
await stream.send(b", World!")
assert file_path.read_text() == "Hello, World!"
async def test_write_after_close(self, file_path: Path) -> None:
async with await FileWriteStream.from_path(file_path, True) as stream:
pass
with pytest.raises(ClosedResourceError):
await stream.send(b"foo")
async def test_extra_attributes(self, file_path: Path) -> None:
async with await FileWriteStream.from_path(file_path) as stream:
path = stream.extra(FileStreamAttribute.path)
assert path == file_path
fileno = stream.extra(FileStreamAttribute.fileno)
assert fileno > 2
file = stream.extra(FileStreamAttribute.file)
assert file.fileno() == fileno
anyio-4.11.0/tests/streams/test_memory.py
from __future__ import annotations
import gc
import sys
from typing import NoReturn
import pytest
from anyio import (
BrokenResourceError,
CancelScope,
ClosedResourceError,
EndOfStream,
WouldBlock,
create_memory_object_stream,
create_task_group,
fail_after,
wait_all_tasks_blocked,
)
from anyio.abc import ObjectReceiveStream, ObjectSendStream, TaskStatus
from anyio.streams.memory import (
MemoryObjectItemReceiver,
MemoryObjectReceiveStream,
MemoryObjectSendStream,
)
from ..conftest import asyncio_params
if sys.version_info < (3, 11):
from exceptiongroup import ExceptionGroup
def test_invalid_max_buffer() -> None:
pytest.raises(ValueError, create_memory_object_stream, 1.0).match(
"max_buffer_size must be either an integer or math.inf"
)
def test_negative_max_buffer() -> None:
pytest.raises(ValueError, create_memory_object_stream, -1).match(
"max_buffer_size cannot be negative"
)
async def test_receive_then_send() -> None:
async def receiver() -> None:
received_objects.append(await receive.receive())
received_objects.append(await receive.receive())
send, receive = create_memory_object_stream[str](0)
received_objects: list[str] = []
async with create_task_group() as tg:
tg.start_soon(receiver)
await wait_all_tasks_blocked()
await send.send("hello")
await send.send("anyio")
assert received_objects == ["hello", "anyio"]
send.close()
receive.close()
async def test_receive_then_send_nowait() -> None:
async def receiver() -> None:
received_objects.append(await receive.receive())
send, receive = create_memory_object_stream[str](0)
received_objects: list[str] = []
async with create_task_group() as tg:
tg.start_soon(receiver)
tg.start_soon(receiver)
await wait_all_tasks_blocked()
send.send_nowait("hello")
send.send_nowait("anyio")
assert sorted(received_objects, reverse=True) == ["hello", "anyio"]
send.close()
receive.close()
async def test_send_then_receive_nowait() -> None:
send, receive = create_memory_object_stream[str](0)
async with create_task_group() as tg:
tg.start_soon(send.send, "hello")
await wait_all_tasks_blocked()
assert receive.receive_nowait() == "hello"
send.close()
receive.close()
async def test_send_is_unblocked_after_receive_nowait() -> None:
send, receive = create_memory_object_stream[str](1)
send.send_nowait("hello")
with fail_after(1):
async with create_task_group() as tg:
tg.start_soon(send.send, "anyio")
await wait_all_tasks_blocked()
assert receive.receive_nowait() == "hello"
assert receive.receive_nowait() == "anyio"
send.close()
receive.close()
async def test_send_nowait_then_receive_nowait() -> None:
send, receive = create_memory_object_stream[str](2)
send.send_nowait("hello")
send.send_nowait("anyio")
assert receive.receive_nowait() == "hello"
assert receive.receive_nowait() == "anyio"
send.close()
receive.close()
async def test_iterate() -> None:
async def receiver() -> None:
received_objects.extend([item async for item in receive])
send, receive = create_memory_object_stream[str]()
received_objects: list[str] = []
async with create_task_group() as tg:
tg.start_soon(receiver)
await send.send("hello")
await send.send("anyio")
await send.aclose()
assert received_objects == ["hello", "anyio"]
send.close()
receive.close()
async def test_receive_send_closed_send_stream() -> None:
send, receive = create_memory_object_stream[None]()
await send.aclose()
with pytest.raises(EndOfStream):
receive.receive_nowait()
with pytest.raises(ClosedResourceError):
await send.send(None)
receive.close()
async def test_receive_send_closed_receive_stream() -> None:
send, receive = create_memory_object_stream[None]()
await receive.aclose()
with pytest.raises(ClosedResourceError):
receive.receive_nowait()
with pytest.raises(BrokenResourceError):
await send.send(None)
send.close()
async def test_cancel_receive() -> None:
send, receive = create_memory_object_stream[str]()
async with create_task_group() as tg:
tg.start_soon(receive.receive)
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
with pytest.raises(WouldBlock):
send.send_nowait("hello")
send.close()
receive.close()
async def test_cancel_send() -> None:
send, receive = create_memory_object_stream[str]()
async with create_task_group() as tg:
tg.start_soon(send.send, "hello")
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
with pytest.raises(WouldBlock):
receive.receive_nowait()
send.close()
receive.close()
async def test_clone() -> None:
send1, receive1 = create_memory_object_stream[str](1)
send2 = send1.clone()
receive2 = receive1.clone()
await send1.aclose()
await receive1.aclose()
send2.send_nowait("hello")
assert receive2.receive_nowait() == "hello"
send1.close()
receive1.close()
send2.close()
receive2.close()
async def test_clone_closed() -> None:
send, receive = create_memory_object_stream[NoReturn](1)
await send.aclose()
await receive.aclose()
pytest.raises(ClosedResourceError, send.clone)
pytest.raises(ClosedResourceError, receive.clone)
async def test_close_send_while_receiving() -> None:
send, receive = create_memory_object_stream[NoReturn](1)
with pytest.raises(ExceptionGroup) as exc:
async with create_task_group() as tg:
tg.start_soon(receive.receive)
await wait_all_tasks_blocked()
await send.aclose()
assert len(exc.value.exceptions) == 1
assert isinstance(exc.value.exceptions[0], EndOfStream)
send.close()
receive.close()
async def test_close_receive_while_sending() -> None:
# We send None here as a regression test for #731
send, receive = create_memory_object_stream[None](0)
with pytest.raises(ExceptionGroup) as exc:
async with create_task_group() as tg:
tg.start_soon(send.send, None)
await wait_all_tasks_blocked()
await receive.aclose()
assert len(exc.value.exceptions) == 1
assert isinstance(exc.value.exceptions[0], BrokenResourceError)
send.close()
receive.close()
async def test_receive_after_send_closed() -> None:
send, receive = create_memory_object_stream[str](1)
await send.send("hello")
await send.aclose()
assert await receive.receive() == "hello"
send.close()
receive.close()
async def test_receive_when_cancelled() -> None:
"""
Test that calling receive() in a cancelled scope prevents it from going through with
the operation.
"""
send, receive = create_memory_object_stream[str]()
async with create_task_group() as tg:
tg.start_soon(send.send, "hello")
await wait_all_tasks_blocked()
tg.start_soon(send.send, "world")
await wait_all_tasks_blocked()
with CancelScope() as scope:
scope.cancel()
await receive.receive()
assert await receive.receive() == "hello"
assert await receive.receive() == "world"
send.close()
receive.close()
async def test_send_when_cancelled() -> None:
"""
Test that calling send() in a cancelled scope prevents it from going through with
the operation.
"""
async def receiver() -> None:
received.append(await receive.receive())
received: list[str] = []
send, receive = create_memory_object_stream[str]()
async with create_task_group() as tg:
tg.start_soon(receiver)
with CancelScope() as scope:
scope.cancel()
await send.send("hello")
await send.send("world")
assert received == ["world"]
send.close()
receive.close()
async def test_cancel_during_receive() -> None:
"""
Test that cancelling a pending receive() operation does not cause an item in the
stream to be lost.
"""
async def scoped_receiver(task_status: TaskStatus[CancelScope]) -> None:
with CancelScope() as cancel_scope:
task_status.started(cancel_scope)
received.append(await receive.receive())
assert cancel_scope.cancel_called
received: list[str] = []
send, receive = create_memory_object_stream[str]()
with send, receive:
async with create_task_group() as tg:
receiver_scope = await tg.start(scoped_receiver)
await wait_all_tasks_blocked()
send.send_nowait("hello")
receiver_scope.cancel()
assert received == ["hello"]
async def test_cancel_during_receive_buffered() -> None:
"""
Test that sending an item to a memory object stream when the receiver that is next
in line has been cancelled will not result in the item being lost.
"""
async def scoped_receiver(
receive: MemoryObjectReceiveStream[str], task_status: TaskStatus[CancelScope]
) -> None:
with CancelScope() as cancel_scope:
task_status.started(cancel_scope)
await receive.receive()
send, receive = create_memory_object_stream[str](1)
with send, receive:
async with create_task_group() as tg:
cancel_scope = await tg.start(scoped_receiver, receive)
await wait_all_tasks_blocked()
cancel_scope.cancel()
send.send_nowait("item")
# Since the item was not sent to the cancelled task, it should be available here
assert receive.receive_nowait() == "item"
async def test_close_receive_after_send() -> None:
async def send() -> None:
async with send_stream:
await send_stream.send("test")
async def receive() -> None:
async with receive_stream:
assert await receive_stream.receive() == "test"
send_stream, receive_stream = create_memory_object_stream[str]()
async with create_task_group() as tg:
tg.start_soon(send)
tg.start_soon(receive)
send_stream.close()
receive_stream.close()
async def test_statistics() -> None:
send_stream, receive_stream = create_memory_object_stream[None](1)
streams: list[MemoryObjectReceiveStream[None] | MemoryObjectSendStream[None]] = [
send_stream,
receive_stream,
]
for stream in streams:
statistics = stream.statistics()
assert statistics.max_buffer_size == 1
assert statistics.current_buffer_used == 0
assert statistics.open_send_streams == 1
assert statistics.open_receive_streams == 1
assert statistics.tasks_waiting_send == 0
assert statistics.tasks_waiting_receive == 0
for stream in streams:
async with create_task_group() as tg:
# Test tasks_waiting_send
send_stream.send_nowait(None)
assert stream.statistics().current_buffer_used == 1
tg.start_soon(send_stream.send, None)
await wait_all_tasks_blocked()
assert stream.statistics().current_buffer_used == 1
assert stream.statistics().tasks_waiting_send == 1
receive_stream.receive_nowait()
assert stream.statistics().current_buffer_used == 1
assert stream.statistics().tasks_waiting_send == 0
receive_stream.receive_nowait()
assert stream.statistics().current_buffer_used == 0
# Test tasks_waiting_receive
tg.start_soon(receive_stream.receive)
await wait_all_tasks_blocked()
assert stream.statistics().tasks_waiting_receive == 1
send_stream.send_nowait(None)
assert stream.statistics().tasks_waiting_receive == 0
async with create_task_group() as tg:
# Test tasks_waiting_send
send_stream.send_nowait(None)
assert stream.statistics().tasks_waiting_send == 0
for _ in range(3):
tg.start_soon(send_stream.send, None)
await wait_all_tasks_blocked()
assert stream.statistics().tasks_waiting_send == 3
for i in range(2, -1, -1):
receive_stream.receive_nowait()
assert stream.statistics().tasks_waiting_send == i
receive_stream.receive_nowait()
assert stream.statistics().current_buffer_used == 0
assert stream.statistics().tasks_waiting_send == 0
assert stream.statistics().tasks_waiting_receive == 0
send_stream.close()
receive_stream.close()
async def test_sync_close() -> None:
send_stream, receive_stream = create_memory_object_stream[None](1)
with send_stream, receive_stream:
pass
with pytest.raises(ClosedResourceError):
send_stream.send_nowait(None)
with pytest.raises(ClosedResourceError):
receive_stream.receive_nowait()
async def test_type_variance() -> None:
"""
This test does not do anything at run time, but since the test suite is also checked
with a static type checker, it ensures that the memory object stream
co/contravariance works as intended. If it doesn't, one or both of the following
reassignments will trip the type checker.
"""
send, receive = create_memory_object_stream[float]()
receive1: MemoryObjectReceiveStream[complex] = receive # noqa: F841
receive2: ObjectReceiveStream[complex] = receive # noqa: F841
send1: MemoryObjectSendStream[int] = send # noqa: F841
send2: ObjectSendStream[int] = send # noqa: F841
send.close()
receive.close()
async def test_deprecated_item_type_parameter() -> None:
with pytest.warns(DeprecationWarning, match="item_type argument has been "):
send, receive = create_memory_object_stream(item_type=int)
send.close()
receive.close()
async def test_not_closed_warning() -> None:
send, receive = create_memory_object_stream[int]()
with pytest.warns(
        ResourceWarning, match="Unclosed <MemoryObjectSendStream"
):
del send
gc.collect()
with pytest.warns(
        ResourceWarning, match="Unclosed <MemoryObjectReceiveStream"
):
del receive
gc.collect()
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_send_to_natively_cancelled_receiver() -> None:
"""
Test that if a task waiting on receive.receive() is cancelled and then another
task sends an item, said item is not delivered to the task with a pending
cancellation, but rather to the next one in line.
"""
from asyncio import CancelledError, create_task
send, receive = create_memory_object_stream[str](1)
with send, receive:
receive_task = create_task(receive.receive())
await wait_all_tasks_blocked() # ensure that the task is waiting to receive
receive_task.cancel()
send.send_nowait("hello")
with pytest.raises(CancelledError):
await receive_task
assert receive.receive_nowait() == "hello"
async def test_memory_object_item_receiver_repr() -> None:
"""
Test the repr of `MemoryObjectItemReceiver`.
    When ``item`` is not set, the default dataclass repr would raise an AttributeError.
"""
receiver = MemoryObjectItemReceiver[str]()
assert str(receiver) is not None
receiver_repr = repr(receiver)
assert "item=None" in receiver_repr
assert str(receiver) is not None
receiver.item = "test_item"
receiver_repr = repr(receiver)
assert "item='test_item'" in receiver_repr
anyio-4.11.0/tests/streams/test_stapled.py
from __future__ import annotations
from collections import deque
from collections.abc import Iterable
from dataclasses import InitVar, dataclass, field
from typing import TypeVar
import pytest
from anyio import ClosedResourceError, EndOfStream
from anyio.abc import (
ByteReceiveStream,
ByteSendStream,
ObjectReceiveStream,
ObjectSendStream,
)
from anyio.streams.stapled import StapledByteStream, StapledObjectStream
@dataclass
class DummyByteReceiveStream(ByteReceiveStream):
data: InitVar[bytes]
buffer: bytearray = field(init=False)
_closed: bool = field(init=False, default=False)
def __post_init__(self, data: bytes) -> None:
self.buffer = bytearray(data)
async def receive(self, max_bytes: int = 65536) -> bytes:
if self._closed:
raise ClosedResourceError
data = bytes(self.buffer[:max_bytes])
del self.buffer[:max_bytes]
return data
async def aclose(self) -> None:
self._closed = True
@dataclass
class DummyByteSendStream(ByteSendStream):
buffer: bytearray = field(init=False, default_factory=bytearray)
_closed: bool = field(init=False, default=False)
async def send(self, item: bytes) -> None:
if self._closed:
raise ClosedResourceError
self.buffer.extend(item)
async def aclose(self) -> None:
self._closed = True
class TestStapledByteStream:
@pytest.fixture
def send_stream(self) -> DummyByteSendStream:
return DummyByteSendStream()
@pytest.fixture
def receive_stream(self) -> DummyByteReceiveStream:
return DummyByteReceiveStream(b"hello, world")
@pytest.fixture
def stapled(
self, send_stream: DummyByteSendStream, receive_stream: DummyByteReceiveStream
) -> StapledByteStream:
return StapledByteStream(send_stream, receive_stream)
async def test_receive_send(
self, stapled: StapledByteStream, send_stream: DummyByteSendStream
) -> None:
assert await stapled.receive(3) == b"hel"
assert await stapled.receive() == b"lo, world"
assert await stapled.receive() == b""
await stapled.send(b"how are you ")
await stapled.send(b"today?")
assert stapled.send_stream is send_stream
assert bytes(send_stream.buffer) == b"how are you today?"
async def test_send_eof(self, stapled: StapledByteStream) -> None:
await stapled.send_eof()
await stapled.send_eof()
with pytest.raises(ClosedResourceError):
await stapled.send(b"world")
assert await stapled.receive() == b"hello, world"
async def test_aclose(self, stapled: StapledByteStream) -> None:
await stapled.aclose()
with pytest.raises(ClosedResourceError):
await stapled.receive()
with pytest.raises(ClosedResourceError):
await stapled.send(b"")
T_Item = TypeVar("T_Item")
@dataclass
class DummyObjectReceiveStream(ObjectReceiveStream[T_Item]):
data: InitVar[Iterable[T_Item]]
buffer: deque[T_Item] = field(init=False)
_closed: bool = field(init=False, default=False)
def __post_init__(self, data: Iterable[T_Item]) -> None:
self.buffer = deque(data)
async def receive(self) -> T_Item:
if self._closed:
raise ClosedResourceError
if not self.buffer:
raise EndOfStream
return self.buffer.popleft()
async def aclose(self) -> None:
self._closed = True
@dataclass
class DummyObjectSendStream(ObjectSendStream[T_Item]):
buffer: list[T_Item] = field(init=False, default_factory=list)
_closed: bool = field(init=False, default=False)
async def send(self, item: T_Item) -> None:
if self._closed:
raise ClosedResourceError
self.buffer.append(item)
async def aclose(self) -> None:
self._closed = True
class TestStapledObjectStream:
@pytest.fixture
def receive_stream(self) -> DummyObjectReceiveStream[str]:
return DummyObjectReceiveStream(["hello", "world"])
@pytest.fixture
def send_stream(self) -> DummyObjectSendStream[str]:
return DummyObjectSendStream[str]()
@pytest.fixture
def stapled(
self,
receive_stream: DummyObjectReceiveStream[str],
send_stream: DummyObjectSendStream[str],
) -> StapledObjectStream[str]:
return StapledObjectStream(send_stream, receive_stream)
async def test_receive_send(
self, stapled: StapledObjectStream[str], send_stream: DummyObjectSendStream[str]
) -> None:
assert await stapled.receive() == "hello"
assert await stapled.receive() == "world"
with pytest.raises(EndOfStream):
await stapled.receive()
await stapled.send("how are you ")
await stapled.send("today?")
assert stapled.send_stream is send_stream
assert send_stream.buffer == ["how are you ", "today?"]
async def test_send_eof(self, stapled: StapledObjectStream[str]) -> None:
await stapled.send_eof()
await stapled.send_eof()
with pytest.raises(ClosedResourceError):
await stapled.send("world")
assert await stapled.receive() == "hello"
assert await stapled.receive() == "world"
async def test_aclose(self, stapled: StapledObjectStream[str]) -> None:
await stapled.aclose()
with pytest.raises(ClosedResourceError):
await stapled.receive()
with pytest.raises(ClosedResourceError):
await stapled.send(b"") # type: ignore[arg-type]
anyio-4.11.0/tests/streams/test_text.py
from __future__ import annotations
import platform
import sys
import pytest
from anyio import create_memory_object_stream
from anyio.abc import ObjectStream, ObjectStreamConnectable
from anyio.streams.stapled import StapledObjectStream
from anyio.streams.text import (
TextConnectable,
TextReceiveStream,
TextSendStream,
TextStream,
)
async def test_receive() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](1)
text_stream = TextReceiveStream(receive_stream)
await send_stream.send(b"\xc3\xa5\xc3\xa4\xc3") # ends with half of the "ö" letter
assert await text_stream.receive() == "åä"
# Send the missing byte for "ö"
await send_stream.send(b"\xb6")
assert await text_stream.receive() == "ö"
send_stream.close()
receive_stream.close()
async def test_send() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](1)
text_stream = TextSendStream(send_stream)
await text_stream.send("åäö")
assert await receive_stream.receive() == b"\xc3\xa5\xc3\xa4\xc3\xb6"
send_stream.close()
receive_stream.close()
@pytest.mark.xfail(
platform.python_implementation() == "PyPy" and sys.pypy_version_info < (7, 3, 2), # type: ignore[attr-defined]
reason="PyPy has a bug in its incremental UTF-8 decoder (#3274)",
)
async def test_receive_encoding_error() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](1)
text_stream = TextReceiveStream(receive_stream, errors="replace")
await send_stream.send(b"\xe5\xe4\xf6") # "åäö" in latin-1
assert await text_stream.receive() == "���"
send_stream.close()
receive_stream.close()
async def test_send_encoding_error() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](1)
text_stream = TextSendStream(send_stream, encoding="iso-8859-1", errors="replace")
await text_stream.send("€")
assert await receive_stream.receive() == b"?"
send_stream.close()
receive_stream.close()
async def test_bidirectional_stream() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](1)
stapled_stream = StapledObjectStream(send_stream, receive_stream)
text_stream = TextStream(stapled_stream)
await text_stream.send("åäö")
assert await receive_stream.receive() == b"\xc3\xa5\xc3\xa4\xc3\xb6"
await send_stream.send(b"\xc3\xa6\xc3\xb8")
assert await text_stream.receive() == "æø"
assert text_stream.extra_attributes == {}
send_stream.close()
receive_stream.close()
async def test_text_connectable() -> None:
send_stream, receive_stream = create_memory_object_stream[bytes](1)
memory_stream = StapledObjectStream(send_stream, receive_stream)
class MemoryConnectable(ObjectStreamConnectable[bytes]):
async def connect(self) -> ObjectStream[bytes]:
return memory_stream
connectable = TextConnectable(MemoryConnectable())
async with await connectable.connect() as stream:
assert isinstance(stream, TextStream)
await stream.send("hello")
assert await stream.receive() == "hello"
anyio-4.11.0/tests/streams/test_tls.py
from __future__ import annotations
import socket
import ssl
from contextlib import AbstractContextManager, ExitStack
from threading import Thread
from typing import NoReturn
from unittest import mock
import pytest
from trustme import CA
from anyio import (
BrokenResourceError,
EndOfStream,
Event,
connect_tcp,
create_memory_object_stream,
create_task_group,
create_tcp_listener,
to_thread,
)
from anyio.abc import (
AnyByteStream,
ObjectStream,
ObjectStreamConnectable,
SocketAttribute,
SocketStream,
)
from anyio.streams.stapled import StapledObjectStream
from anyio.streams.tls import TLSAttribute, TLSConnectable, TLSListener, TLSStream
class TestTLSStream:
async def test_send_receive(
self, server_context: ssl.SSLContext, client_context: ssl.SSLContext
) -> None:
def serve_sync() -> None:
conn, addr = server_sock.accept()
conn.settimeout(1)
data = conn.recv(10)
conn.send(data[::-1])
conn.close()
server_sock = server_context.wrap_socket(
socket.socket(), server_side=True, suppress_ragged_eofs=False
)
server_sock.settimeout(1)
server_sock.bind(("127.0.0.1", 0))
server_sock.listen()
server_thread = Thread(target=serve_sync)
server_thread.start()
async with await connect_tcp(*server_sock.getsockname()) as stream:
wrapper = await TLSStream.wrap(
stream, hostname="localhost", ssl_context=client_context
)
await wrapper.send(b"hello")
response = await wrapper.receive()
server_thread.join()
server_sock.close()
assert response == b"olleh"
async def test_extra_attributes(
self, server_context: ssl.SSLContext, client_context: ssl.SSLContext
) -> None:
def serve_sync() -> None:
conn, addr = server_sock.accept()
with conn:
conn.settimeout(1)
conn.recv(1)
server_context.set_alpn_protocols(["h2"])
client_context.set_alpn_protocols(["h2"])
server_sock = server_context.wrap_socket(
socket.socket(), server_side=True, suppress_ragged_eofs=True
)
server_sock.settimeout(1)
server_sock.bind(("127.0.0.1", 0))
server_sock.listen()
server_thread = Thread(target=serve_sync)
server_thread.start()
async with await connect_tcp(*server_sock.getsockname()) as stream:
wrapper = await TLSStream.wrap(
stream,
hostname="localhost",
ssl_context=client_context,
standard_compatible=False,
)
async with wrapper:
for name, attribute in SocketAttribute.__dict__.items():
if not name.startswith("_"):
assert wrapper.extra(attribute) == stream.extra(attribute)
assert wrapper.extra(TLSAttribute.alpn_protocol) == "h2"
assert isinstance(
wrapper.extra(TLSAttribute.channel_binding_tls_unique), bytes
)
assert isinstance(wrapper.extra(TLSAttribute.cipher), tuple)
assert isinstance(wrapper.extra(TLSAttribute.peer_certificate), dict)
assert isinstance(
wrapper.extra(TLSAttribute.peer_certificate_binary), bytes
)
assert wrapper.extra(TLSAttribute.server_side) is False
assert wrapper.extra(TLSAttribute.shared_ciphers) is None
assert isinstance(wrapper.extra(TLSAttribute.ssl_object), ssl.SSLObject)
assert wrapper.extra(TLSAttribute.standard_compatible) is False
assert wrapper.extra(TLSAttribute.tls_version).startswith("TLSv")
await wrapper.send(b"\x00")
server_thread.join()
server_sock.close()
async def test_unwrap(
self, server_context: ssl.SSLContext, client_context: ssl.SSLContext
) -> None:
def serve_sync() -> None:
conn, addr = server_sock.accept()
conn.settimeout(1)
conn.send(b"encrypted")
unencrypted = conn.unwrap()
unencrypted.send(b"unencrypted")
unencrypted.close()
server_sock = server_context.wrap_socket(
socket.socket(), server_side=True, suppress_ragged_eofs=False
)
server_sock.settimeout(1)
server_sock.bind(("127.0.0.1", 0))
server_sock.listen()
server_thread = Thread(target=serve_sync)
server_thread.start()
async with await connect_tcp(*server_sock.getsockname()) as stream:
wrapper = await TLSStream.wrap(
stream, hostname="localhost", ssl_context=client_context
)
msg1 = await wrapper.receive()
unwrapped_stream, msg2 = await wrapper.unwrap()
if msg2 != b"unencrypted":
msg2 += await unwrapped_stream.receive()
server_thread.join()
server_sock.close()
assert msg1 == b"encrypted"
assert msg2 == b"unencrypted"
@pytest.mark.skipif(not ssl.HAS_ALPN, reason="ALPN support not available")
async def test_alpn_negotiation(
self, server_context: ssl.SSLContext, client_context: ssl.SSLContext
) -> None:
def serve_sync() -> None:
conn, addr = server_sock.accept()
conn.settimeout(1)
selected_alpn_protocol = conn.selected_alpn_protocol()
assert selected_alpn_protocol is not None
conn.send(selected_alpn_protocol.encode())
conn.close()
server_context.set_alpn_protocols(["dummy1", "dummy2"])
client_context.set_alpn_protocols(["dummy2", "dummy3"])
server_sock = server_context.wrap_socket(
socket.socket(), server_side=True, suppress_ragged_eofs=False
)
server_sock.settimeout(1)
server_sock.bind(("127.0.0.1", 0))
server_sock.listen()
server_thread = Thread(target=serve_sync)
server_thread.start()
async with await connect_tcp(*server_sock.getsockname()) as stream:
wrapper = await TLSStream.wrap(
stream, hostname="localhost", ssl_context=client_context
)
assert wrapper.extra(TLSAttribute.alpn_protocol) == "dummy2"
server_alpn_protocol = await wrapper.receive()
server_thread.join()
server_sock.close()
assert server_alpn_protocol == b"dummy2"
@pytest.mark.parametrize(
"server_compatible, client_compatible",
[
pytest.param(True, True, id="both_standard"),
pytest.param(True, False, id="server_standard"),
pytest.param(False, True, id="client_standard"),
pytest.param(False, False, id="neither_standard"),
],
)
async def test_ragged_eofs(
self,
server_context: ssl.SSLContext,
client_context: ssl.SSLContext,
server_compatible: bool,
client_compatible: bool,
) -> None:
server_exc = None
def serve_sync() -> None:
nonlocal server_exc
conn, addr = server_sock.accept()
try:
conn.settimeout(1)
conn.sendall(b"hello")
if server_compatible:
conn.unwrap()
except BaseException as exc:
server_exc = exc
finally:
conn.close()
client_cm: AbstractContextManager = ExitStack()
if client_compatible and not server_compatible:
client_cm = pytest.raises(BrokenResourceError)
server_sock = server_context.wrap_socket(
socket.socket(),
server_side=True,
suppress_ragged_eofs=not server_compatible,
)
server_sock.settimeout(1)
server_sock.bind(("127.0.0.1", 0))
server_sock.listen()
server_thread = Thread(target=serve_sync, daemon=True)
server_thread.start()
async with await connect_tcp(*server_sock.getsockname()) as stream:
wrapper = await TLSStream.wrap(
stream,
hostname="localhost",
ssl_context=client_context,
standard_compatible=client_compatible,
)
with client_cm:
assert await wrapper.receive() == b"hello"
await wrapper.aclose()
server_thread.join()
server_sock.close()
if not client_compatible and server_compatible:
assert isinstance(server_exc, OSError)
assert not isinstance(server_exc, socket.timeout)
else:
assert server_exc is None
async def test_ragged_eof_on_receive(
self, server_context: ssl.SSLContext, client_context: ssl.SSLContext
) -> None:
server_exc = None
def serve_sync() -> None:
nonlocal server_exc
conn, addr = server_sock.accept()
try:
conn.settimeout(1)
conn.sendall(b"hello")
except BaseException as exc:
server_exc = exc
finally:
conn.close()
server_sock = server_context.wrap_socket(
socket.socket(), server_side=True, suppress_ragged_eofs=True
)
server_sock.settimeout(1)
server_sock.bind(("127.0.0.1", 0))
server_sock.listen()
server_thread = Thread(target=serve_sync, daemon=True)
server_thread.start()
try:
async with await connect_tcp(*server_sock.getsockname()) as stream:
wrapper = await TLSStream.wrap(
stream,
hostname="localhost",
ssl_context=client_context,
standard_compatible=False,
)
assert await wrapper.receive() == b"hello"
with pytest.raises(EndOfStream):
await wrapper.receive()
finally:
server_thread.join()
server_sock.close()
assert server_exc is None
async def test_receive_send_after_eof(
self, server_context: ssl.SSLContext, client_context: ssl.SSLContext
) -> None:
def serve_sync() -> None:
conn, addr = server_sock.accept()
conn.sendall(b"hello")
conn.unwrap()
conn.close()
server_sock = server_context.wrap_socket(
socket.socket(), server_side=True, suppress_ragged_eofs=False
)
server_sock.settimeout(1)
server_sock.bind(("127.0.0.1", 0))
server_sock.listen()
server_thread = Thread(target=serve_sync, daemon=True)
server_thread.start()
stream = await connect_tcp(*server_sock.getsockname())
async with await TLSStream.wrap(
stream, hostname="localhost", ssl_context=client_context
) as wrapper:
assert await wrapper.receive() == b"hello"
with pytest.raises(EndOfStream):
await wrapper.receive()
server_thread.join()
server_sock.close()
@pytest.mark.parametrize(
"force_tlsv12",
[
pytest.param(
False,
marks=[
pytest.mark.skipif(
not getattr(ssl, "HAS_TLSv1_3", False),
reason="No TLS 1.3 support",
)
],
),
pytest.param(True),
],
ids=["tlsv13", "tlsv12"],
)
async def test_send_eof_not_implemented(
self, server_context: ssl.SSLContext, ca: CA, force_tlsv12: bool
) -> None:
def serve_sync() -> None:
conn, addr = server_sock.accept()
conn.sendall(b"hello")
conn.unwrap()
conn.close()
client_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
ca.configure_trust(client_context)
if force_tlsv12:
expected_pattern = r"send_eof\(\) requires at least TLSv1.3"
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
else:
expected_pattern = (
r"send_eof\(\) has not yet been implemented for TLS streams"
)
server_sock = server_context.wrap_socket(
socket.socket(), server_side=True, suppress_ragged_eofs=False
)
server_sock.settimeout(1)
server_sock.bind(("127.0.0.1", 0))
server_sock.listen()
server_thread = Thread(target=serve_sync, daemon=True)
server_thread.start()
stream = await connect_tcp(*server_sock.getsockname())
async with await TLSStream.wrap(
stream, hostname="localhost", ssl_context=client_context
) as wrapper:
assert await wrapper.receive() == b"hello"
with pytest.raises(NotImplementedError) as exc:
await wrapper.send_eof()
exc.match(expected_pattern)
server_thread.join()
server_sock.close()
@pytest.mark.skipif(
not hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"),
reason="The ssl module does not have the OP_IGNORE_UNEXPECTED_EOF attribute",
)
async def test_default_context_ignore_unexpected_eof_flag_off(self) -> None:
send1, receive1 = create_memory_object_stream[bytes]()
client_stream = StapledObjectStream(send1, receive1)
with mock.patch.object(TLSStream, "_call_sslobject_method"):
tls_stream = await TLSStream.wrap(client_stream)
ssl_context = tls_stream.extra(TLSAttribute.ssl_object).context
assert not ssl_context.options & ssl.OP_IGNORE_UNEXPECTED_EOF
send1.close()
receive1.close()
async def test_truststore_ssl(
self, request: pytest.FixtureRequest, server_context: ssl.SSLContext
) -> None:
# This test is only expected to fail on Windows without the associated patch
def serve_sync() -> None:
with server_sock, pytest.raises(ssl.SSLEOFError):
server_sock.accept()
# We deliberately skip making the client context trust the server context
truststore = pytest.importorskip("truststore")
client_context = truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server_sock = server_context.wrap_socket(
socket.socket(), server_side=True, suppress_ragged_eofs=True
)
server_sock.settimeout(1)
server_sock.bind(("127.0.0.1", 0))
server_sock.listen()
server_thread = Thread(target=serve_sync, daemon=True)
server_thread.start()
request.addfinalizer(server_thread.join)
async with await connect_tcp(*server_sock.getsockname()) as stream:
with pytest.raises(ssl.SSLCertVerificationError):
await TLSStream.wrap(
stream, hostname="localhost", ssl_context=client_context
)
class TestTLSListener:
async def test_handshake_fail(
self, server_context: ssl.SSLContext, caplog: pytest.LogCaptureFixture
) -> None:
def handler(stream: object) -> NoReturn:
pytest.fail("This function should never be called in this scenario")
exception = None
class CustomTLSListener(TLSListener):
@staticmethod
async def handle_handshake_error(
exc: BaseException, stream: AnyByteStream
) -> None:
nonlocal exception
await TLSListener.handle_handshake_error(exc, stream)
# Regression test for #608
assert len(caplog.records) == 1
logged_exc_info = caplog.records[0].exc_info
logged_exc = logged_exc_info[1] if logged_exc_info is not None else None
assert logged_exc is exc
assert isinstance(stream, SocketStream)
exception = exc
event.set()
event = Event()
listener = await create_tcp_listener(local_host="127.0.0.1")
tls_listener = CustomTLSListener(listener, server_context)
async with tls_listener, create_task_group() as tg:
tg.start_soon(tls_listener.serve, handler)
sock = socket.socket()
sock.connect(listener.extra(SocketAttribute.local_address))
sock.close()
await event.wait()
tg.cancel_scope.cancel()
assert isinstance(exception, BrokenResourceError)
async def test_extra_attributes(
self, client_context: ssl.SSLContext, server_context: ssl.SSLContext, ca: CA
) -> None:
def connect_sync(addr: tuple[str, int]) -> None:
with socket.create_connection(addr) as plain_sock:
plain_sock.settimeout(2)
with client_context.wrap_socket(
plain_sock,
server_side=False,
server_hostname="localhost",
suppress_ragged_eofs=False,
) as conn:
conn.recv(1)
conn.unwrap()
class CustomTLSListener(TLSListener):
@staticmethod
async def handle_handshake_error(
exc: BaseException, stream: AnyByteStream
) -> None:
await TLSListener.handle_handshake_error(exc, stream)
pytest.fail("TLS handshake failed")
async def handler(stream: TLSStream) -> None:
async with stream:
try:
assert stream.extra(TLSAttribute.alpn_protocol) == "h2"
assert isinstance(
stream.extra(TLSAttribute.channel_binding_tls_unique), bytes
)
assert isinstance(stream.extra(TLSAttribute.cipher), tuple)
assert isinstance(stream.extra(TLSAttribute.peer_certificate), dict)
assert isinstance(
stream.extra(TLSAttribute.peer_certificate_binary), bytes
)
assert stream.extra(TLSAttribute.server_side) is True
shared_ciphers = stream.extra(TLSAttribute.shared_ciphers)
assert isinstance(shared_ciphers, list)
assert len(shared_ciphers) > 1
assert isinstance(
stream.extra(TLSAttribute.ssl_object), ssl.SSLObject
)
assert stream.extra(TLSAttribute.standard_compatible) is True
assert stream.extra(TLSAttribute.tls_version).startswith("TLSv")
finally:
event.set()
await stream.send(b"\x00")
# Issue a client certificate and make the server trust it
client_cert = ca.issue_cert("dummy-client")
client_cert.configure_cert(client_context)
ca.configure_trust(server_context)
server_context.verify_mode = ssl.CERT_REQUIRED
event = Event()
server_context.set_alpn_protocols(["h2"])
client_context.set_alpn_protocols(["h2"])
listener = await create_tcp_listener(local_host="127.0.0.1")
tls_listener = CustomTLSListener(listener, server_context)
async with tls_listener, create_task_group() as tg:
assert tls_listener.extra(TLSAttribute.standard_compatible) is True
tg.start_soon(tls_listener.serve, handler)
client_thread = Thread(
target=connect_sync,
args=[listener.extra(SocketAttribute.local_address)],
)
client_thread.start()
await event.wait()
await to_thread.run_sync(client_thread.join)
tg.cancel_scope.cancel()
async def test_tls_connectable(
client_context: ssl.SSLContext, server_context: ssl.SSLContext
) -> None:
server_send, server_receive = create_memory_object_stream[bytes](1)
client_send, client_receive = create_memory_object_stream[bytes](1)
client_stream = StapledObjectStream(client_send, server_receive)
server_stream = StapledObjectStream(server_send, client_receive)
async def server() -> None:
async with await TLSStream.wrap(
server_stream,
server_side=True,
hostname="localhost",
ssl_context=server_context,
) as server_tls_stream:
message = await server_tls_stream.receive()
await server_tls_stream.send(b"hello, " + message)
class MemoryConnectable(ObjectStreamConnectable[bytes]):
async def connect(self) -> ObjectStream[bytes]:
return client_stream
async with create_task_group() as tg:
tg.start_soon(server)
connectable = TLSConnectable(
MemoryConnectable(), ssl_context=client_context, hostname="localhost"
)
async with await connectable.connect() as client_tls_stream:
await client_tls_stream.send(b"world")
assert await client_tls_stream.receive() == b"hello, world"
anyio-4.11.0/tests/test_contextmanagers.py
from __future__ import annotations
import sys
from collections.abc import AsyncGenerator, Generator
from contextlib import (
AbstractContextManager,
AsyncExitStack,
ExitStack,
asynccontextmanager,
contextmanager,
)
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
import pytest
from anyio import AsyncContextManagerMixin, ContextManagerMixin
class DummyContextManager(ContextManagerMixin):
def __init__(self, handle_exc: bool = False) -> None:
self.started = False
self.finished = False
self.handle_exc = handle_exc
@contextmanager
def __contextmanager__(self) -> Generator[Self]:
self.started = True
try:
yield self
except RuntimeError:
if not self.handle_exc:
raise
self.finished = True
class DummyAsyncContextManager(AsyncContextManagerMixin):
def __init__(self, handle_exc: bool = False) -> None:
self.started = False
self.finished = False
self.handle_exc = handle_exc
@asynccontextmanager
async def __asynccontextmanager__(self) -> AsyncGenerator[Self]:
self.started = True
try:
yield self
except RuntimeError:
if not self.handle_exc:
raise
self.finished = True
class TestContextManagerMixin:
def test_contextmanager(self) -> None:
with DummyContextManager() as cm:
assert cm.started
assert not cm.finished
assert cm.finished
@pytest.mark.parametrize("handle_exc", [True, False])
def test_exception(self, handle_exc: bool) -> None:
with ExitStack() as stack:
if not handle_exc:
stack.enter_context(pytest.raises(RuntimeError, match="^foo$"))
cm = stack.enter_context(DummyContextManager(handle_exc))
raise RuntimeError("foo")
assert cm.started
assert cm.finished == handle_exc
def test_return_bad_type(self) -> None:
class BadContextManager(ContextManagerMixin):
def __contextmanager__(self) -> AbstractContextManager[None]:
return None # type: ignore[return-value]
with pytest.raises(TypeError, match="did not return a context manager"):
with BadContextManager():
pass
def test_return_generator(self) -> None:
class BadContextManager(ContextManagerMixin):
def __contextmanager__(self): # type: ignore[no-untyped-def]
yield self
with pytest.raises(TypeError, match="returned a generator"):
with BadContextManager():
pass
def test_return_self(self) -> None:
class BadContextManager(ContextManagerMixin):
def __contextmanager__(self): # type: ignore[no-untyped-def]
return self
with pytest.raises(TypeError, match="returned self"):
with BadContextManager():
pass
def test_enter_twice(self) -> None:
with DummyContextManager() as cm:
with pytest.raises(
RuntimeError,
match="^this DummyContextManager has already been entered$",
):
with cm:
pass
def test_exit_before_enter(self) -> None:
cm = DummyContextManager()
with pytest.raises(
RuntimeError, match="^this DummyContextManager has not been entered yet$"
):
cm.__exit__(None, None, None)
def test_call_superclass_method(self) -> None:
class InheritedContextManager(DummyContextManager):
def __init__(self, handle_exc: bool = False) -> None:
super().__init__(handle_exc)
self.child_started = False
self.child_finished = False
@contextmanager
def __contextmanager__(self) -> Generator[Self]:
self.child_started = True
with super().__contextmanager__():
assert self.started
try:
yield self
except RuntimeError:
if not self.handle_exc:
raise
assert self.finished
self.child_finished = True
with InheritedContextManager() as cm:
assert cm.started
assert not cm.finished
assert cm.child_started
assert not cm.child_finished
assert cm.finished
assert cm.child_finished
class TestAsyncContextManagerMixin:
async def test_contextmanager(self) -> None:
async with DummyAsyncContextManager() as cm:
assert cm.started
assert not cm.finished
assert cm.finished
@pytest.mark.parametrize("handle_exc", [True, False])
async def test_exception(self, handle_exc: bool) -> None:
async with AsyncExitStack() as stack:
if not handle_exc:
stack.enter_context(pytest.raises(RuntimeError, match="^foo$"))
cm = await stack.enter_async_context(DummyAsyncContextManager(handle_exc))
raise RuntimeError("foo")
assert cm.started
assert cm.finished == handle_exc
async def test_return_bad_type(self) -> None:
class BadContextManager(AsyncContextManagerMixin):
def __asynccontextmanager__(self): # type: ignore[no-untyped-def]
return None
with pytest.raises(TypeError, match="did not return an async context manager"):
async with BadContextManager():
pass
async def test_return_async_generator(self) -> None:
class BadContextManager(AsyncContextManagerMixin):
async def __asynccontextmanager__(self): # type: ignore[no-untyped-def]
yield self
with pytest.raises(TypeError, match="returned an async generator"):
async with BadContextManager():
pass
async def test_return_self(self) -> None:
class BadContextManager(AsyncContextManagerMixin):
def __asynccontextmanager__(self): # type: ignore[no-untyped-def]
return self
with pytest.raises(TypeError, match="returned self"):
async with BadContextManager():
pass
async def test_return_coroutine(self) -> None:
class BadContextManager(AsyncContextManagerMixin):
async def __asynccontextmanager__(self): # type: ignore[no-untyped-def]
return self
with pytest.raises(TypeError, match="returned a coroutine object instead of"):
async with BadContextManager():
pass
async def test_enter_twice(self) -> None:
async with DummyAsyncContextManager() as cm:
with pytest.raises(
RuntimeError,
match="^this DummyAsyncContextManager has already been entered$",
):
async with cm:
pass
async def test_exit_before_enter(self) -> None:
cm = DummyAsyncContextManager()
with pytest.raises(
RuntimeError,
match="^this DummyAsyncContextManager has not been entered yet$",
):
await cm.__aexit__(None, None, None)
async def test_call_superclass_method(self) -> None:
class InheritedAsyncContextManager(DummyAsyncContextManager):
def __init__(self, handle_exc: bool = False) -> None:
super().__init__(handle_exc)
self.child_started = False
self.child_finished = False
@asynccontextmanager
async def __asynccontextmanager__(self) -> AsyncGenerator[Self]:
self.child_started = True
async with super().__asynccontextmanager__():
assert self.started
try:
yield self
except RuntimeError:
if not self.handle_exc:
raise
assert self.finished
self.child_finished = True
async with InheritedAsyncContextManager() as cm:
assert cm.started
assert not cm.finished
assert cm.child_started
assert not cm.child_finished
assert cm.finished
assert cm.child_finished
anyio-4.11.0/tests/test_debugging.py 0000664 0000000 0000000 00000011167 15064462627 0017444 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import asyncio
import sys
from collections.abc import AsyncGenerator, Generator
from types import CoroutineType, GeneratorType
from typing import Any, cast
import pytest
import anyio
from anyio import (
Event,
TaskInfo,
create_task_group,
get_current_task,
get_running_tasks,
move_on_after,
wait_all_tasks_blocked,
)
from anyio.abc import TaskStatus
from .conftest import asyncio_params
get_coro = asyncio.Task.get_coro
def test_main_task_name(
anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
task_name = None
async def main() -> None:
nonlocal task_name
task_name = get_current_task().name
anyio.run(main, backend=anyio_backend_name, backend_options=anyio_backend_options)
assert task_name == "tests.test_debugging.test_main_task_name.<locals>.main"
# Work around sniffio/asyncio bug that leaves behind an unclosed event loop
if anyio_backend_name == "asyncio":
import asyncio
import gc
for loop in [
obj
for obj in gc.get_objects()
if isinstance(obj, asyncio.AbstractEventLoop)
]:
loop.close()
@pytest.mark.parametrize(
"name_input,expected",
[
(None, "tests.test_debugging.test_non_main_task_name..non_main"),
(b"name", "b'name'"),
("name", "name"),
("", ""),
],
)
async def test_non_main_task_name(
name_input: bytes | str | None, expected: str
) -> None:
async def non_main(*, task_status: TaskStatus[str | None]) -> None:
task_status.started(anyio.get_current_task().name)
async with anyio.create_task_group() as tg:
name = await tg.start(non_main, name=name_input)
assert name == expected
async def test_get_running_tasks() -> None:
async def inspect() -> None:
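# Wait until the two event.wait() tasks spawned below are parked, so that the
# snapshot taken via get_running_tasks() is deterministic.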
await wait_all_tasks_blocked()
new_tasks = set(get_running_tasks()) - existing_tasks
task_infos[:] = sorted(new_tasks, key=lambda info: info.name or "")
event.set()
event = Event()
task_infos: list[TaskInfo] = []
host_task = get_current_task()
async with create_task_group() as tg:
existing_tasks = set(get_running_tasks())
tg.start_soon(event.wait, name="task1")
tg.start_soon(event.wait, name="task2")
tg.start_soon(inspect)
assert len(task_infos) == 3
expected_names = [
"task1",
"task2",
"tests.test_debugging.test_get_running_tasks..inspect",
]
for task, expected_name in zip(task_infos, expected_names):
assert task.parent_id == host_task.id
assert task.name == expected_name
assert repr(task).endswith(f"TaskInfo(id={task.id}, name={expected_name!r})")
@pytest.mark.skipif(
sys.version_info >= (3, 11),
reason="Generator based coroutines have been removed in Python 3.11",
)
@pytest.mark.filterwarnings(
'ignore:"@coroutine" decorator is deprecated:DeprecationWarning'
)
def test_wait_generator_based_task_blocked(
asyncio_event_loop: asyncio.AbstractEventLoop,
) -> None:
async def native_coro_part() -> None:
await wait_all_tasks_blocked()
gen = cast(GeneratorType, get_coro(gen_task))
assert not gen.gi_running
coro = cast(CoroutineType, gen.gi_yieldfrom)
assert coro.cr_code.co_name == "wait"
event.set()
@asyncio.coroutine # type: ignore[attr-defined]
def generator_part() -> Generator[object, BaseException, None]:
yield from event.wait() # type: ignore[misc]
event = asyncio.Event()
gen_task: asyncio.Task[None] = asyncio_event_loop.create_task(generator_part())
asyncio_event_loop.run_until_complete(native_coro_part())
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_wait_all_tasks_blocked_asend(anyio_backend: str) -> None:
"""Test that wait_all_tasks_blocked() does not crash on an `asend()` object."""
async def agen_func() -> AsyncGenerator[None, None]:
yield
agen = agen_func()
coro = agen.asend(None)
loop = asyncio.get_running_loop()
task = loop.create_task(cast("CoroutineType[Any, Any, Any]", coro))
await wait_all_tasks_blocked()
await task
await agen.aclose()
async def test_wait_all_tasks_blocked_cancelled_task() -> None:
done = False
async def self_cancel(*, task_status: TaskStatus) -> None:
nonlocal done
task_status.started()
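# A negative delay puts the deadline in the past, so the scope is cancelled at
# the very first checkpoint and Event().wait() never actually blocks.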
with move_on_after(-1):
await Event().wait()
done = True
async with create_task_group() as tg:
await tg.start(self_cancel)
await wait_all_tasks_blocked()
assert done
anyio-4.11.0/tests/test_deprecations.py 0000664 0000000 0000000 00000000432 15064462627 0020162 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import pytest
import anyio
def test_broken_worker_interpreter_deprecation() -> None:
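# NB: "Intepreter" (sic) is the old, misspelled public name; accessing it is
# expected to emit a DeprecationWarning while still resolving to the correctly
# spelled class.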
with pytest.warns(DeprecationWarning):
DeprecatedClass = anyio.BrokenWorkerIntepreter
assert DeprecatedClass is anyio.BrokenWorkerInterpreter
anyio-4.11.0/tests/test_eventloop.py 0000664 0000000 0000000 00000005217 15064462627 0017523 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import asyncio
import math
from asyncio import get_running_loop
from collections.abc import Generator
from unittest import mock
from unittest.mock import AsyncMock
import pytest
from pytest import MonkeyPatch
from anyio import run, sleep_forever, sleep_until
fake_current_time = 1620581544.0
@pytest.fixture
def fake_sleep() -> Generator[AsyncMock, None, None]:
with mock.patch(
"anyio._core._eventloop.current_time", return_value=fake_current_time
):
with mock.patch("anyio._core._eventloop.sleep", AsyncMock()) as v:
yield v
async def test_sleep_until(fake_sleep: AsyncMock) -> None:
deadline = fake_current_time + 500.102352
await sleep_until(deadline)
fake_sleep.assert_called_once_with(deadline - fake_current_time)
async def test_sleep_until_in_past(fake_sleep: AsyncMock) -> None:
deadline = fake_current_time - 500.102352
await sleep_until(deadline)
fake_sleep.assert_called_once_with(0)
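# Rough model of the behavior the two tests above pin down, assuming that
# sleep_until() simply clamps the remaining time at zero (the real logic lives
# in anyio._core._eventloop):
def _expected_sleep_duration(deadline: float, now: float) -> float:
    return max(deadline - now, 0)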
async def test_sleep_forever(fake_sleep: AsyncMock) -> None:
await sleep_forever()
fake_sleep.assert_called_once_with(math.inf)
def test_run_task() -> None:
"""Test that anyio.run() on asyncio will work with a callable returning a Future."""
async def async_add(x: int, y: int) -> int:
return x + y
result = run(asyncio.create_task, async_add(1, 2), backend="asyncio")
assert result == 3
class TestAsyncioOptions:
def test_debug(self) -> None:
async def main() -> bool:
return get_running_loop().get_debug()
debug = run(main, backend="asyncio", backend_options={"debug": True})
assert debug is True
def test_debug_via_env(self, monkeypatch: MonkeyPatch) -> None:
async def main() -> bool:
return get_running_loop().get_debug()
monkeypatch.setenv("PYTHONASYNCIODEBUG", "1")
debug = run(main, backend="asyncio")
assert debug is True
def test_loop_factory(self) -> None:
async def main() -> type:
return type(get_running_loop())
uvloop = pytest.importorskip("uvloop", reason="uvloop not installed")
loop_class = run(
main,
backend="asyncio",
backend_options={"loop_factory": uvloop.new_event_loop},
)
assert issubclass(loop_class, uvloop.Loop)
def test_use_uvloop(self) -> None:
async def main() -> type:
return type(get_running_loop())
uvloop = pytest.importorskip("uvloop", reason="uvloop not installed")
loop_class = run(main, backend="asyncio", backend_options={"use_uvloop": True})
assert issubclass(loop_class, uvloop.Loop)
anyio-4.11.0/tests/test_fileio.py 0000664 0000000 0000000 00000057274 15064462627 0016771 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import os
import pathlib
import platform
import socket
import stat
import sys
import pytest
from _pytest.tmpdir import TempPathFactory
from anyio import AsyncFile, Path, open_file, wrap_file
class TestAsyncFile:
@pytest.fixture(scope="class")
def testdata(cls) -> bytes:
return b"".join(bytes([i] * 1000) for i in range(10))
@pytest.fixture
def testdatafile(
self, tmp_path_factory: TempPathFactory, testdata: bytes
) -> pathlib.Path:
file = tmp_path_factory.mktemp("file").joinpath("testdata")
file.write_bytes(testdata)
return file
async def test_open_close(self, testdatafile: pathlib.Path) -> None:
f = await open_file(testdatafile)
await f.aclose()
async def test_read(self, testdatafile: pathlib.Path, testdata: bytes) -> None:
async with await open_file(testdatafile, "rb") as f:
data = await f.read()
assert f.closed
assert data == testdata
async def test_readinto(self, testdatafile: pathlib.Path, testdata: bytes) -> None:
buffer = bytearray(100)
async with await open_file(testdatafile, "rb") as f:
assert await f.readinto(buffer) == 100
assert bytes(buffer) == testdata[:100]
async def test_readinto1(self, testdatafile: pathlib.Path, testdata: bytes) -> None:
buffer = bytearray(100)
async with await open_file(testdatafile, "rb") as f:
assert await f.readinto1(buffer) == 100
assert bytes(buffer) == testdata[:100]
async def test_write(self, testdatafile: pathlib.Path, testdata: bytes) -> None:
async with await open_file(testdatafile, "ab") as f:
await f.write(b"f" * 1000)
assert testdatafile.stat().st_size == len(testdata) + 1000
async def test_async_iteration(self, tmp_path: pathlib.Path) -> None:
lines = ["blah blah\n", "foo foo\n", "bar bar"]
testpath = tmp_path.joinpath("testfile")
testpath.write_text("".join(lines), "ascii")
async with await open_file(str(testpath)) as f:
lines_i = iter(lines)
async for line in f:
assert line == next(lines_i)
async def test_wrap_file(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testdata"
with path.open("w") as fp:
wrapped = wrap_file(fp)
await wrapped.write("dummydata")
assert path.read_text() == "dummydata"
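# Note for readers: AsyncFile is a thin async wrapper around a regular file
# object; each awaited method dispatches the corresponding blocking call to a
# worker thread, which is why these tests can freely mix plain pathlib checks
# with awaited file operations.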
class TestPath:
@pytest.fixture
def populated_tmpdir(self, tmp_path: pathlib.Path) -> pathlib.Path:
tmp_path.joinpath("testfile").touch()
tmp_path.joinpath("testfile2").touch()
subdir = tmp_path / "subdir"
sibdir = tmp_path / "sibdir"
for subpath in (subdir, sibdir):
subpath.mkdir()
subpath.joinpath("dummyfile1.txt").touch()
subpath.joinpath("dummyfile2.txt").touch()
return tmp_path
async def test_properties(self) -> None:
"""
Ensure that all public properties and methods are available on the async Path
class.
"""
path = pathlib.Path("/test/path/another/part")
stdlib_properties = {
p for p in dir(path) if p.startswith("__") or not p.startswith("_")
}
stdlib_properties.discard("link_to")
stdlib_properties.discard("__class_getitem__")
stdlib_properties.discard("__enter__")
stdlib_properties.discard("__exit__")
stdlib_properties.discard("__firstlineno__")
stdlib_properties.discard("__open_rb__")
stdlib_properties.discard("__open_wb__")
async_path = Path(path)
anyio_properties = {
p for p in dir(async_path) if p.startswith("__") or not p.startswith("_")
}
missing = stdlib_properties - anyio_properties
assert not missing
def test_repr(self) -> None:
assert repr(Path("/foo")) == "Path('/foo')"
def test_bytes(self) -> None:
assert bytes(Path("/foo-åäö")) == os.fsencode(f"{os.path.sep}foo-åäö")
def test_hash(self) -> None:
assert hash(Path("/foo")) == hash(pathlib.Path("/foo"))
def test_comparison(self) -> None:
path1 = Path("/foo1")
path2 = Path("/foo2")
assert path1 < path2
assert path1 <= path2
assert path2 > path1
assert path2 >= path1
def test_truediv(self) -> None:
result = Path("/foo") / "bar"
assert isinstance(result, Path)
assert result == pathlib.Path("/foo/bar")
def test_rtruediv(self) -> None:
result = "/foo" / Path("bar")
assert isinstance(result, Path)
assert result == pathlib.Path("/foo/bar")
def test_parts_property(self) -> None:
assert Path("/abc/xyz/foo.txt").parts == (os.path.sep, "abc", "xyz", "foo.txt")
@pytest.mark.skipif(
platform.system() != "Windows", reason="Drive only makes sense on Windows"
)
def test_drive_property(self) -> None:
assert Path("c:\\abc\\xyz").drive == "c:"
def test_root_property(self) -> None:
assert Path("/abc/xyz/foo.txt").root == os.path.sep
def test_anchor_property(self) -> None:
assert Path("/abc/xyz/foo.txt.zip").anchor == os.path.sep
def test_parents_property(self) -> None:
parents = Path("/abc/xyz/foo.txt").parents
assert len(parents) == 3
assert all(isinstance(parent, Path) for parent in parents)
assert str(parents[0]) == f"{os.path.sep}abc{os.path.sep}xyz"
assert str(parents[1]) == f"{os.path.sep}abc"
assert str(parents[2]) == os.path.sep
def test_parent_property(self) -> None:
parent = Path("/abc/xyz/foo.txt").parent
assert isinstance(parent, Path)
assert str(parent) == f"{os.path.sep}abc{os.path.sep}xyz"
def test_name_property(self) -> None:
assert Path("/abc/xyz/foo.txt.zip").name == "foo.txt.zip"
def test_suffix_property(self) -> None:
assert Path("/abc/xyz/foo.txt.zip").suffix == ".zip"
def test_suffixes_property(self) -> None:
assert Path("/abc/xyz/foo.tar.gz").suffixes == [".tar", ".gz"]
def test_stem_property(self) -> None:
assert Path("/abc/xyz/foo.txt.zip").stem == "foo.txt"
async def test_absolute(self) -> None:
result = await Path("../foo/bar").absolute()
assert isinstance(result, Path)
assert result == pathlib.Path.cwd() / "../foo/bar"
@pytest.mark.skipif(
platform.system() != "Windows", reason="Only makes sense on Windows"
)
def test_as_posix(self) -> None:
assert Path("c:\\foo\\bar").as_posix() == "c:/foo/bar"
def test_as_uri(self) -> None:
if platform.system() == "Windows":
assert Path("c:\\foo\\bar").as_uri() == "file:///c:/foo/bar"
else:
assert Path("/foo/bar").as_uri() == "file:///foo/bar"
@pytest.mark.skipif(
sys.version_info < (3, 13),
reason="Path.from_uri() is only available on Python 3.13+",
)
def test_from_uri(self) -> None:
if platform.system() == "Windows":
uri = "file:///C:/foo/bar"
else:
uri = "file:///foo/bar"
path = Path.from_uri(uri)
assert isinstance(path, Path)
assert path.as_uri() == uri
async def test_cwd(self) -> None:
result = await Path.cwd()
assert isinstance(result, Path)
assert result == pathlib.Path.cwd()
async def test_exists(self, tmp_path: pathlib.Path) -> None:
assert not await Path("~/btelkbee").exists()
assert await Path(tmp_path).exists()
async def test_expanduser(self) -> None:
result = await Path("~/btelkbee").expanduser()
assert isinstance(result, Path)
assert str(result) == os.path.expanduser(f"~{os.path.sep}btelkbee")
async def test_home(self) -> None:
result = await Path.home()
assert isinstance(result, Path)
assert result == pathlib.Path.home()
@pytest.mark.parametrize(
"arg, result",
[
("c:/xyz" if platform.system() == "Windows" else "/xyz", True),
("../xyz", False),
],
)
def test_is_absolute(self, arg: str, result: bool) -> None:
assert Path(arg).is_absolute() == result
@pytest.mark.skipif(
platform.system() == "Windows",
reason="Block devices are not available on Windows",
)
async def test_is_block_device(self) -> None:
assert not await Path("/btelkbee").is_block_device()
with os.scandir("/dev") as iterator:
for entry in iterator:
if stat.S_ISBLK(entry.stat(follow_symlinks=False).st_mode):
assert await Path(entry.path).is_block_device()
break
else:
pytest.skip("Could not find a suitable block device")
@pytest.mark.skipif(
platform.system() == "Windows",
reason="Character devices are not available on Windows",
)
async def test_is_char_device(self) -> None:
assert not await Path("/btelkbee").is_char_device()
assert await Path("/dev/random").is_char_device()
async def test_is_dir(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "somedir"
assert not await Path(path).is_dir()
path.mkdir()
assert await Path(path).is_dir()
@pytest.mark.skipif(
platform.system() == "Windows", reason="mkfifo() is not available on Windows"
)
async def test_is_fifo(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "somefifo"
assert not await Path(path).is_fifo()
os.mkfifo(path)
assert await Path(path).is_fifo()
async def test_is_file(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "somefile"
assert not await Path(path).is_file()
path.touch()
assert await Path(path).is_file()
@pytest.mark.skipif(
sys.version_info < (3, 12),
reason="Path.is_junction() is only available on Python 3.12+",
)
async def test_is_junction(self, tmp_path: pathlib.Path) -> None:
assert not await Path(tmp_path).is_junction()
async def test_is_mount(self) -> None:
assert not await Path("/gfobj4ewiotj").is_mount()
assert await Path("/").is_mount()
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_is_reserved(self) -> None:
expected_result = platform.system() == "Windows"
assert Path("nul").is_reserved() == expected_result
@pytest.mark.skipif(
platform.system() == "Windows",
reason="UNIX sockets are not available on Windows",
)
async def test_is_socket(self, tmp_path_factory: TempPathFactory) -> None:
path = tmp_path_factory.mktemp("unix").joinpath("socket")
assert not await Path(path).is_socket()
with socket.socket(socket.AF_UNIX) as sock:
sock.bind(str(path))
assert await Path(path).is_socket()
@pytest.mark.skipif(
platform.system() == "Windows",
reason="symbolic links are not supported on Windows",
)
async def test_is_symlink(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
assert not await Path(path).is_symlink()
path.symlink_to("/foo")
assert await Path(path).is_symlink()
@pytest.mark.parametrize("arg, result", [("/xyz/abc", True), ("/xyz/baz", False)])
def test_is_relative_to(self, arg: str, result: bool) -> None:
assert Path("/xyz/abc/foo").is_relative_to(arg) == result
@pytest.mark.skipif(
sys.version_info < (3, 14),
reason="Path.copy() is only available on Python 3.14+",
)
async def test_copy(self, tmp_path: pathlib.Path) -> None:
source_path = Path(tmp_path) / "source"
destination_path = Path(tmp_path) / "destination"
await source_path.write_text("hello")
result = await source_path.copy(destination_path) # type: ignore[attr-defined]
assert await result.read_text() == "hello"
@pytest.mark.skipif(
sys.version_info < (3, 14),
reason="Path.copy() is only available on Python 3.14+",
)
async def test_copy_into(self, tmp_path: pathlib.Path) -> None:
source_path = Path(tmp_path) / "source"
destination_path = Path(tmp_path) / "destination"
await destination_path.mkdir()
await source_path.write_text("hello")
result = await source_path.copy_into(destination_path) # type: ignore[attr-defined]
assert await result.read_text() == "hello"
@pytest.mark.skipif(
sys.version_info < (3, 14),
reason="Path.copy() is only available on Python 3.14+",
)
async def test_move(self, tmp_path: pathlib.Path) -> None:
source_path = Path(tmp_path) / "source"
destination_path = Path(tmp_path) / "destination"
await source_path.write_text("hello")
result = await source_path.move(destination_path) # type: ignore[attr-defined]
assert await result.read_text() == "hello"
assert not await source_path.exists()
@pytest.mark.skipif(
sys.version_info < (3, 14),
reason="Path.copy() is only available on Python 3.14+",
)
async def test_move_into(self, tmp_path: pathlib.Path) -> None:
source_path = Path(tmp_path) / "source"
destination_path = Path(tmp_path) / "destination"
await destination_path.mkdir()
await source_path.write_text("hello")
result = await source_path.move_into(destination_path) # type: ignore[attr-defined]
assert await result.read_text() == "hello"
assert not await source_path.exists()
async def test_glob(self, populated_tmpdir: pathlib.Path) -> None:
all_paths = []
async for path in Path(populated_tmpdir).glob("**/*.txt"):
assert isinstance(path, Path)
all_paths.append(path.relative_to(populated_tmpdir))
all_paths.sort()
assert all_paths == [
Path("sibdir") / "dummyfile1.txt",
Path("sibdir") / "dummyfile2.txt",
Path("subdir") / "dummyfile1.txt",
Path("subdir") / "dummyfile2.txt",
]
async def test_rglob(self, populated_tmpdir: pathlib.Path) -> None:
all_paths = []
async for path in Path(populated_tmpdir).rglob("*.txt"):
assert isinstance(path, Path)
all_paths.append(path.relative_to(populated_tmpdir))
all_paths.sort()
assert all_paths == [
Path("sibdir") / "dummyfile1.txt",
Path("sibdir") / "dummyfile2.txt",
Path("subdir") / "dummyfile1.txt",
Path("subdir") / "dummyfile2.txt",
]
async def test_iterdir(self, populated_tmpdir: pathlib.Path) -> None:
all_paths = []
async for path in Path(populated_tmpdir).iterdir():
assert isinstance(path, Path)
all_paths.append(path.name)
all_paths.sort()
assert all_paths == ["sibdir", "subdir", "testfile", "testfile2"]
def test_joinpath(self) -> None:
path = Path("/foo").joinpath("bar")
assert path == Path("/foo/bar")
@pytest.mark.skipif(
sys.version_info < (3, 13),
reason="Path.full_match() is only available on Python 3.13+",
)
def test_fullmatch(self) -> None:
assert Path("/foo/bar").full_match("/foo/*")
assert not Path("/foo/bar").full_match("/baz/*")
def test_match(self) -> None:
assert Path("/foo/bar").match("/foo/*")
assert not Path("/foo/bar").match("/baz/*")
@pytest.mark.skipif(
platform.system() == "Windows", reason="chmod() is not available on Windows"
)
async def test_chmod(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
path.touch(0o666)
await Path(path).chmod(0o444)
assert path.stat().st_mode & 0o777 == 0o444
@pytest.mark.skipif(
platform.system() == "Windows", reason="hard links are not supported on Windows"
)
async def test_hardlink_to(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
target = tmp_path / "link"
target.touch()
await Path(path).hardlink_to(Path(target))
assert path.stat().st_nlink == 2
assert target.stat().st_nlink == 2
@pytest.mark.skipif(
platform.system() == "Windows", reason="lchmod() does not work on Windows"
)
@pytest.mark.skipif(
not hasattr(os, "lchmod"), reason="os.lchmod() is not available"
)
async def test_lchmod(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
path.symlink_to("/foo/bar/baz")
await Path(path).lchmod(0o600)
assert path.lstat().st_mode & 0o777 == 0o600
@pytest.mark.skipif(
platform.system() == "Windows",
reason="symbolic links are not supported on Windows",
)
async def test_lstat(self, tmp_path: pathlib.Path) -> None:
path = tmp_path.joinpath("testfile")
path.symlink_to("/foo/bar/baz")
result = await Path(path).lstat()
assert isinstance(result, os.stat_result)
@pytest.mark.skipif(
platform.system() == "Windows",
reason="owner and group are not supported on Windows",
)
async def test_group(self, tmp_path: pathlib.Path) -> None:
import grp
group_name = grp.getgrgid(os.getegid()).gr_name
assert await Path(tmp_path).group() == group_name
async def test_mkdir(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testdir"
await Path(path).mkdir()
assert path.is_dir()
async def test_open(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
path.write_bytes(b"bibbitibobbitiboo")
fp = await Path(path).open("rb")
assert isinstance(fp, AsyncFile)
assert fp.name == str(path)
await fp.aclose()
@pytest.mark.skipif(
platform.system() == "Windows",
reason="owner and group are not supported on Windows",
)
async def test_owner(self, tmp_path: pathlib.Path) -> None:
import pwd
user_name = pwd.getpwuid(os.geteuid()).pw_name
assert await Path(tmp_path).owner() == user_name
@pytest.mark.skipif(
platform.system() == "Windows",
reason="symbolic links are not supported on Windows",
)
async def test_readlink(self, tmp_path: pathlib.Path) -> None:
path = tmp_path.joinpath("testfile")
path.symlink_to("/foo/bar/baz")
link_target = await Path(path).readlink()
assert isinstance(link_target, Path)
assert str(link_target) == "/foo/bar/baz"
async def test_read_bytes(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
path.write_bytes(b"bibbitibobbitiboo")
assert await Path(path).read_bytes() == b"bibbitibobbitiboo"
async def test_read_text(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
path.write_text("some text åäö", encoding="utf-8")
assert await Path(path).read_text(encoding="utf-8") == "some text åäö"
async def test_relative_to_subpath(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "subdir"
assert path.relative_to(tmp_path) == Path("subdir")
@pytest.mark.skipif(
sys.version_info < (3, 12),
reason="Path.relative_to(walk_up=) is only available on Python 3.12+",
)
async def test_relative_to_sibling(
self,
populated_tmpdir: pathlib.Path,
monkeypatch: pytest.MonkeyPatch,
) -> None:
subdir = Path(populated_tmpdir / "subdir")
sibdir = Path(populated_tmpdir / "sibdir")
with pytest.raises(ValueError):
subdir.relative_to(sibdir, walk_up=False)
monkeypatch.chdir(sibdir)
relpath = subdir.relative_to(sibdir, walk_up=True) / "dummyfile1.txt"
assert os.access(relpath, os.R_OK)
async def test_rename(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "somefile"
path.touch()
target = tmp_path / "anotherfile"
result = await Path(path).rename(Path(target))
assert isinstance(result, Path)
assert result == target
async def test_replace(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "somefile"
path.write_text("hello")
target = tmp_path / "anotherfile"
target.write_text("world")
result = await Path(path).replace(Path(target))
assert isinstance(result, Path)
assert result == target
assert target.read_text() == "hello"
async def test_resolve(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "somedir" / ".." / "somefile"
result = await Path(path).resolve()
assert result == tmp_path / "somefile"
async def test_rmdir(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "somedir"
path.mkdir()
await Path(path).rmdir()
assert not path.exists()
async def test_samefile(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "somefile"
path.touch()
assert await Path(tmp_path / "somefile").samefile(Path(path))
async def test_stat(self, tmp_path: pathlib.Path) -> None:
result = await Path(tmp_path).stat()
assert isinstance(result, os.stat_result)
async def test_touch(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
await Path(path).touch()
assert path.is_file()
@pytest.mark.skipif(
platform.system() == "Windows",
reason="symbolic links are not supported on Windows",
)
async def test_symlink_to(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
target = tmp_path / "link"
await Path(path).symlink_to(Path(target))
assert path.is_symlink()
async def test_unlink(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
path.touch()
await Path(path).unlink()
assert not path.exists()
async def test_unlink_missing_file(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
await Path(path).unlink(missing_ok=True)
with pytest.raises(FileNotFoundError):
await Path(path).unlink(missing_ok=False)
@pytest.mark.skipif(
sys.version_info < (3, 12),
reason="Path.walk() is only available on Python 3.12+",
)
async def test_walk(self, tmp_path: pathlib.Path) -> None:
subdir = tmp_path / "subdir"
subdir.mkdir()
subdir.joinpath("file1").touch()
subdir.joinpath("file2").touch()
path = Path(tmp_path)
iterator = Path(tmp_path).walk().__aiter__()
root, dirs, files = await iterator.__anext__()
assert root == path
assert dirs == ["subdir"]
assert files == []
root, dirs, files = await iterator.__anext__()
assert root == path / "subdir"
assert dirs == []
assert sorted(files) == ["file1", "file2"]
with pytest.raises(StopAsyncIteration):
await iterator.__anext__()
def test_with_name(self) -> None:
assert Path("/xyz/foo.txt").with_name("bar").name == "bar"
def test_with_stem(self) -> None:
assert Path("/xyz/foo.txt").with_stem("bar").name == "bar.txt"
def test_with_suffix(self) -> None:
assert Path("/xyz/foo.txt.gz").with_suffix(".zip").name == "foo.txt.zip"
async def test_write_bytes(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
await Path(path).write_bytes(b"bibbitibobbitiboo")
assert path.read_bytes() == b"bibbitibobbitiboo"
async def test_write_text(self, tmp_path: pathlib.Path) -> None:
path = tmp_path / "testfile"
await Path(path).write_text("some text åäö", encoding="utf-8")
assert path.read_text(encoding="utf-8") == "some text åäö"
anyio-4.11.0/tests/test_from_thread.py 0000664 0000000 0000000 00000062741 15064462627 0020007 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import math
import sys
import threading
import time
from collections.abc import AsyncGenerator, Awaitable, Callable
from concurrent import futures
from concurrent.futures import CancelledError, Future, ThreadPoolExecutor
from contextlib import asynccontextmanager, suppress
from contextvars import ContextVar
from typing import Any, Literal, NoReturn, TypeVar
import pytest
import sniffio
from _pytest.logging import LogCaptureFixture
from anyio import (
CancelScope,
Event,
RunFinishedError,
create_task_group,
fail_after,
from_thread,
get_all_backends,
get_cancelled_exc_class,
get_current_task,
run,
sleep,
to_thread,
wait_all_tasks_blocked,
)
from anyio._core._exceptions import NoEventLoopError
from anyio.abc import TaskStatus
from anyio.from_thread import BlockingPortal, start_blocking_portal
from anyio.lowlevel import EventLoopToken, checkpoint, current_token
from .conftest import asyncio_params
if sys.version_info < (3, 11):
from exceptiongroup import ExceptionGroup
T_Retval = TypeVar("T_Retval")
async def async_add(a: int, b: int) -> int:
assert threading.current_thread() is threading.main_thread()
return a + b
async def asyncgen_add(a: int, b: int) -> AsyncGenerator[int, Any]:
yield a + b
def sync_add(a: int, b: int) -> int:
assert threading.current_thread() is threading.main_thread()
return a + b
def thread_worker_async(
func: Callable[..., Awaitable[T_Retval]], *args: Any
) -> T_Retval:
assert threading.current_thread() is not threading.main_thread()
return from_thread.run(func, *args)
def thread_worker_sync(func: Callable[..., T_Retval], *args: Any) -> T_Retval:
assert threading.current_thread() is not threading.main_thread()
return from_thread.run_sync(func, *args)
@pytest.mark.parametrize("cancel", [True, False])
async def test_thread_cancelled(cancel: bool) -> None:
event = threading.Event()
thread_finished_future: Future[None] = Future()
def sync_function() -> None:
event.wait(3)
try:
from_thread.check_cancelled()
except BaseException as exc:
thread_finished_future.set_exception(exc)
else:
thread_finished_future.set_result(None)
async with create_task_group() as tg:
tg.start_soon(to_thread.run_sync, sync_function)
await wait_all_tasks_blocked()
if cancel:
tg.cancel_scope.cancel()
event.set()
if cancel:
with pytest.raises(get_cancelled_exc_class()):
thread_finished_future.result(3)
else:
thread_finished_future.result(3)
async def test_thread_cancelled_and_abandoned() -> None:
event = threading.Event()
thread_finished_future: Future[None] = Future()
def sync_function() -> None:
event.wait(3)
try:
from_thread.check_cancelled()
except BaseException as exc:
thread_finished_future.set_exception(exc)
else:
thread_finished_future.set_result(None)
async with create_task_group() as tg:
tg.start_soon(lambda: to_thread.run_sync(sync_function, abandon_on_cancel=True))
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
event.set()
with pytest.raises(get_cancelled_exc_class()):
thread_finished_future.result(3)
def test_thread_cancelled_not_in_worker_thread() -> None:
with pytest.raises(
NoEventLoopError,
match="This function can only be called inside an AnyIO worker thread",
):
from_thread.check_cancelled()
async def test_cancelscope_propagation() -> None:
async def async_time_bomb() -> None:
cancel_scope.cancel()
with fail_after(1):
await sleep(3)
with CancelScope() as cancel_scope:
await to_thread.run_sync(from_thread.run, async_time_bomb)
assert cancel_scope.cancelled_caught
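# (The point of the test above: cancellation of the host-side scope must reach
# a coroutine that a worker thread is running via from_thread.run(), so
# sleep(3) is interrupted long before fail_after(1) could trip.)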
async def test_cancelscope_propagation_when_abandoned() -> None:
host_cancelled_event = Event()
completed_event = Event()
async def async_time_bomb() -> None:
cancel_scope.cancel()
with fail_after(3):
await host_cancelled_event.wait()
completed_event.set()
with CancelScope() as cancel_scope:
await to_thread.run_sync(
from_thread.run, async_time_bomb, abandon_on_cancel=True
)
assert cancel_scope.cancelled_caught
host_cancelled_event.set()
with fail_after(3):
await completed_event.wait()
class TestRunAsyncFromThread:
async def test_run_corofunc_from_thread(self) -> None:
result = await to_thread.run_sync(thread_worker_async, async_add, 1, 2)
assert result == 3
async def test_run_asyncgen_from_thread(self) -> None:
gen = asyncgen_add(1, 2)
try:
result = await to_thread.run_sync(thread_worker_async, gen.__anext__)
assert result == 3
finally:
await gen.aclose()
async def test_run_sync_from_thread(self) -> None:
result = await to_thread.run_sync(thread_worker_sync, sync_add, 1, 2)
assert result == 3
async def test_run_async_from_unclaimed_thread(self) -> None:
async def asyncfunc() -> int:
event.set()
return 7
def externalthread() -> int:
return from_thread.run(asyncfunc, token=token)
event = Event()
token = current_token()
with ThreadPoolExecutor(1) as executor:
future = executor.submit(externalthread)
await event.wait()
result = await to_thread.run_sync(future.result)
assert result == 7
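# Minimal restatement of the token-based pattern exercised above: capture
# current_token() while inside the event loop, hand it to a foreign (non-AnyIO)
# thread, and there call from_thread.run(..., token=token) to re-enter the
# loop.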
def test_run_sync_from_thread_pooling(self) -> None:
async def main() -> None:
thread_ids = set()
for _ in range(5):
thread_ids.add(await to_thread.run_sync(threading.get_ident))
# Expect all the work to have been done in the same worker thread
assert len(thread_ids) == 1
assert thread_ids.pop() != threading.get_ident()
assert threading.active_count() == initial_count + 1
# The thread should not exist after the event loop has been closed
initial_count = threading.active_count()
run(main, backend="asyncio")
for _ in range(10):
if threading.active_count() == initial_count:
return
time.sleep(0.1)
pytest.fail("Worker thread did not exit within 1 second")
async def test_run_async_from_thread_exception(self) -> None:
with pytest.raises(TypeError) as exc:
await to_thread.run_sync(thread_worker_async, async_add, 1, "foo")
exc.match("unsupported operand type")
async def test_run_sync_from_thread_exception(self) -> None:
with pytest.raises(TypeError) as exc:
await to_thread.run_sync(thread_worker_sync, sync_add, 1, "foo")
exc.match("unsupported operand type")
async def test_run_anyio_async_func_from_thread(self) -> None:
def worker(delay: float) -> Literal[True]:
from_thread.run(sleep, delay)
return True
assert await to_thread.run_sync(worker, 0)
def test_run_async_no_event_loop(self) -> None:
exc = pytest.raises(RuntimeError, from_thread.run, sleep, 0.1)
exc.match(
"Not running inside an AnyIO worker thread, and no event loop token was "
"provided"
)
def test_run_async_closed_event_loop(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def get_token() -> EventLoopToken:
return current_token()
token = run(
get_token, backend=anyio_backend_name, backend_options=anyio_backend_options
)
with pytest.raises(RunFinishedError):
from_thread.run(sleep, 0.1, token=token)
async def test_contextvar_propagation(self, anyio_backend_name: str) -> None:
var = ContextVar("var", default=1)
async def async_func() -> int:
await checkpoint()
return var.get()
def worker() -> int:
var.set(6)
return from_thread.run(async_func)
assert await to_thread.run_sync(worker) == 6
async def test_sniffio(self, anyio_backend_name: str) -> None:
async def async_func() -> str:
return sniffio.current_async_library()
def worker() -> str:
sniffio.current_async_library_cvar.set("something invalid for async_func")
return from_thread.run(async_func)
assert await to_thread.run_sync(worker) == anyio_backend_name
class TestRunSyncFromThread:
async def test_run_sync_from_unclaimed_thread(self) -> None:
def external_thread() -> int:
from_thread.run_sync(event.set, token=token)
return 7
event = Event()
token = current_token()
with ThreadPoolExecutor(1) as executor:
future = executor.submit(external_thread)
await event.wait()
result = await to_thread.run_sync(future.result)
assert result == 7
def test_run_sync_from_unclaimed_thread_no_token(self) -> None:
def foo() -> None:
pass
exc = pytest.raises(RuntimeError, from_thread.run_sync, foo)
exc.match(
"Not running inside an AnyIO worker thread, and no event loop token was provided"
)
def test_run_sync_closed_event_loop(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def get_token() -> EventLoopToken:
return current_token()
token = run(
get_token, backend=anyio_backend_name, backend_options=anyio_backend_options
)
with pytest.raises(RunFinishedError):
from_thread.run_sync(current_token, token=token)
async def test_contextvar_propagation(self) -> None:
var = ContextVar("var", default=1)
def worker() -> int:
var.set(6)
return from_thread.run_sync(var.get)
assert await to_thread.run_sync(worker) == 6
async def test_sniffio(self, anyio_backend_name: str) -> None:
def worker() -> str:
sniffio.current_async_library_cvar.set("something invalid for async_func")
return from_thread.run_sync(sniffio.current_async_library)
assert await to_thread.run_sync(worker) == anyio_backend_name
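# Sketch of the canonical blocking-portal pattern that the class below
# exercises: a synchronous caller starts an event loop in a background thread
# and submits work to it. (Illustrative only; the real tests take the backend
# name and options from fixtures.)
def _portal_sketch() -> str:
    async def greet() -> str:
        return "hello"

    with start_blocking_portal("asyncio") as portal:
        return portal.call(greet)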
class TestBlockingPortal:
class AsyncCM:
def __init__(self, ignore_error: bool):
self.ignore_error = ignore_error
async def __aenter__(self) -> Literal["test"]:
return "test"
async def __aexit__(
self, exc_type: object, exc_val: object, exc_tb: object
) -> bool:
return self.ignore_error
async def test_call_corofunc(self) -> None:
async with BlockingPortal() as portal:
result = await to_thread.run_sync(portal.call, async_add, 1, 2)
assert result == 3
async def test_call_anext(self) -> None:
gen = asyncgen_add(1, 2)
try:
async with BlockingPortal() as portal:
result = await to_thread.run_sync(portal.call, gen.__anext__)
assert result == 3
finally:
await gen.aclose()
async def test_aexit_with_exception(self) -> None:
"""
Test that when the portal exits with an exception, all tasks are cancelled.
"""
def external_thread() -> None:
try:
portal.call(sleep, 3)
except BaseException as exc:
results.append(exc)
else:
results.append(None)
results: list[BaseException | None] = []
with suppress(Exception):
async with BlockingPortal() as portal:
thread1 = threading.Thread(target=external_thread)
thread1.start()
thread2 = threading.Thread(target=external_thread)
thread2.start()
await sleep(0.1)
assert not results
raise Exception
await to_thread.run_sync(thread1.join)
await to_thread.run_sync(thread2.join)
assert len(results) == 2
assert isinstance(results[0], CancelledError)
assert isinstance(results[1], CancelledError)
async def test_aexit_without_exception(self) -> None:
"""Test that when the portal exits, it waits for all tasks to finish."""
def external_thread() -> None:
try:
portal.call(sleep, 0.2)
except BaseException as exc:
results.append(exc)
else:
results.append(None)
results: list[BaseException | None] = []
async with BlockingPortal() as portal:
thread1 = threading.Thread(target=external_thread)
thread1.start()
thread2 = threading.Thread(target=external_thread)
thread2.start()
await sleep(0.1)
assert not results
await to_thread.run_sync(thread1.join)
await to_thread.run_sync(thread2.join)
assert results == [None, None]
async def test_call_portal_from_event_loop_thread(self) -> None:
async with BlockingPortal() as portal:
exc = pytest.raises(RuntimeError, portal.call, threading.get_ident)
exc.match("This method cannot be called from the event loop thread")
def test_start_with_new_event_loop(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def async_get_thread_id() -> int:
return threading.get_ident()
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
thread_id = portal.call(async_get_thread_id)
assert isinstance(thread_id, int)
assert thread_id != threading.get_ident()
def test_start_with_thread_name(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
with start_blocking_portal(
anyio_backend_name, anyio_backend_options, name="foo"
) as portal:
assert portal.call(lambda: threading.current_thread().name) == "foo"
def test_start_without_thread_name(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
assert portal.call(lambda: threading.current_thread().name) == (
f"{anyio_backend_name}-portal-{id(portal):x}"
)
def test_start_with_nonexistent_backend(self) -> None:
with pytest.raises(LookupError) as exc:
with start_blocking_portal("foo"):
pass
exc.match("No such backend: foo")
def test_call_stopped_portal(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
pass
pytest.raises(RuntimeError, portal.call, threading.get_ident).match(
"This portal is not running"
)
def test_start_task_soon(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def event_waiter() -> Literal["test"]:
await event1.wait()
event2.set()
return "test"
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
event1 = portal.call(Event)
event2 = portal.call(Event)
future = portal.start_task_soon(event_waiter)
portal.call(event1.set)
portal.call(event2.wait)
assert future.result() == "test"
def test_start_task_soon_cancel_later(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def noop() -> None:
await sleep(2)
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
future = portal.start_task_soon(noop)
portal.call(wait_all_tasks_blocked)
future.cancel()
assert future.cancelled()
def test_start_task_soon_cancel_immediately(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
cancelled = False
done_event = threading.Event()
async def event_waiter() -> None:
nonlocal cancelled
try:
await sleep(3)
except get_cancelled_exc_class():
cancelled = True
finally:
done_event.set()
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
future = portal.start_task_soon(event_waiter)
future.cancel()
done_event.wait(10)
assert cancelled
def test_start_task_soon_with_name(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
task_name = None
async def taskfunc() -> None:
nonlocal task_name
task_name = get_current_task().name
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
portal.start_task_soon(taskfunc, name="testname")
assert task_name == "testname"
def test_async_context_manager_success(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
with portal.wrap_async_context_manager(
TestBlockingPortal.AsyncCM(False)
) as cm:
assert cm == "test"
def test_async_context_manager_error(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
with pytest.raises(Exception) as exc:
with portal.wrap_async_context_manager(
TestBlockingPortal.AsyncCM(False)
) as cm:
assert cm == "test"
raise Exception("should NOT be ignored")
exc.match("should NOT be ignored")
def test_async_context_manager_error_ignore(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
with portal.wrap_async_context_manager(
TestBlockingPortal.AsyncCM(True)
) as cm:
assert cm == "test"
raise Exception("should be ignored")
def test_async_context_manager_exception_in_task_group(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
"""Regression test for #381."""
async def failing_func() -> None:
raise ZeroDivisionError
@asynccontextmanager
async def run_in_context() -> AsyncGenerator[None, None]:
async with create_task_group() as tg:
tg.start_soon(failing_func)
yield
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
with pytest.raises(ExceptionGroup) as exc:
with portal.wrap_async_context_manager(run_in_context()):
pass
assert len(exc.value.exceptions) == 1
assert isinstance(exc.value.exceptions[0], ZeroDivisionError)
def test_start_no_value(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def taskfunc(*, task_status: TaskStatus) -> None:
task_status.started()
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
future, value = portal.start_task(taskfunc)
assert value is None
assert future.result() is None
def test_start_with_value(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def taskfunc(*, task_status: TaskStatus[str]) -> None:
task_status.started("foo")
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
future, value = portal.start_task(taskfunc)
assert value == "foo"
assert future.result() is None
def test_start_crash_before_started_call(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def taskfunc(*, task_status: object) -> NoReturn:
raise Exception("foo")
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
with pytest.raises(Exception, match="foo"):
portal.start_task(taskfunc)
def test_start_crash_after_started_call(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def taskfunc(*, task_status: TaskStatus[int]) -> NoReturn:
task_status.started(2)
raise Exception("foo")
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
future, value = portal.start_task(taskfunc)
assert value == 2
with pytest.raises(Exception, match="foo"):
future.result()
def test_start_no_started_call(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def taskfunc(*, task_status: TaskStatus) -> None:
pass
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
with pytest.raises(RuntimeError, match="Task exited"):
portal.start_task(taskfunc)
def test_start_with_name(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def taskfunc(*, task_status: TaskStatus[str | None]) -> None:
task_status.started(get_current_task().name)
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
future, start_value = portal.start_task(taskfunc, name="testname")
assert start_value == "testname"
def test_contextvar_propagation_sync(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
var = ContextVar("var", default=1)
var.set(6)
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
propagated_value = portal.call(var.get)
assert propagated_value == 6
def test_contextvar_propagation_async(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
var = ContextVar("var", default=1)
var.set(6)
async def get_var() -> int:
await checkpoint()
return var.get()
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
propagated_value = portal.call(get_var)
assert propagated_value == 6
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_asyncio_run_sync_called(self, caplog: LogCaptureFixture) -> None:
"""Regression test for #357."""
async def in_loop() -> None:
raise CancelledError
async with BlockingPortal() as portal:
await to_thread.run_sync(portal.start_task_soon, in_loop)
assert not caplog.text
def test_raise_baseexception_from_task(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
"""
Test that when a task raises a BaseException, it does not trigger additional
exceptions when trying to close the portal.
"""
async def raise_baseexception() -> None:
assert threading.current_thread().daemon
raise BaseException("fatal error")
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
with pytest.raises(BaseException, match="fatal error") as exc:
portal.call(raise_baseexception)
assert exc.value.__context__ is None
@pytest.mark.parametrize("portal_backend_name", get_all_backends())
@pytest.mark.usefixtures("deactivate_blockbuster")
async def test_from_async(
self, anyio_backend_name: str, portal_backend_name: str
) -> None:
"""
Test that portals don't deadlock when started/used from async code.
Note: This test will deadlock if there is a regression. A deadlock should be
treated as a failure. See also
https://github.com/agronholm/anyio/pull/524#discussion_r1183080886.
"""
if anyio_backend_name == "trio" and portal_backend_name == "trio":
pytest.xfail("known bug (#525)")
with start_blocking_portal(portal_backend_name) as portal:
portal.call(checkpoint)
async def test_cancel_portal_future(self) -> None:
"""Regression test for #575."""
event = Event()
def sync_thread() -> None:
fs = [portal.start_task_soon(sleep, math.inf)]
from_thread.run_sync(event.set)
done, not_done = futures.wait(
fs, timeout=1, return_when=futures.FIRST_COMPLETED
)
assert not not_done
async with from_thread.BlockingPortal() as portal:
async with create_task_group() as tg:
tg.start_soon(to_thread.run_sync, sync_thread)
# Ensure the thread has time to start the task
await event.wait()
await portal.stop(cancel_remaining=True)
anyio-4.11.0/tests/test_lowlevel.py 0000664 0000000 0000000 00000007163 15064462627 0017343 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from typing import Any
import pytest
from anyio import create_task_group, run
from anyio.lowlevel import (
RunVar,
cancel_shielded_checkpoint,
checkpoint,
checkpoint_if_cancelled,
)
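# Semantics pinned down by the three tests below (matching AnyIO's checkpoint
# documentation):
# - checkpoint(): always yields to the scheduler, and raises if the enclosing
#   scope has been cancelled.
# - checkpoint_if_cancelled(): only acts (yield plus cancellation delivery)
#   when a cancellation is already pending; otherwise it returns immediately.
# - cancel_shielded_checkpoint(): always yields, but never delivers a pending
#   cancellation.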
@pytest.mark.parametrize("cancel", [False, True])
async def test_checkpoint_if_cancelled(cancel: bool) -> None:
finished = second_finished = False
async def func() -> None:
nonlocal finished
tg.start_soon(second_func)
if cancel:
tg.cancel_scope.cancel()
await checkpoint_if_cancelled()
finished = True
async def second_func() -> None:
nonlocal second_finished
assert finished != cancel
second_finished = True
async with create_task_group() as tg:
tg.start_soon(func)
assert finished != cancel
assert second_finished
@pytest.mark.parametrize("cancel", [False, True])
async def test_cancel_shielded_checkpoint(cancel: bool) -> None:
finished = second_finished = False
async def func() -> None:
nonlocal finished
await cancel_shielded_checkpoint()
finished = True
async def second_func() -> None:
nonlocal second_finished
assert not finished
second_finished = True
async with create_task_group() as tg:
tg.start_soon(func)
tg.start_soon(second_func)
if cancel:
tg.cancel_scope.cancel()
assert finished
assert second_finished
@pytest.mark.parametrize("cancel", [False, True])
async def test_checkpoint(cancel: bool) -> None:
finished = second_finished = False
async def func() -> None:
nonlocal finished
await checkpoint()
finished = True
async def second_func() -> None:
nonlocal second_finished
assert not finished
second_finished = True
async with create_task_group() as tg:
tg.start_soon(func)
tg.start_soon(second_func)
if cancel:
tg.cancel_scope.cancel()
assert finished != cancel
assert second_finished
class TestRunVar:
def test_get_set(
self,
anyio_backend_name: str,
anyio_backend_options: dict[str, Any],
) -> None:
async def taskfunc(index: int) -> None:
assert var.get() == index
var.set(index + 1)
async def main() -> None:
pytest.raises(LookupError, var.get)
for i in range(2):
var.set(i)
async with create_task_group() as tg:
tg.start_soon(taskfunc, i)
assert var.get() == i + 1
var = RunVar[int]("var")
for _ in range(2):
run(main, backend=anyio_backend_name, backend_options=anyio_backend_options)
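# (Context for the loop above: a RunVar is like a ContextVar whose storage is
# bound to a single event loop run, so each call to run() starts from an empty
# slot and var.get() raises LookupError again.)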
async def test_reset_token_used_on_wrong_runvar(self) -> None:
var1 = RunVar[str]("var1")
var2 = RunVar[str]("var2")
token = var1.set("blah")
with pytest.raises(
ValueError, match="This token does not belong to this RunVar"
):
var2.reset(token)
async def test_reset_token_used_twice(self) -> None:
var = RunVar[str]("var")
token = var.set("blah")
var.reset(token)
with pytest.raises(ValueError, match="This token has already been used"):
var.reset(token)
async def test_runvar_does_not_share_storage_by_name(self) -> None:
var1: RunVar[int] = RunVar("var", 1)
var2: RunVar[str] = RunVar("var", "a")
assert var1.get() == 1
assert var2.get() == "a"
var1.set(2)
var2.set("b")
assert var1.get() == 2
assert var2.get() == "b"
anyio-4.11.0/tests/test_pytest_plugin.py 0000664 0000000 0000000 00000045353 15064462627 0020423 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import socket
from collections.abc import Sequence
import pytest
from _pytest.logging import LogCaptureFixture
from _pytest.pytester import Pytester
from anyio import get_all_backends
from anyio.pytest_plugin import FreePortFactory
pytestmark = [
pytest.mark.filterwarnings(
"ignore:The TerminalReporter.writer attribute is deprecated"
":pytest.PytestDeprecationWarning:"
)
]
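# The inner pytest runs with only the anyio plugin enabled; pytest-asyncio and
# pytest-trio are explicitly disabled so they cannot interfere with collecting
# or running the async tests.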
pytest_args = "-v", "-p", "anyio", "-p", "no:asyncio", "-p", "no:trio"
def test_plugin(testdir: Pytester) -> None:
testdir.makeconftest(
"""
from contextvars import ContextVar
import sniffio
import pytest
from anyio import sleep
var = ContextVar("var")
@pytest.fixture
async def async_fixture():
await sleep(0)
return sniffio.current_async_library()
@pytest.fixture
async def context_variable():
token = var.set("testvalue")
yield var
var.reset(token)
@pytest.fixture
async def some_feature():
yield None
await sleep(0)
"""
)
testdir.makepyfile(
"""
import pytest
import sniffio
from hypothesis import strategies, given
from anyio import get_all_backends, sleep
@pytest.mark.anyio
async def test_marked_test() -> None:
# Test that tests marked with @pytest.mark.anyio are run
pass
@pytest.mark.anyio
async def test_async_fixture_from_marked_test(async_fixture):
# Test that async functions can use async fixtures
assert async_fixture in get_all_backends()
def test_async_fixture_from_sync_test(anyio_backend_name, async_fixture):
# Test that regular functions can use async fixtures too
assert async_fixture == anyio_backend_name
@pytest.mark.anyio
async def test_skip_inline(some_feature):
# Test for github #214
pytest.skip("Test that skipping works")
@pytest.mark.anyio
async def test_contextvar(context_variable):
# Test that a contextvar set in an async fixture is visible to the test
assert context_variable.get() == "testvalue"
"""
)
result = testdir.runpytest(*pytest_args)
result.assert_outcomes(
passed=4 * len(get_all_backends()), skipped=len(get_all_backends())
)
def test_asyncio(testdir: Pytester, caplog: LogCaptureFixture) -> None:
testdir.makeconftest(
"""
import asyncio
import pytest
import threading
@pytest.fixture(scope='class')
def anyio_backend():
return 'asyncio'
@pytest.fixture
async def setup_fail_fixture():
def callback():
raise RuntimeError('failing fixture setup')
asyncio.get_running_loop().call_soon(callback)
await asyncio.sleep(0)
yield None
@pytest.fixture
async def teardown_fail_fixture():
def callback():
raise RuntimeError('failing fixture teardown')
yield None
asyncio.get_running_loop().call_soon(callback)
await asyncio.sleep(0)
@pytest.fixture
def no_thread_leaks_fixture():
# this has to be a non-async fixture so that it wraps up
# after the event loop gets closed
threads_before = threading.enumerate()
yield
threads_after = threading.enumerate()
leaked_threads = set(threads_after) - set(threads_before)
assert not leaked_threads
"""
)
testdir.makepyfile(
"""
import asyncio
import pytest
pytestmark = pytest.mark.anyio
class TestClassFixtures:
@pytest.fixture(scope='class')
async def async_class_fixture(self, anyio_backend):
await asyncio.sleep(0)
return anyio_backend
def test_class_fixture_in_test_method(
self,
async_class_fixture,
anyio_backend_name
):
assert anyio_backend_name == 'asyncio'
assert async_class_fixture == 'asyncio'
async def test_callback_exception_during_test() -> None:
def callback():
nonlocal started
started = True
raise Exception('foo')
started = False
asyncio.get_running_loop().call_soon(callback)
await asyncio.sleep(0)
assert started
async def test_callback_exception_during_setup(setup_fail_fixture):
pass
async def test_callback_exception_during_teardown(teardown_fail_fixture):
pass
async def test_exception_handler_no_exception():
asyncio.get_event_loop().call_exception_handler(
{"message": "bogus error"}
)
await asyncio.sleep(0.1)
async def test_shutdown_default_executor(no_thread_leaks_fixture):
# Test for github #503
asyncio.get_event_loop().run_in_executor(None, lambda: 1)
"""
)
result = testdir.runpytest(*pytest_args)
result.assert_outcomes(passed=4, failed=1, errors=2)
assert len(caplog.messages) == 1
assert caplog.messages[0] == "bogus error"
def test_autouse_async_fixture(testdir: Pytester) -> None:
testdir.makeconftest(
"""
import pytest
autouse_backend = None
@pytest.fixture(autouse=True)
async def autouse_async_fixture(anyio_backend_name):
global autouse_backend
autouse_backend = anyio_backend_name
@pytest.fixture
def autouse_backend_name():
return autouse_backend
"""
)
testdir.makepyfile(
"""
import pytest
import sniffio
from anyio import get_all_backends, sleep
def test_autouse_backend(autouse_backend_name):
# Test that async autouse fixtures are triggered
assert autouse_backend_name in get_all_backends()
"""
)
result = testdir.runpytest_subprocess(*pytest_args)
result.assert_outcomes(passed=len(get_all_backends()))
def test_cancel_scope_in_asyncgen_fixture(testdir: Pytester) -> None:
testdir.makepyfile(
"""
import pytest
from anyio import create_task_group, sleep
@pytest.fixture
async def asyncgen_fixture():
async with create_task_group() as tg:
tg.cancel_scope.cancel()
await sleep(1)
yield 1
@pytest.mark.anyio
async def test_cancel_in_asyncgen_fixture(asyncgen_fixture):
assert asyncgen_fixture == 1
"""
)
result = testdir.runpytest_subprocess(*pytest_args)
result.assert_outcomes(passed=len(get_all_backends()))
def test_module_scoped_task_group_fixture(testdir: Pytester) -> None:
testdir.makeconftest(
"""
import pytest
from anyio import (
CancelScope,
create_memory_object_stream,
create_task_group,
get_all_backends,
)
@pytest.fixture(scope="module", params=get_all_backends())
        def anyio_backend(request):
            return request.param
@pytest.fixture(scope="module")
async def task_group():
async with create_task_group() as tg:
yield tg
@pytest.fixture
async def streams(task_group):
async def echo_messages(*, task_status):
with CancelScope() as cancel_scope:
task_status.started(cancel_scope)
async for obj in receive1:
await send2.send(obj)
send1, receive1 = create_memory_object_stream()
send2, receive2 = create_memory_object_stream()
cancel_scope = await task_group.start(echo_messages)
yield send1, receive2
cancel_scope.cancel()
"""
)
testdir.makepyfile(
"""
import pytest
@pytest.mark.anyio
async def test_task_group(streams):
send1, receive2 = streams
await send1.send("hello")
assert await receive2.receive() == "hello"
"""
)
result = testdir.runpytest_subprocess(*pytest_args)
result.assert_outcomes(passed=len(get_all_backends()))
def test_async_fixture_teardown_after_sync_test(testdir: Pytester) -> None:
# Regression test for #619
testdir.makepyfile(
"""
import pytest
from anyio import create_task_group, sleep
@pytest.fixture(scope="session")
def anyio_backend():
return "asyncio"
@pytest.fixture(scope="module")
async def bbbbbb():
yield ""
@pytest.fixture(scope="module")
async def aaaaaa():
yield ""
@pytest.mark.anyio
async def test_1(bbbbbb):
pass
@pytest.mark.anyio
async def test_2(aaaaaa, bbbbbb):
pass
"""
)
result = testdir.runpytest_subprocess(*pytest_args)
result.assert_outcomes(passed=2)
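# The module-level anyio mark applies to Hypothesis-wrapped async tests (run once
# per backend) without affecting the purely synchronous Hypothesis test.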
def test_hypothesis_module_mark(testdir: Pytester) -> None:
testdir.makepyfile(
"""
import pytest
from hypothesis import given
from hypothesis.strategies import just
pytestmark = pytest.mark.anyio
@given(x=just(1))
async def test_hypothesis_wrapper(x):
assert isinstance(x, int)
@given(x=just(1))
def test_hypothesis_wrapper_regular(x):
assert isinstance(x, int)
@pytest.mark.xfail(strict=True)
@given(x=just(1))
async def test_hypothesis_wrapper_failing(x):
pytest.fail('This test failed successfully')
"""
)
result = testdir.runpytest(*pytest_args)
result.assert_outcomes(
passed=len(get_all_backends()) + 1, xfailed=len(get_all_backends())
)
def test_hypothesis_function_mark(testdir: Pytester) -> None:
testdir.makepyfile(
"""
import pytest
from hypothesis import given
from hypothesis.strategies import just
@pytest.mark.anyio
@given(x=just(1))
async def test_anyio_mark_first(x):
assert isinstance(x, int)
@given(x=just(1))
@pytest.mark.anyio
async def test_anyio_mark_last(x):
assert isinstance(x, int)
@pytest.mark.xfail(strict=True)
@pytest.mark.anyio
@given(x=just(1))
async def test_anyio_mark_first_fail(x):
pytest.fail('This test failed successfully')
@given(x=just(1))
@pytest.mark.xfail(strict=True)
@pytest.mark.anyio
async def test_anyio_mark_last_fail(x):
pytest.fail('This test failed successfully')
"""
)
result = testdir.runpytest(*pytest_args)
result.assert_outcomes(
passed=2 * len(get_all_backends()), xfailed=2 * len(get_all_backends())
)
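# Exit raised inside a task group (e.g. when quitting a debugger) must abort the
# test session outright, so no test outcomes are recorded at all.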
@pytest.mark.parametrize("anyio_backend", get_all_backends(), indirect=True)
def test_debugger_exit_in_taskgroup(testdir: Pytester, anyio_backend_name: str) -> None:
testdir.makepyfile(
f"""
import pytest
from _pytest.outcomes import Exit
from anyio import create_task_group
@pytest.fixture
def anyio_backend():
return {anyio_backend_name!r}
@pytest.mark.anyio
async def test_debugger_exit():
async with create_task_group() as tg:
raise Exit('Quitting debugger')
"""
)
result = testdir.runpytest(*pytest_args)
result.assert_outcomes()
@pytest.mark.parametrize("anyio_backend", get_all_backends(), indirect=True)
def test_keyboardinterrupt_during_test(
testdir: Pytester, anyio_backend_name: str
) -> None:
testdir.makepyfile(
f"""
import pytest
from anyio import create_task_group, sleep
@pytest.fixture
def anyio_backend():
return {anyio_backend_name!r}
async def send_keyboardinterrupt():
raise KeyboardInterrupt
@pytest.mark.anyio
async def test_anyio_mark_first():
async with create_task_group() as tg:
tg.start_soon(send_keyboardinterrupt)
await sleep(10)
"""
)
testdir.runpytest_subprocess(*pytest_args, timeout=3)
def test_async_fixture_in_test_class(testdir: Pytester) -> None:
# Regression test for #633
testdir.makepyfile(
"""
import pytest
class TestAsyncFixtureMethod:
is_same_instance = False
@pytest.fixture(autouse=True)
async def async_fixture_method(self):
self.is_same_instance = True
@pytest.mark.anyio
async def test_async_fixture_method(self):
assert self.is_same_instance
"""
)
result = testdir.runpytest_subprocess(*pytest_args)
result.assert_outcomes(passed=len(get_all_backends()))
def test_asyncgen_fixture_in_test_class(testdir: Pytester) -> None:
# Regression test for #633
testdir.makepyfile(
"""
import pytest
class TestAsyncFixtureMethod:
is_same_instance = False
@pytest.fixture(autouse=True)
async def async_fixture_method(self):
self.is_same_instance = True
yield
@pytest.mark.anyio
async def test_async_fixture_method(self):
assert self.is_same_instance
"""
)
result = testdir.runpytest_subprocess(*pytest_args)
result.assert_outcomes(passed=len(get_all_backends()))
def test_anyio_fixture_adoption_does_not_persist(testdir: Pytester) -> None:
testdir.makepyfile(
"""
import inspect
import pytest
@pytest.fixture
async def fixt():
return 1
@pytest.mark.anyio
async def test_fixt(fixt):
assert fixt == 1
def test_no_mark(fixt):
assert inspect.iscoroutine(fixt)
fixt.close()
"""
)
result = testdir.runpytest(*pytest_args)
result.assert_outcomes(passed=len(get_all_backends()) + 1)
def test_async_fixture_params(testdir: Pytester) -> None:
testdir.makepyfile(
"""
import inspect
import pytest
@pytest.fixture(params=[1, 2])
async def fixt(request):
return request.param
@pytest.mark.anyio
async def test_params(fixt):
assert fixt in (1, 2)
"""
)
result = testdir.runpytest(*pytest_args)
result.assert_outcomes(passed=len(get_all_backends()) * 2)
def test_auto_mode(testdir: Pytester) -> None:
testdir.makepyprojecttoml(
"""
[tool.pytest.ini_options]
anyio_mode = "auto"
"""
)
testdir.makepyfile(
"""
import inspect
import pytest
@pytest.fixture
async def fixt(request):
return 1
async def test_params(fixt):
assert fixt == 1
"""
)
result = testdir.runpytest(*pytest_args)
result.assert_outcomes(passed=len(get_all_backends()))
def test_auto_mode_conflict_warning(testdir: Pytester) -> None:
testdir.makepyprojecttoml(
"""
[tool.pytest.ini_options]
anyio_mode = "auto"
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "function"
"""
)
testdir.makeconftest(
"""
import pytest
class FakeAsyncioPlugin:
def pytest_addoption(self, parser, pluginmanager):
parser.addini(
"asyncio_mode",
default="strict",
type="string",
help="dummy asyncio plugin"
)
def pytest_addhooks(pluginmanager):
if not pluginmanager.has_plugin("asyncio"):
pluginmanager.register(FakeAsyncioPlugin(), "asyncio")
"""
)
testdir.makepyfile(
"""
import inspect
import pytest
@pytest.fixture
async def fixt(request):
return 1
async def test_params(fixt):
assert fixt == 1
"""
)
result = testdir.runpytest("-p", "anyio")
result.assert_outcomes(passed=len(get_all_backends()))
assert (
"PytestConfigWarning: AnyIO auto mode has been enabled together with "
"pytest-asyncio auto mode. This may cause unexpected behavior."
) in result.stdout.str()
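# Tests for the plugin's free port fixtures: the factories are expected to hand
# out distinct, currently unused ports for both TCP and UDP.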
class TestFreePortFactory:
@pytest.fixture(scope="class")
def families(self) -> Sequence[tuple[socket.AddressFamily, str]]:
from .test_sockets import has_ipv6
families: list[tuple[socket.AddressFamily, str]] = [
(socket.AF_INET, "127.0.0.1")
]
if has_ipv6:
families.append((socket.AF_INET6, "::1"))
return families
async def test_tcp_factory(
self,
families: Sequence[tuple[socket.AddressFamily, str]],
free_tcp_port_factory: FreePortFactory,
) -> None:
generated_ports = {free_tcp_port_factory() for _ in range(5)}
assert all(isinstance(port, int) for port in generated_ports)
assert len(generated_ports) == 5
for port in generated_ports:
for family, addr in families:
with socket.socket(family, socket.SOCK_STREAM) as sock:
try:
sock.bind((addr, port))
except OSError:
pass
async def test_udp_factory(
self,
families: Sequence[tuple[socket.AddressFamily, str]],
free_udp_port_factory: FreePortFactory,
) -> None:
generated_ports = {free_udp_port_factory() for _ in range(5)}
assert all(isinstance(port, int) for port in generated_ports)
assert len(generated_ports) == 5
for port in generated_ports:
for family, addr in families:
with socket.socket(family, socket.SOCK_DGRAM) as sock:
sock.bind((addr, port))
async def test_free_tcp_port(
self, families: Sequence[tuple[socket.AddressFamily, str]], free_tcp_port: int
) -> None:
assert isinstance(free_tcp_port, int)
for family, addr in families:
with socket.socket(family, socket.SOCK_STREAM) as sock:
sock.bind((addr, free_tcp_port))
async def test_free_udp_port(
self, families: Sequence[tuple[socket.AddressFamily, str]], free_udp_port: int
) -> None:
assert isinstance(free_udp_port, int)
for family, addr in families:
with socket.socket(family, socket.SOCK_DGRAM) as sock:
sock.bind((addr, free_udp_port))
anyio-4.11.0/tests/test_signals.py 0000664 0000000 0000000 00000003601 15064462627 0017143 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import os
import signal
import sys
from collections.abc import AsyncIterable
import pytest
from anyio import create_task_group, fail_after, open_signal_receiver, to_thread
pytestmark = [
pytest.mark.skipif(
sys.platform == "win32",
reason="Signal delivery cannot be tested on Windows",
),
]
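# The signals are sent to this process from a worker thread; the receiver should
# yield them in the order they were delivered.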
async def test_receive_signals() -> None:
with open_signal_receiver(signal.SIGUSR1, signal.SIGUSR2) as sigiter:
await to_thread.run_sync(os.kill, os.getpid(), signal.SIGUSR1)
await to_thread.run_sync(os.kill, os.getpid(), signal.SIGUSR2)
with fail_after(1):
sigusr1 = await sigiter.__anext__()
assert isinstance(sigusr1, signal.Signals)
assert sigusr1 == signal.Signals.SIGUSR1
sigusr2 = await sigiter.__anext__()
assert isinstance(sigusr2, signal.Signals)
assert sigusr2 == signal.Signals.SIGUSR2
async def test_task_group_cancellation_open() -> None:
async def signal_handler() -> None:
with open_signal_receiver(signal.SIGUSR1) as sigiter:
async for _ in sigiter:
pytest.fail("SIGUSR1 should not be sent")
pytest.fail("signal_handler should have been cancelled")
pytest.fail("open_signal_receiver should not suppress cancellation")
async with create_task_group() as tg:
tg.start_soon(signal_handler)
tg.cancel_scope.cancel()
async def test_task_group_cancellation_consume() -> None:
async def consume(sigiter: AsyncIterable[int]) -> None:
async for _ in sigiter:
pytest.fail("SIGUSR1 should not be sent")
pytest.fail("consume should have been cancelled")
with open_signal_receiver(signal.SIGUSR1) as sigiter:
async with create_task_group() as tg:
tg.start_soon(consume, sigiter)
tg.cancel_scope.cancel()
anyio-4.11.0/tests/test_sockets.py 0000664 0000000 0000000 00000266271 15064462627 0017174 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import array
import errno
import gc
import io
import os
import platform
import re
import socket
import sys
import tempfile
import threading
import time
from collections.abc import Generator, Iterable, Iterator
from contextlib import suppress
from ipaddress import IPv4Address, IPv6Address
from pathlib import Path
from socket import AddressFamily
from ssl import SSLContext, SSLError
from threading import Thread
from typing import TYPE_CHECKING, Any, Literal, NoReturn, Protocol, TypeVar, cast
from unittest import mock
from unittest.mock import MagicMock, patch
import psutil
import pytest
from _pytest.fixtures import SubRequest
from _pytest.logging import LogCaptureFixture
from _pytest.monkeypatch import MonkeyPatch
from _pytest.tmpdir import TempPathFactory
from pytest import FixtureRequest
from pytest_mock.plugin import MockerFixture
from anyio import (
BrokenResourceError,
BusyResourceError,
ClosedResourceError,
EndOfStream,
Event,
TCPConnectable,
TypedAttributeLookupError,
UNIXConnectable,
as_connectable,
connect_tcp,
connect_unix,
create_connected_udp_socket,
create_connected_unix_datagram_socket,
create_task_group,
create_tcp_listener,
create_udp_socket,
create_unix_datagram_socket,
create_unix_listener,
fail_after,
getaddrinfo,
getnameinfo,
move_on_after,
notify_closing,
wait_all_tasks_blocked,
wait_readable,
wait_socket_readable,
wait_socket_writable,
wait_writable,
)
from anyio._core._eventloop import get_async_backend
from anyio.abc import (
ConnectedUDPSocket,
ConnectedUNIXDatagramSocket,
IPSockAddrType,
Listener,
SocketAttribute,
SocketListener,
SocketStream,
UDPSocket,
UNIXDatagramSocket,
UNIXSocketStream,
)
from anyio.lowlevel import checkpoint
from anyio.streams.stapled import MultiListener
from anyio.streams.tls import TLSConnectable
from .conftest import asyncio_params, no_other_refs
if sys.version_info < (3, 11):
from exceptiongroup import ExceptionGroup
if TYPE_CHECKING:
from _typeshed import FileDescriptorLike
AnyIPAddressFamily = Literal[
AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
]
# If a socket can bind to ::1, the current environment has IPv6 properly configured
has_ipv6 = False
if socket.has_ipv6:
try:
s = socket.socket(AddressFamily.AF_INET6)
try:
s.bind(("::1", 0))
finally:
s.close()
del s
except OSError:
pass
else:
has_ipv6 = True
skip_ipv6_mark = pytest.mark.skipif(not has_ipv6, reason="IPv6 is not available")
skip_unix_abstract_mark = pytest.mark.skipif(
not sys.platform.startswith("linux"),
reason="Abstract namespace sockets is a Linux only feature",
)
@pytest.fixture
def fake_localhost_dns(monkeypatch: MonkeyPatch) -> None:
def fake_getaddrinfo(*args: Any, **kwargs: Any) -> object:
# Make it return IPv4 addresses first so we can test the IPv6 preference
results = real_getaddrinfo(*args, **kwargs)
return sorted(results, key=lambda item: item[0])
real_getaddrinfo = socket.getaddrinfo
monkeypatch.setattr("socket.getaddrinfo", fake_getaddrinfo)
@pytest.fixture(
params=[
pytest.param(AddressFamily.AF_INET, id="ipv4"),
pytest.param(AddressFamily.AF_INET6, id="ipv6", marks=[skip_ipv6_mark]),
]
)
def family(request: SubRequest) -> AnyIPAddressFamily:
return request.param
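# Works around a known CPython bug (https://bugs.python.org/issue39148) where the
# proactor event loop on Windows mishandles IPv6 sockets.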
@pytest.fixture
def check_asyncio_bug(anyio_backend_name: str, family: AnyIPAddressFamily) -> None:
if (
anyio_backend_name == "asyncio"
and sys.platform == "win32"
and family == AddressFamily.AF_INET6
):
import asyncio
policy = asyncio.get_event_loop_policy()
if policy.__class__.__name__ == "WindowsProactorEventLoopPolicy":
pytest.skip("Does not work due to a known bug (39148)")
class SockFdFactoryProtocol(Protocol):
def __call__(
self,
family: socket.AddressFamily,
kind: socket.SocketKind,
*,
bound: bool = False,
connected: bool = False,
) -> socket.socket | int: ...
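# Factory fixture that yields either a socket object or a bare file descriptor
# (per the fixture param), optionally bound and/or connected, so the
# from_socket() tests cover both accepted input types.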
@pytest.fixture(
params=[pytest.param(False, id="sock"), pytest.param(True, id="fileno")]
)
def sock_or_fd_factory(
request: SubRequest, tmp_path_factory: TempPathFactory
) -> SockFdFactoryProtocol:
def factory(
family: socket.AddressFamily,
kind: socket.SocketKind,
*,
bound: bool = False,
connected: bool = False,
) -> socket.socket | int:
sock = socket.socket(family, kind)
if bound or connected:
if family in (socket.AF_INET, socket.AF_INET6):
local_addr: str | tuple[str, int] = ("localhost", 0)
else:
local_addr = str(tmp_path_factory.mktemp("anyio") / "socket")
if bound:
sock.bind(local_addr)
if kind == socket.SOCK_STREAM:
sock.listen()
elif connected:
server_sock = socket.socket(family, kind)
request.addfinalizer(server_sock.close)
server_sock.bind(local_addr)
if kind == socket.SOCK_STREAM:
server_sock.listen()
sock.connect(server_sock.getsockname())
if request.param:
return sock.detach()
else:
request.addfinalizer(sock.close)
return sock
return factory
_T = TypeVar("_T")
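# No-op decorator substituted on non-Windows platforms for the resource warning
# filters defined below.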
def _identity(v: _T) -> _T:
return v
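# Write to the socket until the kernel send buffer fills up and the call would
# block, leaving the socket temporarily unwritable.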
def fill_socket(sock: socket.socket) -> None:
try:
while True:
sock.send(b"x" * 65536)
except BlockingIOError:
pass
# _ProactorBasePipeTransport.abort() after _ProactorBasePipeTransport.close()
# does not cancel writes: https://bugs.python.org/issue44428
_ignore_win32_resource_warnings = (
pytest.mark.filterwarnings(
"ignore:unclosed Iterator[socket.socket]:
sock = socket.socket(family, socket.SOCK_STREAM)
sock.settimeout(1)
sock.bind(("localhost", 0))
sock.listen()
yield sock
sock.close()
@pytest.fixture
def server_addr(self, server_sock: socket.socket) -> tuple[str, int]:
return server_sock.getsockname()[:2]
async def test_extra_attributes(
self,
server_sock: socket.socket,
server_addr: tuple[str, int],
family: AnyIPAddressFamily,
) -> None:
async with await connect_tcp(*server_addr) as stream:
raw_socket = stream.extra(SocketAttribute.raw_socket)
assert stream.extra(SocketAttribute.family) == family
assert (
stream.extra(SocketAttribute.local_address)
== raw_socket.getsockname()[:2]
)
assert (
stream.extra(SocketAttribute.local_port) == raw_socket.getsockname()[1]
)
assert stream.extra(SocketAttribute.remote_address) == server_addr
assert stream.extra(SocketAttribute.remote_port) == server_addr[1]
async def test_send_receive(
self, server_sock: socket.socket, server_addr: tuple[str, int]
) -> None:
async with await connect_tcp(*server_addr) as stream:
client, _ = server_sock.accept()
await stream.send(b"blah")
request = client.recv(100)
client.sendall(request[::-1])
response = await stream.receive()
client.close()
assert response == b"halb"
async def test_send_large_buffer(
self, server_sock: socket.socket, server_addr: tuple[str, int]
) -> None:
def serve() -> None:
client, _ = server_sock.accept()
client.sendall(buffer)
client.close()
buffer = (
b"\xff" * 1024 * 1024
) # should exceed the maximum kernel send buffer size
async with await connect_tcp(*server_addr) as stream:
thread = Thread(target=serve, daemon=True)
thread.start()
response = b""
while len(response) < len(buffer):
chunk = await stream.receive()
assert isinstance(chunk, bytes)
response += chunk
thread.join()
assert response == buffer
async def test_send_eof(
self, server_sock: socket.socket, server_addr: tuple[str, int]
) -> None:
def serve() -> None:
client, _ = server_sock.accept()
request = b""
while True:
data = client.recv(100)
request += data
if not data:
break
client.sendall(request[::-1])
client.close()
async with await connect_tcp(*server_addr) as stream:
thread = Thread(target=serve, daemon=True)
thread.start()
await stream.send(b"hello, ")
await stream.send(b"world\n")
await stream.send_eof()
response = await stream.receive()
thread.join()
assert response == b"\ndlrow ,olleh"
async def test_iterate(
self, server_sock: socket.socket, server_addr: tuple[str, int]
) -> None:
def serve() -> None:
client, _ = server_sock.accept()
client.sendall(b"bl")
event.wait(1)
client.sendall(b"ah")
client.close()
event = threading.Event()
thread = Thread(target=serve, daemon=True)
thread.start()
chunks = []
async with await connect_tcp(*server_addr) as stream:
async for chunk in stream:
chunks.append(chunk)
event.set()
thread.join()
assert chunks == [b"bl", b"ah"]
async def test_socket_options(
self, family: AnyIPAddressFamily, server_addr: tuple[str, int]
) -> None:
async with await connect_tcp(*server_addr) as stream:
raw_socket = stream.extra(SocketAttribute.raw_socket)
assert raw_socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) != 0
@skip_ipv6_mark
@pytest.mark.parametrize(
"local_addr, expected_client_addr",
[
pytest.param("", "::1", id="dualstack"),
pytest.param("127.0.0.1", "127.0.0.1", id="ipv4"),
pytest.param("::1", "::1", id="ipv6"),
],
)
async def test_happy_eyeballs(
self, local_addr: str, expected_client_addr: str, fake_localhost_dns: None
) -> None:
client_addr = None, None
def serve() -> None:
nonlocal client_addr
client, client_addr = server_sock.accept()
client.close()
family = (
AddressFamily.AF_INET
if local_addr == "127.0.0.1"
else AddressFamily.AF_INET6
)
server_sock = socket.socket(family)
server_sock.bind((local_addr, 0))
server_sock.listen()
port = server_sock.getsockname()[1]
thread = Thread(target=serve, daemon=True)
thread.start()
async with await connect_tcp("localhost", port):
pass
thread.join()
server_sock.close()
assert client_addr[0] == expected_client_addr
@pytest.mark.skipif(
sys.implementation.name == "pypy",
reason=(
"gc.get_referrers is broken on PyPy (see "
"https://github.com/pypy/pypy/issues/5075)"
),
)
async def test_happy_eyeballs_refcycles(
self, free_tcp_port: int, anyio_backend_name: str
) -> None:
"""
Test derived from https://github.com/python/cpython/pull/124859
"""
if anyio_backend_name == "asyncio" and sys.version_info < (3, 10):
pytest.skip(
"asyncio.BaseEventLoop.create_connection creates refcycles on py 3.9"
)
exc = None
try:
async with await connect_tcp("127.0.0.1", free_tcp_port):
pass
except OSError as e:
exc = e.__cause__
assert isinstance(exc, OSError)
assert gc.get_referrers(exc) == no_other_refs()
@pytest.mark.parametrize(
"target, exception_class",
[
pytest.param(
"localhost", ExceptionGroup, id="multi", marks=[skip_ipv6_mark]
),
pytest.param("127.0.0.1", ConnectionRefusedError, id="single"),
],
)
async def test_connection_refused(
self,
target: str,
exception_class: type[ExceptionGroup] | type[ConnectionRefusedError],
fake_localhost_dns: None,
free_tcp_port: int,
) -> None:
with pytest.raises(OSError) as exc:
await connect_tcp(target, free_tcp_port)
assert exc.match("All connection attempts failed")
assert isinstance(exc.value.__cause__, exception_class)
if isinstance(exc.value.__cause__, ExceptionGroup):
for exception in exc.value.__cause__.exceptions:
assert isinstance(exception, ConnectionRefusedError)
async def test_receive_timeout(
self, server_sock: socket.socket, server_addr: tuple[str, int]
) -> None:
def serve() -> None:
conn, _ = server_sock.accept()
time.sleep(1)
conn.close()
thread = Thread(target=serve, daemon=True)
thread.start()
async with await connect_tcp(*server_addr) as stream:
start_time = time.monotonic()
with move_on_after(0.1):
while time.monotonic() - start_time < 0.3:
await stream.receive(1)
pytest.fail("The timeout was not respected")
async def test_concurrent_send(self, server_addr: tuple[str, int]) -> None:
async def send_data() -> NoReturn:
while True:
await stream.send(b"\x00" * 4096)
async with await connect_tcp(*server_addr) as stream:
async with create_task_group() as tg:
tg.start_soon(send_data)
await wait_all_tasks_blocked()
with pytest.raises(BusyResourceError) as exc:
await stream.send(b"foo")
exc.match("already writing to")
tg.cancel_scope.cancel()
async def test_concurrent_receive(self, server_addr: tuple[str, int]) -> None:
async with await connect_tcp(*server_addr) as client:
async with create_task_group() as tg:
tg.start_soon(client.receive)
await wait_all_tasks_blocked()
try:
with pytest.raises(BusyResourceError) as exc:
await client.receive()
exc.match("already reading from")
finally:
tg.cancel_scope.cancel()
async def test_close_during_receive(self, server_addr: tuple[str, int]) -> None:
async def interrupt() -> None:
await wait_all_tasks_blocked()
await stream.aclose()
async with await connect_tcp(*server_addr) as stream:
async with create_task_group() as tg:
tg.start_soon(interrupt)
with pytest.raises(ClosedResourceError):
await stream.receive()
async def test_receive_after_close(self, server_addr: tuple[str, int]) -> None:
stream = await connect_tcp(*server_addr)
await stream.aclose()
with pytest.raises(ClosedResourceError):
await stream.receive()
async def test_send_after_close(self, server_addr: tuple[str, int]) -> None:
stream = await connect_tcp(*server_addr)
await stream.aclose()
with pytest.raises(ClosedResourceError):
await stream.send(b"foo")
async def test_receive_after_peer_closed(
self, family: AnyIPAddressFamily, request: FixtureRequest
) -> None:
server_sock = socket.create_server(("localhost", 0), family=family)
request.addfinalizer(server_sock.close)
server_sock.settimeout(1)
server_addr = server_sock.getsockname()[:2]
async with await connect_tcp(*server_addr) as stream:
client_sock, _ = server_sock.accept()
client_sock.close()
with pytest.raises(EndOfStream):
await stream.receive(1)
with pytest.raises(ClosedResourceError):
await stream.receive(1)
async def test_send_after_peer_closed(
self, family: AnyIPAddressFamily, request: FixtureRequest
) -> None:
server_sock = socket.create_server(("localhost", 0), family=family)
request.addfinalizer(server_sock.close)
server_sock.settimeout(1)
server_addr = server_sock.getsockname()[:2]
async with await connect_tcp(*server_addr) as stream:
client_sock, _ = server_sock.accept()
client_sock.close()
with pytest.raises(BrokenResourceError):
for _ in range(1000):
await stream.send(b"foo")
with pytest.raises(ClosedResourceError):
await stream.send(b"foo")
async def test_connect_tcp_with_tls(
self,
server_context: SSLContext,
client_context: SSLContext,
server_sock: socket.socket,
server_addr: tuple[str, int],
) -> None:
def serve() -> None:
with suppress(socket.timeout):
client, addr = server_sock.accept()
client.settimeout(1)
client = server_context.wrap_socket(client, server_side=True)
data = client.recv(100)
client.sendall(data[::-1])
client.unwrap()
client.close()
# The TLSStream tests are more comprehensive than this one!
thread = Thread(target=serve, daemon=True)
thread.start()
async with await connect_tcp(
*server_addr, tls_hostname="localhost", ssl_context=client_context
) as stream:
await stream.send(b"hello")
response = await stream.receive()
assert response == b"olleh"
thread.join()
async def test_connect_tcp_with_tls_cert_check_fail(
self,
server_context: SSLContext,
server_sock: socket.socket,
server_addr: tuple[str, int],
) -> None:
thread_exception = None
def serve() -> None:
nonlocal thread_exception
client, addr = server_sock.accept()
with client:
client.settimeout(1)
try:
server_context.wrap_socket(client, server_side=True)
except OSError:
pass
except BaseException as exc:
thread_exception = exc
thread = Thread(target=serve, daemon=True)
thread.start()
with pytest.raises(SSLError):
await connect_tcp(*server_addr, tls_hostname="localhost")
thread.join()
assert thread_exception is None
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_unretrieved_future_exception_server_crash(
self, family: AnyIPAddressFamily, caplog: LogCaptureFixture
) -> None:
"""
Test that there won't be any leftover Futures that don't get their exceptions
retrieved.
See https://github.com/encode/httpcore/issues/382 for details.
"""
def serve() -> None:
sock, addr = server_sock.accept()
event.wait(3)
sock.close()
del sock
gc.collect()
with socket.socket(family, socket.SOCK_STREAM) as server_sock:
server_sock.settimeout(1)
server_sock.bind(("localhost", 0))
server_sock.listen()
server_addr = server_sock.getsockname()[:2]
event = threading.Event()
thread = Thread(target=serve)
thread.start()
async with await connect_tcp(*server_addr) as stream:
await stream.send(b"GET")
event.set()
with pytest.raises(BrokenResourceError):
await stream.receive()
thread.join()
gc.collect()
caplog_text = "\n".join(
msg
for msg in caplog.messages
if not re.search("took [0-9.]+ seconds", msg)
)
assert not caplog_text
async def test_from_socket(
self, family: AnyIPAddressFamily, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(family, socket.SOCK_STREAM, connected=True)
async with await SocketStream.from_socket(sock_or_fd) as stream:
assert isinstance(stream, SocketStream)
assert stream.extra(SocketAttribute.family) == family
async def test_from_socket_not_a_socket(self) -> None:
with pytest.raises(
TypeError,
match="expected an int or socket, got str instead",
):
await SocketStream.from_socket("foo") # type: ignore[arg-type]
async def test_from_socket_pass_file_fd(self, tmp_path: Path) -> None:
with pytest.raises(
ValueError,
match="the file descriptor does not refer to a socket",
):
with tmp_path.joinpath("foo").open("wb") as fd:
await SocketStream.from_socket(fd.fileno())
async def test_from_socket_wrong_socket_type(
self, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(
socket.AF_INET, socket.SocketKind.SOCK_DGRAM, connected=True
)
with pytest.raises(
ValueError,
match="socket type mismatch: expected SOCK_STREAM, got SOCK_DGRAM",
):
await SocketStream.from_socket(sock_or_fd)
async def test_from_socket_not_connected(
self, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(socket.AF_INET, socket.SOCK_STREAM, bound=True)
with pytest.raises(ValueError, match="the socket must be connected"):
await SocketStream.from_socket(sock_or_fd)
@pytest.mark.network
class TestTCPListener:
async def test_extra_attributes(self, family: AnyIPAddressFamily) -> None:
async with await create_tcp_listener(
local_host="localhost", family=family
) as multi:
assert multi.extra(SocketAttribute.family) == family
for listener in multi.listeners:
raw_socket = listener.extra(SocketAttribute.raw_socket)
assert listener.extra(SocketAttribute.family) == family
assert (
listener.extra(SocketAttribute.local_address)
== raw_socket.getsockname()[:2]
)
assert (
listener.extra(SocketAttribute.local_port)
== raw_socket.getsockname()[1]
)
pytest.raises(
TypedAttributeLookupError,
listener.extra,
SocketAttribute.remote_address,
)
pytest.raises(
TypedAttributeLookupError,
listener.extra,
SocketAttribute.remote_port,
)
@pytest.mark.parametrize(
"family",
[
pytest.param(AddressFamily.AF_INET, id="ipv4"),
pytest.param(AddressFamily.AF_INET6, id="ipv6", marks=[skip_ipv6_mark]),
pytest.param(socket.AF_UNSPEC, id="both", marks=[skip_ipv6_mark]),
],
)
async def test_accept(self, family: AnyIPAddressFamily) -> None:
async with await create_tcp_listener(
local_host="localhost", family=family
) as multi:
for listener in multi.listeners:
client = socket.socket(listener.extra(SocketAttribute.family))
client.settimeout(1)
addr = listener.extra(SocketAttribute.local_address)
host, port = addr[0], addr[1]
# On Windows, connecting to ANY (:: or 0.0.0.0) is invalid (WinError 10049).
# Replace it with loopback (::1 or 127.0.0.1) to make the test portable.
if sys.platform == "win32" and host in ("::", "0.0.0.0"):
family = listener.extra(SocketAttribute.family)
if family == socket.AF_INET6:
addr = ("::1", port)
elif family == socket.AF_INET:
addr = ("127.0.0.1", port)
client.connect(addr)
assert isinstance(listener, SocketListener)
stream = await listener.accept()
client.sendall(b"blah")
request = await stream.receive()
await stream.send(request[::-1])
assert client.recv(100) == b"halb"
client.close()
await stream.aclose()
async def test_accept_after_close(self, family: AnyIPAddressFamily) -> None:
async with await create_tcp_listener(
local_host="localhost", family=family
) as multi:
for listener in multi.listeners:
await listener.aclose()
assert isinstance(listener, SocketListener)
with pytest.raises(ClosedResourceError):
await listener.accept()
async def test_socket_options(self, family: AnyIPAddressFamily) -> None:
async with await create_tcp_listener(
local_host="localhost", family=family
) as multi:
for listener in multi.listeners:
raw_socket = listener.extra(SocketAttribute.raw_socket)
if sys.platform == "win32":
assert (
raw_socket.getsockopt(
socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE
)
!= 0
)
else:
assert (
raw_socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
!= 0
)
raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 80000)
assert raw_socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) in (
80000,
160000,
)
client = socket.socket(raw_socket.family)
client.settimeout(1)
client.connect(raw_socket.getsockname())
assert isinstance(listener, SocketListener)
async with await listener.accept() as stream:
raw_socket = stream.extra(SocketAttribute.raw_socket)
assert raw_socket.gettimeout() == 0
assert raw_socket.family == listener.extra(SocketAttribute.family)
assert (
raw_socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
!= 0
)
client.close()
@pytest.mark.skipif(
not hasattr(socket, "SO_REUSEPORT"), reason="SO_REUSEPORT option not supported"
)
async def test_reuse_port(self, family: AnyIPAddressFamily) -> None:
multi1 = await create_tcp_listener(
local_host="localhost", family=family, reuse_port=True
)
assert len(multi1.listeners) == 1
multi2 = await create_tcp_listener(
local_host="localhost",
local_port=multi1.listeners[0].extra(SocketAttribute.local_port),
family=family,
reuse_port=True,
)
assert len(multi2.listeners) == 1
assert multi1.listeners[0].extra(
SocketAttribute.local_address
) == multi2.listeners[0].extra(SocketAttribute.local_address)
await multi1.aclose()
await multi2.aclose()
async def test_close_from_other_task(self, family: AnyIPAddressFamily) -> None:
listener = await create_tcp_listener(local_host="localhost", family=family)
with pytest.raises(ExceptionGroup) as exc:
async with create_task_group() as tg:
tg.start_soon(listener.serve, lambda stream: None)
await wait_all_tasks_blocked()
await listener.aclose()
tg.cancel_scope.cancel()
assert len(exc.value.exceptions) == 1
assert isinstance(exc.value.exceptions[0], ExceptionGroup)
nested_grp = exc.value.exceptions[0]
assert len(nested_grp.exceptions) == 1
assert isinstance(nested_grp.exceptions[0], ExceptionGroup)
async def test_send_after_eof(self, family: AnyIPAddressFamily) -> None:
async def handle(stream: SocketStream) -> None:
async with stream:
await stream.send(b"Hello\n")
multi = await create_tcp_listener(family=family, local_host="localhost")
async with multi, create_task_group() as tg:
tg.start_soon(multi.serve, handle)
await wait_all_tasks_blocked()
with socket.socket(family) as client:
client.connect(multi.extra(SocketAttribute.local_address))
client.shutdown(socket.SHUT_WR)
client.setblocking(False)
with fail_after(1):
while True:
try:
message = client.recv(10)
except BlockingIOError:
await checkpoint()
else:
assert message == b"Hello\n"
break
tg.cancel_scope.cancel()
async def test_eof_after_send(self, family: AnyIPAddressFamily) -> None:
"""Regression test for #701."""
received_bytes = b""
async def handle(stream: SocketStream) -> None:
nonlocal received_bytes
async with stream:
received_bytes = await stream.receive()
with pytest.raises(EndOfStream), fail_after(1):
await stream.receive()
tg.cancel_scope.cancel()
multi = await create_tcp_listener(family=family, local_host="localhost")
async with multi, create_task_group() as tg:
with socket.socket(family) as client:
client.connect(multi.extra(SocketAttribute.local_address))
client.send(b"Hello")
client.shutdown(socket.SHUT_WR)
await multi.serve(handle)
assert received_bytes == b"Hello"
@skip_ipv6_mark
@pytest.mark.skipif(
sys.platform == "win32",
reason="Windows does not support interface name suffixes",
)
async def test_bind_link_local(self) -> None:
# Regression test for #554
link_local_ipv6_address = next(
(
addr.address
for addresses in psutil.net_if_addrs().values()
for addr in addresses
if addr.address.startswith("fe80::") and "%" in addr.address
),
None,
)
if link_local_ipv6_address is None:
pytest.fail("Could not find a link-local IPv6 interface")
async with await create_tcp_listener(local_host=link_local_ipv6_address):
pass
async def test_from_socket(
self, family: AnyIPAddressFamily, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(family, socket.SOCK_STREAM, bound=True)
async with await SocketListener.from_socket(sock_or_fd) as listener:
assert isinstance(listener, SocketListener)
assert listener.extra(SocketAttribute.family) == family
async def test_from_socket_not_bound(
self, family: AnyIPAddressFamily, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(family, socket.SOCK_STREAM)
with pytest.raises(ValueError, match="the socket must be bound"):
await SocketListener.from_socket(sock_or_fd)
@pytest.mark.parametrize(
"local_host,family,expected_listeners,local_port",
[
(None, AddressFamily.AF_UNSPEC, 1 if socket.has_dualstack_ipv6() else 2, 0),
("localhost", AddressFamily.AF_UNSPEC, 2, 0),
("localhost", AddressFamily.AF_INET, 1, 0),
("::", AddressFamily.AF_UNSPEC, 1, 0),
("127.0.0.1", AddressFamily.AF_INET, 1, 0),
("::1", AddressFamily.AF_INET6, 1, 0),
(
None,
AddressFamily.AF_UNSPEC,
1 if socket.has_dualstack_ipv6() else 2,
54321,
),
("localhost", AddressFamily.AF_UNSPEC, 2, 54321),
("localhost", AddressFamily.AF_INET, 1, 54321),
],
)
async def test_tcp_listener_same_port(
self,
local_host: str | None,
family: AnyIPAddressFamily,
expected_listeners: int,
local_port: int,
) -> None:
async with await create_tcp_listener(
local_host=local_host, family=family, local_port=local_port
) as multi:
ports = {
listener.extra(SocketAttribute.local_port)
for listener in multi.listeners
}
assert len(ports) == 1
if local_port != 0:
assert ports == {local_port}
assert len(multi.listeners) == expected_listeners
@skip_ipv6_mark
async def test_tcp_listener_dualstack_disabled(
self, monkeypatch: MonkeyPatch
) -> None:
monkeypatch.setattr(socket, "has_dualstack_ipv6", lambda: False)
async with await create_tcp_listener(
local_host=None, family=AddressFamily.AF_UNSPEC
) as multi:
families = {
listener.extra(SocketAttribute.family) for listener in multi.listeners
}
assert families == {socket.AF_INET, socket.AF_INET6}
assert len(multi.listeners) == 2
ports = {
listener.extra(SocketAttribute.local_port)
for listener in multi.listeners
}
assert len(ports) == 1
async def test_tcp_listener_retry_after_partial_failure(self) -> None:
"""
Simulate a case where the first bind() succeeds with an ephemeral port,
the second bind() fails with EADDRINUSE, and verify that create_tcp_listener
retries and eventually succeeds with all listeners bound to the same port.
"""
real_bind = socket.socket.bind
bind_count = 0
fail_once = True
def fake_bind(
self: socket.socket, addr: tuple[str, int] | tuple[str, int, int, int]
) -> None:
nonlocal bind_count, fail_once
port = addr[1] if isinstance(addr, tuple) and len(addr) >= 2 else None
bind_count += 1
if bind_count == 1 and port == 0:
return real_bind(self, addr)
if fail_once and port != 0:
fail_once = False
raise OSError(errno.EADDRINUSE, "simulated collision on second bind")
return real_bind(self, addr)
with patch.object(socket.socket, "bind", new=fake_bind):
async with await create_tcp_listener(
local_host="localhost", family=socket.AF_UNSPEC, local_port=0
) as multi:
assert bind_count >= 2
ports = {
listener.extra(SocketAttribute.local_port)
for listener in multi.listeners
}
assert len(ports) == 1
assert all(
isinstance(listener, SocketListener) for listener in multi.listeners
)
async def test_tcp_listener_total_bind_failure(self) -> None:
"""
Test for a situation where bind() always fails when other listeners are being
        bound to the same port as the first listener, which was randomly assigned a
        free port by the kernel.
"""
def raise_oserror(addr: tuple[str, int]) -> None:
# Pretend that every explicitly requested port is already in use
if addr[1] != 0:
raise OSError(errno.EADDRINUSE, "bind failure")
mock_socket_instance = MagicMock()
mock_socket_instance.bind.side_effect = raise_oserror
asynclib = get_async_backend()
with (
patch("anyio._core._sockets.socket") as mock_anyio_sockets,
patch.object(
asynclib, "create_tcp_listener", return_value=MagicMock(SocketListener)
),
pytest.raises(
OSError, match="Could not create 2 listeners with a consistent port"
),
):
mock_anyio_sockets.socket.configure_mock(return_value=mock_socket_instance)
await create_tcp_listener(local_host="localhost")
@pytest.mark.skipif(
sys.platform == "win32", reason="UNIX sockets are not available on Windows"
)
class TestUNIXStream:
@pytest.fixture(
params=[
"path",
pytest.param("abstract", marks=[skip_unix_abstract_mark]),
]
)
def socket_path(self, request: SubRequest) -> Generator[Path, None, None]:
# Use stdlib tempdir generation
        # Fixes `OSError: AF_UNIX path too long` from the pytest-generated tmp_path
with tempfile.TemporaryDirectory() as path:
if request.param == "path":
yield Path(path) / "socket"
else:
yield Path(f"\0{path}") / "socket"
@pytest.fixture(params=[False, True], ids=["str", "path"])
def socket_path_or_str(self, request: SubRequest, socket_path: Path) -> Path | str:
return socket_path if request.param else str(socket_path)
@pytest.fixture
def server_sock(self, socket_path: Path) -> Iterable[socket.socket]:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(1)
sock.bind(str(socket_path))
sock.listen()
yield sock
sock.close()
async def test_extra_attributes(
self, server_sock: socket.socket, socket_path: Path
) -> None:
async with await connect_unix(socket_path) as stream:
raw_socket = stream.extra(SocketAttribute.raw_socket)
assert stream.extra(SocketAttribute.family) == socket.AF_UNIX
assert (
stream.extra(SocketAttribute.local_address) == raw_socket.getsockname()
)
remote_addr = stream.extra(SocketAttribute.remote_address)
if isinstance(remote_addr, str):
assert stream.extra(SocketAttribute.remote_address) == str(socket_path)
else:
assert isinstance(remote_addr, bytes)
assert stream.extra(SocketAttribute.remote_address) == bytes(
socket_path
)
pytest.raises(
TypedAttributeLookupError, stream.extra, SocketAttribute.local_port
)
pytest.raises(
TypedAttributeLookupError, stream.extra, SocketAttribute.remote_port
)
async def test_send_receive(
self, server_sock: socket.socket, socket_path_or_str: Path | str
) -> None:
async with await connect_unix(socket_path_or_str) as stream:
client, _ = server_sock.accept()
await stream.send(b"blah")
request = client.recv(100)
client.sendall(request[::-1])
response = await stream.receive()
client.close()
assert response == b"halb"
async def test_receive_large_buffer(
self, server_sock: socket.socket, socket_path: Path
) -> None:
def serve() -> None:
client, _ = server_sock.accept()
client.sendall(buffer)
client.close()
buffer = (
b"\xff" * 1024 * 512 + b"\x00" * 1024 * 512
) # should exceed the maximum kernel send buffer size
async with await connect_unix(socket_path) as stream:
thread = Thread(target=serve, daemon=True)
thread.start()
response = b""
while len(response) < len(buffer):
response += await stream.receive()
thread.join()
assert response == buffer
async def test_send_large_buffer(
self, server_sock: socket.socket, socket_path: Path
) -> None:
response = b""
def serve() -> None:
nonlocal response
client, _ = server_sock.accept()
while True:
data = client.recv(1024)
if not data:
break
response += data
client.close()
buffer = (
b"\xff" * 1024 * 512 + b"\x00" * 1024 * 512
) # should exceed the maximum kernel send buffer size
async with await connect_unix(socket_path) as stream:
thread = Thread(target=serve, daemon=True)
thread.start()
await stream.send(buffer)
thread.join()
assert response == buffer
async def test_receive_fds(
self, server_sock: socket.socket, socket_path: Path, tmp_path: Path
) -> None:
def serve() -> None:
path1 = tmp_path / "file1"
path2 = tmp_path / "file2"
path1.write_text("Hello, ")
path2.write_text("World!")
with path1.open() as file1, path2.open() as file2:
fdarray = array.array("i", [file1.fileno(), file2.fileno()])
client, _ = server_sock.accept()
cmsg = (socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)
with client:
client.sendmsg([b"test"], [cmsg])
async with await connect_unix(socket_path) as stream:
thread = Thread(target=serve, daemon=True)
thread.start()
message, fds = await stream.receive_fds(10, 2)
thread.join()
text = ""
for fd in fds:
with os.fdopen(fd) as file:
text += file.read()
assert message == b"test"
assert text == "Hello, World!"
async def test_receive_fds_bad_args(
self, server_sock: socket.socket, socket_path: Path
) -> None:
async with await connect_unix(socket_path) as stream:
for msglen in (-1, "foo"):
with pytest.raises(
ValueError, match="msglen must be a non-negative integer"
):
await stream.receive_fds(msglen, 0) # type: ignore[arg-type]
for maxfds in (0, "foo"):
with pytest.raises(
ValueError, match="maxfds must be a positive integer"
):
await stream.receive_fds(0, maxfds) # type: ignore[arg-type]
async def test_send_fds(
self, server_sock: socket.socket, socket_path: Path, tmp_path: Path
) -> None:
def serve() -> None:
fds = array.array("i")
client, _ = server_sock.accept()
msg, ancdata, *_ = client.recvmsg(10, socket.CMSG_LEN(2 * fds.itemsize))
client.close()
assert msg == b"test"
for cmsg_level, cmsg_type, cmsg_data in ancdata:
assert cmsg_level == socket.SOL_SOCKET
assert cmsg_type == socket.SCM_RIGHTS
fds.frombytes(
cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]
)
text = ""
for fd in fds:
with os.fdopen(fd) as file:
text += file.read()
assert text == "Hello, World!"
path1 = tmp_path / "file1"
path2 = tmp_path / "file2"
path1.write_text("Hello, ")
path2.write_text("World!")
with path1.open() as file1, path2.open() as file2, fail_after(2):
assert isinstance(file1, io.TextIOWrapper)
assert isinstance(file2, io.TextIOWrapper)
async with await connect_unix(socket_path) as stream:
thread = Thread(target=serve, daemon=True)
thread.start()
await stream.send_fds(b"test", [file1, file2])
thread.join()
async def test_send_eof(
self, server_sock: socket.socket, socket_path: Path
) -> None:
def serve() -> None:
client, _ = server_sock.accept()
request = b""
while True:
data = client.recv(100)
request += data
if not data:
break
client.sendall(request[::-1])
client.close()
async with await connect_unix(socket_path) as stream:
thread = Thread(target=serve, daemon=True)
thread.start()
await stream.send(b"hello, ")
await stream.send(b"world\n")
await stream.send_eof()
response = await stream.receive()
thread.join()
assert response == b"\ndlrow ,olleh"
async def test_iterate(self, server_sock: socket.socket, socket_path: Path) -> None:
def serve() -> None:
client, _ = server_sock.accept()
client.sendall(b"bl")
time.sleep(0.05)
client.sendall(b"ah")
client.close()
thread = Thread(target=serve, daemon=True)
thread.start()
async with await connect_unix(socket_path) as stream:
chunks = [chunk async for chunk in stream]
thread.join()
assert chunks == [b"bl", b"ah"]
async def test_send_fds_bad_args(
self, server_sock: socket.socket, socket_path: Path
) -> None:
async with await connect_unix(socket_path) as stream:
with pytest.raises(ValueError, match="message must not be empty"):
await stream.send_fds(b"", [0])
with pytest.raises(ValueError, match="fds must not be empty"):
await stream.send_fds(b"test", [])
async def test_concurrent_send(
self, server_sock: socket.socket, socket_path: Path
) -> None:
async def send_data() -> NoReturn:
while True:
await client.send(b"\x00" * 4096)
async with await connect_unix(socket_path) as client:
async with create_task_group() as tg:
tg.start_soon(send_data)
await wait_all_tasks_blocked()
with pytest.raises(BusyResourceError) as exc:
await client.send(b"foo")
exc.match("already writing to")
tg.cancel_scope.cancel()
async def test_concurrent_receive(
self, server_sock: socket.socket, socket_path: Path
) -> None:
async with await connect_unix(socket_path) as client:
async with create_task_group() as tg:
tg.start_soon(client.receive)
await wait_all_tasks_blocked()
try:
with pytest.raises(BusyResourceError) as exc:
await client.receive()
exc.match("already reading from")
finally:
tg.cancel_scope.cancel()
async def test_close_during_receive(
self, server_sock: socket.socket, socket_path: Path
) -> None:
async def interrupt() -> None:
await wait_all_tasks_blocked()
await stream.aclose()
async with await connect_unix(socket_path) as stream:
async with create_task_group() as tg:
tg.start_soon(interrupt)
with pytest.raises(ClosedResourceError):
await stream.receive()
async def test_receive_after_close(
self, server_sock: socket.socket, socket_path: Path
) -> None:
stream = await connect_unix(socket_path)
await stream.aclose()
with pytest.raises(ClosedResourceError):
await stream.receive()
async def test_send_after_close(
self, server_sock: socket.socket, socket_path: Path
) -> None:
stream = await connect_unix(socket_path)
await stream.aclose()
with pytest.raises(ClosedResourceError):
await stream.send(b"foo")
async def test_cannot_connect(self, socket_path: Path) -> None:
if str(socket_path).startswith("\0"):
with pytest.raises(ConnectionRefusedError):
await connect_unix(socket_path)
else:
with pytest.raises(FileNotFoundError):
await connect_unix(socket_path)
async def test_connecting_using_bytes(
self, server_sock: socket.socket, socket_path: Path
) -> None:
async with await connect_unix(str(socket_path).encode()):
pass
@pytest.mark.skipif(
platform.system() == "Darwin", reason="macOS requires valid UTF-8 paths"
)
async def test_connecting_with_non_utf8(self, socket_path: Path) -> None:
actual_path = str(socket_path).encode() + b"\xf0"
with socket.socket(socket.AF_UNIX) as server:
server.bind(actual_path)
server.listen(1)
async with await connect_unix(actual_path):
pass
async def test_from_socket(
self, socket_path: Path, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(
socket.AF_UNIX, socket.SOCK_STREAM, connected=True
)
async with await UNIXSocketStream.from_socket(sock_or_fd) as stream:
assert isinstance(stream, UNIXSocketStream)
async def test_from_socket_wrong_socket_type(
self, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(
socket.AF_UNIX, socket.SOCK_DGRAM, connected=True
)
with pytest.raises(
ValueError,
match="socket type mismatch: expected SOCK_STREAM, got SOCK_DGRAM",
):
await UNIXSocketStream.from_socket(sock_or_fd)
async def test_from_socket_wrong_address_family(
self, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(
socket.AF_INET, socket.SOCK_STREAM, connected=True
)
with pytest.raises(
ValueError,
match="address family mismatch: expected AF_UNIX, got AF_INET",
):
await UNIXSocketStream.from_socket(sock_or_fd)
async def test_from_socket_not_connected(
self, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(socket.AF_UNIX, socket.SOCK_STREAM)
with pytest.raises(ValueError, match="the socket must be connected"):
await UNIXSocketStream.from_socket(sock_or_fd)
@pytest.mark.skipif(
sys.platform == "win32", reason="UNIX sockets are not available on Windows"
)
class TestUNIXListener:
@pytest.fixture(
params=[
"path",
pytest.param("abstract", marks=[skip_unix_abstract_mark]),
]
)
def socket_path(self, request: SubRequest) -> Generator[Path, None, None]:
# Use stdlib tempdir generation
        # Fixes `OSError: AF_UNIX path too long` from the pytest-generated tmp_path
with tempfile.TemporaryDirectory() as path:
if request.param == "path":
yield Path(path) / "socket"
else:
yield Path(f"\0{path}") / "socket"
@pytest.fixture(params=[False, True], ids=["str", "path"])
def socket_path_or_str(self, request: SubRequest, socket_path: Path) -> Path | str:
return socket_path if request.param else str(socket_path)
async def test_extra_attributes(self, socket_path: Path) -> None:
async with await create_unix_listener(socket_path) as listener:
raw_socket = listener.extra(SocketAttribute.raw_socket)
assert listener.extra(SocketAttribute.family) == socket.AF_UNIX
assert (
listener.extra(SocketAttribute.local_address)
== raw_socket.getsockname()
)
pytest.raises(
TypedAttributeLookupError, listener.extra, SocketAttribute.local_port
)
pytest.raises(
TypedAttributeLookupError,
listener.extra,
SocketAttribute.remote_address,
)
pytest.raises(
TypedAttributeLookupError, listener.extra, SocketAttribute.remote_port
)
async def test_accept(self, socket_path_or_str: Path | str) -> None:
async with await create_unix_listener(socket_path_or_str) as listener:
client = socket.socket(socket.AF_UNIX)
client.settimeout(1)
client.connect(str(socket_path_or_str))
stream = await listener.accept()
client.sendall(b"blah")
request = await stream.receive()
await stream.send(request[::-1])
assert client.recv(100) == b"halb"
client.close()
await stream.aclose()
async def test_socket_options(self, socket_path: Path) -> None:
async with await create_unix_listener(socket_path) as listener:
listener_socket = listener.extra(SocketAttribute.raw_socket)
assert listener_socket.family == socket.AF_UNIX
listener_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 80000)
assert listener_socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) in (
80000,
160000,
)
client = socket.socket(listener_socket.family)
client.settimeout(1)
client.connect(listener_socket.getsockname())
async with await listener.accept() as stream:
assert stream.extra(SocketAttribute.raw_socket).gettimeout() == 0
assert stream.extra(SocketAttribute.family) == listener_socket.family
client.close()
async def test_send_after_eof(self, socket_path: Path) -> None:
async def handle(stream: SocketStream) -> None:
async with stream:
await stream.send(b"Hello\n")
async with (
await create_unix_listener(socket_path) as listener,
create_task_group() as tg,
):
tg.start_soon(listener.serve, handle)
await wait_all_tasks_blocked()
with socket.socket(socket.AF_UNIX) as client:
client.connect(str(socket_path))
client.shutdown(socket.SHUT_WR)
client.setblocking(False)
with fail_after(1):
while True:
try:
message = client.recv(10)
except BlockingIOError:
await checkpoint()
else:
assert message == b"Hello\n"
break
tg.cancel_scope.cancel()
async def test_bind_twice(self, socket_path: Path) -> None:
"""Test that the previous socket is removed before binding to the path."""
for _ in range(2):
async with await create_unix_listener(socket_path):
pass
async def test_listening_bytes_path(self, socket_path: Path) -> None:
async with await create_unix_listener(str(socket_path).encode()):
pass
@pytest.mark.skipif(
platform.system() == "Darwin", reason="macOS requires valid UTF-8 paths"
)
async def test_listening_invalid_ascii(self, socket_path: Path) -> None:
real_path = str(socket_path).encode() + b"\xf0"
async with await create_unix_listener(real_path):
pass
async def test_from_socket(self, sock_or_fd_factory: SockFdFactoryProtocol) -> None:
sock_or_fd = sock_or_fd_factory(socket.AF_UNIX, socket.SOCK_STREAM, bound=True)
async with await SocketListener.from_socket(sock_or_fd) as listener:
assert isinstance(listener, SocketListener)
assert listener.extra(SocketAttribute.family) == socket.AF_UNIX
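# Serve a TCP listener and, on platforms that support it, a UNIX listener through
# a single MultiListener, and verify that each accepted connection reports the
# expected client address.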
async def test_multi_listener(tmp_path_factory: TempPathFactory) -> None:
async def handle(stream: SocketStream) -> None:
client_addresses.append(stream.extra(SocketAttribute.remote_address))
event.set()
await stream.aclose()
client_addresses: list[str | IPSockAddrType] = []
listeners: list[Listener] = [await create_tcp_listener(local_host="localhost")]
with tempfile.TemporaryDirectory() as path:
if sys.platform != "win32":
listeners.append(await create_unix_listener(Path(path) / "socket"))
expected_addresses: list[str | IPSockAddrType] = []
async with MultiListener(listeners) as multi_listener:
async with create_task_group() as tg:
tg.start_soon(multi_listener.serve, handle)
for listener in multi_listener.listeners:
event = Event()
local_address = listener.extra(SocketAttribute.local_address)
if (
sys.platform != "win32"
and listener.extra(SocketAttribute.family) == socket.AF_UNIX
):
assert isinstance(local_address, str)
stream: SocketStream = await connect_unix(local_address)
else:
assert isinstance(local_address, tuple)
host, port = local_address
host = (
"::1"
if listener.extra(SocketAttribute.family) == socket.AF_INET6
else "127.0.0.1"
)
stream = await connect_tcp(host, port)
expected_addresses.append(
stream.extra(SocketAttribute.local_address)
)
await event.wait()
await stream.aclose()
tg.cancel_scope.cancel()
assert client_addresses == expected_addresses
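# A minimal usage sketch (not part of the test suite): MultiListener serves the
# same handler on several listeners at once. Assumes a POSIX platform for the
# UNIX listener; `_serve_everywhere` is a hypothetical helper, not anyio API.
async def _serve_everywhere() -> None:
    async def handler(stream: SocketStream) -> None:
        await stream.send(b"hello\n")
        await stream.aclose()

    listeners: list[Listener] = [
        await create_tcp_listener(local_host="localhost"),
        await create_unix_listener("/tmp/demo.sock"),
    ]
    async with MultiListener(listeners) as multi:
        await multi.serve(handler)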
@pytest.mark.network
@pytest.mark.usefixtures("check_asyncio_bug")
class TestUDPSocket:
async def test_extra_attributes(self, family: AnyIPAddressFamily) -> None:
async with await create_udp_socket(
family=family, local_host="localhost"
) as udp:
raw_socket = udp.extra(SocketAttribute.raw_socket)
assert raw_socket.gettimeout() == 0
assert udp.extra(SocketAttribute.family) == family
assert (
udp.extra(SocketAttribute.local_address) == raw_socket.getsockname()[:2]
)
assert udp.extra(SocketAttribute.local_port) == raw_socket.getsockname()[1]
pytest.raises(
TypedAttributeLookupError, udp.extra, SocketAttribute.remote_address
)
pytest.raises(
TypedAttributeLookupError, udp.extra, SocketAttribute.remote_port
)
async def test_send_receive(self, family: AnyIPAddressFamily) -> None:
async with await create_udp_socket(
local_host="localhost", family=family
) as sock:
host, port = sock.extra(SocketAttribute.local_address) # type: ignore[misc]
await sock.sendto(b"blah", host, port)
request, addr = await sock.receive()
assert request == b"blah"
assert addr == sock.extra(SocketAttribute.local_address)
await sock.sendto(b"halb", host, port)
response, addr = await sock.receive()
assert response == b"halb"
assert addr == (host, port)
async def test_iterate(self, family: AnyIPAddressFamily) -> None:
async def serve() -> None:
async for packet, addr in server:
await server.send((packet[::-1], addr))
async with await create_udp_socket(
family=family, local_host="localhost"
) as server:
host, port = server.extra( # type: ignore[misc]
SocketAttribute.local_address
)
async with await create_udp_socket(
family=family, local_host="localhost"
) as client:
async with create_task_group() as tg:
tg.start_soon(serve)
await client.sendto(b"FOOBAR", host, port)
assert await client.receive() == (b"RABOOF", (host, port))
await client.sendto(b"123456", host, port)
assert await client.receive() == (b"654321", (host, port))
tg.cancel_scope.cancel()
@pytest.mark.skipif(
not hasattr(socket, "SO_REUSEPORT"), reason="SO_REUSEPORT option not supported"
)
async def test_reuse_port(self, family: AnyIPAddressFamily) -> None:
async with await create_udp_socket(
family=family, local_host="localhost", reuse_port=True
) as udp:
port = udp.extra(SocketAttribute.local_port)
assert port != 0
async with await create_udp_socket(
family=family, local_host="localhost", local_port=port, reuse_port=True
) as udp2:
assert port == udp2.extra(SocketAttribute.local_port)
async def test_concurrent_receive(self) -> None:
async with await create_udp_socket(
family=AddressFamily.AF_INET, local_host="localhost"
) as udp:
async with create_task_group() as tg:
tg.start_soon(udp.receive)
await wait_all_tasks_blocked()
try:
with pytest.raises(BusyResourceError) as exc:
await udp.receive()
exc.match("already reading from")
finally:
tg.cancel_scope.cancel()
async def test_close_during_receive(self) -> None:
async def close_when_blocked() -> None:
await wait_all_tasks_blocked()
await udp.aclose()
async with await create_udp_socket(
family=AddressFamily.AF_INET, local_host="localhost"
) as udp:
async with create_task_group() as tg:
tg.start_soon(close_when_blocked)
with pytest.raises(ClosedResourceError):
await udp.receive()
async def test_receive_after_close(self) -> None:
udp = await create_udp_socket(
family=AddressFamily.AF_INET, local_host="localhost"
)
await udp.aclose()
with pytest.raises(ClosedResourceError):
await udp.receive()
async def test_send_after_close(self) -> None:
udp = await create_udp_socket(
family=AddressFamily.AF_INET, local_host="localhost"
)
host, port = udp.extra(SocketAttribute.local_address) # type: ignore[misc]
await udp.aclose()
with pytest.raises(ClosedResourceError):
await udp.sendto(b"foo", host, port)
async def test_create_unbound_socket(self, family: AnyIPAddressFamily) -> None:
"""Regression test for #360."""
async with await create_udp_socket(family=family) as udp:
local_address = cast(
IPSockAddrType, udp.extra(SocketAttribute.local_address)
)
assert local_address[1] > 0
async def test_from_socket(
self, family: AnyIPAddressFamily, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(family, socket.SOCK_DGRAM, bound=True)
async with await UDPSocket.from_socket(sock_or_fd) as udp_socket:
assert isinstance(udp_socket, UDPSocket)
assert udp_socket.extra(SocketAttribute.family) == family
async def test_from_socket_wrong_socket_type(
self, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(
socket.AF_INET, socket.SOCK_STREAM, connected=True
)
with pytest.raises(
ValueError,
match="socket type mismatch: expected SOCK_DGRAM, got SOCK_STREAM",
):
await UDPSocket.from_socket(sock_or_fd)
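# A minimal sketch (not part of the test suite): a UDPSocket is an async
# iterable of (payload, address) pairs, so an echo server reduces to a single
# loop. `_udp_echo_server` is a hypothetical helper, not anyio API.
async def _udp_echo_server() -> None:
    async with await create_udp_socket(local_host="127.0.0.1") as sock:
        async for payload, addr in sock:
            # UDPSocket.send() takes a (data, address) tuple
            await sock.send((payload, addr))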
@pytest.mark.network
@pytest.mark.usefixtures("check_asyncio_bug")
class TestConnectedUDPSocket:
async def test_extra_attributes(self, family: AnyIPAddressFamily) -> None:
async with await create_connected_udp_socket(
"localhost", 5000, family=family
) as udp:
raw_socket = udp.extra(SocketAttribute.raw_socket)
assert udp.extra(SocketAttribute.family) == family
assert (
udp.extra(SocketAttribute.local_address) == raw_socket.getsockname()[:2]
)
assert udp.extra(SocketAttribute.local_port) == raw_socket.getsockname()[1]
assert (
udp.extra(SocketAttribute.remote_address)
== raw_socket.getpeername()[:2]
)
assert udp.extra(SocketAttribute.remote_port) == 5000
async def test_send_receive(self, family: AnyIPAddressFamily) -> None:
async with await create_udp_socket(
family=family, local_host="localhost"
) as udp1:
host, port = udp1.extra(SocketAttribute.local_address) # type: ignore[misc]
async with await create_connected_udp_socket(
host, port, local_host="localhost", family=family
) as udp2:
host, port = udp2.extra(
SocketAttribute.local_address # type: ignore[misc]
)
await udp2.send(b"blah")
request = await udp1.receive()
assert request == (b"blah", (host, port))
await udp1.sendto(b"halb", host, port)
response = await udp2.receive()
assert response == b"halb"
async def test_iterate(self, family: AnyIPAddressFamily) -> None:
async def serve() -> None:
async for packet in udp2:
await udp2.send(packet[::-1])
async with await create_udp_socket(
family=family, local_host="localhost"
) as udp1:
host, port = udp1.extra(SocketAttribute.local_address) # type: ignore[misc]
async with await create_connected_udp_socket(host, port) as udp2:
host, port = udp2.extra( # type: ignore[misc]
SocketAttribute.local_address
)
async with create_task_group() as tg:
tg.start_soon(serve)
await udp1.sendto(b"FOOBAR", host, port)
assert await udp1.receive() == (b"RABOOF", (host, port))
await udp1.sendto(b"123456", host, port)
assert await udp1.receive() == (b"654321", (host, port))
tg.cancel_scope.cancel()
@pytest.mark.skipif(
not hasattr(socket, "SO_REUSEPORT"), reason="SO_REUSEPORT option not supported"
)
async def test_reuse_port(self, family: AnyIPAddressFamily) -> None:
async with await create_connected_udp_socket(
"localhost", 6000, family=family, local_host="localhost", reuse_port=True
) as udp:
port = udp.extra(SocketAttribute.local_port)
assert port != 0
async with await create_connected_udp_socket(
"localhost",
6001,
family=family,
local_host="localhost",
local_port=port,
reuse_port=True,
) as udp2:
assert port == udp2.extra(SocketAttribute.local_port)
async def test_concurrent_receive(self) -> None:
async with await create_connected_udp_socket(
"localhost", 5000, local_host="localhost", family=AddressFamily.AF_INET
) as udp:
async with create_task_group() as tg:
tg.start_soon(udp.receive)
await wait_all_tasks_blocked()
try:
with pytest.raises(BusyResourceError) as exc:
await udp.receive()
exc.match("already reading from")
finally:
tg.cancel_scope.cancel()
async def test_close_during_receive(self) -> None:
async def close_when_blocked() -> None:
await wait_all_tasks_blocked()
await udp.aclose()
async with await create_connected_udp_socket(
"localhost", 5000, local_host="localhost", family=AddressFamily.AF_INET
) as udp:
async with create_task_group() as tg:
tg.start_soon(close_when_blocked)
with pytest.raises(ClosedResourceError):
await udp.receive()
async def test_receive_after_close(self, family: AnyIPAddressFamily) -> None:
udp = await create_connected_udp_socket(
"localhost", 5000, local_host="localhost", family=family
)
await udp.aclose()
with pytest.raises(ClosedResourceError):
await udp.receive()
async def test_send_after_close(self, family: AnyIPAddressFamily) -> None:
udp = await create_connected_udp_socket(
"localhost", 5000, local_host="localhost", family=family
)
await udp.aclose()
with pytest.raises(ClosedResourceError):
await udp.send(b"foo")
async def test_from_socket(
self, family: AnyIPAddressFamily, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(family, socket.SOCK_DGRAM, connected=True)
async with await ConnectedUDPSocket.from_socket(sock_or_fd) as udp_socket:
assert isinstance(udp_socket, ConnectedUDPSocket)
assert udp_socket.extra(SocketAttribute.family) == family
async def test_from_socket_wrong_socket_type(
self, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(
socket.AF_INET, socket.SOCK_STREAM, connected=True
)
with pytest.raises(
ValueError,
match="socket type mismatch: expected SOCK_DGRAM, got SOCK_STREAM",
):
await ConnectedUDPSocket.from_socket(sock_or_fd)
async def test_from_socket_not_connected(
self, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(socket.AF_INET, socket.SOCK_DGRAM, bound=True)
with pytest.raises(ValueError, match="the socket must be connected"):
await ConnectedUDPSocket.from_socket(sock_or_fd)
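# A minimal sketch (not part of the test suite): a connected UDP socket has a
# fixed peer, so send() and receive() work with bare payloads instead of
# (payload, address) tuples. `_connected_udp_ping` is a hypothetical helper.
async def _connected_udp_ping() -> None:
    async with await create_connected_udp_socket("127.0.0.1", 9999) as udp:
        await udp.send(b"ping")
        pong = await udp.receive()  # bytes only; the peer address is implied
        assert isinstance(pong, bytes)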
@pytest.mark.skipif(
sys.platform == "win32", reason="UNIX sockets are not available on Windows"
)
class TestUNIXDatagramSocket:
@pytest.fixture(
params=[
"path",
pytest.param("abstract", marks=[skip_unix_abstract_mark]),
]
)
def socket_path(self, request: SubRequest) -> Generator[Path, None, None]:
# Use stdlib tempdir generation
        # Fixes `OSError: AF_UNIX path too long` from the pytest-generated tmp_path
with tempfile.TemporaryDirectory() as path:
if request.param == "path":
yield Path(path) / "socket"
else:
yield Path(f"\0{path}") / "socket"
@pytest.fixture(params=[False, True], ids=["str", "path"])
def socket_path_or_str(self, request: SubRequest, socket_path: Path) -> Path | str:
return socket_path if request.param else str(socket_path)
@pytest.fixture
def peer_socket_path(self) -> Generator[Path, None, None]:
# Use stdlib tempdir generation
        # Fixes `OSError: AF_UNIX path too long` from the pytest-generated tmp_path
with tempfile.TemporaryDirectory() as path:
yield Path(path) / "peer_socket"
async def test_extra_attributes(self, socket_path: Path) -> None:
async with await create_unix_datagram_socket(local_path=socket_path) as unix_dg:
raw_socket = unix_dg.extra(SocketAttribute.raw_socket)
assert raw_socket.gettimeout() == 0
assert unix_dg.extra(SocketAttribute.family) == socket.AF_UNIX
assert (
unix_dg.extra(SocketAttribute.local_address) == raw_socket.getsockname()
)
pytest.raises(
TypedAttributeLookupError, unix_dg.extra, SocketAttribute.local_port
)
pytest.raises(
TypedAttributeLookupError, unix_dg.extra, SocketAttribute.remote_address
)
pytest.raises(
TypedAttributeLookupError, unix_dg.extra, SocketAttribute.remote_port
)
async def test_send_receive(self, socket_path_or_str: Path | str) -> None:
async with await create_unix_datagram_socket(
local_path=socket_path_or_str,
) as sock:
path = str(socket_path_or_str)
await sock.sendto(b"blah", path)
request, addr = await sock.receive()
assert request == b"blah"
if isinstance(addr, bytes):
assert addr == path.encode()
else:
assert addr == path
await sock.sendto(b"halb", path)
response, addr = await sock.receive()
assert response == b"halb"
if isinstance(addr, bytes):
assert addr == path.encode()
else:
assert addr == path
async def test_iterate(self, peer_socket_path: Path, socket_path: Path) -> None:
async def serve() -> None:
async for packet, addr in server:
await server.send((packet[::-1], addr))
async with await create_unix_datagram_socket(
local_path=peer_socket_path,
) as server:
peer_path = str(peer_socket_path)
async with await create_unix_datagram_socket(
local_path=socket_path
) as client:
async with create_task_group() as tg:
tg.start_soon(serve)
await client.sendto(b"FOOBAR", peer_path)
assert await client.receive() == (b"RABOOF", peer_path)
await client.sendto(b"123456", peer_path)
assert await client.receive() == (b"654321", peer_path)
tg.cancel_scope.cancel()
async def test_concurrent_receive(self) -> None:
async with await create_unix_datagram_socket() as unix_dg:
async with create_task_group() as tg:
tg.start_soon(unix_dg.receive)
await wait_all_tasks_blocked()
try:
with pytest.raises(BusyResourceError) as exc:
await unix_dg.receive()
exc.match("already reading from")
finally:
tg.cancel_scope.cancel()
async def test_close_during_receive(self) -> None:
async def close_when_blocked() -> None:
await wait_all_tasks_blocked()
await unix_dg.aclose()
async with await create_unix_datagram_socket() as unix_dg:
async with create_task_group() as tg:
tg.start_soon(close_when_blocked)
with pytest.raises(ClosedResourceError):
await unix_dg.receive()
async def test_receive_after_close(self) -> None:
unix_dg = await create_unix_datagram_socket()
await unix_dg.aclose()
with pytest.raises(ClosedResourceError):
await unix_dg.receive()
async def test_send_after_close(self, socket_path: Path) -> None:
unix_dg = await create_unix_datagram_socket(local_path=socket_path)
path = str(socket_path)
await unix_dg.aclose()
with pytest.raises(ClosedResourceError):
await unix_dg.sendto(b"foo", path)
async def test_local_path_bytes(self, socket_path: Path) -> None:
async with await create_unix_datagram_socket(
local_path=str(socket_path).encode()
):
pass
@pytest.mark.skipif(
platform.system() == "Darwin", reason="macOS requires valid UTF-8 paths"
)
async def test_local_path_invalid_ascii(self, socket_path: Path) -> None:
real_path = str(socket_path).encode() + b"\xf0"
async with await create_unix_datagram_socket(local_path=real_path):
pass
async def test_from_socket(self, sock_or_fd_factory: SockFdFactoryProtocol) -> None:
sock_or_fd = sock_or_fd_factory(socket.AF_UNIX, socket.SOCK_DGRAM)
async with await UNIXDatagramSocket.from_socket(sock_or_fd) as udp_socket:
assert isinstance(udp_socket, UNIXDatagramSocket)
assert udp_socket.extra(SocketAttribute.family) == socket.AF_UNIX
async def test_from_socket_wrong_socket_type(
self, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(socket.AF_UNIX, socket.SOCK_STREAM)
with pytest.raises(
ValueError,
match="socket type mismatch: expected SOCK_DGRAM, got SOCK_STREAM",
):
await UNIXDatagramSocket.from_socket(sock_or_fd)
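# A minimal sketch (not part of the test suite): UNIX datagram sockets address
# peers by filesystem path (or abstract name on Linux) rather than host/port.
# `_unix_dgram_ping` is a hypothetical helper, not anyio API.
async def _unix_dgram_ping(peer_path: str) -> None:
    async with await create_unix_datagram_socket() as sock:
        await sock.sendto(b"ping", peer_path)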
@pytest.mark.skipif(
sys.platform == "win32", reason="UNIX sockets are not available on Windows"
)
class TestConnectedUNIXDatagramSocket:
@pytest.fixture(
params=[
"path",
pytest.param("abstract", marks=[skip_unix_abstract_mark]),
]
)
def socket_path(self, request: SubRequest) -> Generator[Path, None, None]:
# Use stdlib tempdir generation
        # Fixes `OSError: AF_UNIX path too long` from the pytest-generated tmp_path
with tempfile.TemporaryDirectory() as path:
if request.param == "path":
yield Path(path) / "socket"
else:
yield Path(f"\0{path}") / "socket"
@pytest.fixture(params=[False, True], ids=["str", "path"])
def socket_path_or_str(self, request: SubRequest, socket_path: Path) -> Path | str:
return socket_path if request.param else str(socket_path)
@pytest.fixture(
params=[
pytest.param("path", id="path-peer"),
pytest.param(
"abstract", marks=[skip_unix_abstract_mark], id="abstract-peer"
),
]
)
def peer_socket_path(self) -> Generator[Path, None, None]:
# Use stdlib tempdir generation
        # Fixes `OSError: AF_UNIX path too long` from the pytest-generated tmp_path
with tempfile.TemporaryDirectory() as path:
yield Path(path) / "peer_socket"
@pytest.fixture(params=[False, True], ids=["peer_str", "peer_path"])
def peer_socket_path_or_str(
self, request: SubRequest, peer_socket_path: Path
) -> Path | str:
return peer_socket_path if request.param else str(peer_socket_path)
@pytest.fixture
def peer_sock(self, peer_socket_path: Path) -> Iterable[socket.socket]:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.settimeout(1)
sock.bind(str(peer_socket_path))
yield sock
sock.close()
async def test_extra_attributes(
self,
socket_path: Path,
peer_socket_path: Path,
peer_sock: socket.socket,
) -> None:
async with await create_connected_unix_datagram_socket(
remote_path=peer_socket_path,
local_path=socket_path,
) as unix_dg:
raw_socket = unix_dg.extra(SocketAttribute.raw_socket)
assert raw_socket is not None
assert unix_dg.extra(SocketAttribute.family) == AddressFamily.AF_UNIX
assert os.fsencode(
cast(os.PathLike, unix_dg.extra(SocketAttribute.local_address))
) == os.fsencode(socket_path)
assert os.fsencode(
cast(os.PathLike, unix_dg.extra(SocketAttribute.remote_address))
) == os.fsencode(peer_socket_path)
pytest.raises(
TypedAttributeLookupError, unix_dg.extra, SocketAttribute.local_port
)
pytest.raises(
TypedAttributeLookupError, unix_dg.extra, SocketAttribute.remote_port
)
async def test_send_receive(
self,
socket_path_or_str: Path | str,
peer_socket_path_or_str: Path | str,
) -> None:
async with await create_unix_datagram_socket(
local_path=peer_socket_path_or_str,
) as unix_dg1:
async with await create_connected_unix_datagram_socket(
peer_socket_path_or_str,
local_path=socket_path_or_str,
) as unix_dg2:
socket_path = os.fsdecode(socket_path_or_str)
await unix_dg2.send(b"blah")
data, remote_addr = await unix_dg1.receive()
assert (data, os.fsdecode(remote_addr)) == (b"blah", socket_path)
await unix_dg1.sendto(b"halb", socket_path)
response = await unix_dg2.receive()
assert response == b"halb"
async def test_iterate(
self,
socket_path: Path,
peer_socket_path: Path,
) -> None:
async def serve() -> None:
async for packet in unix_dg2:
await unix_dg2.send(packet[::-1])
async with await create_unix_datagram_socket(
local_path=peer_socket_path,
) as unix_dg1:
async with await create_connected_unix_datagram_socket(
peer_socket_path, local_path=socket_path
) as unix_dg2:
path = os.fsdecode(socket_path)
async with create_task_group() as tg:
tg.start_soon(serve)
await unix_dg1.sendto(b"FOOBAR", path)
data, addr = await unix_dg1.receive()
assert (data, os.fsdecode(addr)) == (b"RABOOF", path)
await unix_dg1.sendto(b"123456", path)
data, addr = await unix_dg1.receive()
assert (data, os.fsdecode(addr)) == (b"654321", path)
tg.cancel_scope.cancel()
async def test_concurrent_receive(
self, peer_socket_path: Path, peer_sock: socket.socket
) -> None:
async with await create_connected_unix_datagram_socket(
peer_socket_path
) as unix_dg:
async with create_task_group() as tg:
tg.start_soon(unix_dg.receive)
await wait_all_tasks_blocked()
try:
with pytest.raises(BusyResourceError) as exc:
await unix_dg.receive()
exc.match("already reading from")
finally:
tg.cancel_scope.cancel()
async def test_close_during_receive(
self, peer_socket_path_or_str: Path | str, peer_sock: socket.socket
) -> None:
async def close_when_blocked() -> None:
await wait_all_tasks_blocked()
await udp.aclose()
async with await create_connected_unix_datagram_socket(
peer_socket_path_or_str
) as udp:
async with create_task_group() as tg:
tg.start_soon(close_when_blocked)
with pytest.raises(ClosedResourceError):
await udp.receive()
async def test_receive_after_close(
self, peer_socket_path_or_str: Path | str, peer_sock: socket.socket
) -> None:
udp = await create_connected_unix_datagram_socket(peer_socket_path_or_str)
await udp.aclose()
with pytest.raises(ClosedResourceError):
await udp.receive()
async def test_send_after_close(
self, peer_socket_path_or_str: Path | str, peer_sock: socket.socket
) -> None:
udp = await create_connected_unix_datagram_socket(peer_socket_path_or_str)
await udp.aclose()
with pytest.raises(ClosedResourceError):
await udp.send(b"foo")
async def test_from_socket(self, sock_or_fd_factory: SockFdFactoryProtocol) -> None:
sock_or_fd = sock_or_fd_factory(
socket.AF_UNIX, socket.SOCK_DGRAM, connected=True
)
async with await ConnectedUNIXDatagramSocket.from_socket(
sock_or_fd
) as udp_socket:
assert isinstance(udp_socket, ConnectedUNIXDatagramSocket)
assert udp_socket.extra(SocketAttribute.family) == socket.AF_UNIX
async def test_from_socket_wrong_socket_type(
self, socket_path: Path, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(
socket.AF_UNIX, socket.SOCK_STREAM, connected=True
)
with pytest.raises(
ValueError,
match="socket type mismatch: expected SOCK_DGRAM, got SOCK_STREAM",
):
await ConnectedUNIXDatagramSocket.from_socket(sock_or_fd)
async def test_from_socket_not_connected(
self, sock_or_fd_factory: SockFdFactoryProtocol
) -> None:
sock_or_fd = sock_or_fd_factory(socket.AF_UNIX, socket.SOCK_DGRAM, bound=True)
with pytest.raises(ValueError, match="the socket must be connected"):
await ConnectedUNIXDatagramSocket.from_socket(sock_or_fd)
async def test_tcp_connectable(mocker: MockerFixture) -> None:
mock_connect_tcp = mocker.patch("anyio._core._sockets.connect_tcp")
connectable = TCPConnectable("localhost", 1234)
await connectable.connect()
mock_connect_tcp.assert_called_once_with("localhost", 1234)
async def test_unix_connectable(mocker: MockerFixture) -> None:
mock_connect_tcp = mocker.patch("anyio._core._sockets.connect_unix")
connectable = UNIXConnectable("/foo/bar")
await connectable.connect()
mock_connect_tcp.assert_called_once_with("/foo/bar")
class TestAsConnectable:
def test_pass_connectable(self) -> None:
connectable = TCPConnectable("localhost", 1234)
assert as_connectable(connectable) is connectable
@pytest.mark.parametrize(
"addr",
[
pytest.param("localhost", id="string"),
pytest.param(IPv4Address("127.0.0.1"), id="ipv4addr"),
pytest.param(IPv6Address("::1"), id="ipv6addr"),
],
)
def test_tcp(self, addr: str | IPv4Address | IPv6Address) -> None:
connectable = as_connectable((addr, 1234))
assert isinstance(connectable, TCPConnectable)
assert connectable.host == addr
assert connectable.port == 1234
@pytest.mark.parametrize(
"path",
[
pytest.param("/foo/bar", id="str"),
pytest.param(b"/foo/bar", id="bytes"),
pytest.param(Path("/foo/bar"), id="strpath"),
],
)
def test_unix(self, path: str | bytes | os.PathLike[str]) -> None:
connectable = as_connectable(path)
assert isinstance(connectable, UNIXConnectable)
assert connectable.path == path
def test_bad_type(self) -> None:
with pytest.raises(TypeError, match="cannot convert 1234 to a connectable"):
as_connectable(1234) # type: ignore[arg-type]
def test_tls_true(self) -> None:
connectable = as_connectable(
"/foo/bar",
tls=True,
tls_hostname="example.com",
tls_standard_compatible=False,
)
assert isinstance(connectable, TLSConnectable)
assert connectable.hostname == "example.com"
assert not connectable.standard_compatible
def test_tls_explicit_context(self, client_context: SSLContext) -> None:
connectable = as_connectable(
"/foo/bar", tls=True, ssl_context=client_context, tls_hostname="example.com"
)
assert isinstance(connectable, TLSConnectable)
assert connectable.ssl_context is client_context
assert connectable.hostname == "example.com"
def test_tls_tcp_implicit_hostname(self, client_context: SSLContext) -> None:
connectable = as_connectable(("localhost", 1234), tls=True)
assert isinstance(connectable, TLSConnectable)
assert connectable.hostname == "localhost"
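# A minimal sketch (not part of the test suite): as_connectable() normalizes a
# (host, port) tuple, a path, or an existing Connectable into an object whose
# connect() can be awaited later, optionally wrapped in TLS. `_open_endpoint`
# is a hypothetical helper, not anyio API.
async def _open_endpoint() -> None:
    connectable = as_connectable(("localhost", 1234), tls=True)
    async with await connectable.connect() as stream:
        await stream.send(b"hello")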
@pytest.mark.network
async def test_getaddrinfo() -> None:
# IDNA 2003 gets this wrong
correct = await getaddrinfo("faß.de", 0)
wrong = await getaddrinfo("fass.de", 0)
assert correct != wrong
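# A minimal sketch (not part of the test suite): anyio's getaddrinfo()
# translates internationalized domain names per IDNA 2008, so "faß.de" encodes
# to its own punycode name instead of being folded into "fass.de" as IDNA 2003
# would do. `_resolve` is a hypothetical helper, not anyio API.
async def _resolve(host: str) -> list:
    return await getaddrinfo(host, 443, type=socket.SOCK_STREAM)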
@pytest.mark.parametrize("sock_type", [socket.SOCK_STREAM, socket.SOCK_STREAM])
async def test_getaddrinfo_ipv6addr(
sock_type: Literal[socket.SocketKind.SOCK_STREAM],
) -> None:
# IDNA trips up over raw IPv6 addresses
proto = 0 if platform.system() == "Windows" else 6
assert await getaddrinfo("::1", 0, type=sock_type) == [
(
socket.AF_INET6,
socket.SOCK_STREAM,
proto,
"",
("::1", 0),
)
]
async def test_getaddrinfo_ipv6_disabled() -> None:
gai_result = [(AddressFamily.AF_INET6, socket.SOCK_STREAM, 6, "", (1, b""))]
with mock.patch.object(get_async_backend(), "getaddrinfo", return_value=gai_result):
assert await getaddrinfo("::1", 0) == []
async def test_getnameinfo() -> None:
expected_result = socket.getnameinfo(("127.0.0.1", 6666), 0)
result = await getnameinfo(("127.0.0.1", 6666))
assert result == expected_result
async def test_connect_tcp_getaddrinfo_context() -> None:
"""
See https://github.com/agronholm/anyio/issues/815
"""
with pytest.raises(socket.gaierror) as exc_info:
async with await connect_tcp("anyio.invalid", 6666):
pass
assert exc_info.value.__context__ is None
@pytest.mark.parametrize("socket_type", ["socket", "fd"])
@pytest.mark.parametrize("event", ["readable", "writable"])
async def test_wait_socket(event: str, socket_type: str) -> None:
wait = wait_readable if event == "readable" else wait_writable
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_sock:
server_sock.bind(("127.0.0.1", 0))
port = server_sock.getsockname()[1]
server_sock.listen()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_sock:
client_sock.connect(("127.0.0.1", port))
client_sock.sendall(b"Hello, world")
conn, addr = server_sock.accept()
with conn:
sock_or_fd: FileDescriptorLike = (
conn.fileno() if socket_type == "fd" else conn
)
with fail_after(3):
await wait(sock_or_fd)
assert conn.recv(1024) == b"Hello, world"
async def test_deprecated_wait_socket(anyio_backend_name: str) -> None:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
with pytest.warns(
DeprecationWarning,
match="This function is deprecated; use `wait_readable` instead",
):
with move_on_after(0.1):
await wait_socket_readable(sock)
with pytest.warns(
DeprecationWarning,
match="This function is deprecated; use `wait_writable` instead",
):
with move_on_after(0.1):
await wait_socket_writable(sock)
@pytest.mark.parametrize("socket_type", ["socket", "fd"])
async def test_interrupted_by_close(socket_type: str) -> None:
a_sock, b = socket.socketpair()
with a_sock, b:
a_sock.setblocking(False)
b.setblocking(False)
a: FileDescriptorLike = a_sock.fileno() if socket_type == "fd" else a_sock
async def reader() -> None:
with pytest.raises(ClosedResourceError):
await wait_readable(a)
async def writer() -> None:
with pytest.raises(ClosedResourceError):
await wait_writable(a)
fill_socket(a_sock)
async with create_task_group() as tg:
tg.start_soon(reader)
tg.start_soon(writer)
await wait_all_tasks_blocked()
notify_closing(a_sock)
a_sock.close()
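# A minimal sketch (not part of the test suite): notify_closing() wakes tasks
# blocked in wait_readable()/wait_writable() on the object with
# ClosedResourceError, so the descriptor can then be closed without leaving
# waiters stuck. `_close_watched` is a hypothetical helper, not anyio API.
def _close_watched(sock: socket.socket) -> None:
    notify_closing(sock)  # waiters raise ClosedResourceError
    sock.close()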
anyio-4.11.0/tests/test_subprocesses.py
from __future__ import annotations
import os
import platform
import sys
from collections.abc import Callable
from pathlib import Path
from subprocess import CalledProcessError
from textwrap import dedent
from typing import Any
import pytest
from pytest import FixtureRequest
from anyio import (
CancelScope,
ClosedResourceError,
create_task_group,
open_process,
run_process,
)
from anyio.streams.buffered import BufferedByteReceiveStream
@pytest.mark.parametrize(
"shell, command",
[
pytest.param(
True,
f'{sys.executable} -c "import sys; print(sys.stdin.read()[::-1])"',
id="shell",
),
pytest.param(
False,
[sys.executable, "-c", "import sys; print(sys.stdin.read()[::-1])"],
id="exec",
),
],
)
async def test_run_process(
shell: bool, command: str | list[str], anyio_backend_name: str
) -> None:
process = await run_process(command, input=b"abc")
assert process.returncode == 0
assert process.stdout.rstrip() == b"cba"
async def test_run_process_checked() -> None:
with pytest.raises(CalledProcessError) as exc:
await run_process(
[
sys.executable,
"-c",
'import sys; print("stderr-text", file=sys.stderr); '
'print("stdout-text"); sys.exit(1)',
],
check=True,
)
assert exc.value.returncode == 1
assert exc.value.stdout.rstrip() == b"stdout-text"
assert exc.value.stderr.rstrip() == b"stderr-text"
@pytest.mark.skipif(
platform.system() == "Windows",
reason="process.terminate() kills the process instantly on Windows",
)
async def test_terminate(tmp_path: Path) -> None:
script_path = tmp_path / "script.py"
script_path.write_text(
dedent(
"""\
import signal, sys, time
def terminate(signum, frame):
sys.exit(2)
signal.signal(signal.SIGTERM, terminate)
print('ready', flush=True)
time.sleep(5)
"""
)
)
async with await open_process([sys.executable, str(script_path)]) as process:
stdout = process.stdout
assert stdout is not None
buffered_stdout = BufferedByteReceiveStream(stdout)
line = await buffered_stdout.receive_until(b"\n", 100)
assert line.rstrip() == b"ready"
process.terminate()
assert await process.wait() == 2
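# A minimal sketch (not part of the test suite): process.stdout is a plain
# byte receive stream; BufferedByteReceiveStream layers receive_until() and
# receive_exactly() on top of it. `_read_first_line` is a hypothetical helper.
async def _read_first_line(command: list[str]) -> bytes:
    async with await open_process(command) as process:
        assert process.stdout is not None
        buffered = BufferedByteReceiveStream(process.stdout)
        return await buffered.receive_until(b"\n", max_bytes=1024)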
async def test_process_cwd(tmp_path: Path) -> None:
"""Test that `cwd` is successfully passed to the subprocess implementation"""
cmd = [sys.executable, "-c", "import os; print(os.getcwd())"]
result = await run_process(cmd, cwd=tmp_path)
assert result.stdout.decode().strip() == str(tmp_path)
async def test_process_env() -> None:
"""Test that `env` is successfully passed to the subprocess implementation"""
env = os.environ.copy()
env.update({"foo": "bar"})
cmd = [sys.executable, "-c", "import os; print(os.environ['foo'])"]
result = await run_process(cmd, env=env)
assert result.stdout.decode().strip() == env["foo"]
@pytest.mark.skipif(
platform.system() == "Windows", reason="Windows does not have os.getsid()"
)
async def test_process_new_session_sid() -> None:
"""
Test that start_new_session is successfully passed to the subprocess implementation.
"""
sid = os.getsid(os.getpid())
cmd = [sys.executable, "-c", "import os; print(os.getsid(os.getpid()))"]
result = await run_process(cmd)
assert result.stdout.decode().strip() == str(sid)
result = await run_process(cmd, start_new_session=True)
assert result.stdout.decode().strip() != str(sid)
async def test_open_process_connect_to_file(tmp_path: Path) -> None:
stdinfile = tmp_path / "stdin"
stdinfile.write_text("Hello, process!\n")
stdoutfile = tmp_path / "stdout"
stderrfile = tmp_path / "stderr"
with (
stdinfile.open("rb") as fin,
stdoutfile.open("wb") as fout,
stderrfile.open("wb") as ferr,
):
async with await open_process(
[
sys.executable,
"-c",
"import sys; txt = sys.stdin.read().strip(); "
'print("stdin says", repr(txt), "but stderr says NO!", '
"file=sys.stderr); "
'print("stdin says", repr(txt), "and stdout says YES!")',
],
stdin=fin,
stdout=fout,
stderr=ferr,
) as p:
assert await p.wait() == 0
assert (
stdoutfile.read_text() == "stdin says 'Hello, process!' and stdout says YES!\n"
)
assert (
stderrfile.read_text() == "stdin says 'Hello, process!' but stderr says NO!\n"
)
async def test_run_process_connect_to_file(tmp_path: Path) -> None:
stdinfile = tmp_path / "stdin"
stdinfile.write_text("Hello, process!\n")
stdoutfile = tmp_path / "stdout"
stderrfile = tmp_path / "stderr"
with (
stdinfile.open("rb") as fin,
stdoutfile.open("wb") as fout,
stderrfile.open("wb") as ferr,
):
await run_process(
[
sys.executable,
"-c",
"import sys; txt = sys.stdin.read().strip(); "
'print("stdin says", repr(txt), "but stderr says NO!", '
"file=sys.stderr); "
'print("stdin says", repr(txt), "and stdout says YES!")',
],
stdin=fin,
stdout=fout,
stderr=ferr,
)
assert (
stdoutfile.read_text() == "stdin says 'Hello, process!' and stdout says YES!\n"
)
assert (
stderrfile.read_text() == "stdin says 'Hello, process!' but stderr says NO!\n"
)
async def test_stdin_input_both_passed(tmp_path: Path) -> None:
stdinfile = tmp_path / "stdin"
stdinfile.write_text("Hello, process!\n")
with pytest.raises(ValueError, match="only one of"), stdinfile.open("rb") as fin:
await run_process([sys.executable, "--version"], input=b"abc", stdin=fin)
async def test_run_process_inherit_stdout(capfd: pytest.CaptureFixture[str]) -> None:
await run_process(
[
sys.executable,
"-c",
'import sys; print("stderr-text", file=sys.stderr); print("stdout-text")',
],
check=True,
stdout=None,
stderr=None,
)
out, err = capfd.readouterr()
assert out == "stdout-text" + os.linesep
assert err == "stderr-text" + os.linesep
async def test_process_aexit_cancellation_doesnt_orphan_process() -> None:
"""
Regression test for #669.
Ensures that open_process.__aexit__() doesn't leave behind an orphan process when
cancelled.
"""
with CancelScope() as scope:
async with await open_process(
[sys.executable, "-c", "import time; time.sleep(1)"]
) as process:
scope.cancel()
assert process.returncode is not None
assert process.returncode != 0
async def test_process_aexit_cancellation_closes_standard_streams(
request: FixtureRequest,
anyio_backend_name: str,
) -> None:
"""
Regression test for #669.
Ensures that open_process.__aexit__() closes standard streams when cancelled. Also
ensures that process.std{in.send,{out,err}.receive}() raise ClosedResourceError on a
closed stream.
"""
if anyio_backend_name == "asyncio":
# Avoid pytest.xfail here due to https://github.com/pytest-dev/pytest/issues/9027
request.node.add_marker(
pytest.mark.xfail(reason="#671 needs to be resolved first")
)
with CancelScope() as scope:
async with await open_process(
[sys.executable, "-c", "import time; time.sleep(1)"]
) as process:
scope.cancel()
assert process.stdin is not None
with pytest.raises(ClosedResourceError):
await process.stdin.send(b"foo")
assert process.stdout is not None
with pytest.raises(ClosedResourceError):
await process.stdout.receive(1)
assert process.stderr is not None
with pytest.raises(ClosedResourceError):
await process.stderr.receive(1)
@pytest.mark.parametrize(
"argname, argvalue_factory",
[
pytest.param(
"user",
lambda: os.getuid(),
id="user",
marks=[
pytest.mark.skipif(
platform.system() == "Windows",
reason="os.getuid() is not available on Windows",
)
],
),
pytest.param(
"group",
lambda: os.getgid(),
id="user",
marks=[
pytest.mark.skipif(
platform.system() == "Windows",
reason="os.getgid() is not available on Windows",
)
],
),
pytest.param("extra_groups", list, id="extra_groups"),
pytest.param("umask", lambda: 0, id="umask"),
],
)
async def test_py39_arguments(
argname: str,
argvalue_factory: Callable[[], Any],
anyio_backend_name: str,
anyio_backend_options: dict[str, Any],
) -> None:
try:
await run_process(
[sys.executable, "-c", "print('hello')"],
**{argname: argvalue_factory()},
)
except ValueError as exc:
if (
"unexpected kwargs" in str(exc)
and anyio_backend_name == "asyncio"
and anyio_backend_options["loop_factory"]
and anyio_backend_options["loop_factory"].__module__ == "uvloop"
):
pytest.skip(f"the {argname!r} argument is not supported by uvloop yet")
raise
async def test_close_early() -> None:
"""Regression test for #490."""
code = dedent("""\
import sys
for _ in range(100):
sys.stdout.buffer.write(bytes(range(256)))
""")
async with await open_process([sys.executable, "-c", code]):
pass
async def test_close_while_reading() -> None:
code = dedent("""\
import time
time.sleep(3)
""")
async with (
await open_process([sys.executable, "-c", code]) as process,
create_task_group() as tg,
):
assert process.stdout
tg.start_soon(process.stdout.aclose)
with pytest.raises(ClosedResourceError):
await process.stdout.receive()
process.terminate()
anyio-4.11.0/tests/test_synchronization.py
from __future__ import annotations
import asyncio
from typing import Any
import pytest
from anyio import (
CancelScope,
Condition,
Event,
Lock,
Semaphore,
WouldBlock,
create_task_group,
fail_after,
move_on_after,
run,
to_thread,
wait_all_tasks_blocked,
)
from anyio.abc import CapacityLimiter, TaskStatus
from anyio.lowlevel import checkpoint
from .conftest import asyncio_params
class TestLock:
async def test_contextmanager(self) -> None:
async def task() -> None:
assert lock.locked()
async with lock:
results.append("2")
results = []
lock = Lock()
async with create_task_group() as tg:
async with lock:
tg.start_soon(task)
await wait_all_tasks_blocked()
results.append("1")
assert not lock.locked()
assert results == ["1", "2"]
async def test_manual_acquire(self) -> None:
async def task() -> None:
assert lock.locked()
await lock.acquire()
try:
results.append("2")
finally:
lock.release()
results = []
lock = Lock()
async with create_task_group() as tg:
await lock.acquire()
try:
tg.start_soon(task)
await wait_all_tasks_blocked()
results.append("1")
finally:
lock.release()
assert not lock.locked()
assert results == ["1", "2"]
async def test_fast_acquire(self) -> None:
"""
Test that fast_acquire=True does not yield back control to the event loop when
there is no contention.
"""
other_task_called = False
async def other_task() -> None:
nonlocal other_task_called
other_task_called = True
lock = Lock(fast_acquire=True)
async with create_task_group() as tg:
tg.start_soon(other_task)
async with lock:
assert not other_task_called
async def test_acquire_nowait(self) -> None:
lock = Lock()
lock.acquire_nowait()
assert lock.locked()
async def test_acquire_nowait_wouldblock(self) -> None:
async def try_lock() -> None:
pytest.raises(WouldBlock, lock.acquire_nowait)
lock = Lock()
async with lock, create_task_group() as tg:
assert lock.locked()
tg.start_soon(try_lock)
@pytest.mark.parametrize("fast_acquire", [True, False])
async def test_acquire_twice_async(self, fast_acquire: bool) -> None:
lock = Lock(fast_acquire=fast_acquire)
await lock.acquire()
with pytest.raises(
RuntimeError, match="Attempted to acquire an already held Lock"
):
await lock.acquire()
async def test_acquire_twice_sync(self) -> None:
lock = Lock()
lock.acquire_nowait()
with pytest.raises(
RuntimeError, match="Attempted to acquire an already held Lock"
):
lock.acquire_nowait()
@pytest.mark.parametrize(
"release_first",
[pytest.param(False, id="releaselast"), pytest.param(True, id="releasefirst")],
)
async def test_cancel_during_acquire(self, release_first: bool) -> None:
acquired = False
async def task(*, task_status: TaskStatus) -> None:
nonlocal acquired
task_status.started()
async with lock:
acquired = True
lock = Lock()
async with create_task_group() as tg:
await lock.acquire()
await tg.start(task)
tg.cancel_scope.cancel()
with CancelScope(shield=True):
if release_first:
lock.release()
await wait_all_tasks_blocked()
else:
await wait_all_tasks_blocked()
lock.release()
assert not acquired
assert not lock.locked()
async def test_statistics(self) -> None:
async def waiter() -> None:
async with lock:
pass
lock = Lock()
async with create_task_group() as tg:
assert not lock.statistics().locked
assert lock.statistics().tasks_waiting == 0
async with lock:
assert lock.statistics().locked
assert lock.statistics().tasks_waiting == 0
for i in range(1, 3):
tg.start_soon(waiter)
await wait_all_tasks_blocked()
assert lock.statistics().tasks_waiting == i
assert not lock.statistics().locked
assert lock.statistics().tasks_waiting == 0
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_asyncio_deadlock(self) -> None:
"""Regression test for #398."""
lock = Lock()
async def acquire() -> None:
async with lock:
await asyncio.sleep(0)
loop = asyncio.get_running_loop()
task1 = loop.create_task(acquire())
task2 = loop.create_task(acquire())
await asyncio.sleep(0)
task1.cancel()
await asyncio.wait_for(task2, 1)
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_cancel_after_release(self) -> None:
"""
        Test that a native asyncio cancellation will not cause lock ownership
        to be lost between a release() and the resumption of acquire().
"""
# Create the lock and acquire it right away so that any task acquiring it will
# block
lock = Lock()
lock.acquire_nowait()
        # Start a task that gets blocked on trying to acquire the lock
loop = asyncio.get_running_loop()
task1 = loop.create_task(lock.acquire(), name="task1")
await asyncio.sleep(0)
# Trigger the acquiring task to be rescheduled, but also cancel it right away
lock.release()
task1.cancel()
statistics = lock.statistics()
assert statistics.owner
assert statistics.owner.name == "task1"
await asyncio.wait([task1], timeout=1)
        # The acquire() method should've released the lock because acquisition
        # failed due to cancellation
statistics = lock.statistics()
assert statistics.owner is None
assert statistics.tasks_waiting == 0
lock.acquire_nowait()
def test_instantiate_outside_event_loop(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def use_lock() -> None:
async with lock:
pass
lock = Lock()
statistics = lock.statistics()
assert not statistics.locked
assert statistics.owner is None
assert statistics.tasks_waiting == 0
run(use_lock, backend=anyio_backend_name, backend_options=anyio_backend_options)
async def test_owner_after_release(self) -> None:
async def taskfunc1() -> None:
await lock.acquire()
owner = lock.statistics().owner
assert owner
assert owner.name == "task1"
await event.wait()
lock.release()
owner = lock.statistics().owner
assert owner
assert owner.name == "task2"
event = Event()
lock = Lock()
async with create_task_group() as tg:
tg.start_soon(taskfunc1, name="task1")
await wait_all_tasks_blocked()
tg.start_soon(lock.acquire, name="task2")
await wait_all_tasks_blocked()
event.set()
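# A minimal sketch (not part of the test suite): fast_acquire=True skips the
# scheduling checkpoint when the lock is uncontended, reducing latency at the
# cost of fairness if the holder never awaits. `_fast_lock_demo` is a
# hypothetical helper, not anyio API.
async def _fast_lock_demo() -> None:
    lock = Lock(fast_acquire=True)
    async with lock:  # no yield to the event loop when uncontended
        pass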
class TestEvent:
async def test_event(self) -> None:
async def setter() -> None:
assert not event.is_set()
event.set()
event = Event()
async with create_task_group() as tg:
tg.start_soon(setter)
await event.wait()
assert event.is_set()
async def test_event_cancel(self) -> None:
task_started = event_set = False
async def task() -> None:
nonlocal task_started, event_set
task_started = True
await event.wait()
event_set = True
event = Event()
async with create_task_group() as tg:
tg.start_soon(task)
tg.cancel_scope.cancel()
event.set()
assert task_started
assert not event_set
async def test_event_wait_before_set_before_cancel(self) -> None:
setter_started = waiter_woke = False
async def setter() -> None:
nonlocal setter_started
setter_started = True
assert not event.is_set()
event.set()
tg.cancel_scope.cancel()
event = Event()
async with create_task_group() as tg:
tg.start_soon(setter)
await event.wait()
waiter_woke = True
assert setter_started
assert waiter_woke
async def test_statistics(self) -> None:
async def waiter() -> None:
await event.wait()
event = Event()
async with create_task_group() as tg:
assert event.statistics().tasks_waiting == 0
for i in range(1, 3):
tg.start_soon(waiter)
await wait_all_tasks_blocked()
assert event.statistics().tasks_waiting == i
event.set()
assert event.statistics().tasks_waiting == 0
def test_instantiate_outside_event_loop(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
event = Event()
assert not event.is_set()
assert event.statistics().tasks_waiting == 0
event.set()
assert event.is_set()
run(
event.wait,
backend=anyio_backend_name,
backend_options=anyio_backend_options,
)
class TestCondition:
async def test_contextmanager(self) -> None:
async def notifier() -> None:
async with condition:
condition.notify_all()
condition = Condition()
async with create_task_group() as tg:
async with condition:
assert condition.locked()
tg.start_soon(notifier)
await condition.wait()
async def test_manual_acquire(self) -> None:
async def notifier() -> None:
await condition.acquire()
try:
condition.notify_all()
finally:
condition.release()
condition = Condition()
async with create_task_group() as tg:
await condition.acquire()
try:
assert condition.locked()
tg.start_soon(notifier)
await condition.wait()
finally:
condition.release()
async def test_acquire_nowait(self) -> None:
condition = Condition()
condition.acquire_nowait()
assert condition.locked()
async def test_acquire_nowait_wouldblock(self) -> None:
async def try_lock() -> None:
pytest.raises(WouldBlock, condition.acquire_nowait)
condition = Condition()
async with condition, create_task_group() as tg:
assert condition.locked()
tg.start_soon(try_lock)
async def test_wait_cancel(self) -> None:
task_started = notified = False
async def task() -> None:
nonlocal task_started, notified
task_started = True
async with condition:
event.set()
await condition.wait()
notified = True
event = Event()
condition = Condition()
async with create_task_group() as tg:
tg.start_soon(task)
await event.wait()
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
assert task_started
assert not notified
async def test_wait_no_lock(self) -> None:
condition = Condition()
with pytest.raises(
RuntimeError, match="The current task is not holding the underlying lock"
):
await condition.wait()
async def test_statistics(self) -> None:
async def waiter() -> None:
async with condition:
await condition.wait()
condition = Condition()
async with create_task_group() as tg:
assert not condition.statistics().lock_statistics.locked
assert condition.statistics().tasks_waiting == 0
async with condition:
assert condition.statistics().lock_statistics.locked
assert condition.statistics().tasks_waiting == 0
for i in range(1, 3):
tg.start_soon(waiter)
await wait_all_tasks_blocked()
assert condition.statistics().tasks_waiting == i
for i in range(1, -1, -1):
async with condition:
condition.notify(1)
await wait_all_tasks_blocked()
assert condition.statistics().tasks_waiting == i
assert not condition.statistics().lock_statistics.locked
assert condition.statistics().tasks_waiting == 0
def test_instantiate_outside_event_loop(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def use_condition() -> None:
async with condition:
pass
condition = Condition()
assert condition.statistics().tasks_waiting == 0
run(
use_condition,
backend=anyio_backend_name,
backend_options=anyio_backend_options,
)
async def test_wait_for(self) -> None:
result = None
async def waiter() -> None:
nonlocal result
async with condition:
result = await condition.wait_for(lambda: value)
value = None
condition = Condition()
async with create_task_group() as tg:
tg.start_soon(waiter)
await wait_all_tasks_blocked()
async with condition:
condition.notify_all()
await wait_all_tasks_blocked()
assert result is None
value = "foo"
async with condition:
condition.notify_all()
assert result == "foo"
class TestSemaphore:
async def test_contextmanager(self) -> None:
async def acquire() -> None:
async with semaphore:
assert semaphore.value in (0, 1)
semaphore = Semaphore(2)
async with create_task_group() as tg:
tg.start_soon(acquire, name="task 1")
tg.start_soon(acquire, name="task 2")
assert semaphore.value == 2
async def test_manual_acquire(self) -> None:
async def acquire() -> None:
await semaphore.acquire()
try:
assert semaphore.value in (0, 1)
finally:
semaphore.release()
semaphore = Semaphore(2)
async with create_task_group() as tg:
tg.start_soon(acquire, name="task 1")
tg.start_soon(acquire, name="task 2")
assert semaphore.value == 2
async def test_fast_acquire(self) -> None:
"""
Test that fast_acquire=True does not yield back control to the event loop when
there is no contention.
"""
other_task_called = False
async def other_task() -> None:
nonlocal other_task_called
other_task_called = True
semaphore = Semaphore(1, fast_acquire=True)
async with create_task_group() as tg:
tg.start_soon(other_task)
async with semaphore:
assert not other_task_called
async def test_acquire_nowait(self) -> None:
semaphore = Semaphore(1)
semaphore.acquire_nowait()
assert semaphore.value == 0
pytest.raises(WouldBlock, semaphore.acquire_nowait)
@pytest.mark.parametrize(
"release_first",
[pytest.param(False, id="releaselast"), pytest.param(True, id="releasefirst")],
)
async def test_cancel_during_acquire(self, release_first: bool) -> None:
acquired = False
async def task(*, task_status: TaskStatus) -> None:
nonlocal acquired
task_status.started()
async with semaphore:
acquired = True
semaphore = Semaphore(1)
async with create_task_group() as tg:
await semaphore.acquire()
await tg.start(task)
tg.cancel_scope.cancel()
with CancelScope(shield=True):
if release_first:
semaphore.release()
await wait_all_tasks_blocked()
else:
await wait_all_tasks_blocked()
semaphore.release()
assert not acquired
assert semaphore.value == 1
@pytest.mark.parametrize("max_value", [2, None])
async def test_max_value(self, max_value: int | None) -> None:
semaphore = Semaphore(0, max_value=max_value)
assert semaphore.max_value == max_value
async def test_max_value_exceeded(self) -> None:
semaphore = Semaphore(1, max_value=2)
semaphore.release()
pytest.raises(ValueError, semaphore.release)
async def test_statistics(self) -> None:
async def waiter() -> None:
async with semaphore:
pass
semaphore = Semaphore(1)
async with create_task_group() as tg:
assert semaphore.statistics().tasks_waiting == 0
async with semaphore:
assert semaphore.statistics().tasks_waiting == 0
for i in range(1, 3):
tg.start_soon(waiter)
await wait_all_tasks_blocked()
assert semaphore.statistics().tasks_waiting == i
assert semaphore.statistics().tasks_waiting == 0
async def test_acquire_race(self) -> None:
"""
Test against a race condition: when a task waiting on acquire() is rescheduled
but another task snatches the last available slot, the task should not raise
WouldBlock.
"""
semaphore = Semaphore(1)
async with create_task_group() as tg:
semaphore.acquire_nowait()
tg.start_soon(semaphore.acquire)
await wait_all_tasks_blocked()
semaphore.release()
pytest.raises(WouldBlock, semaphore.acquire_nowait)
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_asyncio_deadlock(self) -> None:
"""Regression test for #398."""
semaphore = Semaphore(1)
async def acquire() -> None:
async with semaphore:
await asyncio.sleep(0)
loop = asyncio.get_running_loop()
task1 = loop.create_task(acquire())
task2 = loop.create_task(acquire())
await asyncio.sleep(0)
task1.cancel()
await asyncio.wait_for(task2, 1)
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_cancel_after_release(self) -> None:
"""
        Test that a native asyncio cancellation will not cause semaphore
        ownership to be lost between a release() and the resumption of acquire().
"""
# Create the semaphore in such a way that any task acquiring it will block
semaphore = Semaphore(0, max_value=1)
# Start a task that gets blocked on trying to acquire the semaphore
loop = asyncio.get_running_loop()
task1 = loop.create_task(semaphore.acquire())
await asyncio.sleep(0)
# Trigger the acquiring task to be rescheduled, but also cancel it right away
semaphore.release()
task1.cancel()
assert semaphore.value == 0
await asyncio.wait([task1], timeout=1)
# The acquire() method should've released the semaphore because acquisition
# failed due to cancellation
assert semaphore.value == 1
assert semaphore.statistics().tasks_waiting == 0
semaphore.acquire_nowait()
def test_instantiate_outside_event_loop(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def use_semaphore() -> None:
async with semaphore:
pass
semaphore = Semaphore(1, max_value=3)
assert semaphore.value == 1
assert semaphore.max_value == 3
assert semaphore.statistics().tasks_waiting == 0
run(
use_semaphore,
backend=anyio_backend_name,
backend_options=anyio_backend_options,
)
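# A minimal sketch (not part of the test suite): unlike a Semaphore, a
# CapacityLimiter tracks which borrower holds each token, so the same task
# cannot acquire it twice, and total_tokens can be raised at runtime to wake
# waiting borrowers. `_limited_section` is a hypothetical helper.
async def _limited_section(limiter: CapacityLimiter) -> None:
    async with limiter:  # borrows one token on behalf of the current task
        await checkpoint()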
class TestCapacityLimiter:
async def test_bad_init_type(self) -> None:
pytest.raises(TypeError, CapacityLimiter, 1.0).match(
"total_tokens must be an int or math.inf"
)
async def test_bad_init_value(self) -> None:
pytest.raises(ValueError, CapacityLimiter, 0).match("total_tokens must be >= 1")
async def test_borrow(self) -> None:
limiter = CapacityLimiter(2)
assert limiter.total_tokens == 2
assert limiter.available_tokens == 2
assert limiter.borrowed_tokens == 0
async with limiter:
assert limiter.total_tokens == 2
assert limiter.available_tokens == 1
assert limiter.borrowed_tokens == 1
async def test_limit(self) -> None:
value = 0
async def taskfunc() -> None:
nonlocal value
for _ in range(5):
async with limiter:
assert value == 0
value = 1
await wait_all_tasks_blocked()
value = 0
limiter = CapacityLimiter(1)
async with create_task_group() as tg:
for _ in range(3):
tg.start_soon(taskfunc)
async def test_borrow_twice(self) -> None:
limiter = CapacityLimiter(1)
await limiter.acquire()
with pytest.raises(RuntimeError) as exc:
await limiter.acquire()
exc.match(
"this borrower is already holding one of this CapacityLimiter's tokens"
)
async def test_bad_release(self) -> None:
limiter = CapacityLimiter(1)
with pytest.raises(RuntimeError) as exc:
limiter.release()
exc.match("this borrower isn't holding any of this CapacityLimiter's tokens")
async def test_increase_tokens(self) -> None:
async def setter() -> None:
# Wait until waiter() is inside the limiter block
await event1.wait()
async with limiter:
# This can only happen when total_tokens has been increased
event2.set()
async def waiter() -> None:
async with limiter:
event1.set()
await event2.wait()
limiter = CapacityLimiter(1)
event1, event2 = Event(), Event()
async with create_task_group() as tg:
tg.start_soon(setter)
tg.start_soon(waiter)
await wait_all_tasks_blocked()
assert event1.is_set()
assert not event2.is_set()
limiter.total_tokens = 2
assert event2.is_set()
async def test_current_default_thread_limiter(self) -> None:
limiter = to_thread.current_default_thread_limiter()
assert isinstance(limiter, CapacityLimiter)
assert limiter.total_tokens == 40
async def test_statistics(self) -> None:
async def waiter() -> None:
async with limiter:
pass
limiter = CapacityLimiter(1)
assert limiter.statistics().total_tokens == 1
assert limiter.statistics().borrowed_tokens == 0
assert limiter.statistics().tasks_waiting == 0
async with create_task_group() as tg:
async with limiter:
assert limiter.statistics().borrowed_tokens == 1
assert limiter.statistics().tasks_waiting == 0
for i in range(1, 3):
tg.start_soon(waiter)
await wait_all_tasks_blocked()
assert limiter.statistics().tasks_waiting == i
assert limiter.statistics().tasks_waiting == 0
assert limiter.statistics().borrowed_tokens == 0
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_asyncio_deadlock(self) -> None:
"""Regression test for #398."""
limiter = CapacityLimiter(1)
async def acquire() -> None:
async with limiter:
await asyncio.sleep(0)
loop = asyncio.get_running_loop()
task1 = loop.create_task(acquire())
task2 = loop.create_task(acquire())
await asyncio.sleep(0)
task1.cancel()
await asyncio.wait_for(task2, 1)
async def test_ordered_queue(self) -> None:
limiter = CapacityLimiter(1)
results = []
event = Event()
async def append(x: int, task_status: TaskStatus) -> None:
task_status.started()
async with limiter:
await event.wait()
results.append(x)
async with create_task_group() as tg:
for i in [0, 1, 2]:
await tg.start(append, i)
event.set()
assert results == [0, 1, 2]
async def test_increase_tokens_lets_others_acquire(self) -> None:
limiter = CapacityLimiter(1)
entered_events = [Event() for _ in range(3)]
continue_event = Event()
async def worker(entered_event: Event) -> None:
async with limiter:
entered_event.set()
await continue_event.wait()
async with create_task_group() as tg:
for event in entered_events[:2]:
tg.start_soon(worker, event)
# One task should be able to acquire the limiter while the other is left
# waiting
await wait_all_tasks_blocked()
assert sum(ev.is_set() for ev in entered_events) == 1
# Increase the total tokens and start another worker.
# All tasks should be able to acquire the limiter now.
limiter.total_tokens = 3
tg.start_soon(worker, entered_events[2])
with fail_after(1):
for ev in entered_events[1:]:
await ev.wait()
# Allow all tasks to exit
continue_event.set()
def test_instantiate_outside_event_loop(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> None:
async def use_limiter() -> None:
async with limiter:
pass
limiter = CapacityLimiter(1)
limiter.total_tokens = 2
with pytest.raises(TypeError):
limiter.total_tokens = "2" # type: ignore[assignment]
with pytest.raises(TypeError):
limiter.total_tokens = 3.0
assert limiter.total_tokens == 2
assert limiter.borrowed_tokens == 0
statistics = limiter.statistics()
assert statistics.total_tokens == 2
assert statistics.borrowed_tokens == 0
assert statistics.borrowers == ()
assert statistics.tasks_waiting == 0
run(
use_limiter,
backend=anyio_backend_name,
backend_options=anyio_backend_options,
)
async def test_total_tokens_as_kwarg(self) -> None:
# Regression test for #515
limiter = CapacityLimiter(total_tokens=1)
assert limiter.total_tokens == 1
async def test_acquire_cancelled(self) -> None:
# Regression test for #947
limiter = CapacityLimiter(1)
async def borrower(
event: Event, *, task_status: TaskStatus[CancelScope]
) -> None:
with CancelScope() as scope:
task_status.started(scope)
async with limiter:
event.set()
await checkpoint()
async with create_task_group() as tg:
async with limiter:
event1 = Event()
scope1 = await tg.start(borrower, event1)
event2 = Event()
await tg.start(borrower, event2)
scope1.cancel()
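            # Cancelling the first waiting borrower must not consume the token; once
            # the limiter is released below, the second borrower should acquire it
            # and set event2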
with move_on_after(0.1):
await event2.wait()
return
tg.cancel_scope.cancel()
pytest.fail("The second borrower failed to acquire the limiter")
anyio-4.11.0/tests/test_taskgroups.py 0000664 0000000 0000000 00000153671 15064462627 0017722 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import asyncio
import gc
import math
import sys
import time
from asyncio import CancelledError
from collections.abc import AsyncGenerator, Coroutine, Generator
from typing import Any, NoReturn, cast
from unittest import mock
import pytest
from exceptiongroup import catch
from pytest import FixtureRequest, MonkeyPatch
import anyio
from anyio import (
TASK_STATUS_IGNORED,
CancelScope,
create_task_group,
current_effective_deadline,
current_time,
fail_after,
get_cancelled_exc_class,
get_current_task,
move_on_after,
sleep,
sleep_forever,
wait_all_tasks_blocked,
)
from anyio.abc import TaskGroup, TaskStatus
from anyio.lowlevel import checkpoint
from .conftest import asyncio_params, no_other_refs
if sys.version_info < (3, 11):
from exceptiongroup import BaseExceptionGroup, ExceptionGroup
async def async_error(text: str, delay: float = 0.1) -> NoReturn:
try:
if delay:
await sleep(delay)
finally:
raise Exception(text)
async def test_already_closed() -> None:
async with create_task_group() as tg:
pass
with pytest.raises(RuntimeError) as exc:
tg.start_soon(async_error, "fail")
exc.match("This task group is not active; no new tasks can be started")
async def test_success() -> None:
async def async_add(value: str) -> None:
results.add(value)
results: set[str] = set()
async with create_task_group() as tg:
tg.start_soon(async_add, "a")
tg.start_soon(async_add, "b")
assert results == {"a", "b"}
@pytest.mark.parametrize(
"module",
[
pytest.param(asyncio, id="asyncio"),
pytest.param(pytest.importorskip("trio"), id="trio"),
],
)
def test_run_natively(module: Any) -> None:
async def testfunc() -> None:
async with create_task_group() as tg:
tg.start_soon(sleep, 0)
if module is asyncio:
asyncio.run(testfunc())
else:
module.run(testfunc)
async def test_start_soon_while_running() -> None:
async def task_func() -> None:
tg.start_soon(sleep, 0)
async with create_task_group() as tg:
tg.start_soon(task_func)
async def test_start_soon_after_error() -> None:
with pytest.raises(ExceptionGroup):
async with create_task_group() as tg:
a = 1 / 0 # noqa: F841
with pytest.raises(RuntimeError) as exc:
tg.start_soon(sleep, 0)
exc.match("This task group is not active; no new tasks can be started")
async def test_start_no_value() -> None:
async def taskfunc(*, task_status: TaskStatus) -> None:
task_status.started()
async with create_task_group() as tg:
value = await tg.start(taskfunc)
assert value is None
async def test_start_called_twice() -> None:
async def taskfunc(*, task_status: TaskStatus) -> None:
task_status.started()
with pytest.raises(
RuntimeError, match="called 'started' twice on the same task status"
):
task_status.started()
async with create_task_group() as tg:
value = await tg.start(taskfunc)
assert value is None
async def test_no_called_started_twice() -> None:
async def taskfunc(*, task_status: TaskStatus) -> None:
task_status.started()
async with create_task_group() as tg:
coro = tg.start(taskfunc)
tg.cancel_scope.cancel()
await coro
async def test_start_with_value() -> None:
async def taskfunc(*, task_status: TaskStatus[str]) -> None:
task_status.started("foo")
async with create_task_group() as tg:
value = await tg.start(taskfunc)
assert value == "foo"
async def test_start_crash_before_started_call() -> None:
async def taskfunc(*, task_status: TaskStatus) -> NoReturn:
raise Exception("foo")
async with create_task_group() as tg:
with pytest.raises(Exception) as exc:
await tg.start(taskfunc)
exc.match("foo")
async def test_start_crash_after_started_call() -> None:
async def taskfunc(*, task_status: TaskStatus[int]) -> NoReturn:
task_status.started(2)
raise Exception("foo")
with pytest.raises(ExceptionGroup) as exc:
async with create_task_group() as tg:
value = await tg.start(taskfunc)
assert len(exc.value.exceptions) == 1
assert str(exc.value.exceptions[0]) == "foo"
assert value == 2
async def test_start_no_started_call() -> None:
async def taskfunc(*, task_status: TaskStatus) -> None:
pass
async with create_task_group() as tg:
with pytest.raises(RuntimeError) as exc:
await tg.start(taskfunc)
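        # Partial match for the "Child exited without calling task_status.started()"
        # error message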
exc.match("hild exited")
async def test_start_cancelled() -> None:
started = finished = False
async def taskfunc(*, task_status: TaskStatus) -> None:
nonlocal started, finished
started = True
await sleep(2)
finished = True
async with create_task_group() as tg:
tg.cancel_scope.cancel()
await tg.start(taskfunc)
assert started
assert not finished
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_start_native_host_cancelled() -> None:
started = finished = False
async def taskfunc(*, task_status: TaskStatus) -> None:
nonlocal started, finished
started = True
await sleep(2)
finished = True
async def start_another() -> None:
async with create_task_group() as tg:
await tg.start(taskfunc)
task = asyncio.get_running_loop().create_task(start_another())
await wait_all_tasks_blocked()
task.cancel()
with pytest.raises(asyncio.CancelledError):
await task
assert started
assert not finished
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_start_native_child_cancelled() -> None:
task = None
finished = False
async def taskfunc(*, task_status: TaskStatus) -> None:
nonlocal task, finished
task = asyncio.current_task()
await sleep(2)
finished = True
async def start_another() -> None:
async with create_task_group() as tg2:
await tg2.start(taskfunc)
async with create_task_group() as tg:
tg.start_soon(start_another)
await wait_all_tasks_blocked()
assert task is not None
task.cancel()
assert not finished
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_propagate_native_cancellation_from_taskgroup() -> None:
async def taskfunc() -> None:
async with create_task_group() as tg:
tg.start_soon(asyncio.sleep, 2)
task = asyncio.create_task(taskfunc())
await wait_all_tasks_blocked()
task.cancel()
with pytest.raises(asyncio.CancelledError):
await task
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_cancel_with_nested_task_groups() -> None:
"""Regression test for #695."""
async def shield_task() -> None:
with CancelScope(shield=True) as scope:
with mock.patch.object(
scope,
"_deliver_cancellation",
wraps=getattr(scope, "_deliver_cancellation"),
) as shielded_cancel_spy:
await sleep(0.5)
assert len(outer_cancel_spy.call_args_list) < 10
shielded_cancel_spy.assert_not_called()
async def middle_task() -> None:
try:
async with create_task_group() as tg:
with mock.patch.object(
tg.cancel_scope,
"_deliver_cancellation",
wraps=getattr(tg.cancel_scope, "_deliver_cancellation"),
) as middle_cancel_spy:
tg.start_soon(shield_task, name="shield task")
finally:
assert len(middle_cancel_spy.call_args_list) < 10
assert len(outer_cancel_spy.call_args_list) < 10
async with create_task_group() as tg:
with mock.patch.object(
tg.cancel_scope,
"_deliver_cancellation",
wraps=getattr(tg.cancel_scope, "_deliver_cancellation"),
) as outer_cancel_spy:
tg.start_soon(middle_task, name="middle task")
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
assert len(outer_cancel_spy.call_args_list) < 10
async def test_start_exception_delivery(anyio_backend_name: str) -> None:
def task_fn(*, task_status: TaskStatus[str] = TASK_STATUS_IGNORED) -> None:
task_status.started("hello")
if anyio_backend_name == "trio":
pattern = "appears to be synchronous"
else:
pattern = "is not a coroutine object"
async with anyio.create_task_group() as tg:
with pytest.raises(TypeError, match=pattern):
await tg.start(task_fn) # type: ignore[arg-type]
async def test_start_cancel_after_error() -> None:
"""Regression test for #517."""
sleep_completed = False
async def sleep_and_raise() -> None:
await wait_all_tasks_blocked()
raise RuntimeError("This should cancel the second start() call")
async def sleep_only(task_status: TaskStatus[None]) -> None:
nonlocal sleep_completed
await sleep(1)
sleep_completed = True
task_status.started()
with pytest.raises(ExceptionGroup) as exc:
async with anyio.create_task_group() as outer_tg:
async with anyio.create_task_group() as inner_tg:
inner_tg.start_soon(sleep_and_raise)
await outer_tg.start(sleep_only)
assert isinstance(exc.value.exceptions[0], ExceptionGroup)
assert isinstance(exc.value.exceptions[0].exceptions[0], RuntimeError)
assert not sleep_completed
async def test_host_exception() -> None:
result = None
async def set_result(value: str) -> None:
nonlocal result
await sleep(3)
result = value
with pytest.raises(ExceptionGroup) as exc:
async with create_task_group() as tg:
tg.start_soon(set_result, "a")
raise Exception("dummy error")
assert len(exc.value.exceptions) == 1
assert str(exc.value.exceptions[0]) == "dummy error"
assert result is None
async def test_level_cancellation() -> None:
marker = None
async def dummy() -> None:
nonlocal marker
marker = 1
        # At this point the task has been cancelled, so checkpoint() will raise a
        # cancellation exception
await checkpoint()
# Execution should never get this far
marker = 2
async with create_task_group() as tg:
tg.start_soon(dummy)
assert marker is None
tg.cancel_scope.cancel()
assert marker == 1
async def test_failing_child_task_cancels_host() -> None:
async def child() -> NoReturn:
await wait_all_tasks_blocked()
raise Exception("foo")
sleep_completed = False
with pytest.raises(ExceptionGroup) as exc:
async with create_task_group() as tg:
tg.start_soon(child)
await sleep(0.5)
sleep_completed = True
assert len(exc.value.exceptions) == 1
assert str(exc.value.exceptions[0]) == "foo"
assert not sleep_completed
async def test_failing_host_task_cancels_children() -> None:
sleep_completed = False
async def child() -> None:
nonlocal sleep_completed
await sleep(1)
sleep_completed = True
with pytest.raises(ExceptionGroup) as exc:
async with create_task_group() as tg:
tg.start_soon(child)
await wait_all_tasks_blocked()
raise Exception("foo")
assert len(exc.value.exceptions) == 1
assert str(exc.value.exceptions[0]) == "foo"
assert not sleep_completed
async def test_cancel_scope_in_another_task() -> None:
local_scope = None
result = False
async def child() -> None:
nonlocal result, local_scope
with CancelScope() as local_scope:
await sleep(2)
result = True
async with create_task_group() as tg:
tg.start_soon(child)
while local_scope is None:
await checkpoint()
local_scope.cancel()
assert not result
async def test_cancel_propagation() -> None:
async def g() -> NoReturn:
async with create_task_group():
await sleep(1)
pytest.fail("Execution should not reach this point")
async with create_task_group() as tg:
tg.start_soon(g)
await checkpoint()
tg.cancel_scope.cancel()
async def test_cancel_twice() -> None:
"""Test that the same task can receive two cancellations."""
async def cancel_group() -> None:
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
for _ in range(2):
async with create_task_group() as tg:
tg.start_soon(cancel_group)
await sleep(1)
pytest.fail("Execution should not reach this point")
async def test_cancel_exiting_task_group() -> None:
"""
Test that if a task group is waiting for subtasks to finish and it receives a
cancellation, the subtasks are also cancelled and the waiting continues.
"""
cancel_received = False
async def waiter() -> None:
nonlocal cancel_received
try:
await sleep(5)
finally:
cancel_received = True
async def subgroup() -> None:
async with create_task_group() as tg2:
tg2.start_soon(waiter)
async with create_task_group() as tg:
tg.start_soon(subgroup)
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
assert cancel_received
async def test_cancel_before_entering_scope() -> None:
"""
Test that CancelScope.cancel() is honored even if called before entering the scope.
"""
cancel_scope = anyio.CancelScope()
cancel_scope.cancel()
with cancel_scope:
await anyio.sleep(1) # Checkpoint to allow anyio to check for cancellation
pytest.fail("execution should not reach this point")
async def test_exception_group_children() -> None:
with pytest.raises(BaseExceptionGroup) as exc:
async with create_task_group() as tg:
tg.start_soon(async_error, "task1")
tg.start_soon(async_error, "task2", 0.15)
assert len(exc.value.exceptions) == 2
assert sorted(str(e) for e in exc.value.exceptions) == ["task1", "task2"]
async def test_exception_group_host() -> None:
with pytest.raises(BaseExceptionGroup) as exc:
async with create_task_group() as tg:
tg.start_soon(async_error, "child", 2)
await wait_all_tasks_blocked()
raise Exception("host")
assert len(exc.value.exceptions) == 2
assert sorted(str(e) for e in exc.value.exceptions) == ["child", "host"]
async def test_escaping_cancelled_exception() -> None:
async with create_task_group() as tg:
tg.cancel_scope.cancel()
await checkpoint()
async def test_cancel_scope_cleared() -> None:
with move_on_after(0.1):
await sleep(1)
await checkpoint()
@pytest.mark.parametrize("delay", [0, 0.1], ids=["instant", "delayed"])
async def test_fail_after(delay: float) -> None:
with pytest.raises(TimeoutError):
with fail_after(delay) as scope:
try:
await sleep(1)
except get_cancelled_exc_class() as exc:
assert "deadline" in str(exc)
raise
else:
pytest.fail("sleep() should have raised a cancellation exception")
assert scope.cancel_called
assert scope.cancelled_caught
async def test_fail_after_no_timeout() -> None:
with fail_after(None) as scope:
assert scope.deadline == float("inf")
await sleep(0.1)
assert not scope.cancel_called
assert not scope.cancelled_caught
async def test_fail_after_after_cancellation() -> None:
event = anyio.Event()
async with anyio.create_task_group() as tg:
tg.cancel_scope.cancel()
await event.wait()
block_complete = False
with pytest.raises(TimeoutError):
with fail_after(0.1):
await anyio.sleep(0.5)
block_complete = True
assert not block_complete
async def test_fail_after_cancelled_before_deadline() -> None:
"""
Test that fail_after() won't raise TimeoutError if its scope is cancelled before the
deadline.
"""
with fail_after(1) as scope:
scope.cancel()
await checkpoint()
@pytest.mark.xfail(
    reason="There is currently no way to tell whether the cancellation was caused by "
    "the explicit cancel() call or by the deadline being exceeded"
)
async def test_fail_after_scope_cancelled_before_timeout() -> None:
with fail_after(0.1) as scope:
scope.cancel()
time.sleep(0.11) # noqa: ASYNC251
await checkpoint()
@pytest.mark.parametrize("delay", [0, 0.1], ids=["instant", "delayed"])
async def test_move_on_after(delay: float) -> None:
result = False
with move_on_after(delay) as scope:
await sleep(1)
result = True
assert not result
assert scope.cancel_called
assert scope.cancelled_caught
async def test_move_on_after_no_timeout() -> None:
result = False
with move_on_after(None) as scope:
assert scope.deadline == float("inf")
await sleep(0.1)
result = True
assert result
assert not scope.cancel_called
async def test_nested_move_on_after() -> None:
sleep_completed = inner_scope_completed = False
with move_on_after(0.1) as outer_scope:
assert current_effective_deadline() == outer_scope.deadline
with move_on_after(1) as inner_scope:
assert current_effective_deadline() == outer_scope.deadline
await sleep(2)
sleep_completed = True
inner_scope_completed = True
assert not sleep_completed
assert not inner_scope_completed
assert outer_scope.cancel_called
assert outer_scope.cancelled_caught
assert not inner_scope.cancel_called
assert not inner_scope.cancelled_caught
async def test_shielding() -> None:
async def cancel_when_ready() -> None:
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
inner_sleep_completed = outer_sleep_completed = False
async with create_task_group() as tg:
tg.start_soon(cancel_when_ready)
with move_on_after(10, shield=True) as inner_scope:
assert inner_scope.shield
await sleep(0.1)
inner_sleep_completed = True
await sleep(1)
outer_sleep_completed = True
assert inner_sleep_completed
assert not outer_sleep_completed
assert tg.cancel_scope.cancel_called
assert not inner_scope.cancel_called
async def test_cancel_from_shielded_scope() -> None:
async with create_task_group() as tg:
with CancelScope(shield=True) as inner_scope:
assert inner_scope.shield
tg.cancel_scope.cancel()
assert current_effective_deadline() == math.inf
assert not get_current_task().has_pending_cancellation()
await checkpoint()
assert current_effective_deadline() == -math.inf
assert get_current_task().has_pending_cancellation()
with pytest.raises(get_cancelled_exc_class()):
await sleep(0.01)
with pytest.raises(get_cancelled_exc_class()):
await sleep(0.01)
async def test_cancel_shielded_scope() -> None:
with CancelScope(shield=True) as cancel_scope:
assert cancel_scope.shield
cancel_scope.cancel()
assert current_effective_deadline() == -math.inf
assert get_current_task().has_pending_cancellation()
with pytest.raises(get_cancelled_exc_class()):
await checkpoint()
async def test_shielded_cleanup_after_cancel() -> None:
"""Regression test for #832."""
with CancelScope() as outer_scope:
outer_scope.cancel()
try:
await checkpoint()
finally:
assert current_effective_deadline() == -math.inf
assert get_current_task().has_pending_cancellation()
with CancelScope(shield=True): # noqa: ASYNC100
assert current_effective_deadline() == math.inf
assert not get_current_task().has_pending_cancellation()
assert current_effective_deadline() == -math.inf
assert get_current_task().has_pending_cancellation()
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_cleanup_after_native_cancel() -> None:
"""Regression test for #832."""
# See also https://github.com/python/cpython/pull/102815.
task = asyncio.current_task()
assert task
task.cancel()
with pytest.raises(asyncio.CancelledError):
try:
await checkpoint()
finally:
assert not get_current_task().has_pending_cancellation()
async def test_cancelled_not_caught() -> None:
with CancelScope() as scope: # noqa: ASYNC100
scope.cancel()
assert scope.cancel_called
assert not scope.cancelled_caught
async def test_cancelled_scope_based_checkpoint() -> None:
"""Regression test closely related to #698."""
with CancelScope() as outer_scope:
outer_scope.cancel()
# The following three lines are a way to implement a checkpoint function.
# See also https://github.com/python-trio/trio/issues/860.
with CancelScope() as inner_scope:
inner_scope.cancel()
await sleep_forever()
pytest.fail("checkpoint should have raised")
assert not inner_scope.cancelled_caught
assert outer_scope.cancelled_caught
async def test_cancelled_raises_beyond_origin_unshielded() -> None:
with CancelScope() as outer_scope:
with CancelScope() as inner_scope:
inner_scope.cancel()
try:
await checkpoint()
finally:
outer_scope.cancel()
pytest.fail("checkpoint should have raised")
pytest.fail("exiting the inner scope should've raised a cancellation error")
# Here, the outer scope is responsible for the cancellation, so the inner scope
# won't catch the cancellation exception, but the outer scope will
assert not inner_scope.cancelled_caught
assert outer_scope.cancelled_caught
async def test_cancelled_raises_beyond_origin_shielded() -> None:
code_between_scopes_was_run = False
with CancelScope() as outer_scope:
with CancelScope(shield=True) as inner_scope:
inner_scope.cancel()
try:
await checkpoint()
finally:
outer_scope.cancel()
pytest.fail("checkpoint should have raised")
code_between_scopes_was_run = True
# Here, the inner scope is the one responsible for cancellation, and given that the
# outer scope was also cancelled, it is not considered to have "caught" the
# cancellation, even though it swallows it, because the inner scope triggered it
assert code_between_scopes_was_run
assert inner_scope.cancelled_caught
assert not outer_scope.cancelled_caught
async def test_empty_taskgroup_contains_yield_point() -> None:
"""
Test that a task group yields at exit at least once, even with no child tasks to
wait on.
"""
outer_task_ran = False
async def outer_task() -> None:
nonlocal outer_task_ran
outer_task_ran = True
async with create_task_group() as tg_outer:
for _ in range(2): # this is to make sure Trio actually schedules outer_task()
async with create_task_group():
tg_outer.start_soon(outer_task)
assert outer_task_ran
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_cancel_host_asyncgen() -> None:
done = False
async def host_task() -> None:
nonlocal done
with CancelScope() as inner_scope:
inner_scope.cancel()
with pytest.raises(get_cancelled_exc_class()):
await checkpoint()
with pytest.raises(get_cancelled_exc_class()):
await checkpoint()
done = True
async def host_agen_fn() -> AsyncGenerator[None, None]:
await host_task()
yield
pytest.fail("host_agen_fn should only be __anext__ed once")
host_agen = host_agen_fn()
try:
loop = asyncio.get_running_loop()
await loop.create_task(host_agen.__anext__())
finally:
await host_agen.aclose()
assert done
async def test_shielding_immediate_scope_cancelled() -> None:
async def cancel_when_ready() -> None:
await wait_all_tasks_blocked()
scope.cancel()
sleep_completed = False
async with create_task_group() as tg:
with CancelScope(shield=True) as scope:
tg.start_soon(cancel_when_ready)
await sleep(0.5)
sleep_completed = True
assert not sleep_completed
async def test_shielding_mutate() -> None:
completed = False
async def task(task_status: TaskStatus) -> NoReturn:
nonlocal completed
with CancelScope() as scope:
# Enable the shield a little after the scope starts to make this test
# general, even though it has no bearing on the current implementation.
await sleep(0.1)
scope.shield = True
task_status.started()
await sleep(0.1)
completed = True
scope.shield = False
await sleep(1)
pytest.fail("Execution should not reach this point")
async with create_task_group() as tg:
await tg.start(task)
tg.cancel_scope.cancel()
assert completed
async def test_cancel_scope_in_child_task() -> None:
child_scope = None
async def child() -> None:
nonlocal child_scope
with CancelScope() as child_scope:
await sleep(2)
host_done = False
async with create_task_group() as tg:
tg.start_soon(child)
await wait_all_tasks_blocked()
assert child_scope is not None
child_scope.cancel()
await sleep(0.1)
host_done = True
assert host_done
assert not tg.cancel_scope.cancel_called
async def test_exception_cancels_siblings() -> None:
sleep_completed = False
async def child(fail: bool) -> None:
if fail:
raise Exception("foo")
else:
nonlocal sleep_completed
await sleep(1)
sleep_completed = True
with pytest.raises(ExceptionGroup) as exc:
async with create_task_group() as tg:
tg.start_soon(child, False)
await wait_all_tasks_blocked()
tg.start_soon(child, True)
assert len(exc.value.exceptions) == 1
assert str(exc.value.exceptions[0]) == "foo"
assert not sleep_completed
async def test_cancel_cascade() -> None:
async def do_something() -> NoReturn:
async with create_task_group() as tg2:
tg2.start_soon(sleep, 1, name="sleep")
pytest.fail("Execution should not reach this point")
async with create_task_group() as tg:
tg.start_soon(do_something, name="do_something")
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
async def test_cancelled_parent() -> None:
async def child() -> NoReturn:
with CancelScope():
await sleep(1)
raise Exception("foo")
async def parent(tg: TaskGroup) -> None:
await wait_all_tasks_blocked()
tg.start_soon(child)
async with create_task_group() as tg:
tg.start_soon(parent, tg)
tg.cancel_scope.cancel()
async def test_shielded_deadline() -> None:
with move_on_after(10):
with CancelScope(shield=True):
with move_on_after(1000):
assert current_effective_deadline() - current_time() > 900
await checkpoint()
async def test_deadline_reached_on_start() -> None:
with move_on_after(0):
await checkpoint()
pytest.fail("Execution should not reach this point")
async def test_deadline_moved() -> None:
with fail_after(0.1) as scope:
scope.deadline += 0.3
await sleep(0.2)
async def test_timeout_error_with_multiple_cancellations() -> None:
with pytest.raises(TimeoutError):
with fail_after(0.1):
async with create_task_group() as tg:
tg.start_soon(sleep, 2)
await sleep(2)
async def test_nested_fail_after() -> None:
async def killer(scope: CancelScope) -> None:
await wait_all_tasks_blocked()
scope.cancel()
async with create_task_group() as tg:
with CancelScope() as scope:
with CancelScope():
tg.start_soon(killer, scope)
with fail_after(1):
await sleep(2)
pytest.fail("Execution should not reach this point")
pytest.fail("Execution should not reach this point either")
pytest.fail("Execution should also not reach this point")
assert scope.cancel_called
async def test_nested_shield() -> None:
async def killer(scope: CancelScope) -> None:
await wait_all_tasks_blocked()
scope.cancel()
with pytest.raises(ExceptionGroup) as exc:
async with create_task_group() as tg:
with CancelScope() as scope:
with CancelScope(shield=True):
tg.start_soon(killer, scope)
with fail_after(0.2):
await sleep(2)
assert len(exc.value.exceptions) == 1
assert isinstance(exc.value.exceptions[0], TimeoutError)
async def test_triple_nested_shield_checkpoint_in_outer() -> None:
"""Regression test for #370."""
got_past_checkpoint = False
async def taskfunc() -> None:
nonlocal got_past_checkpoint
with CancelScope() as scope1:
with CancelScope() as scope2:
with CancelScope(shield=True):
scope1.cancel()
scope2.cancel()
await checkpoint()
await checkpoint()
got_past_checkpoint = True
async with create_task_group() as tg:
tg.start_soon(taskfunc)
assert not got_past_checkpoint
async def test_triple_nested_shield_checkpoint_in_middle() -> None:
got_past_checkpoint = False
async def taskfunc() -> None:
nonlocal got_past_checkpoint
with CancelScope() as scope1:
with CancelScope():
with CancelScope(shield=True):
scope1.cancel()
await checkpoint()
await checkpoint()
got_past_checkpoint = True
async with create_task_group() as tg:
tg.start_soon(taskfunc)
assert not got_past_checkpoint
async def test_exception_group_filtering() -> None:
"""Test that CancelledErrors are filtered out of nested exception groups."""
async def fail(name: str) -> NoReturn:
try:
await anyio.sleep(0.1)
finally:
raise Exception(f"{name} task failed")
async def fn() -> None:
async with anyio.create_task_group() as tg:
tg.start_soon(fail, "parent")
async with anyio.create_task_group() as tg2:
tg2.start_soon(fail, "child")
await anyio.sleep(1)
with pytest.raises(BaseExceptionGroup) as exc:
await fn()
assert len(exc.value.exceptions) == 2
assert str(exc.value.exceptions[0]) == "parent task failed"
assert isinstance(exc.value.exceptions[1], ExceptionGroup)
assert len(exc.value.exceptions[1].exceptions) == 1
assert str(exc.value.exceptions[1].exceptions[0]) == "child task failed"
async def test_cancel_propagation_with_inner_spawn() -> None:
async def g() -> NoReturn:
async with anyio.create_task_group() as tg2:
tg2.start_soon(anyio.sleep, 10)
await anyio.sleep(1)
pytest.fail("Execution should not have reached this line")
async with anyio.create_task_group() as tg:
tg.start_soon(g)
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
async def test_escaping_cancelled_error_from_cancelled_task() -> None:
"""
Regression test for issue #88. No CancelledError should escape the outer scope.
"""
with CancelScope() as scope:
with move_on_after(0.1):
await sleep(1)
scope.cancel()
@pytest.mark.skipif(
sys.version_info >= (3, 11),
reason="Generator based coroutines have been removed in Python 3.11",
)
@pytest.mark.filterwarnings(
'ignore:"@coroutine" decorator is deprecated:DeprecationWarning'
)
def test_cancel_generator_based_task() -> None:
async def native_coro_part() -> None:
with CancelScope() as scope:
asyncio.get_running_loop().call_soon(scope.cancel)
await asyncio.sleep(1)
pytest.fail("Execution should not have reached this line")
@asyncio.coroutine # type: ignore[attr-defined]
def generator_part() -> Generator[object, BaseException, None]:
yield from native_coro_part() # type: ignore[misc]
anyio.run(generator_part, backend="asyncio")
@pytest.mark.skipif(
sys.version_info >= (3, 11),
reason="Generator based coroutines have been removed in Python 3.11",
)
@pytest.mark.filterwarnings(
'ignore:"@coroutine" decorator is deprecated:DeprecationWarning'
)
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_schedule_old_style_coroutine_func() -> None:
"""
Test that we give a sensible error when a user tries to spawn a task from a
generator-style coroutine function.
"""
@asyncio.coroutine # type: ignore[attr-defined]
def corofunc() -> Generator[Any, Any, None]:
yield from asyncio.sleep(1) # type: ignore[misc]
async with create_task_group() as tg:
        funcname = (
            f"{__name__}.test_schedule_old_style_coroutine_func.<locals>.corofunc"
        )
with pytest.raises(
TypeError,
match=f"Expected {funcname}\\(\\) to return a coroutine, but the return "
f"value \\(\\) is not a coroutine object",
):
tg.start_soon(corofunc)
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_cancel_native_future_tasks() -> None:
async def wait_native_future() -> None:
loop = asyncio.get_running_loop()
await loop.create_future()
async with anyio.create_task_group() as tg:
tg.start_soon(wait_native_future)
tg.cancel_scope.cancel()
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_cancel_native_future_tasks_cancel_scope() -> None:
async def wait_native_future() -> None:
with anyio.CancelScope():
loop = asyncio.get_running_loop()
await loop.create_future()
async with anyio.create_task_group() as tg:
tg.start_soon(wait_native_future)
tg.cancel_scope.cancel()
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_cancel_completed_task() -> None:
loop = asyncio.get_running_loop()
old_exception_handler = loop.get_exception_handler()
exceptions = []
def exception_handler(*args: object, **kwargs: object) -> None:
exceptions.append((args, kwargs))
loop.set_exception_handler(exception_handler)
try:
async def noop() -> None:
pass
async with anyio.create_task_group() as tg:
tg.start_soon(noop)
tg.cancel_scope.cancel()
assert exceptions == []
finally:
loop.set_exception_handler(old_exception_handler)
async def test_task_in_sync_spawn_callback() -> None:
outer_task_id = anyio.get_current_task().id
inner_task_id = None
def task_wrap() -> Coroutine[object, object, None]:
assert anyio.get_current_task().id == outer_task_id
async def corofn() -> None:
nonlocal inner_task_id
inner_task_id = anyio.get_current_task().id
return corofn()
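    # task_wrap() is a plain sync callable that returns a coroutine: it must run in
    # the spawning task, while the returned coroutine runs in the new child task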
async with create_task_group() as tg:
tg.start_soon(task_wrap)
assert inner_task_id is not None
assert inner_task_id != outer_task_id
async def test_shielded_cancel_sleep_time() -> None:
"""
    Test that cancelling a shielded task spends more time sleeping than busy-looping
    on cancellation delivery.
"""
event = anyio.Event()
hang_time = 0.2
async def set_event() -> None:
await sleep(hang_time)
event.set()
async def never_cancel_task() -> None:
with CancelScope(shield=True):
await sleep(0.2)
await event.wait()
async with create_task_group() as tg:
tg.start_soon(set_event)
async with create_task_group() as tg:
tg.start_soon(never_cancel_task)
tg.cancel_scope.cancel()
process_time = time.process_time()
assert (time.process_time() - process_time) < hang_time
async def test_cancelscope_wrong_exit_order() -> None:
"""
Test that a RuntimeError is raised if the task tries to exit cancel scopes in the
wrong order.
"""
scope1 = CancelScope()
scope2 = CancelScope()
scope1.__enter__()
scope2.__enter__()
pytest.raises(RuntimeError, scope1.__exit__, None, None, None)
async def test_cancelscope_exit_before_enter() -> None:
"""
Test that a RuntimeError is raised if one tries to exit a cancel scope before
entering.
"""
scope = CancelScope()
pytest.raises(RuntimeError, scope.__exit__, None, None, None)
@pytest.mark.parametrize(
"anyio_backend", asyncio_params
) # trio does not check for this yet
async def test_cancelscope_exit_in_wrong_task() -> None:
async def enter_scope(scope: CancelScope) -> None:
scope.__enter__()
async def exit_scope(scope: CancelScope) -> None:
scope.__exit__(None, None, None)
scope = CancelScope()
async with create_task_group() as tg:
tg.start_soon(enter_scope, scope)
with pytest.raises(ExceptionGroup) as exc:
async with create_task_group() as tg:
tg.start_soon(exit_scope, scope)
assert len(exc.value.exceptions) == 1
assert str(exc.value.exceptions[0]) == (
"Attempted to exit cancel scope in a different task than it was entered in"
)
def test_unhandled_exception_group(caplog: pytest.LogCaptureFixture) -> None:
def crash() -> NoReturn:
raise KeyboardInterrupt
async def nested() -> None:
async with anyio.create_task_group() as tg:
tg.start_soon(anyio.sleep, 5)
await anyio.sleep(5)
async def main() -> NoReturn:
async with anyio.create_task_group() as tg:
tg.start_soon(nested)
await wait_all_tasks_blocked()
asyncio.get_running_loop().call_soon(crash)
await anyio.sleep(5)
pytest.fail("Execution should never reach this point")
with pytest.raises(KeyboardInterrupt):
anyio.run(main, backend="asyncio")
assert not caplog.messages
async def test_single_cancellation_exc() -> None:
"""
    Test that only a single cancellation exception bubbles out of the task group when
    it was cancelled via an outer scope and no actual errors were raised.
"""
with CancelScope() as outer:
try:
async with create_task_group() as tg:
tg.start_soon(sleep, 5)
await wait_all_tasks_blocked()
outer.cancel()
await sleep(5)
except BaseException as exc:
if isinstance(exc, get_cancelled_exc_class()):
raise
pytest.fail(f"Raised the wrong type of exception: {exc}")
else:
pytest.fail("Did not raise a cancellation exception")
async def test_start_soon_parent_id() -> None:
root_task_id = get_current_task().id
parent_id: int | None = None
async def subtask() -> None:
nonlocal parent_id
parent_id = get_current_task().parent_id
async def starter_task() -> None:
tg.start_soon(subtask)
async with anyio.create_task_group() as tg:
tg.start_soon(starter_task)
assert parent_id == root_task_id
async def test_start_parent_id() -> None:
root_task_id = get_current_task().id
starter_task_id: int | None = None
initial_parent_id: int | None = None
permanent_parent_id: int | None = None
async def subtask(*, task_status: TaskStatus) -> None:
nonlocal initial_parent_id, permanent_parent_id
initial_parent_id = get_current_task().parent_id
task_status.started()
permanent_parent_id = get_current_task().parent_id
async def starter_task() -> None:
nonlocal starter_task_id
starter_task_id = get_current_task().id
await tg.start(subtask)
async with anyio.create_task_group() as tg:
tg.start_soon(starter_task)
assert initial_parent_id != permanent_parent_id
assert initial_parent_id == starter_task_id
assert permanent_parent_id == root_task_id
@pytest.mark.skipif(
sys.version_info < (3, 11),
reason="Task uncancelling is only supported on Python 3.11",
)
@pytest.mark.parametrize("anyio_backend", asyncio_params)
class TestUncancel:
async def test_uncancel_after_native_cancel(self) -> None:
task = cast(asyncio.Task, asyncio.current_task())
with pytest.raises(asyncio.CancelledError), CancelScope():
task.cancel()
await checkpoint()
assert task.cancelling() == 1
task.uncancel()
async def test_uncancel_after_scope_cancel(self) -> None:
task = cast(asyncio.Task, asyncio.current_task())
with CancelScope() as scope:
scope.cancel()
await checkpoint()
assert task.cancelling() == 0
async def test_uncancel_after_scope_and_native_cancel(self) -> None:
task = cast(asyncio.Task, asyncio.current_task())
with pytest.raises(asyncio.CancelledError), CancelScope() as scope:
scope.cancel()
task.cancel()
await checkpoint()
assert task.cancelling() == 1
task.uncancel()
async def test_cancel_message_replaced(self) -> None:
task = asyncio.current_task()
assert task
try:
task.cancel()
await checkpoint()
except asyncio.CancelledError:
try:
with CancelScope() as scope:
scope.cancel()
try:
await checkpoint()
except asyncio.CancelledError as exc:
raise asyncio.CancelledError from exc
except asyncio.CancelledError:
pytest.fail("Should have swallowed the CancelledError")
async def test_cancel_counter_nested_scopes(self) -> None:
with CancelScope() as root_scope:
with CancelScope():
root_scope.cancel()
await checkpoint()
assert not cast(asyncio.Task, asyncio.current_task()).cancelling()
async def test_uncancel_after_taskgroup_cancelled(self) -> None:
"""
Test that a cancel scope only uncancels the host task as many times as it has
cancelled that specific task, and won't count child task cancellations towards
that amount.
"""
async def child_task(task_status: TaskStatus[None]) -> None:
async with create_task_group() as tg:
tg.start_soon(sleep, 3)
await wait_all_tasks_blocked()
task_status.started()
task = asyncio.current_task()
assert task
with pytest.raises(CancelledError):
async with create_task_group() as tg:
await tg.start(child_task)
task.cancel()
assert task.cancelling() == 1
async def test_uncancel_after_group_aexit_native_cancel(self) -> None:
"""Closely related to #695."""
done = anyio.Event()
async def shield_task() -> None:
with CancelScope(shield=True):
await done.wait()
async def middle_task() -> None:
async with create_task_group() as tg:
tg.start_soon(shield_task)
task = asyncio.get_running_loop().create_task(middle_task())
try:
await wait_all_tasks_blocked()
task.cancel("native 1")
await sleep(0.1)
task.cancel("native 2")
finally:
done.set()
with pytest.raises(asyncio.CancelledError) as exc:
await task
# Neither native cancellation should have been uncancelled, and the latest
# cancellation message should be the one coming out of the task group.
assert task.cancelling() == 2
assert str(exc.value) == "native 2"
async def test_uncancel_after_child_task_failed(self) -> None:
async def taskfunc() -> None:
raise Exception("dummy error")
with pytest.raises(ExceptionGroup) as exc_info:
async with create_task_group() as tg:
tg.start_soon(taskfunc)
assert len(exc_info.value.exceptions) == 1
assert str(exc_info.value.exceptions[0]) == "dummy error"
assert not cast(asyncio.Task, asyncio.current_task()).cancelling()
async def test_uncancel_cancelled_scope_based_checkpoint(self) -> None:
"""See also test_cancelled_scope_based_checkpoint."""
task = asyncio.current_task()
assert task
with CancelScope() as outer_scope:
outer_scope.cancel()
try:
# The following three lines are a way to implement a checkpoint
# function. See also https://github.com/python-trio/trio/issues/860.
with CancelScope() as inner_scope:
inner_scope.cancel()
await sleep_forever()
finally:
assert isinstance(sys.exc_info()[1], asyncio.CancelledError)
assert task.cancelling()
assert not task.cancelling()
async def test_cancel_before_entering_task_group() -> None:
with CancelScope() as scope:
scope.cancel()
try:
async with create_task_group():
pass
except get_cancelled_exc_class():
pytest.fail("This should not raise a cancellation exception")
async def test_reraise_cancelled_in_excgroup() -> None:
def handler(excgrp: BaseExceptionGroup) -> None:
raise
with CancelScope() as scope:
scope.cancel()
with catch({get_cancelled_exc_class(): handler}):
await anyio.sleep_forever()
async def test_cancel_child_task_when_host_is_shielded() -> None:
# Regression test for #642
# Tests that cancellation propagates to a child task even if the host task is within
# a shielded cancel scope.
cancelled = anyio.Event()
async def wait_cancel() -> None:
try:
await anyio.sleep_forever()
except anyio.get_cancelled_exc_class():
cancelled.set()
raise
with CancelScope() as parent_scope:
async with anyio.create_task_group() as task_group:
task_group.start_soon(wait_cancel)
await wait_all_tasks_blocked()
with CancelScope(shield=True), fail_after(1):
parent_scope.cancel()
await cancelled.wait()
async def test_start_cancels_parent_scope() -> None:
"""Regression test for #685 / #710."""
started: bool = False
async def in_task_group(task_status: TaskStatus[None]) -> None:
nonlocal started
started = True
await sleep_forever()
async with create_task_group() as tg:
with CancelScope() as inner_scope:
inner_scope.cancel()
await tg.start(in_task_group)
assert started
assert not tg.cancel_scope.cancel_called
@pytest.mark.skipif(
sys.implementation.name == "pypy",
reason=(
"gc.get_referrers is broken on PyPy see "
"https://github.com/pypy/pypy/issues/5075"
),
)
class TestRefcycles:
async def test_exception_refcycles_direct(self) -> None:
"""
        Test that TaskGroup doesn't keep a reference to the raised ExceptionGroup.
        Note: this test never failed on anyio, but it is kept to stay aligned with
        the corresponding CPython tests.
"""
tg = create_task_group()
exc = None
class _Done(Exception):
pass
try:
async with tg:
raise _Done
except ExceptionGroup as e:
exc = e
assert exc is not None
assert gc.get_referrers(exc) == no_other_refs()
async def test_exception_refcycles_errors(self) -> None:
"""Test that TaskGroup deletes self._exceptions, and __aexit__ args"""
tg = create_task_group()
exc = None
class _Done(Exception):
pass
try:
async with tg:
raise _Done
except ExceptionGroup as excs:
exc = excs.exceptions[0]
assert isinstance(exc, _Done)
assert gc.get_referrers(exc) == no_other_refs()
async def test_exception_refcycles_parent_task(self) -> None:
"""Test that TaskGroup's cancel_scope deletes self._host_task"""
tg = create_task_group()
exc = None
class _Done(Exception):
pass
async def coro_fn() -> None:
async with tg:
raise _Done
try:
async with anyio.create_task_group() as tg2:
tg2.start_soon(coro_fn)
except ExceptionGroup as excs:
exc = excs.exceptions[0].exceptions[0]
assert isinstance(exc, _Done)
assert gc.get_referrers(exc) == no_other_refs()
async def test_exception_refcycles_propagate_cancellation_error(self) -> None:
"""Test that TaskGroup deletes cancelled_exc"""
tg = anyio.create_task_group()
exc = None
with CancelScope() as cs:
cs.cancel()
try:
async with tg:
await checkpoint()
except get_cancelled_exc_class() as e:
exc = e
raise
assert isinstance(exc, get_cancelled_exc_class())
assert gc.get_referrers(exc) == no_other_refs()
async def test_exception_refcycles_base_error(self) -> None:
"""
Test for BaseExceptions.
        anyio doesn't treat these differently, so this test is redundant, but it was
        copied from CPython's asyncio.TaskGroup tests for completeness.
"""
class MyKeyboardInterrupt(KeyboardInterrupt):
pass
tg = create_task_group()
exc = None
try:
async with tg:
raise MyKeyboardInterrupt
except BaseExceptionGroup as excs:
exc = excs.exceptions[0]
assert isinstance(exc, MyKeyboardInterrupt)
assert gc.get_referrers(exc) == no_other_refs()
class TestTaskStatusTyping:
"""
These tests do not do anything at run time, but since the test suite is also checked
with a static type checker, it ensures that the `TaskStatus` typing works as
intended.
"""
async def typetest_None(*, task_status: TaskStatus[None]) -> None:
task_status.started()
task_status.started(None)
async def typetest_None_Union(*, task_status: TaskStatus[int | None]) -> None:
task_status.started()
task_status.started(None)
async def typetest_non_None(*, task_status: TaskStatus[int]) -> None:
# We use `type: ignore` and `--warn-unused-ignores` to get type checking errors
# if these ever stop failing.
task_status.started() # type: ignore[call-arg]
task_status.started(None) # type: ignore[arg-type]
async def typetest_variance_good(*, task_status: TaskStatus[float]) -> None:
task_status2: TaskStatus[int] = task_status
task_status2.started(0)
async def typetest_variance_bad(*, task_status: TaskStatus[int]) -> None:
# We use `type: ignore` and `--warn-unused-ignores` to get type checking errors
# if these ever stop failing.
task_status2: TaskStatus[float] = task_status # type: ignore[assignment]
task_status2.started(0.0)
async def typetest_optional_status(
*,
task_status: TaskStatus[int] = TASK_STATUS_IGNORED,
) -> None:
task_status.started(1)
@pytest.mark.skipif(
sys.version_info < (3, 12),
reason="Eager task factories require Python 3.12",
)
@pytest.mark.parametrize("anyio_backend", ["asyncio"])
@pytest.mark.parametrize("use_custom_eager_factory", [False, True])
async def test_eager_task_factory(
request: FixtureRequest, use_custom_eager_factory: bool
) -> None:
ran = False
async def sync_coro() -> None:
nonlocal ran
ran = True
# This should trigger fetching the task state
with CancelScope(): # noqa: ASYNC100
pass
def create_custom_task(
coro: Coroutine[Any, Any, Any], /, **kwargs: Any
) -> asyncio.Task[Any]:
return asyncio.Task(coro, **kwargs)
loop = asyncio.get_running_loop()
old_task_factory = loop.get_task_factory()
if use_custom_eager_factory:
loop.set_task_factory(asyncio.create_eager_task_factory(create_custom_task))
else:
loop.set_task_factory(asyncio.eager_task_factory)
request.addfinalizer(lambda: loop.set_task_factory(old_task_factory))
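    # The child must not start executing during start_soon() even when the event
    # loop uses an eager task factory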
async with create_task_group() as tg:
tg.start_soon(sync_coro)
assert not ran
tg.cancel_scope.cancel()
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_patched_asyncio_task(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(
asyncio,
"Task",
asyncio.tasks._PyTask, # type: ignore[attr-defined]
)
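    # Forcing the pure-Python Task implementation verifies that anyio's task
    # handling doesn't depend on the C-accelerated asyncio.Task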
async with create_task_group() as tg:
tg.start_soon(sleep, 0)
async def test_exception_groups_suppresses_exc_context() -> None:
with pytest.raises(
cast(type[ExceptionGroup[Exception]], ExceptionGroup)
) as exc_info:
async with create_task_group():
raise Exception("Error")
assert exc_info.value.__suppress_context__
async def test_cancel_reason() -> None:
with CancelScope() as scope:
scope.cancel("test reason")
with pytest.raises(get_cancelled_exc_class()) as exc_info:
await checkpoint()
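    # Both the reason passed to cancel() and the cancelled task's name should appear
    # in the cancellation exception's message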
task = get_current_task()
assert task and task.name
exc_info.match("test reason")
exc_info.match(task.name)
anyio-4.11.0/tests/test_tempfile.py 0000664 0000000 0000000 00000013163 15064462627 0017314 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import os
import pathlib
import shutil
import tempfile
from typing import AnyStr
from unittest.mock import patch
import pytest
from anyio import (
NamedTemporaryFile,
SpooledTemporaryFile,
TemporaryDirectory,
TemporaryFile,
gettempdir,
gettempdirb,
mkdtemp,
mkstemp,
)
class TestTemporaryFile:
async def test_temporary_file(self) -> None:
data = b"temporary file data"
async with TemporaryFile[bytes]() as af:
await af.write(data)
await af.seek(0)
result = await af.read()
assert result == data
assert af.closed
class TestNamedTemporaryFile:
async def test_named_temporary_file(self) -> None:
data = b"named temporary file data"
async with NamedTemporaryFile[bytes]() as af:
filename = af.name
assert os.path.exists(filename) # type: ignore[arg-type]
await af.write(data)
await af.seek(0)
assert await af.read() == data
assert not os.path.exists(filename) # type: ignore[arg-type]
async def test_exception_handling(self) -> None:
async with NamedTemporaryFile[bytes]() as af:
filename = af.name
assert os.path.exists(filename) # type: ignore[arg-type]
assert not os.path.exists(filename) # type: ignore[arg-type]
with pytest.raises(ValueError):
await af.write(b"should fail")
class TestSpooledTemporaryFile:
    async def test_write_without_rolled(self) -> None:
rollover_called = False
async def fake_rollover() -> None:
nonlocal rollover_called
rollover_called = True
await original_rollover()
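        # Writes that fit within max_size stay in memory; the write that pushes the
        # buffer past max_size must trigger a rollover to a real temporary file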
async with SpooledTemporaryFile(max_size=10) as stf:
original_rollover = stf.rollover
with patch.object(stf, "rollover", fake_rollover):
assert await stf.write(b"12345") == 5
assert not rollover_called
await stf.write(b"67890X")
assert rollover_called
async def test_writelines(self) -> None:
rollover_called = False
async def fake_rollover() -> None:
nonlocal rollover_called
rollover_called = True
await original_rollover()
async with SpooledTemporaryFile(max_size=20) as stf:
original_rollover = stf.rollover
with patch.object(stf, "rollover", fake_rollover):
await stf.writelines([b"hello", b"world"])
assert not rollover_called
await stf.seek(0)
assert await stf.read() == b"helloworld"
await stf.writelines([b"1234567890123456"])
assert rollover_called
async def test_closed_state(self) -> None:
async with SpooledTemporaryFile(max_size=10) as stf:
assert not stf.closed
assert stf.closed
async def test_exact_boundary_no_rollover(self) -> None:
async with SpooledTemporaryFile(max_size=10) as stf:
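            # Writing exactly max_size bytes must not trigger a rollover, but one
            # more byte must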
await stf.write(b"0123456789")
assert not stf._rolled
await stf.write(b"x")
assert stf._rolled
class TestTemporaryDirectory:
async def test_context_manager(self) -> None:
async with TemporaryDirectory() as td:
td_path = pathlib.Path(td)
assert td_path.exists() and td_path.is_dir()
file_path = td_path / "test.txt"
file_path.write_text("temp dir test", encoding="utf-8")
assert file_path.exists()
assert not td_path.exists()
async def test_cleanup_method(self) -> None:
td = TemporaryDirectory()
td_str = await td.__aenter__()
td_path = pathlib.Path(td_str)
file_path = td_path / "file.txt"
file_path.write_text("cleanup test", encoding="utf-8")
await td.cleanup()
assert not td_path.exists()
async def test_exception_handling(self) -> None:
async with TemporaryDirectory() as td:
td_path = pathlib.Path(td)
assert td_path.exists() and td_path.is_dir()
assert not td_path.exists()
with pytest.raises(FileNotFoundError):
(td_path / "nonexistent.txt").write_text("should fail", encoding="utf-8")
@pytest.mark.parametrize(
"suffix, prefix, text, content",
[
(".txt", "mkstemp_", True, "mkstemp"),
(b".txt", b"mkstemp_", False, b"mkstemp"),
],
)
async def test_mkstemp(
suffix: AnyStr,
prefix: AnyStr,
text: bool,
content: AnyStr,
) -> None:
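    # Like tempfile.mkstemp(), this returns an OS-level file descriptor and a path;
    # the path type (str or bytes) follows the types of suffix/prefix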
fd, path = await mkstemp(suffix=suffix, prefix=prefix, text=text)
assert isinstance(fd, int)
if text:
assert isinstance(path, str)
else:
assert isinstance(path, bytes)
if text:
with os.fdopen(fd, "w", encoding="utf-8") as f:
f.write(content)
with open(path, encoding="utf-8") as f:
read_content = f.read()
else:
with os.fdopen(fd, "wb") as f:
f.write(content)
with open(os.fsdecode(path), "rb") as f:
read_content = f.read()
assert read_content == content
os.remove(path)
@pytest.mark.parametrize("prefix", [b"mkdtemp_", "mkdtemp_"])
async def test_mkdtemp(prefix: AnyStr) -> None:
d = await mkdtemp(prefix=prefix)
if isinstance(d, bytes):
dp = pathlib.Path(os.fsdecode(d))
else:
dp = pathlib.Path(d)
assert dp.is_dir()
shutil.rmtree(dp)
async def test_gettemp_functions() -> None:
tdir = await gettempdir()
tdirb = await gettempdirb()
assert tdir == tempfile.gettempdir()
assert tdirb == tempfile.gettempdirb()
anyio-4.11.0/tests/test_to_interpreter.py 0000664 0000000 0000000 00000002673 15064462627 0020560 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import sys
from collections.abc import AsyncGenerator
from functools import partial
import pytest
from pytest import fixture
from anyio import to_interpreter
pytestmark = [
pytest.mark.skipif(sys.version_info < (3, 13), reason="requires Python 3.13+"),
]
@fixture(autouse=True)
async def destroy_workers() -> AsyncGenerator[None]:
yield
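    # Destroy any subinterpreter workers left idle by the test so that no state
    # leaks between tests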
idle_workers = to_interpreter._idle_workers.get()
for worker in idle_workers:
worker.destroy()
idle_workers.clear()
async def test_run_sync() -> None:
"""
    Test that the function runs in a different interpreter than the main one, and in
    the same interpreter on both calls.
"""
import _interpreters
main_interpreter_id, _ = _interpreters.get_current()
interpreter_id, _ = await to_interpreter.run_sync(_interpreters.get_current)
interpreter_id_2, _ = await to_interpreter.run_sync(_interpreters.get_current)
assert interpreter_id == interpreter_id_2
assert interpreter_id != main_interpreter_id
async def test_args_kwargs() -> None:
"""Test that partial() can be used to pass keyword arguments."""
result = await to_interpreter.run_sync(partial(sorted, reverse=True), ["a", "b"])
assert result == ["b", "a"]
async def test_exception() -> None:
"""Test that exceptions are delivered properly."""
with pytest.raises(ValueError, match="invalid literal for int"):
assert await to_interpreter.run_sync(int, "a")
anyio-4.11.0/tests/test_to_process.py 0000664 0000000 0000000 00000007632 15064462627 0017673 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import os
import sys
import time
from functools import partial
from pathlib import Path
from unittest.mock import Mock
import pytest
from pytest import MonkeyPatch
from anyio import (
CancelScope,
create_task_group,
fail_after,
to_process,
wait_all_tasks_blocked,
)
from anyio.abc import Process
async def test_run_sync_in_process_pool() -> None:
"""
    Test that the function runs in a different process than the parent, and in the
    same worker process on both calls.
"""
worker_pid = await to_process.run_sync(os.getpid)
assert worker_pid != os.getpid()
assert await to_process.run_sync(os.getpid) == worker_pid
async def test_identical_sys_path() -> None:
"""Test that partial() can be used to pass keyword arguments."""
assert await to_process.run_sync(eval, "sys.path") == sys.path
async def test_partial() -> None:
"""Test that partial() can be used to pass keyword arguments."""
assert await to_process.run_sync(partial(sorted, reverse=True), ["a", "b"]) == [
"b",
"a",
]
async def test_exception() -> None:
"""Test that exceptions are delivered properly."""
with pytest.raises(ValueError, match="invalid literal for int"):
assert await to_process.run_sync(int, "a")
async def test_print() -> None:
"""Test that print() won't interfere with parent-worker communication."""
worker_pid = await to_process.run_sync(os.getpid)
await to_process.run_sync(print, "hello")
await to_process.run_sync(print, "world")
assert await to_process.run_sync(os.getpid) == worker_pid
async def test_cancel_before() -> None:
"""
Test that starting to_process.run_sync() in a cancelled scope does not cause a
worker process to be reserved.
"""
with CancelScope() as scope:
scope.cancel()
await to_process.run_sync(os.getpid)
pytest.raises(LookupError, to_process._process_pool_workers.get)
@pytest.mark.usefixtures("deactivate_blockbuster")
async def test_cancel_during() -> None:
"""
Test that cancelling an operation on the worker process causes the process to be
killed.
"""
worker_pid = await to_process.run_sync(os.getpid)
with fail_after(4):
async with create_task_group() as tg:
tg.start_soon(partial(to_process.run_sync, cancellable=True), time.sleep, 5)
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
# The previous worker was killed so we should get a new one now
assert await to_process.run_sync(os.getpid) != worker_pid
async def test_exec_while_pruning() -> None:
"""
Test that in the case when one or more idle workers are pruned, the originally
selected idle worker is re-added to the queue of idle workers.
"""
worker_pid1 = await to_process.run_sync(os.getpid)
workers = to_process._process_pool_workers.get()
idle_workers = to_process._process_pool_idle_workers.get()
real_worker = next(iter(workers))
fake_idle_process = Mock(Process)
workers.add(fake_idle_process)
try:
# Add a mock worker process that's guaranteed to be eligible for pruning
idle_workers.appendleft(
(fake_idle_process, -to_process.WORKER_MAX_IDLE_TIME - 1)
)
worker_pid2 = await to_process.run_sync(os.getpid)
assert worker_pid1 == worker_pid2
fake_idle_process.kill.assert_called_once_with()
assert idle_workers[0][0] is real_worker
finally:
workers.discard(fake_idle_process)
async def test_nonexistent_main_module(
monkeypatch: MonkeyPatch, tmp_path: Path
) -> None:
"""
Test that worker process creation won't fail if the detected path to the `__main__`
module doesn't exist. Regression test for #696.
"""
script_path = tmp_path / "badscript"
script_path.touch()
monkeypatch.setattr("__main__.__file__", str(script_path / "__main__.py"))
await to_process.run_sync(os.getpid)
anyio-4.11.0/tests/test_to_thread.py 0000664 0000000 0000000 00000026063 15064462627 0017463 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import asyncio
import gc
import sys
import threading
import time
from concurrent.futures import Future, ThreadPoolExecutor
from contextvars import ContextVar
from functools import partial
from typing import Any, NoReturn
import pytest
import sniffio
import anyio.to_thread
from anyio import (
CapacityLimiter,
Event,
create_task_group,
from_thread,
sleep,
to_thread,
wait_all_tasks_blocked,
)
from anyio.from_thread import BlockingPortalProvider
from .conftest import asyncio_params, no_other_refs
async def test_run_in_thread_cancelled() -> None:
state = 0
def thread_worker() -> None:
nonlocal state
state = 2
async def worker() -> None:
nonlocal state
state = 1
await to_thread.run_sync(thread_worker)
state = 3
async with create_task_group() as tg:
tg.start_soon(worker)
tg.cancel_scope.cancel()
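    # The scope was cancelled before run_sync() could dispatch to a thread:
    # thread_worker never ran (state would be 2) and the task never resumed past
    # the await (state would be 3).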
assert state == 1
async def test_run_in_thread_exception() -> None:
def thread_worker() -> NoReturn:
raise ValueError("foo")
with pytest.raises(ValueError) as exc:
await to_thread.run_sync(thread_worker)
exc.match("^foo$")
async def test_run_in_custom_limiter() -> None:
max_active_threads = 0
def thread_worker() -> None:
nonlocal max_active_threads
active_threads.add(threading.current_thread())
max_active_threads = max(max_active_threads, len(active_threads))
event.wait(1)
active_threads.remove(threading.current_thread())
async def task_worker() -> None:
await to_thread.run_sync(thread_worker, limiter=limiter)
event = threading.Event()
limiter = CapacityLimiter(3)
active_threads: set[threading.Thread] = set()
async with create_task_group() as tg:
for _ in range(4):
tg.start_soon(task_worker)
await sleep(0.1)
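        # Four tasks compete for three limiter tokens, so no more than three
        # worker threads may be active at once.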
assert len(active_threads) == 3
assert limiter.borrowed_tokens == 3
event.set()
assert len(active_threads) == 0
assert max_active_threads == 3
@pytest.mark.parametrize(
"abandon_on_cancel, expected_last_active",
[
pytest.param(False, "task", id="noabandon"),
pytest.param(True, "thread", id="abandon"),
],
)
async def test_cancel_worker_thread(
abandon_on_cancel: bool, expected_last_active: str
) -> None:
"""
    Test that when a task running a worker thread is cancelled, the cancellation is
    not acted on until the thread finishes (abandon_on_cancel=False), or that the
    thread keeps running while the task itself is cancelled right away
    (abandon_on_cancel=True).
"""
last_active: str | None = None
def thread_worker() -> None:
nonlocal last_active
from_thread.run_sync(sleep_event.set)
time.sleep(0.2)
last_active = "thread"
from_thread.run_sync(finish_event.set)
async def task_worker() -> None:
nonlocal last_active
try:
await to_thread.run_sync(thread_worker, abandon_on_cancel=abandon_on_cancel)
finally:
last_active = "task"
sleep_event = Event()
finish_event = Event()
async with create_task_group() as tg:
tg.start_soon(task_worker)
await sleep_event.wait()
tg.cancel_scope.cancel()
await finish_event.wait()
assert last_active == expected_last_active
async def test_cancel_wait_on_thread() -> None:
event = threading.Event()
future: Future[bool] = Future()
def wait_event() -> None:
future.set_result(event.wait(1))
async with create_task_group() as tg:
tg.start_soon(partial(to_thread.run_sync, abandon_on_cancel=True), wait_event)
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
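    # The abandoned thread is still blocked in event.wait(); setting the event from
    # another worker thread lets it resolve the future with True.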
await to_thread.run_sync(event.set)
assert future.result(1)
async def test_deprecated_cancellable_param() -> None:
with pytest.warns(DeprecationWarning, match="The `cancellable=`"):
await to_thread.run_sync(bool, cancellable=True)
async def test_contextvar_propagation() -> None:
var = ContextVar("var", default=1)
var.set(6)
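    # The worker thread runs the callable in a copy of the caller's context, so it
    # observes the updated value.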
assert await to_thread.run_sync(var.get) == 6
async def test_asynclib_detection() -> None:
with pytest.raises(sniffio.AsyncLibraryNotFoundError):
await to_thread.run_sync(sniffio.current_async_library)
@pytest.mark.parametrize("anyio_backend", asyncio_params)
async def test_asyncio_cancel_native_task() -> None:
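    """
    Test that cancelling the asyncio task directly (instead of through a cancel
    scope) while it sits in an abandoned worker thread call does not raise.
    """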
task: asyncio.Task[None] | None = None
async def run_in_thread() -> None:
nonlocal task
task = asyncio.current_task()
await to_thread.run_sync(time.sleep, 0.2, abandon_on_cancel=True)
async with create_task_group() as tg:
tg.start_soon(run_in_thread)
await wait_all_tasks_blocked()
assert task is not None
task.cancel()
def test_asyncio_no_root_task(asyncio_event_loop: asyncio.AbstractEventLoop) -> None:
"""
Regression test for #264.
Ensures that to_thread.run_sync() does not raise an error when there is no root
    task, but instead tries to find the topmost parent task by traversing the cancel
scope tree, or failing that, uses the current task to set up a shutdown callback.
"""
async def run_in_thread() -> None:
try:
await to_thread.run_sync(time.sleep, 0)
finally:
asyncio_event_loop.call_soon(asyncio_event_loop.stop)
task = asyncio_event_loop.create_task(run_in_thread())
asyncio_event_loop.run_forever()
task.result()
# Wait for worker threads to exit
for t in threading.enumerate():
if t.name == "AnyIO worker thread":
t.join(2)
assert not t.is_alive()
def test_asyncio_future_callback_partial(
asyncio_event_loop: asyncio.AbstractEventLoop,
) -> None:
"""
Regression test for #272.
Ensures that futures with partial callbacks are handled correctly when the root task
cannot be determined.
"""
def func(future: object) -> None:
pass
async def sleep_sync() -> None:
return await to_thread.run_sync(time.sleep, 0)
task = asyncio_event_loop.create_task(sleep_sync())
task.add_done_callback(partial(func))
asyncio_event_loop.run_until_complete(task)
def test_asyncio_run_sync_no_asyncio_run(
asyncio_event_loop: asyncio.AbstractEventLoop,
) -> None:
"""Test that the thread pool shutdown callback does not raise an exception."""
def exception_handler(loop: object, context: Any = None) -> None:
exceptions.append(context["exception"])
exceptions: list[BaseException] = []
asyncio_event_loop.set_exception_handler(exception_handler)
asyncio_event_loop.run_until_complete(to_thread.run_sync(time.sleep, 0))
assert not exceptions
def test_asyncio_run_sync_multiple(
asyncio_event_loop: asyncio.AbstractEventLoop,
) -> None:
"""Regression test for #304."""
asyncio_event_loop.call_later(0.5, asyncio_event_loop.stop)
for _ in range(3):
asyncio_event_loop.run_until_complete(to_thread.run_sync(time.sleep, 0))
for t in threading.enumerate():
if t.name == "AnyIO worker thread":
t.join(2)
assert not t.is_alive()
def test_asyncio_no_recycle_stopping_worker(
asyncio_event_loop: asyncio.AbstractEventLoop,
) -> None:
"""Regression test for #323."""
async def taskfunc1() -> None:
await anyio.to_thread.run_sync(time.sleep, 0)
event1.set()
await event2.wait()
async def taskfunc2() -> None:
await event1.wait()
asyncio_event_loop.call_soon(event2.set)
await anyio.to_thread.run_sync(time.sleep, 0)
# At this point, the worker would be stopped but still in the idle workers pool,
# so the following would hang prior to the fix
await anyio.to_thread.run_sync(time.sleep, 0)
event1 = asyncio.Event()
event2 = asyncio.Event()
task1 = asyncio_event_loop.create_task(taskfunc1())
task2 = asyncio_event_loop.create_task(taskfunc2())
asyncio_event_loop.run_until_complete(asyncio.gather(task1, task2))
async def test_stopiteration() -> None:
"""
    Test that raising StopIteration in a worker thread raises a RuntimeError in the
    caller.
"""
def raise_stopiteration() -> NoReturn:
raise StopIteration
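    # Python's coroutine machinery converts an escaping StopIteration into
    # RuntimeError("coroutine raised StopIteration"), which is what the caller sees.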
with pytest.raises(RuntimeError, match="coroutine raised StopIteration"):
await to_thread.run_sync(raise_stopiteration)
class TestBlockingPortalProvider:
@pytest.fixture
def provider(
self, anyio_backend_name: str, anyio_backend_options: dict[str, Any]
) -> BlockingPortalProvider:
return BlockingPortalProvider(
backend=anyio_backend_name, backend_options=anyio_backend_options
)
def test_single_thread(
self, provider: BlockingPortalProvider, anyio_backend_name: str
) -> None:
threads: set[threading.Thread] = set()
async def check_thread() -> None:
assert sniffio.current_async_library() == anyio_backend_name
threads.add(threading.current_thread())
active_threads_before = threading.active_count()
for _ in range(3):
with provider as portal:
portal.call(check_thread)
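        # Each non-overlapping `with provider` block spun up a fresh portal (and
        # event loop thread) and joined it on exit, so no threads leak.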
assert len(threads) == 3
assert threading.active_count() == active_threads_before
def test_single_thread_overlapping(
self, provider: BlockingPortalProvider, anyio_backend_name: str
) -> None:
threads: set[threading.Thread] = set()
async def check_thread() -> None:
assert sniffio.current_async_library() == anyio_backend_name
threads.add(threading.current_thread())
with provider as portal1:
with provider as portal2:
assert portal1 is portal2
portal2.call(check_thread)
portal1.call(check_thread)
assert len(threads) == 1
def test_multiple_threads(
self, provider: BlockingPortalProvider, anyio_backend_name: str
) -> None:
threads: set[threading.Thread] = set()
event = Event()
async def check_thread() -> None:
assert sniffio.current_async_library() == anyio_backend_name
await event.wait()
threads.add(threading.current_thread())
def dummy() -> None:
with provider as portal:
portal.call(check_thread)
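    # The dummy() calls below all overlap with the outer `with provider` block,
    # so every one of them must share a single portal thread.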
with ThreadPoolExecutor(max_workers=3) as pool:
for _ in range(3):
pool.submit(dummy)
with provider as portal:
portal.call(wait_all_tasks_blocked)
portal.call(event.set)
assert len(threads) == 1
@pytest.mark.skipif(
sys.implementation.name == "pypy",
reason=(
"gc.get_referrers is broken on PyPy (see "
"https://github.com/pypy/pypy/issues/5075)"
),
)
async def test_run_sync_worker_cyclic_references() -> None:
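    """
    Test that to_thread.run_sync() retains no lingering references to the callable,
    its argument, or the value propagated via the context.
    """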
class Foo:
pass
def foo(_: Foo) -> None:
pass
cvar = ContextVar[Foo]("cvar")
contextval = Foo()
arg = Foo()
cvar.set(contextval)
await to_thread.run_sync(foo, arg)
cvar.set(Foo())
gc.collect()
assert gc.get_referrers(contextval) == no_other_refs()
assert gc.get_referrers(foo) == no_other_refs()
assert gc.get_referrers(arg) == no_other_refs()
anyio-4.11.0/tests/test_typedattr.py 0000664 0000000 0000000 00000001237 15064462627 0017526 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from collections.abc import Mapping
from typing import Any, Callable
import pytest
from anyio import TypedAttributeProvider
class DummyAttributeProvider(TypedAttributeProvider):
def get_dummyattr(self) -> str:
raise KeyError("foo")
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
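        # Typed attribute keys map to zero-argument getters; str serves as the key
        # here, and the getter deliberately raises KeyError("foo").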
return {str: self.get_dummyattr}
def test_typedattr_keyerror() -> None:
"""
Test that if the extra attribute getter raises KeyError, it won't be confused for a
missing attribute.
"""
with pytest.raises(KeyError, match="^'foo'$"):
DummyAttributeProvider().extra(str)