==== anyio-3.5.0/.github/workflows/publish.yml ====

name: Publish packages to PyPI

on:
  push:
    tags:
      - "[0-9]+.[0-9]+.[0-9]+"
      - "[0-9]+.[0-9]+.[0-9]+[a-b][0-9]+"
      - "[0-9]+.[0-9]+.[0-9]+rc[0-9]+"

jobs:
  publish:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.x
      - name: Install dependencies
        run: pip install build
      - name: Create packages
        run: python -m build -s -w .
      - name: Upload packages
        uses: pypa/gh-action-pypi-publish@master
        with:
          user: __token__
          password: ${{ secrets.pypi_password }}

==== anyio-3.5.0/.github/workflows/test.yml ====

name: Run the test suite

on:
  push:
    branches: [master]
  pull_request:

jobs:
  pyright:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.x
      - uses: actions/cache@v2
        with:
          path: ~/.cache/pip
          key: pip-pyright
      - name: Install dependencies
        run: pip install -e . pyright pytest
      - name: Run pyright
        run: pyright --verifytypes anyio

  test:
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
        python-version: ["3.6", "3.7", "3.8", "3.9", "3.10", pypy-3.7]
        exclude:
          - os: macos-latest
            python-version: "3.7"
          - os: macos-latest
            python-version: "3.8"
          - os: macos-latest
            python-version: "3.9"
          - os: macos-latest
            python-version: pypy-3.7
          - os: windows-latest
            python-version: "3.7"
          - os: windows-latest
            python-version: "3.8"
          - os: windows-latest
            python-version: "3.9"
          - os: windows-latest
            python-version: pypy-3.7  # https://github.com/python-trio/trio/issues/1361
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - uses: actions/cache@v2
        with:
          path: ~/.cache/pip
          key: pip-test-${{ matrix.python-version }}-${{ matrix.os }}
      - name: Install dependencies
        run: pip install .[test,trio] coveralls
      - name: Test with pytest
        run: coverage run -m pytest
        env:
          PYTEST_DISABLE_PLUGIN_AUTOLOAD: 1
      - name: Upload Coverage
        run: coveralls --service=github
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COVERALLS_FLAG_NAME: ${{ matrix.test-name }}
          COVERALLS_PARALLEL: true

  coveralls:
    name: Finish Coveralls
    needs: test
    runs-on: ubuntu-latest
    container: python:3-slim
    steps:
      - name: Finished
        run: |
          pip install coveralls
          coveralls --service=github --finish
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

==== anyio-3.5.0/.gitignore ====

*.egg-info
*.dist-info
*.pyc
build
dist
docs/_build
__pycache__
.coverage
.pytest_cache/
.mypy_cache/
.hypothesis/
.eggs/
.tox
.idea
.cache
.local

==== anyio-3.5.0/.pre-commit-config.yaml ====

# This is the configuration file for pre-commit (https://pre-commit.com/).
# To use:
# * Install pre-commit (https://pre-commit.com/#installation)
# * Copy this file as ".pre-commit-config.yaml"
# * Run "pre-commit install".
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.1.0
    hooks:
      - id: check-toml
      - id: check-yaml
      - id: debug-statements
      - id: end-of-file-fixer
      - id: mixed-line-ending
        args: ["--fix=lf"]
      - id: trailing-whitespace

  - repo: https://github.com/asottile/pyupgrade
    rev: v2.31.0
    hooks:
      - id: pyupgrade
        args: ["--py36-plus", "--keep-mock"]

  - repo: https://github.com/pre-commit/mirrors-autopep8
    rev: v1.6.0
    hooks:
      - id: autopep8

  - repo: https://github.com/pycqa/isort
    rev: 5.10.1
    hooks:
      - id: isort

  - repo: https://github.com/csachs/pyproject-flake8
    rev: v0.0.1a2.post1
    hooks:
      - id: pyproject-flake8

  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v0.931
    hooks:
      - id: mypy
        additional_dependencies: ["pytest", "trio-typing", "packaging"]

  - repo: https://github.com/pre-commit/pygrep-hooks
    rev: v1.9.0
    hooks:
      - id: python-check-blanket-noqa
      - id: python-check-blanket-type-ignore
      - id: python-no-eval
      - id: rst-backticks
      - id: rst-directive-colons
      - id: rst-inline-touching-normal

==== anyio-3.5.0/.readthedocs.yml ====

version: 2
formats: [htmlzip, pdf]
python:
  version: "3.8"
  install:
    - method: pip
      path: .
      extra_requirements: [doc]

==== anyio-3.5.0/LICENSE ====

The MIT License (MIT)

Copyright (c) 2018 Alex Grönholm

Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

==== anyio-3.5.0/README.rst ====

.. image:: https://github.com/agronholm/anyio/actions/workflows/test.yml/badge.svg
   :target: https://github.com/agronholm/anyio/actions/workflows/test.yml
   :alt: Build Status
.. image:: https://coveralls.io/repos/github/agronholm/anyio/badge.svg?branch=master
   :target: https://coveralls.io/github/agronholm/anyio?branch=master
   :alt: Code Coverage
.. image:: https://readthedocs.org/projects/anyio/badge/?version=latest
   :target: https://anyio.readthedocs.io/en/latest/?badge=latest
   :alt: Documentation
.. image:: https://badges.gitter.im/gitterHQ/gitter.svg
   :target: https://gitter.im/python-trio/AnyIO
   :alt: Gitter chat

AnyIO is an asynchronous networking and concurrency library that works on top of either asyncio_
or trio_. It implements trio-like `structured concurrency`_ (SC) on top of asyncio, and works in
harmony with the native SC of trio itself.

Applications and libraries written against AnyIO's API will run unmodified on either asyncio_ or
trio_. AnyIO can also be adopted into a library or application incrementally – bit by bit, no
full refactoring necessary. It will blend in with native libraries of your chosen backend.
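
As a quick taste, here is a minimal sketch (not shipped with the package) that runs the same
coroutine on either supported backend::

    import anyio


    async def main():
        print('Hello from AnyIO!')

    anyio.run(main)                  # runs on asyncio (the default)
    anyio.run(main, backend='trio')  # runs on trio, if installed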

Documentation
-------------

View full documentation at: https://anyio.readthedocs.io/

Features
--------

AnyIO offers the following functionality:

* Task groups (nurseries_ in trio terminology)
* High level networking (TCP, UDP and UNIX sockets)

  * `Happy eyeballs`_ algorithm for TCP connections (more robust than that of asyncio on Python
    3.8)
  * async/await style UDP sockets (unlike asyncio where you still have to use Transports and
    Protocols)

* A versatile API for byte streams and object streams
* Inter-task synchronization and communication (locks, conditions, events, semaphores, object
  streams)
* Worker threads
* Subprocesses
* Asynchronous file I/O (using worker threads)
* Signal handling

AnyIO also comes with its own pytest_ plugin which also supports asynchronous fixtures. It even
works with the popular Hypothesis_ library.

.. _asyncio: https://docs.python.org/3/library/asyncio.html
.. _trio: https://github.com/python-trio/trio
.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
.. _nurseries: https://trio.readthedocs.io/en/stable/reference-core.html#nurseries-and-spawning
.. _Happy eyeballs: https://en.wikipedia.org/wiki/Happy_Eyeballs
.. _pytest: https://docs.pytest.org/en/latest/
.. _Hypothesis: https://hypothesis.works/

==== anyio-3.5.0/docs/api.rst ====

API reference
=============

Event loop
----------

.. autofunction:: anyio.run
.. autofunction:: anyio.get_all_backends
.. autofunction:: anyio.get_cancelled_exc_class
.. autofunction:: anyio.sleep
.. autofunction:: anyio.sleep_forever
.. autofunction:: anyio.sleep_until
.. autofunction:: anyio.current_time

Asynchronous resources
----------------------

.. autofunction:: anyio.aclose_forcefully
.. autoclass:: anyio.abc.AsyncResource

Typed attributes
----------------

.. autofunction:: anyio.typed_attribute
.. autoclass:: anyio.TypedAttributeSet
.. autoclass:: anyio.TypedAttributeProvider

Timeouts and cancellation
-------------------------

.. autofunction:: anyio.open_cancel_scope
.. autofunction:: anyio.move_on_after
.. autofunction:: anyio.fail_after
.. autofunction:: anyio.current_effective_deadline
.. autoclass:: anyio.CancelScope

Task groups
-----------

.. autofunction:: anyio.create_task_group
.. autoclass:: anyio.abc.TaskGroup
.. autoclass:: anyio.abc.TaskStatus

Running code in worker threads
------------------------------

.. autofunction:: anyio.to_thread.run_sync
.. autofunction:: anyio.to_thread.current_default_thread_limiter

Running code in worker processes
--------------------------------

.. autofunction:: anyio.to_process.run_sync
.. autofunction:: anyio.to_process.current_default_process_limiter

Running asynchronous code from other threads
--------------------------------------------

.. autofunction:: anyio.from_thread.run
.. autofunction:: anyio.from_thread.run_sync
.. autofunction:: anyio.from_thread.create_blocking_portal
.. autofunction:: anyio.from_thread.start_blocking_portal
.. autoclass:: anyio.abc.BlockingPortal

Async file I/O
--------------

.. autofunction:: anyio.open_file
.. autofunction:: anyio.wrap_file
.. autoclass:: anyio.AsyncFile
.. autoclass:: anyio.Path

Streams and stream wrappers
---------------------------

.. autofunction:: anyio.create_memory_object_stream
.. autoclass:: anyio.abc.UnreliableObjectReceiveStream()
.. autoclass:: anyio.abc.UnreliableObjectSendStream()
.. autoclass:: anyio.abc.UnreliableObjectStream()
.. autoclass:: anyio.abc.ObjectReceiveStream()
.. autoclass:: anyio.abc.ObjectSendStream()
.. autoclass:: anyio.abc.ObjectStream()
.. autoclass:: anyio.abc.ByteReceiveStream
.. autoclass:: anyio.abc.ByteSendStream
.. autoclass:: anyio.abc.ByteStream
.. autoclass:: anyio.abc.Listener
.. autodata:: anyio.abc.AnyUnreliableByteReceiveStream
.. autodata:: anyio.abc.AnyUnreliableByteSendStream
.. autodata:: anyio.abc.AnyUnreliableByteStream
.. autodata:: anyio.abc.AnyByteReceiveStream
.. autodata:: anyio.abc.AnyByteSendStream
.. autodata:: anyio.abc.AnyByteStream
.. autoclass:: anyio.streams.buffered.BufferedByteReceiveStream
.. autoclass:: anyio.streams.file.FileStreamAttribute
.. autoclass:: anyio.streams.file.FileReadStream
.. autoclass:: anyio.streams.file.FileWriteStream
.. autoclass:: anyio.streams.memory.MemoryObjectReceiveStream
.. autoclass:: anyio.streams.memory.MemoryObjectSendStream
.. autoclass:: anyio.streams.memory.MemoryObjectStreamStatistics
.. autoclass:: anyio.streams.stapled.MultiListener
.. autoclass:: anyio.streams.stapled.StapledByteStream
.. autoclass:: anyio.streams.stapled.StapledObjectStream
.. autoclass:: anyio.streams.text.TextReceiveStream
.. autoclass:: anyio.streams.text.TextSendStream
.. autoclass:: anyio.streams.text.TextStream
.. autoclass:: anyio.streams.tls.TLSAttribute
.. autoclass:: anyio.streams.tls.TLSStream
.. autoclass:: anyio.streams.tls.TLSListener

Sockets and networking
----------------------

.. autofunction:: anyio.connect_tcp
.. autofunction:: anyio.connect_unix
.. autofunction:: anyio.create_tcp_listener
.. autofunction:: anyio.create_unix_listener
.. autofunction:: anyio.create_udp_socket
.. autofunction:: anyio.create_connected_udp_socket
.. autofunction:: anyio.getaddrinfo
.. autofunction:: anyio.getnameinfo
.. autofunction:: anyio.wait_socket_readable
.. autofunction:: anyio.wait_socket_writable
.. autoclass:: anyio.abc.SocketAttribute
.. autoclass:: anyio.abc.SocketStream()
.. autoclass:: anyio.abc.SocketListener()
.. autoclass:: anyio.abc.UDPSocket()
.. autoclass:: anyio.abc.ConnectedUDPSocket()

Subprocesses
------------

.. autofunction:: anyio.run_process
.. autofunction:: anyio.open_process
.. autoclass:: anyio.abc.Process

Synchronization
---------------

.. autoclass:: anyio.Event
.. autoclass:: anyio.Lock
.. autoclass:: anyio.Condition
.. autoclass:: anyio.Semaphore
.. autoclass:: anyio.CapacityLimiter
.. autoclass:: anyio.LockStatistics
.. autoclass:: anyio.EventStatistics
.. autoclass:: anyio.ConditionStatistics
.. autoclass:: anyio.CapacityLimiterStatistics
.. autofunction:: anyio.create_event
.. autofunction:: anyio.create_lock
.. autofunction:: anyio.create_condition
.. autofunction:: anyio.create_semaphore
.. autofunction:: anyio.create_capacity_limiter

Operating system signals
------------------------

.. autofunction:: anyio.open_signal_receiver

Low level operations
--------------------

.. autofunction:: anyio.lowlevel.checkpoint
.. autofunction:: anyio.lowlevel.checkpoint_if_cancelled
.. autofunction:: anyio.lowlevel.cancel_shielded_checkpoint
.. autoclass:: anyio.lowlevel.RunVar

Compatibility
-------------

.. autofunction:: anyio.maybe_async
.. autofunction:: anyio.maybe_async_cm

Testing and debugging
---------------------

.. autoclass:: anyio.TaskInfo
.. autofunction:: anyio.get_current_task
.. autofunction:: anyio.get_running_tasks
.. autofunction:: anyio.wait_all_tasks_blocked

Exceptions
----------

.. autoexception:: anyio.BrokenResourceError
.. autoexception:: anyio.BusyResourceError
.. autoexception:: anyio.ClosedResourceError
.. autoexception:: anyio.DelimiterNotFound
.. autoexception:: anyio.EndOfStream
.. autoexception:: anyio.ExceptionGroup
.. autoexception:: anyio.IncompleteRead
.. autoexception:: anyio.TypedAttributeLookupError
.. autoexception:: anyio.WouldBlock

==== anyio-3.5.0/docs/basics.rst ====

The basics
==========

.. py:currentmodule:: anyio

AnyIO requires Python 3.6.2 or later to run. It is recommended that you set up a virtualenv_ when
developing or playing around with AnyIO.

Installation
------------

To install AnyIO, run:

.. code-block:: bash

    pip install anyio

To install a supported version of trio_, you can install it as an extra like this:

.. code-block:: bash

    pip install anyio[trio]

Running async programs
----------------------

The simplest possible AnyIO program looks like this::

    from anyio import run


    async def main():
        print('Hello, world!')

    run(main)

This will run the program above on the default backend (asyncio). To run it on another supported
backend, say trio_, you can use the ``backend`` argument, like so::

    run(main, backend='trio')

But AnyIO code is not required to be run via :func:`run`. You can just as well use the native
``run()`` function of the backend library::

    import sniffio
    import trio

    from anyio import sleep


    async def main():
        print('Hello')
        await sleep(1)
        print("I'm running on", sniffio.current_async_library())

    trio.run(main)

.. _backend options:

Backend specific options
------------------------

Asyncio:

* ``debug`` (``bool``, default=False): Enables `debug mode`_ in the event loop
* ``use_uvloop`` (``bool``, default=False): Use the faster uvloop_ event loop implementation, if
  available
* ``policy`` (``AbstractEventLoopPolicy``, default=None): the event loop policy instance to use
  for creating a new event loop (overrides ``use_uvloop``)

Trio: options covered in the
`official documentation <https://trio.readthedocs.io/en/stable/reference-core.html#trio.run>`_

.. note:: The default value of ``use_uvloop`` was ``True`` before v3.2.0.

.. _debug mode: https://docs.python.org/3/library/asyncio-eventloop.html#enabling-debug-mode
.. _uvloop: https://pypi.org/project/uvloop/
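
As an illustration (a sketch not present in the original text), backend options are passed via
the ``backend_options`` argument of :func:`run`. The availability of uvloop_ is an assumption
here – if it is not installed, that option simply has no effect::

    from anyio import run, sleep


    async def main():
        await sleep(0.1)
        print('Done')

    # Run on asyncio with debug mode enabled, using uvloop if available
    run(main, backend='asyncio', backend_options={'debug': True, 'use_uvloop': True})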

Using native async libraries
----------------------------

AnyIO lets you mix and match code written for AnyIO and code written for the asynchronous
framework of your choice. There are a few rules to keep in mind however:

* You can only use "native" libraries for the backend you're running, so you cannot, for example,
  use a library written for trio together with a library written for asyncio.
* Tasks spawned by these "native" libraries on backends other than trio_ are not subject to the
  cancellation rules enforced by AnyIO
* Threads spawned outside of AnyIO cannot use :func:`.from_thread.run` to call asynchronous code

.. _virtualenv: https://docs.python-guide.org/dev/virtualenvs/
.. _trio: https://github.com/python-trio/trio

==== anyio-3.5.0/docs/cancellation.rst ====

Cancellation and timeouts
=========================

.. py:currentmodule:: anyio

The ability to cancel tasks is the foremost advantage of the asynchronous programming model.
Threads, on the other hand, cannot be forcibly killed and shutting them down will require perfect
cooperation from the code running in them.

Cancellation in AnyIO follows the model established by the trio_ framework. This means that
cancellation of tasks is done via so called *cancel scopes*. Cancel scopes are used as context
managers and can be nested. Cancelling a cancel scope cancels all cancel scopes nested within it.
If a task is waiting on something, it is cancelled immediately. If the task is just starting, it
will run until it first tries to run an operation requiring waiting, such as :func:`~sleep`.

A task group contains its own cancel scope. The entire task group can be cancelled by cancelling
this scope.

.. _trio: https://trio.readthedocs.io/en/latest/reference-core.html#cancellation-and-timeouts

Timeouts
--------

Networked operations can often take a long time, and you usually want to set up some kind of a
timeout to ensure that your application doesn't stall forever. There are two principal ways to do
this: :func:`~move_on_after` and :func:`~fail_after`. Both are used as synchronous context
managers. The difference between these two is that the former simply exits the context block
prematurely on a timeout, while the other raises a :exc:`TimeoutError`.

Both methods create a new cancel scope, and you can check the deadline by accessing the
:attr:`~.abc.CancelScope.deadline` attribute. Note, however, that an outer cancel scope may have
an earlier deadline than your current cancel scope. To check the actual deadline, you can use the
:func:`~current_effective_deadline` function.

Here's how you typically use timeouts::

    from anyio import create_task_group, move_on_after, sleep, run


    async def main():
        async with create_task_group() as tg:
            with move_on_after(1) as scope:
                print('Starting sleep')
                await sleep(2)
                print('This should never be printed')

            # The cancel_called property will be True if timeout was reached
            print('Exited cancel scope, cancelled =', scope.cancel_called)

    run(main)
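
The example above demonstrates :func:`~move_on_after`; as a complementary sketch (not from the
original text), this is how :func:`~fail_after` would typically be used, catching the
:exc:`TimeoutError` it raises when the deadline is reached::

    from anyio import fail_after, sleep, run


    async def main():
        try:
            with fail_after(1):
                await sleep(2)
        except TimeoutError:
            print('The operation timed out')

    run(main)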

Shielding
---------

There are cases where you want to shield your task from cancellation, at least temporarily. The
most important such use case is performing shutdown procedures on asynchronous resources.

To accomplish this, open a new cancel scope with the ``shield=True`` argument::

    from anyio import CancelScope, create_task_group, sleep, run


    async def external_task():
        print('Started sleeping in the external task')
        await sleep(1)
        print('This line should never be seen')


    async def main():
        async with create_task_group() as tg:
            with CancelScope(shield=True) as scope:
                tg.start_soon(external_task)
                tg.cancel_scope.cancel()
                print('Started sleeping in the host task')
                await sleep(1)
                print('Finished sleeping in the host task')

    run(main)

The shielded block will be exempt from cancellation except when the shielded block itself is
being cancelled. Shielding a cancel scope is often best combined with :func:`~move_on_after` or
:func:`~fail_after`, both of which also accept ``shield=True``.

Finalization
------------

Sometimes you may want to perform cleanup operations in response to the failure of the
operation::

    async def do_something():
        try:
            await run_async_stuff()
        except BaseException:
            # (perform cleanup)
            raise

In some specific cases, you might only want to catch the cancellation exception. This is tricky
because each async framework has its own exception class for that and AnyIO cannot control which
exception is raised in the task when it's cancelled.

To work around that, AnyIO provides a way to retrieve the exception class specific to the
currently running async framework, using :func:`~get_cancelled_exc_class`::

    from anyio import get_cancelled_exc_class


    async def do_something():
        try:
            await run_async_stuff()
        except get_cancelled_exc_class():
            # (perform cleanup)
            raise

.. warning:: Always reraise the cancellation exception if you catch it. Failing to do so may
    cause undefined behavior in your application.

If you need to use ``await`` during finalization, you need to enclose it in a shielded cancel
scope, or the operation will be cancelled immediately since it's in an already cancelled scope::

    async def do_something():
        try:
            await run_async_stuff()
        except get_cancelled_exc_class():
            with CancelScope(shield=True):
                await some_cleanup_function()

            raise

Avoiding cancel scope stack corruption
--------------------------------------

When using cancel scopes, it is important that they are entered and exited in LIFO (last in,
first out) order within each task. This is usually not an issue since cancel scopes are normally
used as context managers. However, in certain situations, cancel scope stack corruption might
still occur:

* Manually calling ``CancelScope.__enter__()`` and ``CancelScope.__exit__()``, usually from
  another context manager class, in the wrong order
* Using cancel scopes with ``[Async]ExitStack`` in a manner that couldn't be achieved by nesting
  them as context managers
* Using the low level coroutine protocol to execute parts of the coroutine function in different
  cancel scopes
* Yielding in an async generator while enclosed in a cancel scope

Remember that task groups contain their own cancel scopes so the same list of risky situations
applies to them too.

As an example, the following code is highly dubious::

    # Bad!
    async def some_generator():
        async with create_task_group() as tg:
            tg.start_soon(foo)
            yield

The problem with this code is that it violates structural concurrency: what happens if the
spawned task raises an exception? The host task would be cancelled as a result, but the host task
might be long gone by the time that happens. Even if it weren't, any enclosing ``try...except``
in the generator would not be triggered. Unfortunately there is currently no way to automatically
detect this condition in AnyIO, so in practice you may simply experience some weird behavior in
your application as a consequence of running code like above.

Depending on how they are used, this pattern is, however, *usually* safe to use in asynchronous
context managers, so long as you make sure that the same host task keeps running throughout the
entire enclosed code block::

    # Okay in most cases!
    @async_context_manager
    async def some_context_manager():
        async with create_task_group() as tg:
            tg.start_soon(foo)
            yield

However, in pytest fixtures, even this pattern can be problematic because the AnyIO pytest plugin
executes the setup and teardown phases of an async fixture in **separate tasks**, so if you try
to host a task group there, it will wreak havoc with your test suite, at least in the teardown
phase::

    # Not okay, will raise an exception!
    @pytest.fixture
    async def some_background_service():
        async with create_task_group() as tg:
            tg.start_soon(foo)
            yield

When you're implementing the async context manager protocol manually and your async context
manager needs to use other context managers, you may find it necessary to call their
``__aenter__()`` and ``__aexit__()`` directly.
In such cases, it is absolutely vital to ensure that their ``__aexit__()`` methods are called in
the exact reverse order of the ``__aenter__()`` calls. To this end, you may find the
:class:`~contextlib.AsyncExitStack` (available from Python 3.7 up, or as a backport_) class very
useful::

    from contextlib import AsyncExitStack

    from anyio import create_task_group


    class MyAsyncContextManager:
        async def __aenter__(self):
            self._exitstack = AsyncExitStack()
            await self._exitstack.__aenter__()
            self._task_group = await self._exitstack.enter_async_context(create_task_group())

        async def __aexit__(self, exc_type, exc_val, exc_tb):
            return await self._exitstack.__aexit__(exc_type, exc_val, exc_tb)

.. _backport: https://pypi.org/project/async-exit-stack/

==== anyio-3.5.0/docs/conf.py ====

#!/usr/bin/env python3
from importlib.metadata import version as get_version

from packaging.version import parse

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx_autodoc_typehints'
]

templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'AnyIO'
author = 'Alex Grönholm'
copyright = '2018, ' + author

v = parse(get_version('anyio'))
version = v.base_version
release = v.public

language = None
exclude_patterns = ['_build']
pygments_style = 'sphinx'
autodoc_default_options = {
    'members': True,
    'show-inheritance': True
}
todo_include_todos = False

html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
htmlhelp_basename = 'anyiodoc'

intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}

==== anyio-3.5.0/docs/contributing.rst ====

Contributing to AnyIO
=====================

If you wish to contribute a fix or feature to AnyIO, please follow the following guidelines.

When you make a pull request against the main AnyIO codebase, Github runs the AnyIO test suite
against your modified code. Before making a pull request, you should ensure that the modified
code passes tests locally. To that end, the use of tox_ is recommended. The default tox run first
runs code style fixing tools and then the actual test suite. To only run the code style fixers,
run ``tox -e lint``. To run the checks on all environments in parallel, invoke tox with
``tox -p``.

To build the documentation, run ``tox -e docs`` which will generate a directory named ``build``
in which you may view the formatted HTML documentation.

AnyIO uses pre-commit_ to perform several code style/quality checks. It is recommended to
activate pre-commit_ on your local clone of the repository (using ``pre-commit install``) to
ensure that your changes will pass the same checks on GitHub.

.. _tox: https://tox.readthedocs.io/en/latest/install.html
.. _pre-commit: https://pre-commit.com/#installation

Making a pull request on Github
-------------------------------

To get your changes merged to the main codebase, you need a Github account.

#. Fork the repository (if you don't have your own fork of it yet) by navigating to the
   `main AnyIO repository`_ and clicking on "Fork" near the top right corner.
#. Clone the forked repository to your local machine with
   ``git clone git@github.com:yourusername/anyio``.
#. Create a branch for your pull request, like ``git checkout -b myfixname``
#. Make the desired changes to the code base.
#. Commit your changes locally.
#. If your changes close an existing issue, add the text ``Fixes XXX.`` or ``Closes XXX.`` to the
   commit message (where XXX is the issue number).
#. Push the changeset(s) to your forked repository (``git push``)
#. Navigate to Pull requests page on the original repository (not your fork) and click
   "New pull request"
#. Click on the text "compare across forks".
#. Select your own fork as the head repository and then select the correct branch name.
#. Click on "Create pull request".

If you have trouble, consult the `pull request making guide`_ on opensource.com.

.. _main AnyIO repository: https://github.com/agronholm/anyio
.. _pull request making guide: https://opensource.com/article/19/7/create-pull-request-github

==== anyio-3.5.0/docs/faq.rst ====

Frequently Asked Questions
==========================

Why is Curio not supported as a backend?
----------------------------------------

Curio_ was supported in AnyIO before v3.0. Support for it was dropped for two reasons:

#. Its interface allowed only coroutine functions to access the Curio_ kernel. This forced AnyIO
   to follow suit in its own API design, making it difficult to adapt existing applications that
   relied on synchronous callbacks to use AnyIO. It also interfered with the goal of matching
   Trio's API in functions with the same purpose (e.g. ``Event.set()``).
#. The maintainer specifically requested Curio_ support to be removed from AnyIO
   (`issue 185 <https://github.com/agronholm/anyio/issues/185>`_).

.. _Curio: https://github.com/dabeaz/curio

Why is Twisted not supported as a backend?
------------------------------------------

The minimum requirement to support Twisted_ would be for sniffio_ to be able to detect a running
Twisted event loop (and be able to tell when Twisted_ is being run on top of its asyncio
reactor). This is not currently supported in sniffio_, so AnyIO cannot support Twisted either.

There is a Twisted `issue `_ that you can follow if you're interested in Twisted support in
AnyIO.

.. _Twisted: https://twistedmatrix.com/trac/
.. _sniffio: https://github.com/python-trio/sniffio

==== anyio-3.5.0/docs/fileio.rst ====

Asynchronous file I/O support
=============================

.. py:currentmodule:: anyio

AnyIO provides asynchronous wrappers for blocking file operations. These wrappers run blocking
operations in worker threads.

Example::

    from anyio import open_file, run


    async def main():
        async with await open_file('/some/path/somewhere') as f:
            contents = await f.read()
            print(contents)

    run(main)

The wrappers also support asynchronous iteration of the file line by line, just as the standard
file objects support synchronous iteration::

    from anyio import open_file, run


    async def main():
        async with await open_file('/some/path/somewhere') as f:
            async for line in f:
                print(line, end='')

    run(main)

To wrap an existing open file object as an asynchronous file, you can use :func:`.wrap_file`::

    from anyio import wrap_file, run


    async def main():
        with open('/some/path/somewhere') as f:
            async for line in wrap_file(f):
                print(line, end='')

    run(main)

.. note:: Closing the wrapper also closes the underlying synchronous file object.

.. seealso:: :ref:`FileStreams`

Asynchronous path operations
----------------------------

AnyIO provides an asynchronous version of the :class:`pathlib.Path` class.
It differs from the original in a number of ways:

* Operations that perform disk I/O (like :meth:`~pathlib.Path.read_bytes`) are run in a worker
  thread and thus require an ``await``
* Methods like :meth:`~pathlib.Path.glob` return an asynchronous iterator that yields
  asynchronous :class:`~.Path` objects
* Properties and methods that normally return :class:`pathlib.Path` objects return
  :class:`~.Path` objects instead
* Methods and properties from the Python 3.10 API are available on all versions
* Use as a context manager is not supported, as it is deprecated in pathlib

For example, to create a file with binary content::

    from anyio import Path, run


    async def main():
        path = Path('/foo/bar')
        await path.write_bytes(b'hello, world')

    run(main)

Asynchronously iterating a directory contents can be done as follows::

    from anyio import Path, run


    async def main():
        # Print the contents of every file (assumed to be text) in the directory /foo/bar
        dir_path = Path('/foo/bar')
        async for path in dir_path.iterdir():
            if await path.is_file():
                print(await path.read_text())
                print('---------------------')

    run(main)

==== anyio-3.5.0/docs/index.rst ====

AnyIO
=====

.. include:: ../README.rst

The manual
----------

.. toctree::
   :maxdepth: 2

   basics
   tasks
   cancellation
   synchronization
   streams
   typedattrs
   networking
   threads
   subprocesses
   fileio
   signals
   testing
   api
   migration
   faq
   support
   contributing
   versionhistory

==== anyio-3.5.0/docs/migration.rst ====

Migrating from AnyIO 2 to AnyIO 3
=================================

.. py:currentmodule:: anyio

AnyIO 3 changed some functions and methods in a way that needs some adaptation in your code. All
deprecated functions and methods will be removed in AnyIO 4.

Asynchronous functions converted to synchronous
-----------------------------------------------

AnyIO 3 changed several previously asynchronous functions and methods into regular ones for two
reasons:

#. to better serve use cases where synchronous callbacks are used by third party libraries
#. to better match the API of trio_

The following functions and methods were changed:

* :func:`current_time`
* :func:`current_effective_deadline`
* :meth:`CancelScope.cancel() <.abc.CancelScope.cancel>`
* :meth:`CapacityLimiter.acquire_nowait`
* :meth:`CapacityLimiter.acquire_on_behalf_of_nowait`
* :meth:`Condition.release`
* :meth:`Event.set`
* :func:`get_current_task`
* :func:`get_running_tasks`
* :meth:`Lock.release`
* :meth:`MemoryObjectReceiveStream.receive_nowait()
  <.streams.memory.MemoryObjectReceiveStream.receive_nowait>`
* :meth:`MemoryObjectSendStream.send_nowait()
  <.streams.memory.MemoryObjectSendStream.send_nowait>`
* :func:`open_signal_receiver`
* :meth:`Semaphore.release`

When migrating to AnyIO 3, simply remove the ``await`` from each call to these.

.. note:: For backwards compatibility reasons, :func:`current_time`,
    :func:`current_effective_deadline` and :func:`get_running_tasks` return objects which are
    awaitable versions of their original types (:class:`float` and :class:`list`, respectively).
    These awaitable versions are subclasses of the original types so they should behave as their
    originals, but if you absolutely need the pristine original types, you can either use
    :func:`maybe_async` or ``float()`` / ``list()`` on the returned value as appropriate.
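
To illustrate (a sketch not present in the original text), migrating a call to :meth:`Event.set`
is just a matter of dropping the ``await``::

    from anyio import Event


    async def notify(event: Event) -> None:
        # AnyIO 2 (now deprecated):
        #     await event.set()
        # AnyIO 3:
        event.set()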

The following async context managers changed to regular context managers:

* :func:`fail_after`
* :func:`move_on_after`
* :func:`open_cancel_scope` (now just ``CancelScope()``)

When migrating, just change ``async with`` into a plain ``with``.

With the exception of :meth:`MemoryObjectReceiveStream.receive_nowait()
<.streams.memory.MemoryObjectReceiveStream.receive_nowait>`, all of them can still be used like
before – they will raise :exc:`DeprecationWarning` when used this way on AnyIO 3, however.

If you're writing a library that needs to be compatible with both major releases, you will need
to use the compatibility functions added in AnyIO 2.2: :func:`maybe_async` and
:func:`maybe_async_cm`. These will let you safely use functions/methods and context managers
(respectively) regardless of which major release is currently installed.

Example 1 – setting an event::

    from anyio.abc import Event
    from anyio import maybe_async


    async def foo(event: Event):
        await maybe_async(event.set())
        ...

Example 2 – opening a cancel scope::

    from anyio import CancelScope, maybe_async_cm

    async def foo():
        async with maybe_async_cm(CancelScope()) as scope:
            ...

.. _trio: https://github.com/python-trio/trio

Starting tasks
--------------

The :meth:`TaskGroup.spawn` coroutine method has been deprecated in favor of the synchronous
method :meth:`TaskGroup.start_soon` (which mirrors ``start_soon()`` in trio's nurseries). If
you're fully migrating to AnyIO 3, simply switch to calling the new method (and remove the
``await``).

If your code needs to work with both AnyIO 2 and 3, you can keep using :meth:`~TaskGroup.spawn`
(until AnyIO 4) and suppress the deprecation warning::

    import warnings


    async def foo():
        async with create_task_group() as tg:
            with warnings.catch_warnings():
                await tg.spawn(otherfunc)

Blocking portal changes
-----------------------

AnyIO now **requires** :func:`.from_thread.start_blocking_portal` to be used as a context
manager::

    from anyio import sleep
    from anyio.from_thread import start_blocking_portal

    with start_blocking_portal() as portal:
        portal.call(sleep, 1)

As with :meth:`TaskGroup.spawn`, the :meth:`BlockingPortal.spawn_task` method has also been
renamed to :meth:`~BlockingPortal.start_task_soon`, so as to be consistent with task groups.

The :func:`create_blocking_portal` factory function was also deprecated in favor of instantiating
:class:`BlockingPortal` directly.

For code requiring cross compatibility, catching the deprecation warning (as above) should work.

Synchronization primitives
--------------------------

Synchronization primitive factories (:func:`create_event` etc.) were deprecated in favor of
instantiating the classes directly. So convert code like this::

    from anyio import create_event


    async def main():
        event = create_event()

into this::

    from anyio import Event


    async def main():
        event = Event()

or, if you need to work with both AnyIO 2 and 3::

    try:
        from anyio import Event
        create_event = Event
    except ImportError:
        from anyio import create_event
        from anyio.abc import Event


    async def foo() -> Event:
        return create_event()

Threading functions moved
-------------------------

Threading functions were restructured to submodules, following the example of trio:

* ``current_default_worker_thread_limiter`` → :func:`.to_thread.current_default_thread_limiter`
  (NOTE: the function was renamed too!)
* ``run_sync_in_worker_thread()`` → :func:`.to_thread.run_sync`
* ``run_async_from_thread()`` → :func:`.from_thread.run`
* ``run_sync_from_thread()`` → :func:`.from_thread.run_sync`

The old versions are still in place but emit deprecation warnings when called.
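
As a migration sketch (not part of the original text), a thread-related call would change like
this::

    from anyio import to_thread, run


    def blocking_function() -> str:
        return 'done'


    async def main() -> None:
        # AnyIO 2 (now deprecated):
        #     result = await run_sync_in_worker_thread(blocking_function)
        # AnyIO 3:
        result = await to_thread.run_sync(blocking_function)
        print(result)

    run(main)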

==== anyio-3.5.0/docs/networking.rst ====

Using sockets and streams
=========================

.. py:currentmodule:: anyio

Networking capabilities are arguably the most important part of any asynchronous library. AnyIO
contains its own high level implementation of networking on top of low level primitives offered
by each of its supported backends.

Currently AnyIO offers the following networking functionality:

* TCP sockets (client + server)
* UNIX domain sockets (client + server)
* UDP sockets

More exotic forms of networking such as raw sockets and SCTP are currently not supported.

.. warning:: Unlike the standard BSD sockets interface and most other networking libraries, AnyIO
    (from 2.0 onwards) signals the end of any stream by raising the :exc:`~EndOfStream` exception
    instead of returning an empty bytes object.

Working with TCP sockets
------------------------

TCP (Transmission Control Protocol) is the most commonly used protocol on the Internet. It allows
one to connect to a port on a remote host and send and receive data in a reliable manner.

To connect to a listening TCP socket somewhere, you can use :func:`~connect_tcp`::

    from anyio import connect_tcp, run


    async def main():
        async with await connect_tcp('hostname', 1234) as client:
            await client.send(b'Client\n')
            response = await client.receive()
            print(response)

    run(main)

As a convenience, you can also use :func:`~connect_tcp` to establish a TLS session with the peer
after connection, by passing ``tls=True`` or by passing a nonempty value for either
``ssl_context`` or ``tls_hostname``.

To receive incoming TCP connections, you first create a TCP listener with
:func:`create_tcp_listener` and call :meth:`~.abc.Listener.serve` on it::

    from anyio import create_tcp_listener, run


    async def handle(client):
        async with client:
            name = await client.receive(1024)
            await client.send(b'Hello, %s\n' % name)


    async def main():
        listener = await create_tcp_listener(local_port=1234)
        await listener.serve(handle)

    run(main)

See the section on :ref:`TLS` for more information.

Working with UNIX sockets
-------------------------

UNIX domain sockets are a form of interprocess communication on UNIX-like operating systems.
They cannot be used to connect to remote hosts and do not work on Windows.

The API for UNIX domain sockets is much like the one for TCP sockets, except that instead of
host/port combinations, you use file system paths.

This is what the client from the TCP example looks like when converted to use UNIX sockets::

    from anyio import connect_unix, run


    async def main():
        async with await connect_unix('/tmp/mysock') as client:
            await client.send(b'Client\n')
            response = await client.receive(1024)
            print(response)

    run(main)

And the listener::

    from anyio import create_unix_listener, run


    async def handle(client):
        async with client:
            name = await client.receive(1024)
            await client.send(b'Hello, %s\n' % name)


    async def main():
        listener = await create_unix_listener('/tmp/mysock')
        await listener.serve(handle)

    run(main)

.. note:: The UNIX socket listener does not remove the socket it creates, so you may need to
    delete them manually.
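
As a sketch of how such cleanup might look (not from the original text; the socket path is just
an example), you can remove a stale socket file before binding::

    import os

    from anyio import create_unix_listener


    async def main():
        try:
            os.unlink('/tmp/mysock')  # remove a leftover socket from a previous run
        except FileNotFoundError:
            pass

        listener = await create_unix_listener('/tmp/mysock')
        ...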

Sending and receiving file descriptors
++++++++++++++++++++++++++++++++++++++

UNIX sockets can be used to pass open file descriptors (sockets and files) to another process.
The receiving end can then use either :func:`os.fdopen` or :func:`socket.socket` to get a usable
file or socket object, respectively.

The following is an example where a client connects to a UNIX socket server and receives the
descriptor of a file opened on the server, reads the contents of the file and then prints them
on standard output.

Client::

    import os

    from anyio import connect_unix, run


    async def main():
        async with await connect_unix('/tmp/mysock') as client:
            _, fds = await client.receive_fds(0, 1)
            with os.fdopen(fds[0]) as file:
                print(file.read())

    run(main)

Server::

    from pathlib import Path

    from anyio import create_unix_listener, run


    async def handle(client):
        async with client:
            with path.open('r') as file:
                await client.send_fds(b'this message is ignored', [file])


    async def main():
        listener = await create_unix_listener('/tmp/mysock')
        await listener.serve(handle)

    path = Path('/tmp/examplefile')
    path.write_text('Test file')
    run(main)

Working with UDP sockets
------------------------

UDP (User Datagram Protocol) is a way of sending packets over the network without features like
connections, retries or error correction.

For example, if you wanted to create a UDP "hello" service that just reads a packet and then
sends a packet to the sender with the contents prepended with "Hello, ", you would do this::

    import socket

    from anyio import create_udp_socket, run


    async def main():
        async with await create_udp_socket(family=socket.AF_INET, local_port=1234) as udp:
            async for packet, (host, port) in udp:
                await udp.sendto(b'Hello, ' + packet, host, port)

    run(main)

.. note:: If you are testing on your local machine or don't know which family socket to use, it
    is a good idea to replace ``family=socket.AF_INET`` by ``local_host='localhost'`` in the
    previous example.

If your use case involves sending lots of packets to a single destination, you can still
"connect" your UDP socket to a specific host and port to avoid having to pass the address and
port every time you send data to the peer::

    from anyio import create_connected_udp_socket, run


    async def main():
        async with await create_connected_udp_socket(
                remote_host='hostname', remote_port=1234) as udp:
            await udp.send(b'Hi there!\n')

    run(main)

==== anyio-3.5.0/docs/signals.rst ====

Receiving operating system signals
==================================

.. py:currentmodule:: anyio

You may occasionally find it useful to receive signals sent to your application in a meaningful
way. For example, when you receive a ``signal.SIGTERM`` signal, your application is expected to
shut down gracefully. Likewise, ``SIGHUP`` is often used as a means to ask the application to
reload its configuration.

AnyIO provides a simple mechanism for you to receive the signals you're interested in::

    import signal

    from anyio import open_signal_receiver, run


    async def main():
        with open_signal_receiver(signal.SIGTERM, signal.SIGHUP) as signals:
            async for signum in signals:
                if signum == signal.SIGTERM:
                    return
                elif signum == signal.SIGHUP:
                    print('Reloading configuration')

    run(main)

.. note:: Signal handlers can only be installed in the main thread, so they will not work when
    the event loop is being run through :func:`~start_blocking_portal`, for instance.

.. note:: Windows does not natively support signals so do not rely on this in a cross platform
    application.

Handling KeyboardInterrupt and SystemExit
-----------------------------------------

By default, different backends handle the Ctrl+C (or Ctrl+Break on Windows) key combination and
external termination (:exc:`KeyboardInterrupt` and :exc:`SystemExit`, respectively) differently:
trio raises the relevant exception inside the application while asyncio shuts down all the tasks
and exits. If you need to do your own cleanup in these situations, you will need to install a
signal handler::

    import signal

    from anyio import open_signal_receiver, create_task_group, run
    from anyio.abc import CancelScope


    async def signal_handler(scope: CancelScope):
        with open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
            async for signum in signals:
                if signum == signal.SIGINT:
                    print('Ctrl+C pressed!')
                else:
                    print('Terminated!')

                scope.cancel()
                return


    async def main():
        async with create_task_group() as tg:
            tg.start_soon(signal_handler, tg.cancel_scope)
            ...  # proceed with starting the actual application logic

    run(main)

.. note:: Windows does not support the :data:`~signal.SIGTERM` signal so if you need a mechanism
    for graceful shutdown on Windows, you will have to find another way.

==== anyio-3.5.0/docs/streams.rst ====

Streams
=======

.. py:currentmodule:: anyio

A "stream" in AnyIO is a simple interface for transporting information from one place to another.
It can mean either in-process communication or sending data over a network. AnyIO divides streams
into two categories: byte streams and object streams.

Byte streams ("Streams" in Trio lingo) are objects that receive and/or send chunks of bytes. They
are modelled after the limitations of the stream sockets, meaning the boundaries are not
respected. In practice this means that if, for example, you call ``.send(b'hello ')`` and then
``.send(b'world')``, the other end will receive the data chunked in any arbitrary way, like
(``b'hello'`` and ``b'world'``), ``b'hello world'`` or (``b'hel'``, ``b'lo wo'``, ``b'rld'``).

Object streams ("Channels" in Trio lingo), on the other hand, deal with Python objects. The most
commonly used implementation of these is the memory object stream. The exact semantics of object
streams vary a lot by implementation.

Many stream implementations wrap other streams. Of these, some can wrap any bytes-oriented
streams, meaning ``ObjectStream[bytes]`` and ``ByteStream``. This enables many interesting use
cases.

Memory object streams
---------------------

Memory object streams are intended for implementing a producer-consumer pattern with multiple
tasks. Using :func:`~create_memory_object_stream`, you get a pair of object streams: one for
sending, one for receiving. They essentially work like queues, but with support for closing and
asynchronous iteration.

By default, memory object streams are created with a buffer size of 0. This means that
:meth:`~.streams.memory.MemoryObjectSendStream.send` will block until there's another task that
calls :meth:`~.streams.memory.MemoryObjectReceiveStream.receive`. You can set the buffer size to
a value of your choosing when creating the stream. It is also possible to have an unbounded
buffer by passing :data:`math.inf` as the buffer size but this is not recommended.
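
As a small sketch of the buffering behavior (not part of the original text)::

    from anyio import WouldBlock, create_memory_object_stream, run


    async def main():
        send, receive = create_memory_object_stream(max_buffer_size=2)
        send.send_nowait('first')   # buffered without blocking
        send.send_nowait('second')  # the buffer is now full
        try:
            send.send_nowait('third')
        except WouldBlock:
            print('Buffer full:', receive.receive_nowait())

    run(main)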

Memory object streams can be cloned by calling the ``clone()`` method. Each clone can be closed
separately, but each end of the stream is only considered closed once all of its clones have been
closed. For example, if you have two clones of the receive stream, the send stream will start
raising :exc:`~BrokenResourceError` only when both receive streams have been closed.

Multiple tasks can send and receive on the same memory object stream (or its clones) but each
sent item is only ever delivered to a single recipient.

The receive ends of memory object streams can be iterated using the async iteration protocol. The
loop exits when all clones of the send stream have been closed.

Example::

    from anyio import create_task_group, create_memory_object_stream, run


    async def process_items(receive_stream):
        async with receive_stream:
            async for item in receive_stream:
                print('received', item)


    async def main():
        send_stream, receive_stream = create_memory_object_stream()
        async with create_task_group() as tg:
            tg.start_soon(process_items, receive_stream)
            async with send_stream:
                for num in range(10):
                    await send_stream.send(f'number {num}')

    run(main)

In contrast to other AnyIO streams (but in line with trio's Channels), memory object streams can
be closed synchronously, using either the ``close()`` method or by using the stream as a context
manager::

    def synchronous_callback(send_stream: MemoryObjectSendStream) -> None:
        with send_stream:
            send_stream.send_nowait('hello')

Stapled streams
---------------

A stapled stream combines any mutually compatible receive and send stream together, forming a
single bidirectional stream.

It comes in two variants:

* :class:`~.streams.stapled.StapledByteStream` (combines a :class:`~.abc.ByteReceiveStream` with
  a :class:`~.abc.ByteSendStream`)
* :class:`~.streams.stapled.StapledObjectStream` (combines an :class:`~.abc.ObjectReceiveStream`
  with a compatible :class:`~.abc.ObjectSendStream`)
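
As an illustrative sketch (not from the original text), two memory object stream pairs can be
stapled into the two ends of a bidirectional in-memory pipe::

    from anyio import create_memory_object_stream, run
    from anyio.streams.stapled import StapledObjectStream


    async def main():
        send_a, receive_a = create_memory_object_stream(1)
        send_b, receive_b = create_memory_object_stream(1)
        # Each side sends on one stream and receives on the other
        side1 = StapledObjectStream(send_a, receive_b)
        side2 = StapledObjectStream(send_b, receive_a)
        await side1.send('ping')
        print(await side2.receive())  # ping
        await side2.send('pong')
        print(await side1.receive())  # pong

    run(main)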

Buffered byte streams
---------------------

A buffered byte stream wraps an existing bytes-oriented receive stream and provides certain
amenities that require buffering, such as receiving an exact number of bytes, or receiving until
the given delimiter is found.

Example::

    from anyio import run, create_memory_object_stream
    from anyio.streams.buffered import BufferedByteReceiveStream


    async def main():
        send, receive = create_memory_object_stream(4)
        buffered = BufferedByteReceiveStream(receive)
        for part in b'hel', b'lo, ', b'wo', b'rld!':
            await send.send(part)

        result = await buffered.receive_exactly(8)
        print(repr(result))

        result = await buffered.receive_until(b'!', 10)
        print(repr(result))

    run(main)

The above script gives the following output::

    b'hello, w'
    b'orld'

Text streams
------------

Text streams wrap existing receive/send streams and encode/decode strings to bytes and vice
versa.

Example::

    from anyio import run, create_memory_object_stream
    from anyio.streams.text import TextReceiveStream, TextSendStream


    async def main():
        bytes_send, bytes_receive = create_memory_object_stream(1)
        text_send = TextSendStream(bytes_send)
        await text_send.send('åäö')
        result = await bytes_receive.receive()
        print(repr(result))

        text_receive = TextReceiveStream(bytes_receive)
        await bytes_send.send(result)
        result = await text_receive.receive()
        print(repr(result))

    run(main)

The above script gives the following output::

    b'\xc3\xa5\xc3\xa4\xc3\xb6'
    'åäö'

.. _FileStreams:

File streams
------------

File streams read from or write to files on the file system. They can be useful for substituting
a file for another source of data, or writing output to a file for logging or debugging purposes.

Example::

    from anyio import run
    from anyio.streams.file import FileReadStream, FileWriteStream


    async def main():
        path = '/tmp/testfile'
        async with await FileWriteStream.from_path(path) as stream:
            await stream.send(b'Hello, World!')

        async with await FileReadStream.from_path(path) as stream:
            async for chunk in stream:
                print(chunk.decode(), end='')

        print()

    run(main)

.. versionadded:: 3.0

.. _TLS:

TLS streams
-----------

TLS (Transport Layer Security), the successor to SSL (Secure Sockets Layer), is the supported way
of providing authenticity and confidentiality for TCP streams in AnyIO.

TLS is typically established right after the connection has been made. The handshake involves the
following steps:

* Sending the certificate to the peer (usually just by the server)
* Checking the peer certificate(s) against trusted CA certificates
* Checking that the peer host name matches the certificate

Obtaining a server certificate
******************************

There are three principal ways you can get an X.509 certificate for your server:

#. Create a self signed certificate
#. Use certbot_ or a similar software to automatically obtain certificates from `Let's Encrypt`_
#. Buy one from a certificate vendor

The first option is probably the easiest, but this requires that any client connecting to your
server adds the self signed certificate to their list of trusted certificates. This is of course
impractical outside of local development and is strongly discouraged in production use.

The second option is nowadays the recommended method, as long as you have an environment where
running certbot_ or similar software can automatically replace the certificate with a newer one
when necessary, and that you don't need any extra features like class 2 validation.

The third option may be your only valid choice when you have special requirements for the
certificate that only a certificate vendor can fulfill, or that automatically renewing the
certificates is not possible or practical in your environment.

.. _certbot: https://certbot.eff.org/
.. _Let's Encrypt: https://letsencrypt.org/

Using self signed certificates
******************************

To create a self signed certificate for ``localhost``, you can use the openssl_ command line
tool:

.. code-block:: bash

    openssl req -x509 -newkey rsa:2048 -subj '/CN=localhost' -keyout key.pem -out cert.pem -nodes -days 365

This creates a (2048 bit) private RSA key (``key.pem``) and a certificate (``cert.pem``) matching
the host name "localhost". The certificate will be valid for one year with these settings.
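
To sanity check the generated certificate (an extra step, not in the original text), you can
print its subject and validity period:

.. code-block:: bash

    openssl x509 -in cert.pem -noout -subject -dates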

To set up a server using this key-certificate pair::

    import ssl

    from anyio import create_tcp_listener, run
    from anyio.streams.tls import TLSListener


    async def handle(client):
        async with client:
            name = await client.receive()
            await client.send(b'Hello, %s\n' % name)


    async def main():
        # Create a context for the purpose of authenticating clients
        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)

        # Load the server certificate and private key
        context.load_cert_chain(certfile='cert.pem', keyfile='key.pem')

        # Create the listener and start serving connections
        listener = TLSListener(await create_tcp_listener(local_port=1234), context)
        await listener.serve(handle)

    run(main)

Connecting to this server can then be done as follows::

    import ssl

    from anyio import connect_tcp, run


    async def main():
        # These two steps are only required for certificates that are not trusted by the
        # installed CA certificates on your machine, so you can skip this part if you use
        # Let's Encrypt or a commercial certificate vendor
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        context.load_verify_locations(cafile='cert.pem')

        async with await connect_tcp('localhost', 1234, ssl_context=context) as client:
            await client.send(b'Client\n')
            response = await client.receive()
            print(response)

    run(main)

.. _openssl: https://www.openssl.org/

Creating self-signed certificates on the fly
********************************************

When testing your TLS enabled service, it would be convenient to generate the certificates on
the fly. To this end, you can use the trustme_ library::

    import ssl

    import pytest
    import trustme


    @pytest.fixture(scope='session')
    def ca():
        return trustme.CA()


    @pytest.fixture(scope='session')
    def server_context(ca):
        server_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ca.issue_cert('localhost').configure_cert(server_context)
        return server_context


    @pytest.fixture(scope='session')
    def client_context(ca):
        client_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        ca.configure_trust(client_context)
        return client_context

You can then pass the server and client contexts from the above fixtures to
:class:`~.streams.tls.TLSListener`, :meth:`~.streams.tls.TLSStream.wrap` or whatever you use on
either side.

.. _trustme: https://pypi.org/project/trustme/

Dealing with ragged EOFs
************************

According to the `TLS standard`_, encrypted connections should end with a closing handshake. This
practice prevents so-called `truncation attacks`_. However, broadly available implementations for
protocols such as HTTP widely ignore this requirement because the protocol level closing signal
would make the shutdown handshake redundant.

AnyIO follows the standard by default (unlike the Python standard library's :mod:`ssl` module).
The practical implication of this is that if you're implementing a protocol that is expected to
skip the TLS closing handshake, you need to pass the ``standard_compatible=False`` option to
:meth:`~.streams.tls.TLSStream.wrap` or :class:`~.streams.tls.TLSListener`.
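
As a sketch of what that might look like on the client side (not part of the original text; the
host name handling is just an example)::

    import ssl

    from anyio import connect_tcp
    from anyio.streams.tls import TLSStream


    async def connect_with_ragged_eofs(host: str, port: int) -> TLSStream:
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        tcp_stream = await connect_tcp(host, port)
        return await TLSStream.wrap(tcp_stream, ssl_context=context, hostname=host,
                                    standard_compatible=False)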
py:currentmodule:: anyio AnyIO allows you to run arbitrary executables in subprocesses, either as a one-shot call or by opening a process handle for you that gives you more control over the subprocess. You can either give the command as a string, in which case it is passed to your default shell (equivalent to ``shell=True`` in :func:`subprocess.run`), or as a sequence of strings (``shell=False``) in which case the executable is the first item in the sequence and the rest are arguments passed to it. .. note:: On Windows and Python 3.7 and earlier, asyncio uses :class:`~asyncio.SelectorEventLoop` by default which does not support subprocesses. It is recommended to upgrade to at least Python 3.8 to overcome this limitation. Running one-shot commands ------------------------- To run an external command with one call, use :func:`~run_process`:: from anyio import run_process, run async def main(): result = await run_process('ps') print(result.stdout.decode()) run(main) The snippet above runs the ``ps`` command within a shell. To run it directly:: from anyio import run_process, run async def main(): result = await run_process(['ps']) print(result.stdout.decode()) run(main) Working with processes ---------------------- When you have more complex requirements for your interaction with subprocesses, you can launch one with :func:`~open_process`:: from anyio import open_process, run from anyio.streams.text import TextReceiveStream async def main(): async with await open_process(['ps']) as process: async for text in TextReceiveStream(process.stdout): print(text) run(main) See the API documentation of :class:`~.abc.Process` for more information. .. _RunInProcess: Running functions in worker processes ------------------------------------- When you need to run CPU intensive code, worker processes are better than threads because current implementations of Python cannot run Python code in multiple threads at once. Exceptions to this rule are: #. Blocking I/O operations #. C extension code that explicitly releases the Global Interpreter Lock If the code you wish to run does not belong in this category, it's best to use worker processes instead in order to take advantage of multiple CPU cores. 
This is done by using :func:`.to_process.run_sync`::

    import time

    from anyio import run, to_process


    def cpu_intensive_function(arg1, arg2):
        time.sleep(1)
        return arg1 + arg2


    async def main():
        result = await to_process.run_sync(cpu_intensive_function, 'Hello, ', 'world!')
        print(result)

    # This check is important when the application uses to_process.run_sync()
    if __name__ == '__main__':
        run(main)

Technical details
*****************

There are some limitations regarding the arguments and return values passed:

* the arguments must be pickleable (using the highest available protocol)
* the return value must be pickleable (using the highest available protocol)
* the target callable must be importable (lambdas and inner functions won't work)

Other considerations:

* Even ``cancellable=False`` runs can be cancelled before the request has been sent to the
  worker process
* If a cancellable call is cancelled during execution on the worker process, the worker process
  will be killed
* The worker process imports the parent's ``__main__`` module, so guarding for any import time
  side effects using ``if __name__ == '__main__':`` is required to avoid infinite recursion
* ``sys.stdin``, ``sys.stdout`` and ``sys.stderr`` are redirected to ``/dev/null`` so
  :func:`print` and :func:`input` won't work
* Worker processes terminate after 5 minutes of inactivity, or when the event loop is finished
* On asyncio, either :func:`asyncio.run` or :func:`anyio.run` must be used for proper cleanup
  to happen
* Multiprocessing-style synchronization primitives are currently not available
anyio-3.5.0/docs/support.rst000066400000000000000000000013421416724134300160410ustar00rootroot00000000000000Getting help
============

If you are having trouble with AnyIO, make sure you've first checked the :doc:`FAQ <faq>` to
see if your question is answered there. If not, you have a couple of ways to get support:

* Post a question on `Stack Overflow`_ and use the ``anyio`` tag
* Join the `python-trio/AnyIO`_ room on Gitter

.. _Stack Overflow: https://stackoverflow.com/
.. _python-trio/AnyIO: https://gitter.im/python-trio/AnyIO

Reporting bugs
==============

If you're fairly certain that you have discovered a bug, you can `file an issue`_ on Github. If
you feel unsure, come talk to us first! The issue tracker is **not** the proper venue for
asking support questions.

.. _file an issue: https://github.com/agronholm/anyio/issues
anyio-3.5.0/docs/synchronization.rst000066400000000000000000000113351416724134300175710ustar00rootroot00000000000000Using synchronization primitives
================================

.. py:currentmodule:: anyio

Synchronization primitives are objects that are used by tasks to communicate and coordinate
with each other. They are useful for things like distributing workload, notifying other tasks
and guarding access to shared resources.

.. note:: AnyIO primitives are not thread-safe, therefore they should not be used directly from
   worker threads. Use :func:`~from_thread.run_sync` for that.

Events
------

Events are used to notify tasks that something they've been waiting for has happened. An event
object can have multiple listeners and they are all notified when the event is triggered.

Example::

    from anyio import Event, create_task_group, run


    async def notify(event):
        event.set()


    async def main():
        event = Event()
        async with create_task_group() as tg:
            tg.start_soon(notify, event)
            await event.wait()
            print('Received notification!')

    run(main)

..
note:: Unlike standard library Events, AnyIO events cannot be reused, and must be replaced
   instead. This practice prevents a class of race conditions, and matches the semantics of the
   trio library.

Semaphores
----------

Semaphores are used for limiting access to a shared resource. A semaphore starts with a maximum
value, which is decremented each time the semaphore is acquired by a task and incremented when
it is released. If the value drops to zero, any attempt to acquire the semaphore will block
until another task frees it.

Example::

    from anyio import Semaphore, create_task_group, sleep, run


    async def use_resource(tasknum, semaphore):
        async with semaphore:
            print('Task number', tasknum, 'is now working with the shared resource')
            await sleep(1)


    async def main():
        semaphore = Semaphore(2)
        async with create_task_group() as tg:
            for num in range(10):
                tg.start_soon(use_resource, num, semaphore)

    run(main)

Locks
-----

Locks are used to guard shared resources, ensuring that only a single task has access to them
at any given time. They function much like semaphores with a maximum value of 1, except that
only the task that acquired the lock is allowed to release it.

Example::

    from anyio import Lock, create_task_group, sleep, run


    async def use_resource(tasknum, lock):
        async with lock:
            print('Task number', tasknum, 'is now working with the shared resource')
            await sleep(1)


    async def main():
        lock = Lock()
        async with create_task_group() as tg:
            for num in range(4):
                tg.start_soon(use_resource, num, lock)

    run(main)

Conditions
----------

A condition is basically a combination of an event and a lock. It first acquires a lock and
then waits for a notification from the event. Once the condition receives a notification, it
releases the lock. The notifying task can also choose to wake up more than one listener at
once, or even all of them.

Like :class:`Lock`, :class:`Condition` also requires that the task which locked it also be the
one to release it.

Example::

    from anyio import Condition, create_task_group, sleep, run


    async def listen(tasknum, condition):
        async with condition:
            await condition.wait()
            print('Woke up task number', tasknum)


    async def main():
        condition = Condition()
        async with create_task_group() as tg:
            for tasknum in range(6):
                tg.start_soon(listen, tasknum, condition)

            await sleep(1)
            async with condition:
                condition.notify(1)

            await sleep(1)
            async with condition:
                condition.notify(2)

            await sleep(1)
            async with condition:
                condition.notify_all()

    run(main)

Capacity limiters
-----------------

Capacity limiters are like semaphores except that a single borrower (the current task by
default) can only hold a single token at a time. It is also possible to borrow a token on
behalf of any arbitrary object, so long as that object is hashable.

Example::

    from anyio import CapacityLimiter, create_task_group, sleep, run


    async def use_resource(tasknum, limiter):
        async with limiter:
            print('Task number', tasknum, 'is now working with the shared resource')
            await sleep(1)


    async def main():
        limiter = CapacityLimiter(2)
        async with create_task_group() as tg:
            for num in range(10):
                tg.start_soon(use_resource, num, limiter)

    run(main)

You can adjust the total number of tokens by setting a different value on the limiter's
``total_tokens`` property.
anyio-3.5.0/docs/tasks.rst000066400000000000000000000110661416724134300154560ustar00rootroot00000000000000Creating and managing tasks
===========================

.. py:currentmodule:: anyio

A *task* is a unit of execution that lets you do many things concurrently that need waiting on.
This works so that while you can have any number of tasks, the asynchronous event loop can only
run one of them at a time. When the task encounters an ``await`` statement that requires the
task to sleep until something happens, the event loop is then free to work on another task.
When the thing the first task was waiting on is complete, the event loop will resume the
execution of that task on the first opportunity it gets.

Task handling in AnyIO loosely follows the trio_ model. Tasks can be created (*spawned*) using
*task groups*. A task group is an asynchronous context manager that makes sure that all its
child tasks are finished one way or another after the context block is exited. If a child task,
or the code in the enclosed context block raises an exception, all child tasks are cancelled.
Otherwise the context manager just waits until all child tasks have exited before proceeding.

Here's a demonstration::

    from anyio import sleep, create_task_group, run


    async def sometask(num):
        print('Task', num, 'running')
        await sleep(1)
        print('Task', num, 'finished')


    async def main():
        async with create_task_group() as tg:
            for num in range(5):
                tg.start_soon(sometask, num)

        print('All tasks finished!')

    run(main)

.. _trio: https://trio.readthedocs.io/en/latest/reference-core.html#tasks-let-you-do-multiple-things-at-once

Starting and initializing tasks
-------------------------------

Sometimes it is very useful to be able to wait until a task has successfully initialized
itself. For example, when starting network services, you can have your task start the listener
and then signal the caller that initialization is done. That way, the caller can now start
another task that depends on that service being up and running. Also, if the socket bind fails
or something else goes wrong during initialization, the exception will be propagated to the
caller which can then catch and handle it.

This can be done with :meth:`TaskGroup.start() <.abc.TaskGroup.start>`::

    from anyio import TASK_STATUS_IGNORED, create_task_group, connect_tcp, create_tcp_listener, run
    from anyio.abc import TaskStatus


    async def handler(stream):
        ...


    async def start_some_service(port: int, *, task_status: TaskStatus = TASK_STATUS_IGNORED):
        async with await create_tcp_listener(local_host='127.0.0.1', local_port=port) as listener:
            task_status.started()
            await listener.serve(handler)


    async def main():
        async with create_task_group() as tg:
            await tg.start(start_some_service, 5000)
            async with await connect_tcp('127.0.0.1', 5000) as stream:
                ...

    run(main)

The target coroutine function **must** call ``task_status.started()``, because the task calling
:meth:`TaskGroup.start() <.abc.TaskGroup.start>` will be blocked until then. If the spawned
task never calls it, then the :meth:`TaskGroup.start() <.abc.TaskGroup.start>` call will raise
a ``RuntimeError``.

.. note:: Unlike :meth:`~.abc.TaskGroup.start_soon`, :meth:`~.abc.TaskGroup.start` needs an
   ``await``.

Handling multiple errors in a task group
----------------------------------------

It is possible for more than one task to raise an exception in a task group. This can happen
when a task reacts to cancellation by entering either an exception handler block or a
``finally:`` block and raises an exception there. This raises the question: which exception is
propagated from the task group context manager? The answer is "both". In practice this means
that a special exception, :exc:`~ExceptionGroup`, is raised which contains both exception
objects.
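To make this concrete, here is a minimal sketch (the task functions and messages are made up
for illustration): one task fails outright, and the other raises during the resulting
cancellation cleanup, so the task group raises an :exc:`~ExceptionGroup` holding both errors::

    from anyio import ExceptionGroup, create_task_group, sleep, run


    async def fail_fast():
        raise ValueError('task failed')


    async def fail_during_cleanup():
        try:
            await sleep(10)
        finally:
            # Raising here while the task is being cancelled adds a second exception
            raise RuntimeError('cleanup failed')


    async def main():
        try:
            async with create_task_group() as tg:
                tg.start_soon(fail_during_cleanup)
                tg.start_soon(fail_fast)
        except ExceptionGroup as excgrp:
            for exc in excgrp.exceptions:
                print('caught', repr(exc))

    run(main)

Note that if only a single (non-cancellation) exception is raised, the task group propagates it
as-is, without wrapping it in an exception group.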
Unfortunately this complicates any code that wishes to catch a specific exception, because the
exception could be wrapped in an :exc:`~ExceptionGroup`.

Context propagation
-------------------

Whenever a new task is spawned, `context`_ will be copied to the new task. It is important to
note *which* context will be copied to the newly spawned task. It is not the context of the
task group's host task that will be copied, but the context of the task that calls
:meth:`TaskGroup.start() <.abc.TaskGroup.start>` or
:meth:`TaskGroup.start_soon() <.abc.TaskGroup.start_soon>`.

.. note:: Context propagation **does not work** on asyncio when using Python 3.6, as asyncio
   support for this only landed in v3.7.

.. _context: https://docs.python.org/3/library/contextvars.html
anyio-3.5.0/docs/testing.rst000066400000000000000000000120141416724134300160010ustar00rootroot00000000000000Testing with AnyIO
==================

AnyIO provides built-in support for testing your library or application in the form of a
pytest_ plugin.

.. _pytest: https://docs.pytest.org/en/latest/

Creating asynchronous tests
---------------------------

Pytest does not natively support running asynchronous test functions, so they have to be marked
for the AnyIO pytest plugin to pick them up. This can be done in one of two ways:

#. Using the ``pytest.mark.anyio`` marker
#. Using the ``anyio_backend`` fixture, either directly or via another fixture

The simplest way is thus the following::

    import pytest

    # This is the same as using the @pytest.mark.anyio on all test functions in the module
    pytestmark = pytest.mark.anyio


    async def test_something():
        ...

Marking modules, classes or functions with this marker has the same effect as applying
``pytest.mark.usefixtures('anyio_backend')`` on them. Thus, you can also require the fixture
directly in your tests and fixtures::

    import pytest


    async def test_something(anyio_backend):
        ...

Specifying the backends to run on
---------------------------------

The ``anyio_backend`` fixture determines the backends and their options that tests and fixtures
are run with. The AnyIO pytest plugin comes with a function scoped fixture with this name which
runs everything on all supported backends.

If you want to change the backends/options for the entire project, put something like this in
your top level ``conftest.py``::

    @pytest.fixture
    def anyio_backend():
        return 'asyncio'

If you want to specify different options for the selected backend, you can do so by passing a
tuple of (backend name, options dict)::

    @pytest.fixture(params=[
        pytest.param(('asyncio', {'use_uvloop': True}), id='asyncio+uvloop'),
        pytest.param(('asyncio', {'use_uvloop': False}), id='asyncio'),
        pytest.param(('trio', {'restrict_keyboard_interrupt_to_checkpoints': True}), id='trio')
    ])
    def anyio_backend(request):
        return request.param

If you need to run a single test on a specific backend, you can use
``@pytest.mark.parametrize`` (remember to add the ``anyio_backend`` parameter to the actual
test function, or pytest will complain)::

    @pytest.mark.parametrize('anyio_backend', ['asyncio'])
    async def test_on_asyncio_only(anyio_backend):
        ...

Because the ``anyio_backend`` fixture can return either a string or a tuple, there are two
additional function-scoped fixtures (which themselves depend on the ``anyio_backend`` fixture)
provided for your convenience:

* ``anyio_backend_name``: the name of the backend (e.g.
``asyncio``) * ``anyio_backend_options``: the dictionary of option keywords used to run the backend Asynchronous fixtures --------------------- The plugin also supports coroutine functions as fixtures, for the purpose of setting up and tearing down asynchronous services used for tests. There are two ways to get the AnyIO pytest plugin to run your asynchronous fixtures: #. Use them in AnyIO enabled tests (see the first section) #. Use the ``anyio_backend`` fixture (or any other fixture using it) in the fixture itself The simplest way is using the first option:: import pytest pytestmark = pytest.mark.anyio @pytest.fixture async def server(): server = await setup_server() yield server await server.shutdown() async def test_server(server): result = await server.do_something() assert result == 'foo' For ``autouse=True`` fixtures, you may need to use the other approach:: @pytest.fixture(autouse=True) async def server(anyio_backend): server = await setup_server() yield await server.shutdown() async def test_server(): result = await client.do_something_on_the_server() assert result == 'foo' Using async fixtures with higher scopes --------------------------------------- For async fixtures with scopes other than ``function``, you will need to define your own ``anyio_backend`` fixture because the default ``anyio_backend`` fixture is function scoped:: @pytest.fixture(scope='module') def anyio_backend(): return 'asyncio' @pytest.fixture(scope='module') async def server(anyio_backend): server = await setup_server() yield await server.shutdown() Technical details ----------------- The fixtures and tests are run by a "test runner", implemented separately for each backend. The test runner keeps an event loop open during the request, making it possible for code in fixtures to communicate with the code in the tests (and each other). The test runner is created when the first matching async test or fixture is about to be run, and shut down when that same fixture is being torn down or the test has finished running. As such, if no async fixtures are used, a separate test runner is created for each test. Conversely, if even one async fixture (scoped higher than ``function``) is shared across all tests, only one test runner will be created during the test session. anyio-3.5.0/docs/threads.rst000066400000000000000000000146541416724134300157710ustar00rootroot00000000000000Working with threads ==================== .. py:currentmodule:: anyio Practical asynchronous applications occasionally need to run network, file or computationally expensive operations. Such operations would normally block the asynchronous event loop, leading to performance issues. The solution is to run such code in *worker threads*. Using worker threads lets the event loop continue running other tasks while the worker thread runs the blocking call. .. caution:: Do not spawn too many threads, as the context switching overhead may cause your system to slow down to a crawl. A few dozen threads should be fine, but hundreds are probably bad. Consider using AnyIO's semaphores to limit the maximum number of threads. Running a function in a worker thread ------------------------------------- To run a (synchronous) callable in a worker thread:: import time from anyio import to_thread, run async def main(): await to_thread.run_sync(time.sleep, 5) run(main) By default, tasks are shielded from cancellation while they are waiting for a worker thread to finish. You can pass the ``cancellable=True`` parameter to allow such tasks to be cancelled. 
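For example, here is a sketch of a worker thread call that a surrounding timeout is allowed to
cancel (the durations here are arbitrary)::

    import time

    from anyio import fail_after, run, to_thread


    async def main():
        try:
            with fail_after(1):
                # Without cancellable=True, the timeout would have to wait for the
                # full 5 second sleep to finish in the worker thread
                await to_thread.run_sync(time.sleep, 5, cancellable=True)
        except TimeoutError:
            print('Gave up waiting for the worker thread')

    run(main)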
Note, however, that the thread will still continue running – only its outcome will be ignored.

.. seealso:: :ref:`RunInProcess`

Calling asynchronous code from a worker thread
----------------------------------------------

If you need to call a coroutine function from a worker thread, you can do this::

    from anyio import from_thread, sleep, to_thread, run


    def blocking_function():
        from_thread.run(sleep, 5)


    async def main():
        await to_thread.run_sync(blocking_function)

    run(main)

.. note:: The worker thread must have been spawned using :func:`~to_thread.run_sync` for this
   to work.

Calling synchronous code from a worker thread
---------------------------------------------

Occasionally you may need to call synchronous code in the event loop thread from a worker
thread. Common cases include setting asynchronous events or sending data to a memory object
stream. Because these methods aren't thread safe, you need to arrange them to be called inside
the event loop thread using :func:`~from_thread.run_sync`::

    import time

    from anyio import Event, from_thread, to_thread, run


    def worker(event):
        time.sleep(1)
        from_thread.run_sync(event.set)


    async def main():
        event = Event()
        await to_thread.run_sync(worker, event)
        await event.wait()

    run(main)

Calling asynchronous code from an external thread
--------------------------------------------------

If you need to run async code from a thread that is not a worker thread spawned by the event
loop, you need a *blocking portal*. This needs to be obtained from within the event loop
thread.

One way to do this is to start a new event loop with a portal, using
:func:`~start_blocking_portal` (which takes mostly the same arguments as :func:`~run`)::

    from anyio.from_thread import start_blocking_portal


    with start_blocking_portal(backend='trio') as portal:
        portal.call(...)

If you already have an event loop running and wish to grant access to external threads, you can
create a :class:`~.BlockingPortal` directly::

    from anyio import run
    from anyio.from_thread import BlockingPortal


    async def main():
        async with BlockingPortal() as portal:
            # ...hand off the portal to external threads...
            await portal.sleep_until_stopped()

    run(main)

Spawning tasks from worker threads
----------------------------------

When you need to spawn a task to be run in the background, you can do so using
:meth:`~.BlockingPortal.start_task_soon`::

    from concurrent.futures import as_completed

    from anyio import sleep
    from anyio.from_thread import start_blocking_portal


    async def long_running_task(index):
        await sleep(1)
        print(f'Task {index} running...')
        await sleep(index)
        return f'Task {index} return value'


    with start_blocking_portal() as portal:
        futures = [portal.start_task_soon(long_running_task, i) for i in range(1, 5)]
        for future in as_completed(futures):
            print(future.result())

Cancelling tasks spawned this way can be done by cancelling the returned
:class:`~concurrent.futures.Future`.
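As a sketch (the task function here is illustrative only)::

    from anyio import sleep
    from anyio.from_thread import start_blocking_portal


    async def long_running_task():
        await sleep(60)


    with start_blocking_portal() as portal:
        future = portal.start_task_soon(long_running_task)
        # Cancelling the future cancels the corresponding task in the event loop thread
        future.cancel()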
Blocking portals also have a method similar to
:meth:`TaskGroup.start() <.abc.TaskGroup.start>`: :meth:`~.BlockingPortal.start_task` which,
like its counterpart, waits for the callable to signal readiness by calling
``task_status.started()``::

    from anyio import sleep, TASK_STATUS_IGNORED
    from anyio.from_thread import start_blocking_portal


    async def service_task(*, task_status=TASK_STATUS_IGNORED):
        task_status.started('STARTED')
        await sleep(1)
        return 'DONE'


    with start_blocking_portal() as portal:
        future, start_value = portal.start_task(service_task)
        print('Task has started with value', start_value)

        return_value = future.result()
        print('Task has finished with return value', return_value)

Using asynchronous context managers from worker threads
--------------------------------------------------------

You can use :meth:`~.BlockingPortal.wrap_async_context_manager` to wrap an asynchronous context
manager as a synchronous one::

    from anyio.from_thread import start_blocking_portal


    class AsyncContextManager:
        async def __aenter__(self):
            print('entering')

        async def __aexit__(self, exc_type, exc_val, exc_tb):
            print('exiting with', exc_type)


    async_cm = AsyncContextManager()
    with start_blocking_portal() as portal, portal.wrap_async_context_manager(async_cm):
        print('inside the context manager block')

.. note:: You cannot use wrapped async context managers in synchronous callbacks inside the
   event loop thread.

Context propagation
-------------------

When running functions in worker threads, the current context is copied to the worker thread.
Therefore any context variables available on the task will also be available to the code
running on the thread. As always with context variables, any changes made to them will not
propagate back to the calling asynchronous task.

When calling asynchronous code from worker threads, context is again copied to the task that
calls the target function in the event loop thread. Note, however, that this **does not work**
on asyncio when running on Python 3.6.
anyio-3.5.0/docs/typedattrs.rst000066400000000000000000000047631416724134300165360ustar00rootroot00000000000000Using typed attributes
======================

.. py:currentmodule:: anyio

On AnyIO, streams and listeners can be layered on top of each other to provide extra
functionality. But when you want to look up information from one of the layers down below, you
might have to traverse the entire chain to find what you're looking for, which is highly
inconvenient. To address this, AnyIO has a system of *typed attributes* where you can look for
a specific attribute by its unique key. If a stream or listener wrapper does not have the
attribute you're looking for, it will look it up in the wrapped instance, and that wrapper can
look in its wrapped instance and so on, until the attribute is either found or the end of the
chain is reached. This also lets wrappers override attributes from the wrapped objects when
necessary.

A common use case is finding the IP address of the remote side of a TCP connection when the
stream may be either :class:`~.abc.SocketStream` or :class:`~.streams.tls.TLSStream`::

    from anyio import connect_tcp
    from anyio.abc import SocketAttribute


    async def connect(host, port, tls: bool):
        stream = await connect_tcp(host, port, tls=tls)
        print('Connected to', stream.extra(SocketAttribute.remote_address))

Each typed attribute provider class should document the set of attributes it provides on its
own.
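If no layer in the chain provides the requested attribute, ``TypedAttributeLookupError`` is
raised; ``extra()`` also accepts a default value as its second argument to fall back on
instead. A small sketch (the attribute choices here are just examples)::

    from anyio.abc import SocketAttribute
    from anyio.streams.tls import TLSAttribute


    async def describe(stream):
        # Available on both plain and TLS-wrapped socket streams
        print('Remote address:', stream.extra(SocketAttribute.remote_address))

        # Returns None instead of raising TypedAttributeLookupError on a plain stream
        print('TLS version:', stream.extra(TLSAttribute.tls_version, None))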
Defining your own typed attributes
----------------------------------

By convention, typed attributes are stored together in a container class with other attributes
of the same category::

    from anyio import TypedAttribute, TypedAttributeSet


    class MyTypedAttribute(TypedAttributeSet):
        string_valued_attribute = TypedAttribute[str]()
        some_float_attribute = TypedAttribute[float]()

To provide values for these attributes, implement the
:meth:`~.TypedAttributeProvider.extra_attributes` property in your class::

    from anyio import TypedAttributeProvider


    class MyAttributeProvider(TypedAttributeProvider):
        @property
        def extra_attributes(self):
            return {
                MyTypedAttribute.string_valued_attribute: lambda: 'my attribute value',
                MyTypedAttribute.some_float_attribute: lambda: 6.492
            }

If your class inherits from another typed attribute provider, make sure you include its
attributes in the return value::

    class AnotherAttributeProvider(MyAttributeProvider):
        @property
        def extra_attributes(self):
            return {
                **super().extra_attributes,
                MyTypedAttribute.string_valued_attribute: lambda: 'overridden attribute value'
            }
anyio-3.5.0/docs/versionhistory.rst000066400000000000000000000734321416724134300174430ustar00rootroot00000000000000Version history
===============

This library adheres to `Semantic Versioning 2.0 <https://semver.org/>`_.

**3.5.0**

- Added ``start_new_session`` keyword argument to ``run_process()`` and ``open_process()``
  (PR by Jordan Speicher)
- Fixed deadlock in synchronization primitives on asyncio which can happen if a task acquiring
  a primitive is hit with a native (not AnyIO) cancellation with just the right timing, leaving
  the next acquiring task waiting forever
  (`#398 <https://github.com/agronholm/anyio/issues/398>`_)
- Added workaround for bpo-46313_ to enable compatibility with OpenSSL 3.0

.. _bpo-46313: https://bugs.python.org/issue46313

**3.4.0**

- Added context propagation to/from worker threads in ``to_thread.run_sync()``,
  ``from_thread.run()`` and ``from_thread.run_sync()``
  (`#363 <https://github.com/agronholm/anyio/issues/363>`_; partially based on a PR by
  Sebastián Ramírez)

  **NOTE**: Requires Python 3.7 to work properly on asyncio!
- Fixed race condition in ``Lock`` and ``Semaphore`` classes when a task waiting on
  ``acquire()`` is cancelled while another task is waiting to acquire the same primitive
  (`#387 <https://github.com/agronholm/anyio/issues/387>`_)
- Fixed async context manager's ``__aexit__()`` method not being called in
  ``BlockingPortal.wrap_async_context_manager()`` if the host task is cancelled
  (`#381 <https://github.com/agronholm/anyio/issues/381>`_; PR by Jonathan Slenders)
- Fixed worker threads being marked as event loop threads in sniffio
- Fixed task parent ID not getting set to the correct value on asyncio
- Enabled the test suite to run without IPv6 support, trio or pytest plugin autoloading

**3.3.4**

- Fixed ``BrokenResourceError`` instead of ``EndOfStream`` being raised in ``TLSStream`` when
  the peer abruptly closes the connection while ``TLSStream`` is receiving data with
  ``standard_compatible=False`` set

**3.3.3**

- Fixed UNIX socket listener not setting accepted sockets to non-blocking mode on asyncio
- Changed unconnected UDP sockets to be always bound to a local port (on "any" interface) to
  avoid errors on asyncio + Windows

**3.3.2**

- Fixed cancellation problem on asyncio where level-triggered cancellation for **all** parent
  cancel scopes would not resume after exiting a shielded nested scope
  (`#370 <https://github.com/agronholm/anyio/issues/370>`_)

**3.3.1**

- Added missing documentation for the ``ExceptionGroup.exceptions`` attribute
- Changed the asyncio test runner not to use uvloop by default (to match the behavior of
  ``anyio.run()``)
- Fixed ``RuntimeError`` on asyncio when a ``CancelledError`` is raised from a task spawned
  through a ``BlockingPortal`` (`#357 <https://github.com/agronholm/anyio/issues/357>`_)
- Fixed asyncio warning about a ``Future`` with an exception that was never retrieved which
  happened when a socket was already written to but the peer abruptly closed the connection

**3.3.0**

- Added asynchronous ``Path`` class
- Added the ``wrap_file()`` function for wrapping existing files as asynchronous file objects
- Relaxed the type of the ``path`` initializer argument to ``FileReadStream`` and
  ``FileWriteStream`` so they accept any path-like object (including the new asynchronous
  ``Path`` class)
- Dropped unnecessary dependency on the ``async_generator`` library
- Changed the generics in ``AsyncFile`` so that the methods correctly return either ``str`` or
  ``bytes`` based on the argument to ``open_file()``
- Fixed an asyncio bug where under certain circumstances, a stopping worker thread would still
  accept new assignments, leading to a hang

**3.2.1**

- Fixed idle thread pruning on asyncio sometimes causing an expired worker thread to be
  assigned a task

**3.2.0**

- Added Python 3.10 compatibility
- Added the ability to close memory object streams synchronously (including support for use as
  a synchronous context manager)
- Changed the default value of the ``use_uvloop`` asyncio backend option to ``False`` to
  prevent unsafe event loop policy changes in different threads
- Fixed ``to_thread.run_sync()`` hanging on the second call on asyncio when used with
  ``loop.run_until_complete()``
- Fixed ``to_thread.run_sync()`` prematurely marking a worker thread as inactive when a task
  awaiting on the result is cancelled
- Fixed ``ResourceWarning`` about an unclosed socket when UNIX socket connect fails on asyncio
- Fixed the type annotation of ``open_signal_receiver()`` as a synchronous context manager
- Fixed the type annotation of ``DeprecatedAwaitable(|List|Float).__await__`` to match the
  ``typing.Awaitable`` protocol

**3.1.0**

- Added ``env`` and ``cwd`` keyword arguments to ``run_process()`` and ``open_process()``.
- Added support for mutation of ``CancelScope.shield`` (PR by John Belmonte)
- Added the ``sleep_forever()`` and ``sleep_until()`` functions
- Changed asyncio task groups so that if the host and child tasks have only raised
  ``CancelledErrors``, just one ``CancelledError`` will now be raised instead of an
  ``ExceptionGroup``, allowing asyncio to ignore it when it propagates out of the task
- Changed task names to be converted to ``str`` early on asyncio (PR by Thomas Grainger)
- Fixed ``sniffio._impl.AsyncLibraryNotFoundError: unknown async library, or not in async
  context`` on asyncio and Python 3.6 when ``to_thread.run_sync()`` is used from
  ``loop.run_until_complete()``
- Fixed odd ``ExceptionGroup: 0 exceptions were raised in the task group`` appearing under
  certain circumstances on asyncio
- Fixed ``wait_all_tasks_blocked()`` returning prematurely on asyncio when a previously blocked
  task is cancelled (PR by Thomas Grainger)
- Fixed declared return type of ``TaskGroup.start()`` (it was declared as ``None``, but
  anything can be returned from it)
- Fixed ``TextStream.extra_attributes`` raising ``AttributeError`` (PR by Thomas Grainger)
- Fixed ``await maybe_async(current_task())`` returning ``None`` (PR by Thomas Grainger)
- Fixed: ``pickle.dumps(current_task())`` now correctly raises ``TypeError`` instead of
  pickling to ``None`` (PR by Thomas Grainger)
- Fixed return type annotation of ``Event.wait()`` (``bool`` → ``None``) (PR by Thomas
  Grainger)
- Fixed return type annotation of ``RunVar.get()`` to return either the type of the default
  value or the type of the contained value (PR by Thomas Grainger)
- Fixed a deprecation warning message to refer to ``maybe_async()`` and not
  ``maybe_awaitable()`` (PR by Thomas Grainger)
- Filled in argument and return types for all functions and methods previously missing them
  (PR by Thomas Grainger)

**3.0.1**

- Fixed ``to_thread.run_sync()`` raising ``RuntimeError`` on asyncio when no "root" task could
  be found for setting up a cleanup callback. This was a problem at least on Tornado and
  possibly also Twisted in asyncio compatibility mode. The life of worker threads is now bound
  to the host task of the topmost cancel scope hierarchy starting from the current one, or if
  no cancel scope is active, the current task.

**3.0.0**

- Curio support has been dropped (see the :doc:`FAQ <faq>` for why)
- API changes:

  * **BACKWARDS INCOMPATIBLE** Submodules under ``anyio.abc.`` have been made private (use only
    ``anyio.abc`` from now on).
  * **BACKWARDS INCOMPATIBLE** The following method was previously a coroutine method and has
    been converted into a synchronous one:

    * ``MemoryObjectReceiveStream.receive_nowait()``
  * The following functions and methods are no longer asynchronous but can still be awaited on
    (doing so will emit a deprecation warning):

    * ``current_time()``
    * ``current_effective_deadline()``
    * ``get_current_task()``
    * ``get_running_tasks()``
    * ``CancelScope.cancel()``
    * ``CapacityLimiter.acquire_nowait()``
    * ``CapacityLimiter.acquire_on_behalf_of_nowait()``
    * ``Condition.release()``
    * ``Event.set()``
    * ``Lock.release()``
    * ``MemoryObjectSendStream.send_nowait()``
    * ``Semaphore.release()``
  * The following functions now return synchronous context managers instead of asynchronous
    context managers (and emit deprecation warnings if used as async context managers):

    * ``fail_after()``
    * ``move_on_after()``
    * ``open_cancel_scope()`` (now just ``CancelScope()``; see below)
    * ``open_signal_receiver()``
  * The following functions and methods have been renamed/moved (will now emit deprecation
    warnings when you use them by their old names):

    * ``create_blocking_portal()`` → ``anyio.from_thread.BlockingPortal()``
    * ``create_capacity_limiter()`` → ``anyio.CapacityLimiter()``
    * ``create_event()`` → ``anyio.Event()``
    * ``create_lock()`` → ``anyio.Lock()``
    * ``create_condition()`` → ``anyio.Condition()``
    * ``create_semaphore()`` → ``anyio.Semaphore()``
    * ``current_default_worker_thread_limiter()`` →
      ``anyio.to_thread.current_default_thread_limiter()``
    * ``open_cancel_scope()`` → ``anyio.CancelScope()``
    * ``run_sync_in_worker_thread()`` → ``anyio.to_thread.run_sync()``
    * ``run_async_from_thread()`` → ``anyio.from_thread.run()``
    * ``run_sync_from_thread()`` → ``anyio.from_thread.run_sync()``
    * ``BlockingPortal.spawn_task`` → ``BlockingPortal.start_task_soon``
    * ``CapacityLimiter.set_total_tokens()`` → ``limiter.total_tokens = ...``
    * ``TaskGroup.spawn()`` → ``TaskGroup.start_soon()``
  * **BACKWARDS INCOMPATIBLE** ``start_blocking_portal()`` must now be used as a context
    manager (it no longer returns a BlockingPortal, but a context manager that yields one)
  * **BACKWARDS INCOMPATIBLE** The ``BlockingPortal.stop_from_external_thread()`` method was
    removed (use ``portal.call(portal.stop)`` instead now)
  * **BACKWARDS INCOMPATIBLE** The ``SocketStream`` and ``SocketListener`` classes were made
    non-generic
  * Made all non-frozen dataclasses hashable with ``eq=False``
  * Removed ``__slots__`` from ``BlockingPortal``

  See the :doc:`migration documentation <migration>` for instructions on how to deal with these
  changes.

- Improvements to running synchronous code:

  * Added the ``run_sync_from_thread()`` function
  * Added the ``run_sync_in_process()`` function for running code in worker processes (big
    thanks to Richard Sheridan for his help on this one!)
- Improvements to sockets and streaming: * Added the ``UNIXSocketStream`` class which is capable of sending and receiving file descriptors * Added the ``FileReadStream`` and ``FileWriteStream`` classes * ``create_unix_listener()`` now removes any existing socket at the given path before proceeding (instead of raising ``OSError: Address already in use``) - Improvements to task groups and cancellation: * Added the ``TaskGroup.start()`` method and a corresponding ``BlockingPortal.start_task()`` method * Added the ``name`` argument to ``BlockingPortal.start_task_soon()`` (renamed from ``BlockingPortal.spawn_task()``) * Changed ``CancelScope.deadline`` to be writable * Added the following functions in the ``anyio.lowlevel`` module: * ``checkpoint()`` * ``checkpoint_if_cancelled()`` * ``cancel_shielded_checkpoint()`` - Improvements and changes to synchronization primitives: * Added the ``Lock.acquire_nowait()``, ``Condition.acquire_nowait()`` and ``Semaphore.acquire_nowait()`` methods * Added the ``statistics()`` method to ``Event``, ``Lock``, ``Condition``, ``Semaphore``, ``CapacityLimiter``, ``MemoryObjectReceiveStream`` and ``MemoryObjectSendStream`` * ``Lock`` and ``Condition`` can now only be released by the task that acquired them. This behavior is now consistent on all backends whereas previously only Trio enforced this. * The ``CapacityLimiter.total_tokens`` property is now writable and ``CapacityLimiter.set_total_tokens()`` has been deprecated * Added the ``max_value`` property to ``Semaphore`` - Asyncio specific improvements (big thanks to Thomas Grainger for his effort on most of these!): * Cancel scopes are now properly enforced with native asyncio coroutine functions (without any explicit AnyIO checkpoints) * Changed the asyncio ``CancelScope`` to raise a ``RuntimeError`` if a cancel scope is being exited before it was even entered * Changed the asyncio test runner to capture unhandled exceptions from asynchronous callbacks and unbound native tasks which are then raised after the test function (or async fixture setup or teardown) completes * Changed the asyncio ``TaskGroup.start_soon()`` (formerly ``spawn()``) method to call the target function immediately before starting the task, for consistency across backends * Changed the asyncio ``TaskGroup.start_soon()`` (formerly ``spawn()``) method to avoid the use of a coroutine wrapper on Python 3.8+ and added a hint for hiding the wrapper in tracebacks on earlier Pythons (supported by Pytest, Sentry etc.) 
* Changed the default thread limiter on asyncio to use a ``RunVar`` so it is scoped to the current event loop, thus avoiding potential conflict among multiple running event loops * Thread pooling is now used on asyncio with ``run_sync_in_worker_thread()`` * Fixed ``current_effective_deadline()`` raising ``KeyError`` on asyncio when no cancel scope is active - Added the ``RunVar`` class for scoping variables to the running event loop **2.2.0** - Added the ``maybe_async()`` and ``maybe_async_cm()`` functions to facilitate forward compatibility with AnyIO 3 - Fixed socket stream bug on asyncio where receiving a half-close from the peer would shut down the entire connection - Fixed native task names not being set on asyncio on Python 3.8+ - Fixed ``TLSStream.send_eof()`` raising ``ValueError`` instead of the expected ``NotImplementedError`` - Fixed ``open_signal_receiver()`` on asyncio and curio hanging if the cancel scope was cancelled before the function could run - Fixed Trio test runner causing unwarranted test errors on ``BaseException`` (PR by Matthias Urlichs) - Fixed formatted output of ``ExceptionGroup`` containing too many newlines **2.1.0** - Added the ``spawn_task()`` and ``wrap_async_context_manager()`` methods to ``BlockingPortal`` - Added the ``handshake_timeout`` and ``error_handler`` parameters to ``TLSListener`` - Fixed ``Event`` objects on the trio backend not inheriting from ``anyio.abc.Event`` - Fixed ``run_sync_in_worker_thread()`` raising ``UnboundLocalError`` on asyncio when cancelled - Fixed ``send()`` on socket streams not raising any exception on asyncio, and an unwrapped ``BrokenPipeError`` on trio and curio when the peer has disconnected - Fixed ``MemoryObjectSendStream.send()`` raising ``BrokenResourceError`` when the last receiver is closed right after receiving the item - Fixed ``ValueError: Invalid file descriptor: -1`` when closing a ``SocketListener`` on asyncio **2.0.2** - Fixed one more case of ``AttributeError: 'async_generator_asend' object has no attribute 'cr_await'`` on asyncio **2.0.1** - Fixed broken ``MultiListener.extra()`` (PR by daa) - Fixed ``TLSStream`` returning an empty bytes object instead of raising ``EndOfStream`` when trying to receive from the stream after a closing handshake - Fixed ``AttributeError`` when cancelling a task group's scope inside an async test fixture on asyncio - Fixed ``wait_all_tasks_blocked()`` raising ``AttributeError`` on asyncio if a native task is waiting on an async generator's ``asend()`` method **2.0.0** - General new features: - Added support for subprocesses - Added support for "blocking portals" which allow running functions in the event loop thread from external threads - Added the ``anyio.aclose_forcefully()`` function for closing asynchronous resources as quickly as possible - General changes/fixes: - **BACKWARDS INCOMPATIBLE** Some functions have been renamed or removed (see further below for socket/fileio API changes): - ``finalize()`` → (removed; use ``contextlib.aclosing()`` instead) - ``receive_signals()`` → ``open_signal_receiver()`` - ``run_in_thread()`` → ``run_sync_in_worker_thread()`` - ``current_default_thread_limiter()`` → ``current_default_worker_thread_limiter()`` - ``ResourceBusyError`` → ``BusyResourceError`` - **BACKWARDS INCOMPATIBLE** Exception classes were moved to the top level package - Dropped support for Python 3.5 - Bumped minimum versions of trio and curio to v0.16 and v1.4, respectively - Changed the ``repr()`` of ``ExceptionGroup`` to match trio's ``MultiError`` - Backend 
specific changes and fixes:

  - ``asyncio``: Added support for ``ProactorEventLoop``. This allows asyncio applications to
    use AnyIO on Windows even without using AnyIO as the entry point.
  - ``asyncio``: The asyncio backend now uses ``asyncio.run()`` behind the scenes which
    properly shuts down async generators and cancels any leftover native tasks
  - ``curio``: Worked around the limitation where a task can only be cancelled twice (any
    cancellations beyond that were ignored)
  - ``asyncio`` + ``curio``: a cancellation check now calls ``sleep(0)``, allowing the
    scheduler to switch to a different task
  - ``asyncio`` + ``curio``: Host name resolution now uses `IDNA 2008`_ (with UTS 46
    compatibility mapping, just like trio)
  - ``asyncio`` + ``curio``: Fixed a bug where a task group would abandon its subtasks if its
    own cancel scope was cancelled while it was waiting for subtasks to finish
  - ``asyncio`` + ``curio``: Fixed recursive tracebacks when a single exception from an inner
    task group is reraised in an outer task group

- Socket/stream changes:

  - **BACKWARDS INCOMPATIBLE** The stream class structure was completely overhauled. There are
    now separate abstract base classes for receive and send streams, byte streams and reliable
    and unreliable object streams. Stream wrappers are much better supported by this new ABC
    structure and a new "typed extra attribute" system that lets you query the wrapper chain
    for the attributes you want via ``.extra(...)``.
  - **BACKWARDS INCOMPATIBLE** Socket server functionality has been refactored into a
    network-agnostic listener system
  - **BACKWARDS INCOMPATIBLE** TLS functionality has been split off from ``SocketStream`` and
    can now work over any bidirectional bytes-based stream – you can now establish a TLS
    encrypted communications pathway over UNIX sockets or even memory object streams. The
    ``TLSRequired`` exception has also been removed as it is no longer necessary.
  - **BACKWARDS INCOMPATIBLE** Buffering functionality (``receive_until()`` and
    ``receive_exactly()``) was split off from ``SocketStream`` into a stream wrapper class
    (``anyio.streams.buffered.BufferedByteReceiveStream``)
  - **BACKWARDS INCOMPATIBLE** IPv6 addresses are now reported as 2-tuples. If the original
    4-tuple form contains a nonzero scope ID, it is appended to the address with ``%`` as the
    separator.
  - **BACKWARDS INCOMPATIBLE** Byte streams (including socket streams) now raise
    ``EndOfStream`` instead of returning an empty bytes object when the stream has been closed
    from the other end
  - **BACKWARDS INCOMPATIBLE** The socket API has changes:

    - ``create_tcp_server()`` → ``create_tcp_listener()``
    - ``create_unix_server()`` → ``create_unix_listener()``
    - ``create_udp_socket()`` had some of its parameters changed:

      - ``interface`` → ``local_address``
      - ``port`` → ``local_port``
      - ``reuse_address`` was replaced with ``reuse_port`` (and sets ``SO_REUSEPORT`` instead
        of ``SO_REUSEADDR``)
    - ``connect_tcp()`` had some of its parameters changed:

      - ``address`` → ``remote_address``
      - ``port`` → ``remote_port``
      - ``bind_host`` → ``local_address``
      - ``bind_port`` → (removed)
      - ``autostart_tls`` → ``tls``
      - ``tls_hostname`` (new parameter, for when you want to match the certificate against
        something other than ``remote_address``)
    - ``connect_tcp()`` now returns a ``TLSStream`` if TLS was enabled
    - ``notify_socket_closing()`` was removed, as it is no longer used by AnyIO
    - ``SocketStream`` has changes to its methods and attributes:

      - ``address`` → ``.extra(SocketAttribute.local_address)``
      - ``alpn_protocol`` → ``.extra(TLSAttribute.alpn_protocol)``
      - ``close()`` → ``aclose()``
      - ``get_channel_binding`` → ``.extra(TLSAttribute.channel_binding_tls_unique)``
      - ``cipher`` → ``.extra(TLSAttribute.cipher)``
      - ``getpeercert`` → ``.extra(SocketAttribute.peer_certificate)`` or
        ``.extra(SocketAttribute.peer_certificate_binary)``
      - ``getsockopt()`` → ``.extra(SocketAttribute.raw_socket).getsockopt(...)``
      - ``peer_address`` → ``.extra(SocketAttribute.remote_address)``
      - ``receive_chunks()`` → (removed; use ``async for`` on the stream instead)
      - ``receive_delimited_chunks()`` → (removed)
      - ``receive_exactly()`` → ``BufferedReceiveStream.receive_exactly()``
      - ``receive_some()`` → ``receive()``
      - ``receive_until()`` → ``BufferedReceiveStream.receive_until()``
      - ``send_all()`` → ``send()``
      - ``setsockopt()`` → ``.extra(SocketAttribute.raw_socket).setsockopt(...)``
      - ``shared_ciphers`` → ``.extra(TLSAttribute.shared_ciphers)``
      - ``server_side`` → ``.extra(TLSAttribute.server_side)``
      - ``start_tls()`` → ``stream = TLSStream.wrap(...)``
      - ``tls_version`` → ``.extra(TLSAttribute.tls_version)``
    - ``UDPSocket`` has changes to its methods and attributes:

      - ``address`` → ``.extra(SocketAttribute.local_address)``
      - ``getsockopt()`` → ``.extra(SocketAttribute.raw_socket).getsockopt(...)``
      - ``port`` → ``.extra(SocketAttribute.local_port)``
      - ``receive()`` no longer takes a maximum bytes argument
      - ``receive_packets()`` → (removed; use ``async for`` on the UDP socket instead)
      - ``send()`` → requires a tuple for destination now (address, port), for compatibility
        with the new ``UnreliableObjectStream`` interface. The ``sendto()`` method works like
        the old ``send()`` method.
      - ``setsockopt()`` → ``.extra(SocketAttribute.raw_socket).setsockopt(...)``
  - **BACKWARDS INCOMPATIBLE** Renamed the ``max_size`` parameter to ``max_bytes`` wherever it
    occurred (this was inconsistently named ``max_bytes`` in some subclasses before)
  - Added memory object streams as a replacement for queues
  - Added stream wrappers for encoding/decoding unicode strings
  - Support for the ``SO_REUSEPORT`` option (allows binding more than one socket to the same
    address/port combination, as long as they all have this option set) has been added to TCP
    listeners and UDP sockets
  - The ``send_eof()`` method was added to all (bidirectional) streams

- File I/O changes:

  - **BACKWARDS INCOMPATIBLE** Asynchronous file I/O functionality now uses a common code base
    (``anyio.AsyncFile``) instead of backend-native classes
  - **BACKWARDS INCOMPATIBLE** The File I/O API has changes to its functions and methods:

    - ``aopen()`` → ``open_file()``
    - ``AsyncFile.close()`` → ``AsyncFile.aclose()``

- Task synchronization changes:

  - **BACKWARDS INCOMPATIBLE** Queues were replaced by memory object streams
  - **BACKWARDS INCOMPATIBLE** Added the ``acquire()`` and ``release()`` methods to the
    ``Lock``, ``Condition`` and ``Semaphore`` classes
  - **BACKWARDS INCOMPATIBLE** Removed the ``Event.clear()`` method. You must now replace the
    event object with a new one rather than clear the old one.
  - Fixed ``Condition.wait()`` not working on asyncio and curio (PR by Matt Westcott)

- Testing changes:

  - **BACKWARDS INCOMPATIBLE** Removed the ``--anyio-backends`` command line option for the
    pytest plugin. Use the ``-k`` option to do ad-hoc filtering, and the ``anyio_backend``
    fixture to control which backends you wish to run the tests by default.
  - The pytest plugin was refactored to run the test and all its related async fixtures inside
    the same event loop, making async fixtures much more useful
  - Fixed Hypothesis support in the pytest plugin (it was not actually running the Hypothesis
    tests at all)

..
_IDNA 2008: https://tools.ietf.org/html/rfc5895

**1.4.0**

- Added async name resolution functions (``anyio.getaddrinfo()`` and ``anyio.getnameinfo()``)
- Added the ``family`` and ``reuse_address`` parameters to ``anyio.create_udp_socket()``
  (enables multicast support; test contributed by Matthias Urlichs)
- Fixed ``fail_after(0)`` not raising a timeout error on asyncio and curio
- Fixed ``move_on_after()`` and ``fail_after()`` getting stuck on curio in some circumstances
- Fixed socket operations not allowing timeouts to cancel the task
- Fixed API documentation on ``Stream.receive_until()`` which claimed that the delimiter will
  be included in the returned data when it really isn't
- Harmonized the default task names across all backends
- ``wait_all_tasks_blocked()`` no longer considers tasks waiting on ``sleep(0)`` to be blocked
  on asyncio and curio
- Fixed the type of the ``address`` parameter in ``UDPSocket.send()`` to include ``IPAddress``
  objects (which were already supported by the backing implementation)
- Fixed ``UDPSocket.send()`` to resolve host names using ``anyio.getaddrinfo()`` before calling
  ``socket.sendto()`` to avoid blocking on synchronous name resolution
- Switched to using ``anyio.getaddrinfo()`` for name lookups

**1.3.1**

- Fixed warnings caused by trio 0.15
- Worked around a compatibility issue between uvloop and Python 3.9 (missing
  ``shutdown_default_executor()`` method)

**1.3.0**

- Fixed compatibility with Curio 1.0
- Made it possible to exert fine-grained control over which AnyIO backends and backend options
  are being used with each test
- Added the ``address`` and ``peer_address`` properties to the ``SocketStream`` interface

**1.2.3**

- Repackaged release (v1.2.2 contained extra files from an experimental branch which broke
  imports)

**1.2.2**

- Fixed ``CancelledError`` leaking from a cancel scope on asyncio if the task previously
  received a cancellation exception
- Fixed ``AttributeError`` when cancelling a generator-based task (asyncio)
- Fixed ``wait_all_tasks_blocked()`` not working with generator-based tasks (asyncio)
- Fixed an unnecessary delay in ``connect_tcp()`` if an earlier attempt succeeds
- Fixed ``AssertionError`` in ``connect_tcp()`` if multiple connection attempts succeed
  simultaneously

**1.2.1**

- Fixed cancellation errors leaking from a task group when they are contained in an exception
  group
- Fixed trio v0.13 compatibility on Windows
- Fixed inconsistent queue capacity across backends when capacity was defined as 0
  (trio = 0, others = infinite)
- Fixed socket creation failure crashing ``connect_tcp()``

**1.2.0**

- Added the possibility to parametrize regular pytest test functions against the selected list
  of backends
- Added the ``set_total_tokens()`` method to ``CapacityLimiter``
- Added the ``anyio.current_default_thread_limiter()`` function
- Added the ``cancellable`` parameter to ``anyio.run_in_thread()``
- Implemented the Happy Eyeballs (:rfc:`6555`) algorithm for ``anyio.connect_tcp()``
- Fixed ``KeyError`` on asyncio and curio where entering and exiting a cancel scope happens in
  different tasks
- Fixed deprecation warnings on Python 3.8 about the ``loop`` argument of ``asyncio.Event()``
- Forced the use of ``WindowsSelectorEventLoopPolicy`` in ``asyncio.run`` when on Windows and
  using asyncio, to keep network functionality working
- Worker threads are now spawned with ``daemon=True`` on all backends, not just trio
- Dropped support for trio v0.11

**1.1.0**

- Added the ``lock`` parameter to ``anyio.create_condition()`` (PR by Matthias Urlichs)
-
Added async iteration for queues (PR by Matthias Urlichs) - Added capacity limiters - Added the possibility of using capacity limiters for limiting the maximum number of threads - Fixed compatibility with trio v0.12 - Fixed IPv6 support in ``create_tcp_server()``, ``connect_tcp()`` and ``create_udp_socket()`` - Fixed mishandling of task cancellation while the task is running a worker thread on asyncio and curio **1.0.0** - Fixed pathlib2_ compatibility with ``anyio.aopen()`` - Fixed timeouts not propagating from nested scopes on asyncio and curio (PR by Matthias Urlichs) - Fixed incorrect call order in socket close notifications on asyncio (mostly affecting Windows) - Prefixed backend module names with an underscore to better indicate privateness .. _pathlib2: https://pypi.org/project/pathlib2/ **1.0.0rc2** - Fixed some corner cases of cancellation where behavior on asyncio and curio did not match with that of trio. Thanks to Joshua Oreman for help with this. - Fixed ``current_effective_deadline()`` not taking shielded cancellation scopes into account on asyncio and curio - Fixed task cancellation not happening right away on asyncio and curio when a cancel scope is entered when the deadline has already passed - Fixed exception group containing only cancellation exceptions not being swallowed by a timed out cancel scope on asyncio and curio - Added the ``current_time()`` function - Replaced ``CancelledError`` with ``get_cancelled_exc_class()`` - Added support for Hypothesis_ - Added support for :pep:`561` - Use uvloop for the asyncio backend by default when available (but only on CPython) .. _Hypothesis: https://hypothesis.works/ **1.0.0rc1** - Fixed ``setsockopt()`` passing options to the underlying method in the wrong manner - Fixed cancellation propagation from nested task groups - Fixed ``get_running_tasks()`` returning tasks from other event loops - Added the ``parent_id`` attribute to ``anyio.TaskInfo`` - Added the ``get_current_task()`` function - Added guards to protect against concurrent read/write from/to sockets by multiple tasks - Added the ``notify_socket_close()`` function **1.0.0b2** - Added introspection of running tasks via ``anyio.get_running_tasks()`` - Added the ``getsockopt()`` and ``setsockopt()`` methods to the ``SocketStream`` API - Fixed mishandling of large buffers by ``BaseSocket.sendall()`` - Fixed compatibility with (and upgraded minimum required version to) trio v0.11 **1.0.0b1** - Initial release anyio-3.5.0/pyproject.toml000066400000000000000000000026041416724134300155610ustar00rootroot00000000000000[build-system] requires = [ "setuptools >= 42", "wheel >= 0.29.0", "setuptools_scm[toml] >= 3.4" ] build-backend = "setuptools.build_meta" [tool.setuptools_scm] version_scheme = "post-release" local_scheme = "dirty-tag" [tool.isort] src_paths = ["src"] skip_gitignore = true line_length = 99 multi_line_output = 4 [tool.autopep8] max_line_length = 99 [tool.flake8] max-line-length = 99 [tool.mypy] python_version = "3.9" strict = true ignore_missing_imports = true disallow_any_generics = false warn_return_any = false disallow_untyped_decorators = false disallow_subclassing_any = false show_error_codes = true [tool.pytest.ini_options] addopts = "-rsx --tb=short --strict-config --strict-markers -p anyio -p no:asyncio" testpaths = ["tests"] # Ignore resource warnings due to a CPython/Windows bug (https://bugs.python.org/issue44428) filterwarnings = [ "error", "ignore:unclosed = 3.6.2 zip_safe = False install_requires = contextvars; python_version < '3.7' dataclasses; 
anyio-3.5.0/pyproject.toml000066400000000000000000000026041416724134300155610ustar00rootroot00000000000000[build-system]
requires = [
    "setuptools >= 42",
    "wheel >= 0.29.0",
    "setuptools_scm[toml] >= 3.4"
]
build-backend = "setuptools.build_meta"

[tool.setuptools_scm]
version_scheme = "post-release"
local_scheme = "dirty-tag"

[tool.isort]
src_paths = ["src"]
skip_gitignore = true
line_length = 99
multi_line_output = 4

[tool.autopep8]
max_line_length = 99

[tool.flake8]
max-line-length = 99

[tool.mypy]
python_version = "3.9"
strict = true
ignore_missing_imports = true
disallow_any_generics = false
warn_return_any = false
disallow_untyped_decorators = false
disallow_subclassing_any = false
show_error_codes = true

[tool.pytest.ini_options]
addopts = "-rsx --tb=short --strict-config --strict-markers -p anyio -p no:asyncio"
testpaths = ["tests"]
# Ignore resource warnings due to a CPython/Windows bug (https://bugs.python.org/issue44428)
filterwarnings = [
    "error",
    "ignore:unclosed

python_requires = >= 3.6.2
zip_safe = False
install_requires =
    contextvars; python_version < '3.7'
    dataclasses; python_version < '3.7'
    idna >= 2.8
    sniffio >= 1.1
    typing_extensions; python_version < '3.8'

[options.packages.find]
where = src

[options.package_data]
anyio = py.typed

[options.extras_require]
test =
    mock >= 4; python_version < '3.8'
    contextlib2; python_version < '3.7'
    coverage[toml] >= 4.5
    hypothesis >= 4.0
    pytest >= 6.0
    pytest-mock >= 3.6.1
    trustme
    uvloop < 0.15; python_version < '3.7' and (platform_python_implementation == 'CPython' and platform_system != 'Windows')
    uvloop >= 0.15; python_version >= '3.7' and (platform_python_implementation == 'CPython' and platform_system != 'Windows')
trio = trio >= 0.16
doc =
    packaging
    sphinx_rtd_theme
    sphinx-autodoc-typehints >= 1.2.0

[options.entry_points]
pytest11 =
    anyio = anyio.pytest_plugin

anyio-3.5.0/setup.py000066400000000000000000000001471416724134300143570ustar00rootroot00000000000000from setuptools import setup

setup(
    use_scm_version=True,
    setup_requires=['setuptools_scm']
)
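The ``pytest11`` entry point above is what registers AnyIO's pytest plugin when the package is
installed. A minimal sketch of a test module that relies on it (the test function itself is
hypothetical; the ``anyio`` marker is provided by the plugin and runs the coroutine on the
selected backend)::

    import pytest

    import anyio

    @pytest.mark.anyio
    async def test_sleep_completes():
        # Runs on the backend chosen by the anyio_backend fixture (asyncio by default)
        await anyio.sleep(0)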
anyio-3.5.0/src/000077500000000000000000000000001416724134300134325ustar00rootroot00000000000000anyio-3.5.0/src/anyio/000077500000000000000000000000001416724134300145515ustar00rootroot00000000000000anyio-3.5.0/src/anyio/__init__.py000066400000000000000000000074031416724134300166660ustar00rootroot00000000000000__all__ = (
    'maybe_async', 'maybe_async_cm', 'run', 'sleep', 'sleep_forever', 'sleep_until',
    'current_time', 'get_all_backends', 'get_cancelled_exc_class',
    'BrokenResourceError', 'BrokenWorkerProcess', 'BusyResourceError', 'ClosedResourceError',
    'DelimiterNotFound', 'EndOfStream', 'ExceptionGroup', 'IncompleteRead',
    'TypedAttributeLookupError', 'WouldBlock',
    'AsyncFile', 'Path', 'open_file', 'wrap_file',
    'aclose_forcefully',
    'open_signal_receiver',
    'connect_tcp', 'connect_unix', 'create_tcp_listener', 'create_unix_listener',
    'create_udp_socket', 'create_connected_udp_socket', 'getaddrinfo', 'getnameinfo',
    'wait_socket_readable', 'wait_socket_writable',
    'create_memory_object_stream',
    'run_process', 'open_process',
    'create_lock', 'CapacityLimiter', 'CapacityLimiterStatistics', 'Condition',
    'ConditionStatistics', 'Event', 'EventStatistics', 'Lock', 'LockStatistics', 'Semaphore',
    'SemaphoreStatistics', 'create_condition', 'create_event', 'create_semaphore',
    'create_capacity_limiter',
    'open_cancel_scope', 'fail_after', 'move_on_after', 'current_effective_deadline',
    'TASK_STATUS_IGNORED', 'CancelScope', 'create_task_group',
    'TaskInfo', 'get_current_task', 'get_running_tasks', 'wait_all_tasks_blocked',
    'run_sync_in_worker_thread', 'run_async_from_thread', 'run_sync_from_thread',
    'current_default_worker_thread_limiter', 'create_blocking_portal',
    'start_blocking_portal',
    'typed_attribute', 'TypedAttributeSet', 'TypedAttributeProvider'
)

from typing import Any

from ._core._compat import maybe_async, maybe_async_cm
from ._core._eventloop import (
    current_time, get_all_backends, get_cancelled_exc_class, run, sleep, sleep_forever,
    sleep_until)
from ._core._exceptions import (
    BrokenResourceError, BrokenWorkerProcess, BusyResourceError, ClosedResourceError,
    DelimiterNotFound, EndOfStream, ExceptionGroup, IncompleteRead, TypedAttributeLookupError,
    WouldBlock)
from ._core._fileio import AsyncFile, Path, open_file, wrap_file
from ._core._resources import aclose_forcefully
from ._core._signals import open_signal_receiver
from ._core._sockets import (
    connect_tcp, connect_unix, create_connected_udp_socket, create_tcp_listener,
    create_udp_socket, create_unix_listener, getaddrinfo, getnameinfo, wait_socket_readable,
    wait_socket_writable)
from ._core._streams import create_memory_object_stream
from ._core._subprocesses import open_process, run_process
from ._core._synchronization import (
    CapacityLimiter, CapacityLimiterStatistics, Condition, ConditionStatistics, Event,
    EventStatistics, Lock, LockStatistics, Semaphore, SemaphoreStatistics,
    create_capacity_limiter, create_condition, create_event, create_lock, create_semaphore)
from ._core._tasks import (
    TASK_STATUS_IGNORED, CancelScope, create_task_group, current_effective_deadline,
    fail_after, move_on_after, open_cancel_scope)
from ._core._testing import TaskInfo, get_current_task, get_running_tasks, wait_all_tasks_blocked
from ._core._typedattr import TypedAttributeProvider, TypedAttributeSet, typed_attribute

# Re-exported here, for backwards compatibility
# isort: off
from .to_thread import current_default_worker_thread_limiter, run_sync_in_worker_thread
from .from_thread import (
    create_blocking_portal, run_async_from_thread, run_sync_from_thread, start_blocking_portal)

# Re-export imports so they look like they live directly in this package
key: str
value: Any
for key, value in list(locals().items()):
    if getattr(value, '__module__', '').startswith('anyio.'):
        value.__module__ = __name__
anyio-3.5.0/src/anyio/_backends/000077500000000000000000000000001416724134300164625ustar00rootroot00000000000000anyio-3.5.0/src/anyio/_backends/__init__.py000066400000000000000000000000001416724134300205610ustar00rootroot00000000000000anyio-3.5.0/src/anyio/_backends/_asyncio.py000066400000000000000000002015331416724134300206440ustar00rootroot00000000000000import array
import asyncio
import concurrent.futures
import math
import socket
import sys
from asyncio.base_events import _run_until_complete_cb  # type: ignore[attr-defined]
from collections import OrderedDict, deque
from concurrent.futures import Future
from contextvars import Context, copy_context
from dataclasses import dataclass
from functools import partial, wraps
from inspect import (
    CORO_RUNNING, CORO_SUSPENDED, GEN_RUNNING, GEN_SUSPENDED, getcoroutinestate,
    getgeneratorstate)
from io import IOBase
from os import PathLike
from queue import Queue
from socket import AddressFamily, SocketKind
from threading import Thread
from types import TracebackType
from typing import (
    Any, Awaitable, Callable, Collection, Coroutine, Deque, Dict, Generator, Iterable, List,
    Mapping, Optional, Sequence, Set, Tuple, Type, TypeVar, Union, cast)
from weakref import WeakKeyDictionary

import sniffio

from ..
import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable from .._core._eventloop import claim_worker_thread, threadlocals from .._core._exceptions import ( BrokenResourceError, BusyResourceError, ClosedResourceError, EndOfStream) from .._core._exceptions import ExceptionGroup as BaseExceptionGroup from .._core._exceptions import WouldBlock from .._core._sockets import GetAddrInfoReturnType, convert_ipv6_sockaddr from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter from .._core._synchronization import Event as BaseEvent from .._core._synchronization import ResourceGuard from .._core._tasks import CancelScope as BaseCancelScope from ..abc import IPSockAddrType, UDPPacketType from ..lowlevel import RunVar if sys.version_info >= (3, 8): get_coro = asyncio.Task.get_coro else: def get_coro(task: asyncio.Task) -> Union[Generator, Awaitable[Any]]: return task._coro if sys.version_info >= (3, 7): from asyncio import all_tasks, create_task, current_task, get_running_loop from asyncio import run as native_run def _get_task_callbacks(task: asyncio.Task) -> Iterable[Callable]: return [cb for cb, context in task._callbacks] # type: ignore[attr-defined] else: _T = TypeVar('_T') def _get_task_callbacks(task: asyncio.Task) -> Iterable[Callable]: return task._callbacks def native_run(main, *, debug=False): # Snatched from Python 3.7 from asyncio import coroutines, events, tasks def _cancel_all_tasks(loop): to_cancel = all_tasks(loop) if not to_cancel: return for task in to_cancel: task.cancel() loop.run_until_complete( tasks.gather(*to_cancel, loop=loop, return_exceptions=True)) for task in to_cancel: if task.cancelled(): continue if task.exception() is not None: loop.call_exception_handler({ 'message': 'unhandled exception during asyncio.run() shutdown', 'exception': task.exception(), 'task': task, }) if events._get_running_loop() is not None: raise RuntimeError( "asyncio.run() cannot be called from a running event loop") if not coroutines.iscoroutine(main): raise ValueError(f"a coroutine was expected, got {main!r}") loop = events.new_event_loop() try: events.set_event_loop(loop) loop.set_debug(debug) return loop.run_until_complete(main) finally: try: _cancel_all_tasks(loop) loop.run_until_complete(loop.shutdown_asyncgens()) finally: events.set_event_loop(None) loop.close() def create_task(coro: Union[Generator[Any, None, _T], Awaitable[_T]], *, name: object = None) -> asyncio.Task: return get_running_loop().create_task(coro) def get_running_loop() -> asyncio.AbstractEventLoop: loop = asyncio._get_running_loop() if loop is not None: return loop else: raise RuntimeError('no running event loop') def all_tasks(loop: Optional[asyncio.AbstractEventLoop] = None) -> Set[asyncio.Task]: """Return a set of all tasks for the loop.""" from asyncio import Task if loop is None: loop = get_running_loop() return {t for t in Task.all_tasks(loop) if not t.done()} def current_task(loop: Optional[asyncio.AbstractEventLoop] = None) -> Optional[asyncio.Task]: if loop is None: loop = get_running_loop() return asyncio.Task.current_task(loop) T_Retval = TypeVar('T_Retval') # Check whether there is native support for task names in asyncio (3.8+) _native_task_names = hasattr(asyncio.Task, 'get_name') _root_task: RunVar[Optional[asyncio.Task]] = RunVar('_root_task') def find_root_task() -> asyncio.Task: root_task = _root_task.get(None) if root_task is not None and not root_task.done(): return root_task # Look for a task that 
has been started via run_until_complete() for task in all_tasks(): if task._callbacks and not task.done(): for cb in _get_task_callbacks(task): if (cb is _run_until_complete_cb or getattr(cb, '__module__', None) == 'uvloop.loop'): _root_task.set(task) return task # Look up the topmost task in the AnyIO task tree, if possible task = cast(asyncio.Task, current_task()) state = _task_states.get(task) if state: cancel_scope = state.cancel_scope while cancel_scope and cancel_scope._parent_scope is not None: cancel_scope = cancel_scope._parent_scope if cancel_scope is not None: return cast(asyncio.Task, cancel_scope._host_task) return task def get_callable_name(func: Callable) -> str: module = getattr(func, '__module__', None) qualname = getattr(func, '__qualname__', None) return '.'.join([x for x in (module, qualname) if x]) # # Event loop # _run_vars = WeakKeyDictionary() # type: WeakKeyDictionary[asyncio.AbstractEventLoop, Any] current_token = get_running_loop def _task_started(task: asyncio.Task) -> bool: """Return ``True`` if the task has been started and has not finished.""" coro = get_coro(task) try: return getcoroutinestate(coro) in (CORO_RUNNING, CORO_SUSPENDED) except AttributeError: try: return getgeneratorstate(cast(Generator, coro)) in (GEN_RUNNING, GEN_SUSPENDED) except AttributeError: # task coro is async_genenerator_asend https://bugs.python.org/issue37771 raise Exception(f"Cannot determine if task {task} has started or not") def _maybe_set_event_loop_policy(policy: Optional[asyncio.AbstractEventLoopPolicy], use_uvloop: bool) -> None: # On CPython, use uvloop when possible if no other policy has been given and if not # explicitly disabled if policy is None and use_uvloop and sys.implementation.name == 'cpython': try: import uvloop except ImportError: pass else: # Test for missing shutdown_default_executor() (uvloop 0.14.0 and earlier) if (not hasattr(asyncio.AbstractEventLoop, 'shutdown_default_executor') or hasattr(uvloop.loop.Loop, 'shutdown_default_executor')): policy = uvloop.EventLoopPolicy() if policy is not None: asyncio.set_event_loop_policy(policy) def run(func: Callable[..., Awaitable[T_Retval]], *args: object, debug: bool = False, use_uvloop: bool = False, policy: Optional[asyncio.AbstractEventLoopPolicy] = None) -> T_Retval: @wraps(func) async def wrapper() -> T_Retval: task = cast(asyncio.Task, current_task()) task_state = TaskState(None, get_callable_name(func), None) _task_states[task] = task_state if _native_task_names: task.set_name(task_state.name) try: return await func(*args) finally: del _task_states[task] _maybe_set_event_loop_policy(policy, use_uvloop) return native_run(wrapper(), debug=debug) # # Miscellaneous # sleep = asyncio.sleep # # Timeouts and cancellation # CancelledError = asyncio.CancelledError class CancelScope(BaseCancelScope): def __new__(cls, *, deadline: float = math.inf, shield: bool = False) -> "CancelScope": return object.__new__(cls) def __init__(self, deadline: float = math.inf, shield: bool = False): self._deadline = deadline self._shield = shield self._parent_scope: Optional[CancelScope] = None self._cancel_called = False self._active = False self._timeout_handle: Optional[asyncio.TimerHandle] = None self._cancel_handle: Optional[asyncio.Handle] = None self._tasks: Set[asyncio.Task] = set() self._host_task: Optional[asyncio.Task] = None self._timeout_expired = False def __enter__(self) -> "CancelScope": if self._active: raise RuntimeError( "Each CancelScope may only be used for a single 'with' block" ) self._host_task = host_task = 
cast(asyncio.Task, current_task())
        self._tasks.add(host_task)
        try:
            task_state = _task_states[host_task]
        except KeyError:
            task_name = host_task.get_name() if _native_task_names else None
            task_state = TaskState(None, task_name, self)
            _task_states[host_task] = task_state
        else:
            self._parent_scope = task_state.cancel_scope
            task_state.cancel_scope = self

        self._timeout()
        self._active = True
        return self

    def __exit__(self, exc_type: Optional[Type[BaseException]],
                 exc_val: Optional[BaseException],
                 exc_tb: Optional[TracebackType]) -> Optional[bool]:
        if not self._active:
            raise RuntimeError('This cancel scope is not active')
        if current_task() is not self._host_task:
            raise RuntimeError('Attempted to exit cancel scope in a different task than it was '
                               'entered in')

        assert self._host_task is not None
        host_task_state = _task_states.get(self._host_task)
        if host_task_state is None or host_task_state.cancel_scope is not self:
            raise RuntimeError("Attempted to exit a cancel scope that isn't the current task's "
                               "current cancel scope")

        self._active = False
        if self._timeout_handle:
            self._timeout_handle.cancel()
            self._timeout_handle = None

        self._tasks.remove(self._host_task)
        host_task_state.cancel_scope = self._parent_scope

        # Restart the cancellation effort in the farthest directly cancelled parent scope if
        # this one was shielded
        if self._shield:
            self._deliver_cancellation_to_parent()

        if exc_val is not None:
            exceptions = exc_val.exceptions if isinstance(exc_val, ExceptionGroup) else [exc_val]
            if all(isinstance(exc, CancelledError) for exc in exceptions):
                if self._timeout_expired:
                    return True
                elif not self._cancel_called:
                    # Task was cancelled natively
                    return None
                elif not self._parent_cancelled():
                    # This scope was directly cancelled
                    return True

        return None

    def _timeout(self) -> None:
        if self._deadline != math.inf:
            loop = get_running_loop()
            if loop.time() >= self._deadline:
                self._timeout_expired = True
                self.cancel()
            else:
                self._timeout_handle = loop.call_at(self._deadline, self._timeout)

    def _deliver_cancellation(self) -> None:
        """
        Deliver cancellation to directly contained tasks and nested cancel scopes.

        Schedule another run at the end if we still have tasks eligible for cancellation.
""" should_retry = False current = current_task() for task in self._tasks: if task._must_cancel: # type: ignore[attr-defined] continue # The task is eligible for cancellation if it has started and is not in a cancel # scope shielded from this one cancel_scope = _task_states[task].cancel_scope while cancel_scope is not self: if cancel_scope is None or cancel_scope._shield: break else: cancel_scope = cancel_scope._parent_scope else: should_retry = True if task is not current and (task is self._host_task or _task_started(task)): task.cancel() # Schedule another callback if there are still tasks left if should_retry: self._cancel_handle = get_running_loop().call_soon(self._deliver_cancellation) else: self._cancel_handle = None def _deliver_cancellation_to_parent(self) -> None: """Start cancellation effort in the farthest directly cancelled parent scope""" scope = self._parent_scope scope_to_cancel: Optional[CancelScope] = None while scope is not None: if scope._cancel_called and scope._cancel_handle is None: scope_to_cancel = scope # No point in looking beyond any shielded scope if scope._shield: break scope = scope._parent_scope if scope_to_cancel is not None: scope_to_cancel._deliver_cancellation() def _parent_cancelled(self) -> bool: # Check whether any parent has been cancelled cancel_scope = self._parent_scope while cancel_scope is not None and not cancel_scope._shield: if cancel_scope._cancel_called: return True else: cancel_scope = cancel_scope._parent_scope return False def cancel(self) -> DeprecatedAwaitable: if not self._cancel_called: if self._timeout_handle: self._timeout_handle.cancel() self._timeout_handle = None self._cancel_called = True self._deliver_cancellation() return DeprecatedAwaitable(self.cancel) @property def deadline(self) -> float: return self._deadline @deadline.setter def deadline(self, value: float) -> None: self._deadline = float(value) if self._timeout_handle is not None: self._timeout_handle.cancel() self._timeout_handle = None if self._active and not self._cancel_called: self._timeout() @property def cancel_called(self) -> bool: return self._cancel_called @property def shield(self) -> bool: return self._shield @shield.setter def shield(self, value: bool) -> None: if self._shield != value: self._shield = value if not value: self._deliver_cancellation_to_parent() async def checkpoint() -> None: await sleep(0) async def checkpoint_if_cancelled() -> None: task = current_task() if task is None: return try: cancel_scope = _task_states[task].cancel_scope except KeyError: return while cancel_scope: if cancel_scope.cancel_called: await sleep(0) elif cancel_scope.shield: break else: cancel_scope = cancel_scope._parent_scope async def cancel_shielded_checkpoint() -> None: with CancelScope(shield=True): await sleep(0) def current_effective_deadline() -> float: try: cancel_scope = _task_states[current_task()].cancel_scope # type: ignore[index] except KeyError: return math.inf deadline = math.inf while cancel_scope: deadline = min(deadline, cancel_scope.deadline) if cancel_scope.shield: break else: cancel_scope = cancel_scope._parent_scope return deadline def current_time() -> float: return get_running_loop().time() # # Task states # class TaskState: """ Encapsulates auxiliary task information that cannot be added to the Task instance itself because there are no guarantees about its implementation. 
""" __slots__ = 'parent_id', 'name', 'cancel_scope' def __init__(self, parent_id: Optional[int], name: Optional[str], cancel_scope: Optional[CancelScope]): self.parent_id = parent_id self.name = name self.cancel_scope = cancel_scope _task_states = WeakKeyDictionary() # type: WeakKeyDictionary[asyncio.Task, TaskState] # # Task groups # class ExceptionGroup(BaseExceptionGroup): def __init__(self, exceptions: List[BaseException]): super().__init__() self.exceptions = exceptions class _AsyncioTaskStatus(abc.TaskStatus): def __init__(self, future: asyncio.Future, parent_id: int): self._future = future self._parent_id = parent_id def started(self, value: object = None) -> None: try: self._future.set_result(value) except asyncio.InvalidStateError: raise RuntimeError("called 'started' twice on the same task status") from None task = cast(asyncio.Task, current_task()) _task_states[task].parent_id = self._parent_id class TaskGroup(abc.TaskGroup): def __init__(self) -> None: self.cancel_scope: CancelScope = CancelScope() self._active = False self._exceptions: List[BaseException] = [] async def __aenter__(self) -> "TaskGroup": self.cancel_scope.__enter__() self._active = True return self async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: ignore_exception = self.cancel_scope.__exit__(exc_type, exc_val, exc_tb) if exc_val is not None: self.cancel_scope.cancel() self._exceptions.append(exc_val) while self.cancel_scope._tasks: try: await asyncio.wait(self.cancel_scope._tasks) except asyncio.CancelledError: self.cancel_scope.cancel() self._active = False if not self.cancel_scope._parent_cancelled(): exceptions = self._filter_cancellation_errors(self._exceptions) else: exceptions = self._exceptions try: if len(exceptions) > 1: if all(isinstance(e, CancelledError) and not e.args for e in exceptions): # Tasks were cancelled natively, without a cancellation message raise CancelledError else: raise ExceptionGroup(exceptions) elif exceptions and exceptions[0] is not exc_val: raise exceptions[0] except BaseException as exc: # Clear the context here, as it can only be done in-flight. # If the context is not cleared, it can result in recursive tracebacks (see #145). exc.__context__ = None raise return ignore_exception @staticmethod def _filter_cancellation_errors(exceptions: Sequence[BaseException]) -> List[BaseException]: filtered_exceptions: List[BaseException] = [] for exc in exceptions: if isinstance(exc, ExceptionGroup): new_exceptions = TaskGroup._filter_cancellation_errors(exc.exceptions) if len(new_exceptions) > 1: filtered_exceptions.append(exc) elif len(new_exceptions) == 1: filtered_exceptions.append(new_exceptions[0]) elif new_exceptions: new_exc = ExceptionGroup(new_exceptions) new_exc.__cause__ = exc.__cause__ new_exc.__context__ = exc.__context__ new_exc.__traceback__ = exc.__traceback__ filtered_exceptions.append(new_exc) elif not isinstance(exc, CancelledError) or exc.args: filtered_exceptions.append(exc) return filtered_exceptions async def _run_wrapped_task( self, coro: Coroutine, task_status_future: Optional[asyncio.Future]) -> None: # This is the code path for Python 3.6 and 3.7 on which asyncio freaks out if a task raises # a BaseException. 
__traceback_hide__ = __tracebackhide__ = True # noqa: F841 task = cast(asyncio.Task, current_task()) try: await coro except BaseException as exc: if task_status_future is None or task_status_future.done(): self._exceptions.append(exc) self.cancel_scope.cancel() else: task_status_future.set_exception(exc) else: if task_status_future is not None and not task_status_future.done(): task_status_future.set_exception( RuntimeError('Child exited without calling task_status.started()')) finally: if task in self.cancel_scope._tasks: self.cancel_scope._tasks.remove(task) del _task_states[task] def _spawn(self, func: Callable[..., Coroutine], args: tuple, name: object, task_status_future: Optional[asyncio.Future] = None) -> asyncio.Task: def task_done(_task: asyncio.Task) -> None: # This is the code path for Python 3.8+ assert _task in self.cancel_scope._tasks self.cancel_scope._tasks.remove(_task) del _task_states[_task] try: exc = _task.exception() except CancelledError as e: while isinstance(e.__context__, CancelledError): e = e.__context__ exc = e if exc is not None: if task_status_future is None or task_status_future.done(): self._exceptions.append(exc) self.cancel_scope.cancel() else: task_status_future.set_exception(exc) elif task_status_future is not None and not task_status_future.done(): task_status_future.set_exception( RuntimeError('Child exited without calling task_status.started()')) if not self._active: raise RuntimeError('This task group is not active; no new tasks can be started.') options = {} name = get_callable_name(func) if name is None else str(name) if _native_task_names: options['name'] = name kwargs = {} if task_status_future: parent_id = id(current_task()) kwargs['task_status'] = _AsyncioTaskStatus(task_status_future, id(self.cancel_scope._host_task)) else: parent_id = id(self.cancel_scope._host_task) coro = func(*args, **kwargs) if not asyncio.iscoroutine(coro): raise TypeError(f'Expected an async function, but {func} appears to be synchronous') foreign_coro = not hasattr(coro, 'cr_frame') and not hasattr(coro, 'gi_frame') if foreign_coro or sys.version_info < (3, 8): coro = self._run_wrapped_task(coro, task_status_future) task = create_task(coro, **options) if not foreign_coro and sys.version_info >= (3, 8): task.add_done_callback(task_done) # Make the spawned task inherit the task group's cancel scope _task_states[task] = TaskState(parent_id=parent_id, name=name, cancel_scope=self.cancel_scope) self.cancel_scope._tasks.add(task) return task def start_soon(self, func: Callable[..., Coroutine], *args: object, name: object = None) -> None: self._spawn(func, args, name) async def start(self, func: Callable[..., Coroutine], *args: object, name: object = None) -> None: future: asyncio.Future = asyncio.Future() task = self._spawn(func, args, name, future) # If the task raises an exception after sending a start value without a switch point # between, the task group is cancelled and this method never proceeds to process the # completed future. That's why we have to have a shielded cancel scope here. 
with CancelScope(shield=True): try: return await future except CancelledError: task.cancel() raise # # Threads # _Retval_Queue_Type = Tuple[Optional[T_Retval], Optional[BaseException]] class WorkerThread(Thread): MAX_IDLE_TIME = 10 # seconds def __init__(self, root_task: asyncio.Task, workers: Set['WorkerThread'], idle_workers: Deque['WorkerThread']): super().__init__(name='AnyIO worker thread') self.root_task = root_task self.workers = workers self.idle_workers = idle_workers self.loop = root_task._loop self.queue: Queue[Union[Tuple[Context, Callable, tuple, asyncio.Future], None]] = Queue(2) self.idle_since = current_time() self.stopping = False def _report_result(self, future: asyncio.Future, result: Any, exc: Optional[BaseException]) -> None: self.idle_since = current_time() if not self.stopping: self.idle_workers.append(self) if not future.cancelled(): if exc is not None: future.set_exception(exc) else: future.set_result(result) def run(self) -> None: with claim_worker_thread('asyncio'): threadlocals.loop = self.loop while True: item = self.queue.get() if item is None: # Shutdown command received return context, func, args, future = item if not future.cancelled(): result = None exception: Optional[BaseException] = None try: result = context.run(func, *args) except BaseException as exc: exception = exc if not self.loop.is_closed(): self.loop.call_soon_threadsafe( self._report_result, future, result, exception) self.queue.task_done() def stop(self, f: Optional[asyncio.Task] = None) -> None: self.stopping = True self.queue.put_nowait(None) self.workers.discard(self) try: self.idle_workers.remove(self) except ValueError: pass _threadpool_idle_workers: RunVar[Deque[WorkerThread]] = RunVar('_threadpool_idle_workers') _threadpool_workers: RunVar[Set[WorkerThread]] = RunVar('_threadpool_workers') async def run_sync_in_worker_thread( func: Callable[..., T_Retval], *args: object, cancellable: bool = False, limiter: Optional['CapacityLimiter'] = None) -> T_Retval: await checkpoint() # If this is the first run in this event loop thread, set up the necessary variables try: idle_workers = _threadpool_idle_workers.get() workers = _threadpool_workers.get() except LookupError: idle_workers = deque() workers = set() _threadpool_idle_workers.set(idle_workers) _threadpool_workers.set(workers) async with (limiter or current_default_thread_limiter()): with CancelScope(shield=not cancellable): future: asyncio.Future = asyncio.Future() root_task = find_root_task() if not idle_workers: worker = WorkerThread(root_task, workers, idle_workers) worker.start() workers.add(worker) root_task.add_done_callback(worker.stop) else: worker = idle_workers.pop() # Prune any other workers that have been idle for MAX_IDLE_TIME seconds or longer now = current_time() while idle_workers: if now - idle_workers[0].idle_since < WorkerThread.MAX_IDLE_TIME: break expired_worker = idle_workers.popleft() expired_worker.root_task.remove_done_callback(expired_worker.stop) expired_worker.stop() context = copy_context() context.run(sniffio.current_async_library_cvar.set, None) worker.queue.put_nowait((context, func, args, future)) return await future def run_sync_from_thread(func: Callable[..., T_Retval], *args: object, loop: Optional[asyncio.AbstractEventLoop] = None) -> T_Retval: @wraps(func) def wrapper() -> None: try: f.set_result(func(*args)) except BaseException as exc: f.set_exception(exc) if not isinstance(exc, Exception): raise f: concurrent.futures.Future[T_Retval] = Future() loop = loop or threadlocals.loop if sys.version_info 
< (3, 7): loop.call_soon_threadsafe(copy_context().run, wrapper) else: loop.call_soon_threadsafe(wrapper) return f.result() def run_async_from_thread( func: Callable[..., Coroutine[Any, Any, T_Retval]], *args: object ) -> T_Retval: f: concurrent.futures.Future[T_Retval] = asyncio.run_coroutine_threadsafe( func(*args), threadlocals.loop) return f.result() class BlockingPortal(abc.BlockingPortal): def __new__(cls) -> "BlockingPortal": return object.__new__(cls) def __init__(self) -> None: super().__init__() self._loop = get_running_loop() def _spawn_task_from_thread(self, func: Callable, args: tuple, kwargs: Dict[str, Any], name: object, future: Future) -> None: run_sync_from_thread( partial(self._task_group.start_soon, name=name), self._call_func, func, args, kwargs, future, loop=self._loop) # # Subprocesses # @dataclass(eq=False) class StreamReaderWrapper(abc.ByteReceiveStream): _stream: asyncio.StreamReader async def receive(self, max_bytes: int = 65536) -> bytes: data = await self._stream.read(max_bytes) if data: return data else: raise EndOfStream async def aclose(self) -> None: self._stream.feed_eof() @dataclass(eq=False) class StreamWriterWrapper(abc.ByteSendStream): _stream: asyncio.StreamWriter async def send(self, item: bytes) -> None: self._stream.write(item) await self._stream.drain() async def aclose(self) -> None: self._stream.close() @dataclass(eq=False) class Process(abc.Process): _process: asyncio.subprocess.Process _stdin: Optional[StreamWriterWrapper] _stdout: Optional[StreamReaderWrapper] _stderr: Optional[StreamReaderWrapper] async def aclose(self) -> None: if self._stdin: await self._stdin.aclose() if self._stdout: await self._stdout.aclose() if self._stderr: await self._stderr.aclose() await self.wait() async def wait(self) -> int: return await self._process.wait() def terminate(self) -> None: self._process.terminate() def kill(self) -> None: self._process.kill() def send_signal(self, signal: int) -> None: self._process.send_signal(signal) @property def pid(self) -> int: return self._process.pid @property def returncode(self) -> Optional[int]: return self._process.returncode @property def stdin(self) -> Optional[abc.ByteSendStream]: return self._stdin @property def stdout(self) -> Optional[abc.ByteReceiveStream]: return self._stdout @property def stderr(self) -> Optional[abc.ByteReceiveStream]: return self._stderr async def open_process(command: Union[str, Sequence[str]], *, shell: bool, stdin: int, stdout: int, stderr: int, cwd: Union[str, bytes, PathLike, None] = None, env: Optional[Mapping[str, str]] = None, start_new_session: bool = False) -> Process: await checkpoint() if shell: process = await asyncio.create_subprocess_shell( command, stdin=stdin, stdout=stdout, # type: ignore[arg-type] stderr=stderr, cwd=cwd, env=env, start_new_session=start_new_session, ) else: process = await asyncio.create_subprocess_exec(*command, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd, env=env, start_new_session=start_new_session) stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None return Process(process, stdin_stream, stdout_stream, stderr_stream) def _forcibly_shutdown_process_pool_on_exit(workers: Set[Process], _task: object) -> None: """ Forcibly shuts down worker processes belonging to this event loop.""" child_watcher: Optional[asyncio.AbstractChildWatcher] try: child_watcher = 
asyncio.get_event_loop_policy().get_child_watcher() except NotImplementedError: child_watcher = None # Close as much as possible (w/o async/await) to avoid warnings for process in workers: if process.returncode is None: continue process._stdin._stream._transport.close() # type: ignore[union-attr] process._stdout._stream._transport.close() # type: ignore[union-attr] process._stderr._stream._transport.close() # type: ignore[union-attr] process.kill() if child_watcher: child_watcher.remove_child_handler(process.pid) async def _shutdown_process_pool_on_exit(workers: Set[Process]) -> None: """ Shuts down worker processes belonging to this event loop. NOTE: this only works when the event loop was started using asyncio.run() or anyio.run(). """ process: Process try: await sleep(math.inf) except asyncio.CancelledError: for process in workers: if process.returncode is None: process.kill() for process in workers: await process.aclose() def setup_process_pool_exit_at_shutdown(workers: Set[Process]) -> None: kwargs = {'name': 'AnyIO process pool shutdown task'} if _native_task_names else {} create_task(_shutdown_process_pool_on_exit(workers), **kwargs) find_root_task().add_done_callback(partial(_forcibly_shutdown_process_pool_on_exit, workers)) # # Sockets and networking # class StreamProtocol(asyncio.Protocol): read_queue: Deque[bytes] read_event: asyncio.Event write_event: asyncio.Event exception: Optional[Exception] = None def connection_made(self, transport: asyncio.BaseTransport) -> None: self.read_queue = deque() self.read_event = asyncio.Event() self.write_event = asyncio.Event() self.write_event.set() cast(asyncio.Transport, transport).set_write_buffer_limits(0) def connection_lost(self, exc: Optional[Exception]) -> None: if exc: self.exception = BrokenResourceError() self.exception.__cause__ = exc self.read_event.set() self.write_event.set() def data_received(self, data: bytes) -> None: self.read_queue.append(data) self.read_event.set() def eof_received(self) -> Optional[bool]: self.read_event.set() return True def pause_writing(self) -> None: self.write_event = asyncio.Event() def resume_writing(self) -> None: self.write_event.set() class DatagramProtocol(asyncio.DatagramProtocol): read_queue: Deque[Tuple[bytes, IPSockAddrType]] read_event: asyncio.Event write_event: asyncio.Event exception: Optional[Exception] = None def connection_made(self, transport: asyncio.BaseTransport) -> None: self.read_queue = deque(maxlen=100) # arbitrary value self.read_event = asyncio.Event() self.write_event = asyncio.Event() self.write_event.set() def connection_lost(self, exc: Optional[Exception]) -> None: self.read_event.set() self.write_event.set() def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None: addr = convert_ipv6_sockaddr(addr) self.read_queue.append((data, addr)) self.read_event.set() def error_received(self, exc: Exception) -> None: self.exception = exc def pause_writing(self) -> None: self.write_event.clear() def resume_writing(self) -> None: self.write_event.set() class SocketStream(abc.SocketStream): def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol): self._transport = transport self._protocol = protocol self._receive_guard = ResourceGuard('reading from') self._send_guard = ResourceGuard('writing to') self._closed = False @property def _raw_socket(self) -> socket.socket: return self._transport.get_extra_info('socket') async def receive(self, max_bytes: int = 65536) -> bytes: with self._receive_guard: await checkpoint() if not 
self._protocol.read_event.is_set() and not self._transport.is_closing():
                self._transport.resume_reading()
                await self._protocol.read_event.wait()
                self._transport.pause_reading()

            try:
                chunk = self._protocol.read_queue.popleft()
            except IndexError:
                if self._closed:
                    raise ClosedResourceError from None
                elif self._protocol.exception:
                    raise self._protocol.exception
                else:
                    raise EndOfStream from None

            if len(chunk) > max_bytes:
                # Split the oversized chunk
                chunk, leftover = chunk[:max_bytes], chunk[max_bytes:]
                self._protocol.read_queue.appendleft(leftover)

            # If the read queue is empty, clear the flag so that the next call will block until
            # data is available
            if not self._protocol.read_queue:
                self._protocol.read_event.clear()

            return chunk

    async def send(self, item: bytes) -> None:
        with self._send_guard:
            await checkpoint()

            if self._closed:
                raise ClosedResourceError
            elif self._protocol.exception is not None:
                raise self._protocol.exception

            try:
                self._transport.write(item)
            except RuntimeError as exc:
                if self._transport.is_closing():
                    raise BrokenResourceError from exc
                else:
                    raise

            await self._protocol.write_event.wait()

    async def send_eof(self) -> None:
        try:
            self._transport.write_eof()
        except OSError:
            pass

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            try:
                self._transport.write_eof()
            except OSError:
                pass

            self._transport.close()
            await sleep(0)
            self._transport.abort()


class UNIXSocketStream(abc.SocketStream):
    _receive_future: Optional[asyncio.Future] = None
    _send_future: Optional[asyncio.Future] = None
    _closing = False

    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._loop = get_running_loop()
        self._receive_guard = ResourceGuard('reading from')
        self._send_guard = ResourceGuard('writing to')

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket

    def _wait_until_readable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
        def callback(f: object) -> None:
            del self._receive_future
            loop.remove_reader(self.__raw_socket)

        f = self._receive_future = asyncio.Future()
        self._loop.add_reader(self.__raw_socket, f.set_result, None)
        f.add_done_callback(callback)
        return f

    def _wait_until_writable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
        def callback(f: object) -> None:
            del self._send_future
            loop.remove_writer(self.__raw_socket)

        f = self._send_future = asyncio.Future()
        self._loop.add_writer(self.__raw_socket, f.set_result, None)
        f.add_done_callback(callback)
        return f

    async def send_eof(self) -> None:
        with self._send_guard:
            self._raw_socket.shutdown(socket.SHUT_WR)

    async def receive(self, max_bytes: int = 65536) -> bytes:
        loop = get_running_loop()
        await checkpoint()
        with self._receive_guard:
            while True:
                try:
                    data = self.__raw_socket.recv(max_bytes)
                except BlockingIOError:
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    if not data:
                        raise EndOfStream

                    return data

    async def send(self, item: bytes) -> None:
        loop = get_running_loop()
        await checkpoint()
        with self._send_guard:
            view = memoryview(item)
            while view:
                try:
                    # Send the remaining view, not the whole original item, so that a partial
                    # send does not retransmit bytes that have already gone out
                    bytes_sent = self.__raw_socket.send(view)
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    view = view[bytes_sent:]

    async def receive_fds(self, msglen: int, maxfds: int) -> Tuple[bytes, List[int]]:
        if not isinstance(msglen, int) or msglen < 0:
            raise
ValueError('msglen must be a non-negative integer') if not isinstance(maxfds, int) or maxfds < 1: raise ValueError('maxfds must be a positive integer') loop = get_running_loop() fds = array.array("i") await checkpoint() with self._receive_guard: while True: try: message, ancdata, flags, addr = self.__raw_socket.recvmsg( msglen, socket.CMSG_LEN(maxfds * fds.itemsize)) except BlockingIOError: await self._wait_until_readable(loop) except OSError as exc: if self._closing: raise ClosedResourceError from None else: raise BrokenResourceError from exc else: if not message and not ancdata: raise EndOfStream break for cmsg_level, cmsg_type, cmsg_data in ancdata: if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS: raise RuntimeError(f'Received unexpected ancillary data; message = {message!r}, ' f'cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}') fds.frombytes(cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) return message, list(fds) async def send_fds(self, message: bytes, fds: Collection[Union[int, IOBase]]) -> None: if not message: raise ValueError('message must not be empty') if not fds: raise ValueError('fds must not be empty') loop = get_running_loop() filenos: List[int] = [] for fd in fds: if isinstance(fd, int): filenos.append(fd) elif isinstance(fd, IOBase): filenos.append(fd.fileno()) fdarray = array.array("i", filenos) await checkpoint() with self._send_guard: while True: try: # The ignore can be removed after mypy picks up # https://github.com/python/typeshed/pull/5545 self.__raw_socket.sendmsg( [message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)] ) break except BlockingIOError: await self._wait_until_writable(loop) except OSError as exc: if self._closing: raise ClosedResourceError from None else: raise BrokenResourceError from exc async def aclose(self) -> None: if not self._closing: self._closing = True if self.__raw_socket.fileno() != -1: self.__raw_socket.close() if self._receive_future: self._receive_future.set_result(None) if self._send_future: self._send_future.set_result(None) class TCPSocketListener(abc.SocketListener): _accept_scope: Optional[CancelScope] = None _closed = False def __init__(self, raw_socket: socket.socket): self.__raw_socket = raw_socket self._loop = cast(asyncio.BaseEventLoop, get_running_loop()) self._accept_guard = ResourceGuard('accepting connections from') @property def _raw_socket(self) -> socket.socket: return self.__raw_socket async def accept(self) -> abc.SocketStream: if self._closed: raise ClosedResourceError with self._accept_guard: await checkpoint() with CancelScope() as self._accept_scope: try: client_sock, _addr = await self._loop.sock_accept(self._raw_socket) except asyncio.CancelledError: # Workaround for https://bugs.python.org/issue41317 try: self._loop.remove_reader(self._raw_socket) except (ValueError, NotImplementedError): pass if self._closed: raise ClosedResourceError from None raise finally: self._accept_scope = None client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) transport, protocol = await self._loop.connect_accepted_socket(StreamProtocol, client_sock) return SocketStream(cast(asyncio.Transport, transport), cast(StreamProtocol, protocol)) async def aclose(self) -> None: if self._closed: return self._closed = True if self._accept_scope: # Workaround for https://bugs.python.org/issue41317 try: self._loop.remove_reader(self._raw_socket) except (ValueError, NotImplementedError): pass self._accept_scope.cancel() await sleep(0) self._raw_socket.close() class 
UNIXSocketListener(abc.SocketListener): def __init__(self, raw_socket: socket.socket): self.__raw_socket = raw_socket self._loop = get_running_loop() self._accept_guard = ResourceGuard('accepting connections from') self._closed = False async def accept(self) -> abc.SocketStream: await checkpoint() with self._accept_guard: while True: try: client_sock, _ = self.__raw_socket.accept() client_sock.setblocking(False) return UNIXSocketStream(client_sock) except BlockingIOError: f: asyncio.Future = asyncio.Future() self._loop.add_reader(self.__raw_socket, f.set_result, None) f.add_done_callback(lambda _: self._loop.remove_reader(self.__raw_socket)) await f except OSError as exc: if self._closed: raise ClosedResourceError from None else: raise BrokenResourceError from exc async def aclose(self) -> None: self._closed = True self.__raw_socket.close() @property def _raw_socket(self) -> socket.socket: return self.__raw_socket class UDPSocket(abc.UDPSocket): def __init__(self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol): self._transport = transport self._protocol = protocol self._receive_guard = ResourceGuard('reading from') self._send_guard = ResourceGuard('writing to') self._closed = False @property def _raw_socket(self) -> socket.socket: return self._transport.get_extra_info('socket') async def aclose(self) -> None: if not self._transport.is_closing(): self._closed = True self._transport.close() async def receive(self) -> Tuple[bytes, IPSockAddrType]: with self._receive_guard: await checkpoint() # If the buffer is empty, ask for more data if not self._protocol.read_queue and not self._transport.is_closing(): self._protocol.read_event.clear() await self._protocol.read_event.wait() try: return self._protocol.read_queue.popleft() except IndexError: if self._closed: raise ClosedResourceError from None else: raise BrokenResourceError from None async def send(self, item: UDPPacketType) -> None: with self._send_guard: await checkpoint() await self._protocol.write_event.wait() if self._closed: raise ClosedResourceError elif self._transport.is_closing(): raise BrokenResourceError else: self._transport.sendto(*item) class ConnectedUDPSocket(abc.ConnectedUDPSocket): def __init__(self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol): self._transport = transport self._protocol = protocol self._receive_guard = ResourceGuard('reading from') self._send_guard = ResourceGuard('writing to') self._closed = False @property def _raw_socket(self) -> socket.socket: return self._transport.get_extra_info('socket') async def aclose(self) -> None: if not self._transport.is_closing(): self._closed = True self._transport.close() async def receive(self) -> bytes: with self._receive_guard: await checkpoint() # If the buffer is empty, ask for more data if not self._protocol.read_queue and not self._transport.is_closing(): self._protocol.read_event.clear() await self._protocol.read_event.wait() try: packet = self._protocol.read_queue.popleft() except IndexError: if self._closed: raise ClosedResourceError from None else: raise BrokenResourceError from None return packet[0] async def send(self, item: bytes) -> None: with self._send_guard: await checkpoint() await self._protocol.write_event.wait() if self._closed: raise ClosedResourceError elif self._transport.is_closing(): raise BrokenResourceError else: self._transport.sendto(item) async def connect_tcp(host: str, port: int, local_addr: Optional[Tuple[str, int]] = None) -> SocketStream: transport, protocol = cast( Tuple[asyncio.Transport, 
StreamProtocol], await get_running_loop().create_connection(StreamProtocol, host, port, local_addr=local_addr) ) transport.pause_reading() return SocketStream(transport, protocol) async def connect_unix(path: str) -> UNIXSocketStream: await checkpoint() loop = get_running_loop() raw_socket = socket.socket(socket.AF_UNIX) raw_socket.setblocking(False) while True: try: raw_socket.connect(path) except BlockingIOError: f: asyncio.Future = asyncio.Future() loop.add_writer(raw_socket, f.set_result, None) f.add_done_callback(lambda _: loop.remove_writer(raw_socket)) await f except BaseException: raw_socket.close() raise else: return UNIXSocketStream(raw_socket) async def create_udp_socket( family: socket.AddressFamily, local_address: Optional[IPSockAddrType], remote_address: Optional[IPSockAddrType], reuse_port: bool ) -> Union[UDPSocket, ConnectedUDPSocket]: result = await get_running_loop().create_datagram_endpoint( DatagramProtocol, local_addr=local_address, remote_addr=remote_address, family=family, reuse_port=reuse_port) transport = cast(asyncio.DatagramTransport, result[0]) protocol = cast(DatagramProtocol, result[1]) if protocol.exception: transport.close() raise protocol.exception if not remote_address: return UDPSocket(transport, protocol) else: return ConnectedUDPSocket(transport, protocol) async def getaddrinfo(host: Union[bytearray, bytes, str], port: Union[str, int, None], *, family: Union[int, AddressFamily] = 0, type: Union[int, SocketKind] = 0, proto: int = 0, flags: int = 0) -> GetAddrInfoReturnType: # https://github.com/python/typeshed/pull/4304 result = await get_running_loop().getaddrinfo( host, port, family=family, type=type, proto=proto, flags=flags) # type: ignore[arg-type] return cast(GetAddrInfoReturnType, result) async def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Tuple[str, str]: return await get_running_loop().getnameinfo(sockaddr, flags) _read_events: RunVar[Dict[Any, asyncio.Event]] = RunVar('read_events') _write_events: RunVar[Dict[Any, asyncio.Event]] = RunVar('write_events') async def wait_socket_readable(sock: socket.socket) -> None: await checkpoint() try: read_events = _read_events.get() except LookupError: read_events = {} _read_events.set(read_events) if read_events.get(sock): raise BusyResourceError('reading from') from None loop = get_running_loop() event = read_events[sock] = asyncio.Event() loop.add_reader(sock, event.set) try: await event.wait() finally: if read_events.pop(sock, None) is not None: loop.remove_reader(sock) readable = True else: readable = False if not readable: raise ClosedResourceError async def wait_socket_writable(sock: socket.socket) -> None: await checkpoint() try: write_events = _write_events.get() except LookupError: write_events = {} _write_events.set(write_events) if write_events.get(sock): raise BusyResourceError('writing to') from None loop = get_running_loop() event = write_events[sock] = asyncio.Event() loop.add_writer(sock.fileno(), event.set) try: await event.wait() finally: if write_events.pop(sock, None) is not None: loop.remove_writer(sock) writable = True else: writable = False if not writable: raise ClosedResourceError # # Synchronization # class Event(BaseEvent): def __new__(cls) -> "Event": return object.__new__(cls) def __init__(self) -> None: self._event = asyncio.Event() def set(self) -> DeprecatedAwaitable: self._event.set() return DeprecatedAwaitable(self.set) def is_set(self) -> bool: return self._event.is_set() async def wait(self) -> None: if await self._event.wait(): await checkpoint() def 
statistics(self) -> EventStatistics: return EventStatistics(len(self._event._waiters)) # type: ignore[attr-defined] class CapacityLimiter(BaseCapacityLimiter): _total_tokens: float = 0 def __new__(cls, total_tokens: float) -> "CapacityLimiter": return object.__new__(cls) def __init__(self, total_tokens: float): self._borrowers: Set[Any] = set() self._wait_queue: Dict[Any, asyncio.Event] = OrderedDict() self.total_tokens = total_tokens async def __aenter__(self) -> None: await self.acquire() async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None: self.release() @property def total_tokens(self) -> float: return self._total_tokens @total_tokens.setter def total_tokens(self, value: float) -> None: if not isinstance(value, int) and not math.isinf(value): raise TypeError('total_tokens must be an int or math.inf') if value < 1: raise ValueError('total_tokens must be >= 1') old_value = self._total_tokens self._total_tokens = value events = [] for event in self._wait_queue.values(): if value <= old_value: break if not event.is_set(): events.append(event) old_value += 1 for event in events: event.set() @property def borrowed_tokens(self) -> int: return len(self._borrowers) @property def available_tokens(self) -> float: return self._total_tokens - len(self._borrowers) def acquire_nowait(self) -> DeprecatedAwaitable: self.acquire_on_behalf_of_nowait(current_task()) return DeprecatedAwaitable(self.acquire_nowait) def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable: if borrower in self._borrowers: raise RuntimeError("this borrower is already holding one of this CapacityLimiter's " "tokens") if self._wait_queue or len(self._borrowers) >= self._total_tokens: raise WouldBlock self._borrowers.add(borrower) return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait) async def acquire(self) -> None: return await self.acquire_on_behalf_of(current_task()) async def acquire_on_behalf_of(self, borrower: object) -> None: await checkpoint_if_cancelled() try: self.acquire_on_behalf_of_nowait(borrower) except WouldBlock: event = asyncio.Event() self._wait_queue[borrower] = event try: await event.wait() except BaseException: self._wait_queue.pop(borrower, None) raise self._borrowers.add(borrower) else: try: await cancel_shielded_checkpoint() except BaseException: self.release() raise def release(self) -> None: self.release_on_behalf_of(current_task()) def release_on_behalf_of(self, borrower: object) -> None: try: self._borrowers.remove(borrower) except KeyError: raise RuntimeError("this borrower isn't holding any of this CapacityLimiter's " "tokens") from None # Notify the next task in line if this limiter has free capacity now if self._wait_queue and len(self._borrowers) < self._total_tokens: event = self._wait_queue.popitem()[1] event.set() def statistics(self) -> CapacityLimiterStatistics: return CapacityLimiterStatistics(self.borrowed_tokens, self.total_tokens, tuple(self._borrowers), len(self._wait_queue)) _default_thread_limiter: RunVar[CapacityLimiter] = RunVar('_default_thread_limiter') def current_default_thread_limiter() -> CapacityLimiter: try: return _default_thread_limiter.get() except LookupError: limiter = CapacityLimiter(40) _default_thread_limiter.set(limiter) return limiter # # Operating system signals # class _SignalReceiver(DeprecatedAsyncContextManager["_SignalReceiver"]): def __init__(self, signals: Tuple[int, ...]): self._signals = signals self._loop = get_running_loop() 
self._signal_queue: Deque[int] = deque() self._future: asyncio.Future = asyncio.Future() self._handled_signals: Set[int] = set() def _deliver(self, signum: int) -> None: self._signal_queue.append(signum) if not self._future.done(): self._future.set_result(None) def __enter__(self) -> "_SignalReceiver": for sig in set(self._signals): self._loop.add_signal_handler(sig, self._deliver, sig) self._handled_signals.add(sig) return self def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: for sig in self._handled_signals: self._loop.remove_signal_handler(sig) return None def __aiter__(self) -> "_SignalReceiver": return self async def __anext__(self) -> int: await checkpoint() if not self._signal_queue: self._future = asyncio.Future() await self._future return self._signal_queue.popleft() def open_signal_receiver(*signals: int) -> _SignalReceiver: return _SignalReceiver(signals) # # Testing and debugging # def _create_task_info(task: asyncio.Task) -> TaskInfo: task_state = _task_states.get(task) if task_state is None: name = task.get_name() if _native_task_names else None parent_id = None else: name = task_state.name parent_id = task_state.parent_id return TaskInfo(id(task), parent_id, name, get_coro(task)) def get_current_task() -> TaskInfo: return _create_task_info(current_task()) # type: ignore[arg-type] def get_running_tasks() -> List[TaskInfo]: return [_create_task_info(task) for task in all_tasks() if not task.done()] async def wait_all_tasks_blocked() -> None: await checkpoint() this_task = current_task() while True: for task in all_tasks(): if task is this_task: continue if task._fut_waiter is None or task._fut_waiter.done(): # type: ignore[attr-defined] await sleep(0.1) break else: return class TestRunner(abc.TestRunner): def __init__(self, debug: bool = False, use_uvloop: bool = False, policy: Optional[asyncio.AbstractEventLoopPolicy] = None): _maybe_set_event_loop_policy(policy, use_uvloop) self._loop = asyncio.new_event_loop() self._loop.set_debug(debug) asyncio.set_event_loop(self._loop) def _cancel_all_tasks(self) -> None: to_cancel = all_tasks(self._loop) if not to_cancel: return for task in to_cancel: task.cancel() self._loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True)) for task in to_cancel: if task.cancelled(): continue if task.exception() is not None: raise cast(BaseException, task.exception()) def close(self) -> None: try: self._cancel_all_tasks() self._loop.run_until_complete(self._loop.shutdown_asyncgens()) finally: asyncio.set_event_loop(None) self._loop.close() def call(self, func: Callable[..., Awaitable[T_Retval]], *args: object, **kwargs: object) -> T_Retval: def exception_handler(loop: asyncio.AbstractEventLoop, context: Dict[str, Any]) -> None: exceptions.append(context['exception']) exceptions: List[BaseException] = [] self._loop.set_exception_handler(exception_handler) try: retval: T_Retval = self._loop.run_until_complete(func(*args, **kwargs)) except Exception as exc: retval = None # type: ignore[assignment] exceptions.append(exc) finally: self._loop.set_exception_handler(None) if len(exceptions) == 1: raise exceptions[0] elif exceptions: raise ExceptionGroup(exceptions) return retval anyio-3.5.0/src/anyio/_backends/_trio.py000066400000000000000000000660201416724134300201540ustar00rootroot00000000000000import array import math import socket from concurrent.futures import Future from contextvars import copy_context from dataclasses import dataclass 
from functools import partial from io import IOBase from os import PathLike from signal import Signals from types import TracebackType from typing import ( Any, Awaitable, Callable, Collection, ContextManager, Coroutine, Deque, Dict, Generic, List, Mapping, NoReturn, Optional, Sequence, Set, Tuple, Type, TypeVar, Union, cast) import sniffio import trio.from_thread from outcome import Error, Outcome, Value from trio.socket import SocketType as TrioSocketType from trio.to_thread import run_sync from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable, T from .._core._eventloop import claim_worker_thread from .._core._exceptions import ( BrokenResourceError, BusyResourceError, ClosedResourceError, EndOfStream) from .._core._exceptions import ExceptionGroup as BaseExceptionGroup from .._core._sockets import convert_ipv6_sockaddr from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter from .._core._synchronization import Event as BaseEvent from .._core._synchronization import ResourceGuard from .._core._tasks import CancelScope as BaseCancelScope from ..abc import IPSockAddrType, UDPPacketType try: from trio import lowlevel as trio_lowlevel except ImportError: from trio import hazmat as trio_lowlevel # type: ignore[no-redef] from trio.hazmat import wait_readable, wait_writable else: from trio.lowlevel import wait_readable, wait_writable try: from trio.lowlevel import open_process as trio_open_process # type: ignore[attr-defined] except ImportError: from trio import open_process as trio_open_process T_Retval = TypeVar('T_Retval') T_SockAddr = TypeVar('T_SockAddr', str, IPSockAddrType) # # Event loop # run = trio.run current_token = trio.lowlevel.current_trio_token RunVar = trio.lowlevel.RunVar # # Miscellaneous # sleep = trio.sleep # # Timeouts and cancellation # class CancelScope(BaseCancelScope): def __new__(cls, original: Optional[trio.CancelScope] = None, **kwargs: object) -> 'CancelScope': return object.__new__(cls) def __init__(self, original: Optional[trio.CancelScope] = None, **kwargs: Any) -> None: self.__original = original or trio.CancelScope(**kwargs) def __enter__(self) -> 'CancelScope': self.__original.__enter__() return self def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: return self.__original.__exit__(exc_type, exc_val, exc_tb) def cancel(self) -> DeprecatedAwaitable: self.__original.cancel() return DeprecatedAwaitable(self.cancel) @property def deadline(self) -> float: return self.__original.deadline @deadline.setter def deadline(self, value: float) -> None: self.__original.deadline = value @property def cancel_called(self) -> bool: return self.__original.cancel_called @property def shield(self) -> bool: return self.__original.shield @shield.setter def shield(self, value: bool) -> None: self.__original.shield = value CancelledError = trio.Cancelled checkpoint = trio.lowlevel.checkpoint checkpoint_if_cancelled = trio.lowlevel.checkpoint_if_cancelled cancel_shielded_checkpoint = trio.lowlevel.cancel_shielded_checkpoint current_effective_deadline = trio.current_effective_deadline current_time = trio.current_time # # Task groups # class ExceptionGroup(BaseExceptionGroup, trio.MultiError): pass class TaskGroup(abc.TaskGroup): def __init__(self) -> None: self._active = False self._nursery_manager = trio.open_nursery() self.cancel_scope = None # type: ignore[assignment] async 
def __aenter__(self) -> 'TaskGroup': self._active = True self._nursery = await self._nursery_manager.__aenter__() self.cancel_scope = CancelScope(self._nursery.cancel_scope) return self async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: try: return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb) except trio.MultiError as exc: raise ExceptionGroup(exc.exceptions) from None finally: self._active = False def start_soon(self, func: Callable, *args: object, name: object = None) -> None: if not self._active: raise RuntimeError('This task group is not active; no new tasks can be started.') self._nursery.start_soon(func, *args, name=name) async def start(self, func: Callable[..., Coroutine], *args: object, name: object = None) -> object: if not self._active: raise RuntimeError('This task group is not active; no new tasks can be started.') return await self._nursery.start(func, *args, name=name) # # Threads # async def run_sync_in_worker_thread( func: Callable[..., T_Retval], *args: object, cancellable: bool = False, limiter: Optional[trio.CapacityLimiter] = None) -> T_Retval: def wrapper() -> T_Retval: with claim_worker_thread('trio'): return func(*args) # TODO: remove explicit context copying when trio 0.20 is the minimum requirement context = copy_context() context.run(sniffio.current_async_library_cvar.set, None) return await run_sync(context.run, wrapper, cancellable=cancellable, limiter=limiter) # TODO: remove this workaround when trio 0.20 is the minimum requirement def run_async_from_thread(fn: Callable[..., Awaitable[T_Retval]], *args: Any) -> T_Retval: async def wrapper() -> T_Retval: retval: T_Retval async def inner() -> None: nonlocal retval __tracebackhide__ = True retval = await fn(*args) async with trio.open_nursery() as n: context.run(n.start_soon, inner) __tracebackhide__ = True return retval context = copy_context() context.run(sniffio.current_async_library_cvar.set, 'trio') return trio.from_thread.run(wrapper) def run_sync_from_thread(fn: Callable[..., T_Retval], *args: Any) -> T_Retval: # TODO: remove explicit context copying when trio 0.20 is the minimum requirement retval = trio.from_thread.run_sync(copy_context().run, fn, *args) return cast(T_Retval, retval) class BlockingPortal(abc.BlockingPortal): def __new__(cls) -> 'BlockingPortal': return object.__new__(cls) def __init__(self) -> None: super().__init__() self._token = trio.lowlevel.current_trio_token() def _spawn_task_from_thread(self, func: Callable, args: tuple, kwargs: Dict[str, Any], name: object, future: Future) -> None: context = copy_context() context.run(sniffio.current_async_library_cvar.set, 'trio') trio.from_thread.run_sync( context.run, partial(self._task_group.start_soon, name=name), self._call_func, func, args, kwargs, future, trio_token=self._token) # # Subprocesses # @dataclass(eq=False) class ReceiveStreamWrapper(abc.ByteReceiveStream): _stream: trio.abc.ReceiveStream async def receive(self, max_bytes: Optional[int] = None) -> bytes: try: data = await self._stream.receive_some(max_bytes) except trio.ClosedResourceError as exc: raise ClosedResourceError from exc.__cause__ except trio.BrokenResourceError as exc: raise BrokenResourceError from exc.__cause__ if data: return data else: raise EndOfStream async def aclose(self) -> None: await self._stream.aclose() @dataclass(eq=False) class SendStreamWrapper(abc.ByteSendStream): _stream: trio.abc.SendStream async def send(self, item: bytes) -> 
None: try: await self._stream.send_all(item) except trio.ClosedResourceError as exc: raise ClosedResourceError from exc.__cause__ except trio.BrokenResourceError as exc: raise BrokenResourceError from exc.__cause__ async def aclose(self) -> None: await self._stream.aclose() @dataclass(eq=False) class Process(abc.Process): _process: trio.Process _stdin: Optional[abc.ByteSendStream] _stdout: Optional[abc.ByteReceiveStream] _stderr: Optional[abc.ByteReceiveStream] async def aclose(self) -> None: if self._stdin: await self._stdin.aclose() if self._stdout: await self._stdout.aclose() if self._stderr: await self._stderr.aclose() await self.wait() async def wait(self) -> int: return await self._process.wait() def terminate(self) -> None: self._process.terminate() def kill(self) -> None: self._process.kill() def send_signal(self, signal: Signals) -> None: self._process.send_signal(signal) @property def pid(self) -> int: return self._process.pid @property def returncode(self) -> Optional[int]: return self._process.returncode @property def stdin(self) -> Optional[abc.ByteSendStream]: return self._stdin @property def stdout(self) -> Optional[abc.ByteReceiveStream]: return self._stdout @property def stderr(self) -> Optional[abc.ByteReceiveStream]: return self._stderr async def open_process(command: Union[str, Sequence[str]], *, shell: bool, stdin: int, stdout: int, stderr: int, cwd: Union[str, bytes, PathLike, None] = None, env: Optional[Mapping[str, str]] = None, start_new_session: bool = False) -> Process: process = await trio_open_process(command, stdin=stdin, stdout=stdout, stderr=stderr, shell=shell, cwd=cwd, env=env, start_new_session=start_new_session) stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None return Process(process, stdin_stream, stdout_stream, stderr_stream) class _ProcessPoolShutdownInstrument(trio.abc.Instrument): def after_run(self) -> None: super().after_run() current_default_worker_process_limiter: RunVar = RunVar( 'current_default_worker_process_limiter') async def _shutdown_process_pool(workers: Set[Process]) -> None: process: Process try: await sleep(math.inf) except trio.Cancelled: for process in workers: if process.returncode is None: process.kill() with CancelScope(shield=True): for process in workers: await process.aclose() def setup_process_pool_exit_at_shutdown(workers: Set[Process]) -> None: trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers) # # Sockets and networking # class _TrioSocketMixin(Generic[T_SockAddr]): def __init__(self, trio_socket: TrioSocketType) -> None: self._trio_socket = trio_socket self._closed = False def _check_closed(self) -> None: if self._closed: raise ClosedResourceError if self._trio_socket.fileno() < 0: raise BrokenResourceError @property def _raw_socket(self) -> socket.socket: return self._trio_socket._sock # type: ignore[attr-defined] async def aclose(self) -> None: if self._trio_socket.fileno() >= 0: self._closed = True self._trio_socket.close() def _convert_socket_error(self, exc: BaseException) -> 'NoReturn': if isinstance(exc, trio.ClosedResourceError): raise ClosedResourceError from exc elif self._trio_socket.fileno() < 0 and self._closed: raise ClosedResourceError from None elif isinstance(exc, OSError): raise BrokenResourceError from exc else: raise exc class SocketStream(_TrioSocketMixin, abc.SocketStream): def __init__(self, 
trio_socket: TrioSocketType) -> None: super().__init__(trio_socket) self._receive_guard = ResourceGuard('reading from') self._send_guard = ResourceGuard('writing to') async def receive(self, max_bytes: int = 65536) -> bytes: with self._receive_guard: try: data = await self._trio_socket.recv(max_bytes) except BaseException as exc: self._convert_socket_error(exc) if data: return data else: raise EndOfStream async def send(self, item: bytes) -> None: with self._send_guard: view = memoryview(item) while view: try: bytes_sent = await self._trio_socket.send(view) except BaseException as exc: self._convert_socket_error(exc) view = view[bytes_sent:] async def send_eof(self) -> None: self._trio_socket.shutdown(socket.SHUT_WR) class UNIXSocketStream(SocketStream, abc.UNIXSocketStream): async def receive_fds(self, msglen: int, maxfds: int) -> Tuple[bytes, List[int]]: if not isinstance(msglen, int) or msglen < 0: raise ValueError('msglen must be a non-negative integer') if not isinstance(maxfds, int) or maxfds < 1: raise ValueError('maxfds must be a positive integer') fds = array.array("i") await checkpoint() with self._receive_guard: while True: try: message, ancdata, flags, addr = await self._trio_socket.recvmsg( msglen, socket.CMSG_LEN(maxfds * fds.itemsize)) except BaseException as exc: self._convert_socket_error(exc) else: if not message and not ancdata: raise EndOfStream break for cmsg_level, cmsg_type, cmsg_data in ancdata: if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS: raise RuntimeError(f'Received unexpected ancillary data; message = {message!r}, ' f'cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}') fds.frombytes(cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) return message, list(fds) async def send_fds(self, message: bytes, fds: Collection[Union[int, IOBase]]) -> None: if not message: raise ValueError('message must not be empty') if not fds: raise ValueError('fds must not be empty') filenos: List[int] = [] for fd in fds: if isinstance(fd, int): filenos.append(fd) elif isinstance(fd, IOBase): filenos.append(fd.fileno()) fdarray = array.array("i", filenos) await checkpoint() with self._send_guard: while True: try: await self._trio_socket.sendmsg( [message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, # type: ignore[list-item] fdarray)] ) break except BaseException as exc: self._convert_socket_error(exc) class TCPSocketListener(_TrioSocketMixin, abc.SocketListener): def __init__(self, raw_socket: socket.socket): super().__init__(trio.socket.from_stdlib_socket(raw_socket)) self._accept_guard = ResourceGuard('accepting connections from') async def accept(self) -> SocketStream: with self._accept_guard: try: trio_socket, _addr = await self._trio_socket.accept() except BaseException as exc: self._convert_socket_error(exc) trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) return SocketStream(trio_socket) class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener): def __init__(self, raw_socket: socket.socket): super().__init__(trio.socket.from_stdlib_socket(raw_socket)) self._accept_guard = ResourceGuard('accepting connections from') async def accept(self) -> UNIXSocketStream: with self._accept_guard: try: trio_socket, _addr = await self._trio_socket.accept() except BaseException as exc: self._convert_socket_error(exc) return UNIXSocketStream(trio_socket) class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket): def __init__(self, trio_socket: TrioSocketType) -> None: super().__init__(trio_socket) self._receive_guard = 
ResourceGuard('reading from') self._send_guard = ResourceGuard('writing to') async def receive(self) -> Tuple[bytes, IPSockAddrType]: with self._receive_guard: try: data, addr = await self._trio_socket.recvfrom(65536) return data, convert_ipv6_sockaddr(addr) except BaseException as exc: self._convert_socket_error(exc) async def send(self, item: UDPPacketType) -> None: with self._send_guard: try: await self._trio_socket.sendto(*item) except BaseException as exc: self._convert_socket_error(exc) class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket): def __init__(self, trio_socket: TrioSocketType) -> None: super().__init__(trio_socket) self._receive_guard = ResourceGuard('reading from') self._send_guard = ResourceGuard('writing to') async def receive(self) -> bytes: with self._receive_guard: try: return await self._trio_socket.recv(65536) except BaseException as exc: self._convert_socket_error(exc) async def send(self, item: bytes) -> None: with self._send_guard: try: await self._trio_socket.send(item) except BaseException as exc: self._convert_socket_error(exc) async def connect_tcp(host: str, port: int, local_address: Optional[IPSockAddrType] = None) -> SocketStream: family = socket.AF_INET6 if ':' in host else socket.AF_INET trio_socket = trio.socket.socket(family) trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if local_address: await trio_socket.bind(local_address) try: await trio_socket.connect((host, port)) except BaseException: trio_socket.close() raise return SocketStream(trio_socket) async def connect_unix(path: str) -> UNIXSocketStream: trio_socket = trio.socket.socket(socket.AF_UNIX) try: await trio_socket.connect(path) except BaseException: trio_socket.close() raise return UNIXSocketStream(trio_socket) async def create_udp_socket( family: socket.AddressFamily, local_address: Optional[IPSockAddrType], remote_address: Optional[IPSockAddrType], reuse_port: bool ) -> Union[UDPSocket, ConnectedUDPSocket]: trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM) if reuse_port: trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) if local_address: await trio_socket.bind(local_address) if remote_address: await trio_socket.connect(remote_address) return ConnectedUDPSocket(trio_socket) else: return UDPSocket(trio_socket) getaddrinfo = trio.socket.getaddrinfo getnameinfo = trio.socket.getnameinfo async def wait_socket_readable(sock: socket.socket) -> None: try: await wait_readable(sock) except trio.ClosedResourceError as exc: raise ClosedResourceError().with_traceback(exc.__traceback__) from None except trio.BusyResourceError: raise BusyResourceError('reading from') from None async def wait_socket_writable(sock: socket.socket) -> None: try: await wait_writable(sock) except trio.ClosedResourceError as exc: raise ClosedResourceError().with_traceback(exc.__traceback__) from None except trio.BusyResourceError: raise BusyResourceError('writing to') from None # # Synchronization # class Event(BaseEvent): def __new__(cls) -> 'Event': return object.__new__(cls) def __init__(self) -> None: self.__original = trio.Event() def is_set(self) -> bool: return self.__original.is_set() async def wait(self) -> None: return await self.__original.wait() def statistics(self) -> EventStatistics: orig_statistics = self.__original.statistics() return EventStatistics(tasks_waiting=orig_statistics.tasks_waiting) def set(self) -> DeprecatedAwaitable: self.__original.set() return DeprecatedAwaitable(self.set) class 
CapacityLimiter(BaseCapacityLimiter): def __new__(cls, *args: object, **kwargs: object) -> "CapacityLimiter": return object.__new__(cls) def __init__(self, *args: Any, original: Optional[trio.CapacityLimiter] = None) -> None: self.__original = original or trio.CapacityLimiter(*args) async def __aenter__(self) -> None: return await self.__original.__aenter__() async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: return await self.__original.__aexit__(exc_type, exc_val, exc_tb) @property def total_tokens(self) -> float: return self.__original.total_tokens @total_tokens.setter def total_tokens(self, value: float) -> None: self.__original.total_tokens = value @property def borrowed_tokens(self) -> int: return self.__original.borrowed_tokens @property def available_tokens(self) -> float: return self.__original.available_tokens def acquire_nowait(self) -> DeprecatedAwaitable: self.__original.acquire_nowait() return DeprecatedAwaitable(self.acquire_nowait) def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable: self.__original.acquire_on_behalf_of_nowait(borrower) return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait) async def acquire(self) -> None: await self.__original.acquire() async def acquire_on_behalf_of(self, borrower: object) -> None: await self.__original.acquire_on_behalf_of(borrower) def release(self) -> None: return self.__original.release() def release_on_behalf_of(self, borrower: object) -> None: return self.__original.release_on_behalf_of(borrower) def statistics(self) -> CapacityLimiterStatistics: orig = self.__original.statistics() return CapacityLimiterStatistics( borrowed_tokens=orig.borrowed_tokens, total_tokens=orig.total_tokens, borrowers=orig.borrowers, tasks_waiting=orig.tasks_waiting) _capacity_limiter_wrapper: RunVar = RunVar('_capacity_limiter_wrapper') def current_default_thread_limiter() -> CapacityLimiter: try: return _capacity_limiter_wrapper.get() except LookupError: limiter = CapacityLimiter(original=trio.to_thread.current_default_thread_limiter()) _capacity_limiter_wrapper.set(limiter) return limiter # # Signal handling # class _SignalReceiver(DeprecatedAsyncContextManager[T]): def __init__(self, cm: ContextManager[T]): self._cm = cm def __enter__(self) -> T: return self._cm.__enter__() def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: return self._cm.__exit__(exc_type, exc_val, exc_tb) def open_signal_receiver(*signals: Signals) -> _SignalReceiver: cm = trio.open_signal_receiver(*signals) return _SignalReceiver(cm) # # Testing and debugging # def get_current_task() -> TaskInfo: task = trio_lowlevel.current_task() parent_id = None if task.parent_nursery and task.parent_nursery.parent_task: parent_id = id(task.parent_nursery.parent_task) return TaskInfo(id(task), parent_id, task.name, task.coro) def get_running_tasks() -> List[TaskInfo]: root_task = trio_lowlevel.current_root_task() task_infos = [TaskInfo(id(root_task), None, root_task.name, root_task.coro)] nurseries = root_task.child_nurseries while nurseries: new_nurseries: List[trio.Nursery] = [] for nursery in nurseries: for task in nursery.child_tasks: task_infos.append( TaskInfo(id(task), id(nursery.parent_task), task.name, task.coro)) new_nurseries.extend(task.child_nurseries) nurseries = new_nurseries return task_infos def wait_all_tasks_blocked() -> Awaitable[None]: import 
trio.testing return trio.testing.wait_all_tasks_blocked() class TestRunner(abc.TestRunner): def __init__(self, **options: Any) -> None: from collections import deque from queue import Queue self._call_queue: "Queue[Callable[..., object]]" = Queue() self._result_queue: Deque[Outcome] = deque() self._stop_event: Optional[trio.Event] = None self._nursery: Optional[trio.Nursery] = None self._options = options async def _trio_main(self) -> None: self._stop_event = trio.Event() async with trio.open_nursery() as self._nursery: await self._stop_event.wait() async def _call_func(self, func: Callable[..., Awaitable[object]], args: tuple, kwargs: dict) -> None: try: retval = await func(*args, **kwargs) except BaseException as exc: self._result_queue.append(Error(exc)) else: self._result_queue.append(Value(retval)) def _main_task_finished(self, outcome: object) -> None: self._nursery = None def close(self) -> None: if self._stop_event: self._stop_event.set() while self._nursery is not None: self._call_queue.get()() def call(self, func: Callable[..., Awaitable[T_Retval]], *args: object, **kwargs: object) -> T_Retval: if self._nursery is None: trio.lowlevel.start_guest_run( self._trio_main, run_sync_soon_threadsafe=self._call_queue.put, done_callback=self._main_task_finished, **self._options) while self._nursery is None: self._call_queue.get()() self._nursery.start_soon(self._call_func, func, args, kwargs) while not self._result_queue: self._call_queue.get()() outcome = self._result_queue.pop() return outcome.unwrap() anyio-3.5.0/src/anyio/_core/000077500000000000000000000000001416724134300156405ustar00rootroot00000000000000anyio-3.5.0/src/anyio/_core/__init__.py000066400000000000000000000000001416724134300177370ustar00rootroot00000000000000anyio-3.5.0/src/anyio/_core/_compat.py000066400000000000000000000130441416724134300176360ustar00rootroot00000000000000from abc import ABCMeta, abstractmethod from contextlib import AbstractContextManager from types import TracebackType from typing import ( TYPE_CHECKING, Any, AsyncContextManager, Callable, ContextManager, Generator, Generic, Iterable, List, Optional, Tuple, Type, TypeVar, Union, overload) from warnings import warn if TYPE_CHECKING: from ._testing import TaskInfo else: TaskInfo = object T = TypeVar('T') AnyDeprecatedAwaitable = Union['DeprecatedAwaitable', 'DeprecatedAwaitableFloat', 'DeprecatedAwaitableList[T]', TaskInfo] @overload async def maybe_async(__obj: TaskInfo) -> TaskInfo: ... @overload async def maybe_async(__obj: 'DeprecatedAwaitableFloat') -> float: ... @overload async def maybe_async(__obj: 'DeprecatedAwaitableList[T]') -> List[T]: ... @overload async def maybe_async(__obj: 'DeprecatedAwaitable') -> None: ... async def maybe_async(__obj: 'AnyDeprecatedAwaitable[T]') -> Union[TaskInfo, float, List[T], None]: """ Await on the given object if necessary. This function is intended to bridge the gap between AnyIO 2.x and 3.x where some functions and methods were converted from coroutine functions into regular functions. Do **not** try to use this for any other purpose! :return: the result of awaiting on the object if coroutine, or the object itself otherwise .. 
versionadded:: 2.2

    """
    return __obj._unwrap()


class _ContextManagerWrapper:
    def __init__(self, cm: ContextManager[T]):
        self._cm = cm

    async def __aenter__(self) -> T:
        return self._cm.__enter__()

    async def __aexit__(self, exc_type: Optional[Type[BaseException]],
                        exc_val: Optional[BaseException],
                        exc_tb: Optional[TracebackType]) -> Optional[bool]:
        return self._cm.__exit__(exc_type, exc_val, exc_tb)


def maybe_async_cm(cm: Union[ContextManager[T], AsyncContextManager[T]]) -> AsyncContextManager[T]:
    """
    Wrap a regular context manager as an async one if necessary.

    This function is intended to bridge the gap between AnyIO 2.x and 3.x where some functions
    and methods were changed to return regular context managers instead of async ones.

    :param cm: a regular or async context manager
    :return: an async context manager

    .. versionadded:: 2.2

    """
    if not isinstance(cm, AbstractContextManager):
        raise TypeError('Given object is not a context manager')

    return _ContextManagerWrapper(cm)


def _warn_deprecation(awaitable: 'AnyDeprecatedAwaitable[Any]', stacklevel: int = 1) -> None:
    warn(f'Awaiting on {awaitable._name}() is deprecated. Use "await '
         f'anyio.maybe_async({awaitable._name}(...))" if you have to support both AnyIO 2.x '
         f'and 3.x, or just remove the "await" if you are completely migrating to AnyIO 3+.',
         DeprecationWarning, stacklevel=stacklevel + 1)


class DeprecatedAwaitable:
    def __init__(self, func: Callable[..., 'DeprecatedAwaitable']):
        self._name = f'{func.__module__}.{func.__qualname__}'

    def __await__(self) -> Generator[None, None, None]:
        _warn_deprecation(self)
        if False:
            yield

    def __reduce__(self) -> Tuple[Type[None], Tuple[()]]:
        return type(None), ()

    def _unwrap(self) -> None:
        return None


class DeprecatedAwaitableFloat(float):
    def __new__(
        cls, x: float, func: Callable[..., 'DeprecatedAwaitableFloat']
    ) -> 'DeprecatedAwaitableFloat':
        return super().__new__(cls, x)

    def __init__(self, x: float, func: Callable[..., 'DeprecatedAwaitableFloat']):
        self._name = f'{func.__module__}.{func.__qualname__}'

    def __await__(self) -> Generator[None, None, float]:
        _warn_deprecation(self)
        if False:
            yield

        return float(self)

    def __reduce__(self) -> Tuple[Type[float], Tuple[float]]:
        return float, (float(self),)

    def _unwrap(self) -> float:
        return float(self)


class DeprecatedAwaitableList(List[T]):
    def __init__(self, iterable: Iterable[T] = (), *,
                 func: Callable[..., 'DeprecatedAwaitableList[T]']):
        super().__init__(iterable)
        self._name = f'{func.__module__}.{func.__qualname__}'

    def __await__(self) -> Generator[None, None, List[T]]:
        _warn_deprecation(self)
        if False:
            yield

        return list(self)

    def __reduce__(self) -> Tuple[Type[List[T]], Tuple[List[T]]]:
        return list, (list(self),)

    def _unwrap(self) -> List[T]:
        return list(self)


class DeprecatedAsyncContextManager(Generic[T], metaclass=ABCMeta):
    @abstractmethod
    def __enter__(self) -> T:
        pass

    @abstractmethod
    def __exit__(self, exc_type: Optional[Type[BaseException]],
                 exc_val: Optional[BaseException],
                 exc_tb: Optional[TracebackType]) -> Optional[bool]:
        pass

    async def __aenter__(self) -> T:
        warn(f'Using {self.__class__.__name__} as an async context manager has been deprecated. 
' f'Use "async with anyio.maybe_async_cm(yourcontextmanager) as foo:" if you have to ' f'support both AnyIO 2.x and 3.x, or just remove the "async" from "async with" if ' f'you are completely migrating to AnyIO 3+.', DeprecationWarning) return self.__enter__() async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: return self.__exit__(exc_type, exc_val, exc_tb) anyio-3.5.0/src/anyio/_core/_eventloop.py000066400000000000000000000077271416724134300204010ustar00rootroot00000000000000import math import sys import threading from contextlib import contextmanager from importlib import import_module from typing import Any, Callable, Coroutine, Dict, Generator, Optional, Tuple, Type, TypeVar import sniffio # This must be updated when new backends are introduced from ._compat import DeprecatedAwaitableFloat BACKENDS = 'asyncio', 'trio' T_Retval = TypeVar('T_Retval') threadlocals = threading.local() def run(func: Callable[..., Coroutine[Any, Any, T_Retval]], *args: object, backend: str = 'asyncio', backend_options: Optional[Dict[str, Any]] = None) -> T_Retval: """ Run the given coroutine function in an asynchronous event loop. The current thread must not be already running an event loop. :param func: a coroutine function :param args: positional arguments to ``func`` :param backend: name of the asynchronous event loop implementation – currently either ``asyncio`` or ``trio`` :param backend_options: keyword arguments to call the backend ``run()`` implementation with (documented :ref:`here `) :return: the return value of the coroutine function :raises RuntimeError: if an asynchronous event loop is already running in this thread :raises LookupError: if the named backend is not found """ try: asynclib_name = sniffio.current_async_library() except sniffio.AsyncLibraryNotFoundError: pass else: raise RuntimeError(f'Already running {asynclib_name} in this thread') try: asynclib = import_module(f'..._backends._{backend}', package=__name__) except ImportError as exc: raise LookupError(f'No such backend: {backend}') from exc token = None if sniffio.current_async_library_cvar.get(None) is None: # Since we're in control of the event loop, we can cache the name of the async library token = sniffio.current_async_library_cvar.set(backend) try: backend_options = backend_options or {} return asynclib.run(func, *args, **backend_options) finally: if token: sniffio.current_async_library_cvar.reset(token) async def sleep(delay: float) -> None: """ Pause the current task for the specified duration. :param delay: the duration, in seconds """ return await get_asynclib().sleep(delay) async def sleep_forever() -> None: """ Pause the current task until it's cancelled. This is a shortcut for ``sleep(math.inf)``. .. versionadded:: 3.1 """ await sleep(math.inf) async def sleep_until(deadline: float) -> None: """ Pause the current task until the given time. :param deadline: the absolute time to wake up at (according to the internal monotonic clock of the event loop) .. versionadded:: 3.1 """ now = current_time() await sleep(max(deadline - now, 0)) def current_time() -> DeprecatedAwaitableFloat: """ Return the current value of the event loop's internal clock. 
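For example, one can pace a periodic task against the event loop clock (an illustrative
    sketch; ``sleep_until()`` is defined earlier in this module)::

        async def ticker(interval: float) -> None:
            next_tick = current_time()
            while True:
                ...  # do the periodic work here
                next_tick += interval
                await sleep_until(next_tick)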
:return: the clock value (seconds) """ return DeprecatedAwaitableFloat(get_asynclib().current_time(), current_time) def get_all_backends() -> Tuple[str, ...]: """Return a tuple of the names of all built-in backends.""" return BACKENDS def get_cancelled_exc_class() -> Type[BaseException]: """Return the current async library's cancellation exception class.""" return get_asynclib().CancelledError # # Private API # @contextmanager def claim_worker_thread(backend: str) -> Generator[Any, None, None]: module = sys.modules['anyio._backends._' + backend] threadlocals.current_async_module = module try: yield finally: del threadlocals.current_async_module def get_asynclib(asynclib_name: Optional[str] = None) -> Any: if asynclib_name is None: asynclib_name = sniffio.current_async_library() modulename = 'anyio._backends._' + asynclib_name try: return sys.modules[modulename] except KeyError: return import_module(modulename) anyio-3.5.0/src/anyio/_core/_exceptions.py000066400000000000000000000054151416724134300205370ustar00rootroot00000000000000from traceback import format_exception from typing import List class BrokenResourceError(Exception): """ Raised when trying to use a resource that has been rendered unusable due to external causes (e.g. a send stream whose peer has disconnected). """ class BrokenWorkerProcess(Exception): """ Raised by :func:`run_sync_in_process` if the worker process terminates abruptly or otherwise misbehaves. """ class BusyResourceError(Exception): """Raised when two tasks are trying to read from or write to the same resource concurrently.""" def __init__(self, action: str): super().__init__(f'Another task is already {action} this resource') class ClosedResourceError(Exception): """Raised when trying to use a resource that has been closed.""" class DelimiterNotFound(Exception): """ Raised during :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the maximum number of bytes has been read without the delimiter being found. """ def __init__(self, max_bytes: int) -> None: super().__init__(f'The delimiter was not found among the first {max_bytes} bytes') class EndOfStream(Exception): """Raised when trying to read from a stream that has been closed from the other end.""" class ExceptionGroup(BaseException): """ Raised when multiple exceptions have been raised in a task group. :var ~typing.Sequence[BaseException] exceptions: the sequence of exceptions raised together """ SEPARATOR = '----------------------------\n' exceptions: List[BaseException] def __str__(self) -> str: tracebacks = [''.join(format_exception(type(exc), exc, exc.__traceback__)) for exc in self.exceptions] return f'{len(self.exceptions)} exceptions were raised in the task group:\n' \ f'{self.SEPARATOR}{self.SEPARATOR.join(tracebacks)}' def __repr__(self) -> str: exception_reprs = ', '.join(repr(exc) for exc in self.exceptions) return f'<{self.__class__.__name__}: {exception_reprs}>' class IncompleteRead(Exception): """ Raised during :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the connection is closed before the requested amount of bytes has been read. """ def __init__(self) -> None: super().__init__('The stream was closed before the read operation could be completed') class TypedAttributeLookupError(LookupError): """ Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute is not found and no default value has been given. 
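For example (an illustrative sketch; ``stream`` is assumed to be a connected
    :class:`~anyio.abc.SocketStream`)::

        from anyio.abc import SocketAttribute

        try:
            peer = stream.extra(SocketAttribute.remote_address)
        except TypedAttributeLookupError:
            peer = None  # this stream does not provide that attribute

    Passing a default value to ``extra()`` avoids the exception entirely.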
""" class WouldBlock(Exception): """Raised by ``X_nowait`` functions if ``X()`` would block.""" anyio-3.5.0/src/anyio/_core/_fileio.py000066400000000000000000000432161416724134300176260ustar00rootroot00000000000000import os import pathlib import sys from dataclasses import dataclass from functools import partial from os import PathLike from typing import ( IO, TYPE_CHECKING, Any, AnyStr, AsyncIterator, Callable, Generic, Iterable, Iterator, List, Optional, Sequence, Tuple, Union, cast, overload) from .. import to_thread from ..abc import AsyncResource if sys.version_info >= (3, 8): from typing import Final else: from typing_extensions import Final if TYPE_CHECKING: from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer else: ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object class AsyncFile(AsyncResource, Generic[AnyStr]): """ An asynchronous file object. This class wraps a standard file object and provides async friendly versions of the following blocking methods (where available on the original file object): * read * read1 * readline * readlines * readinto * readinto1 * write * writelines * truncate * seek * tell * flush All other methods are directly passed through. This class supports the asynchronous context manager protocol which closes the underlying file at the end of the context block. This class also supports asynchronous iteration:: async with await open_file(...) as f: async for line in f: print(line) """ def __init__(self, fp: IO[AnyStr]) -> None: self._fp: Any = fp def __getattr__(self, name: str) -> object: return getattr(self._fp, name) @property def wrapped(self) -> IO[AnyStr]: """The wrapped file object.""" return self._fp async def __aiter__(self) -> AsyncIterator[AnyStr]: while True: line = await self.readline() if line: yield line else: break async def aclose(self) -> None: return await to_thread.run_sync(self._fp.close) async def read(self, size: int = -1) -> AnyStr: return await to_thread.run_sync(self._fp.read, size) async def read1(self: 'AsyncFile[bytes]', size: int = -1) -> bytes: return await to_thread.run_sync(self._fp.read1, size) async def readline(self) -> AnyStr: return await to_thread.run_sync(self._fp.readline) async def readlines(self) -> List[AnyStr]: return await to_thread.run_sync(self._fp.readlines) async def readinto(self: 'AsyncFile[bytes]', b: WriteableBuffer) -> bytes: return await to_thread.run_sync(self._fp.readinto, b) async def readinto1(self: 'AsyncFile[bytes]', b: WriteableBuffer) -> bytes: return await to_thread.run_sync(self._fp.readinto1, b) @overload async def write(self: 'AsyncFile[bytes]', b: ReadableBuffer) -> int: ... @overload async def write(self: 'AsyncFile[str]', b: str) -> int: ... async def write(self, b: Union[ReadableBuffer, str]) -> int: return await to_thread.run_sync(self._fp.write, b) @overload async def writelines(self: 'AsyncFile[bytes]', lines: Iterable[ReadableBuffer]) -> None: ... @overload async def writelines(self: 'AsyncFile[str]', lines: Iterable[str]) -> None: ... 
async def writelines(self, lines: Union[Iterable[ReadableBuffer], Iterable[str]]) -> None: return await to_thread.run_sync(self._fp.writelines, lines) async def truncate(self, size: Optional[int] = None) -> int: return await to_thread.run_sync(self._fp.truncate, size) async def seek(self, offset: int, whence: Optional[int] = os.SEEK_SET) -> int: return await to_thread.run_sync(self._fp.seek, offset, whence) async def tell(self) -> int: return await to_thread.run_sync(self._fp.tell) async def flush(self) -> None: return await to_thread.run_sync(self._fp.flush) @overload async def open_file(file: Union[str, 'PathLike[str]', int], mode: OpenBinaryMode, buffering: int = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., newline: Optional[str] = ..., closefd: bool = ..., opener: Optional[Callable[[str, int], int]] = ...) -> AsyncFile[bytes]: ... @overload async def open_file(file: Union[str, 'PathLike[str]', int], mode: OpenTextMode = ..., buffering: int = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., newline: Optional[str] = ..., closefd: bool = ..., opener: Optional[Callable[[str, int], int]] = ...) -> AsyncFile[str]: ... async def open_file(file: Union[str, 'PathLike[str]', int], mode: str = 'r', buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, closefd: bool = True, opener: Optional[Callable[[str, int], int]] = None) -> AsyncFile[Any]: """ Open a file asynchronously. The arguments are exactly the same as for the builtin :func:`open`. :return: an asynchronous file object """ fp = await to_thread.run_sync(open, file, mode, buffering, encoding, errors, newline, closefd, opener) return AsyncFile(fp) def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]: """ Wrap an existing file as an asynchronous file. :param file: an existing file-like object :return: an asynchronous file object """ return AsyncFile(file) @dataclass(eq=False) class _PathIterator(AsyncIterator['Path']): iterator: Iterator['PathLike[str]'] async def __anext__(self) -> 'Path': nextval = await to_thread.run_sync(next, self.iterator, None, cancellable=True) if nextval is None: raise StopAsyncIteration from None return Path(cast('PathLike[str]', nextval)) class Path: """ An asynchronous version of :class:`pathlib.Path`. This class cannot be substituted for :class:`pathlib.Path` or :class:`pathlib.PurePath`, but it is compatible with the :class:`os.PathLike` interface. It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for the deprecated :meth:`~pathlib.Path.link_to` method. Any methods that do disk I/O need to be awaited on. 
These methods are: * :meth:`~pathlib.Path.absolute` * :meth:`~pathlib.Path.chmod` * :meth:`~pathlib.Path.cwd` * :meth:`~pathlib.Path.exists` * :meth:`~pathlib.Path.expanduser` * :meth:`~pathlib.Path.group` * :meth:`~pathlib.Path.hardlink_to` * :meth:`~pathlib.Path.home` * :meth:`~pathlib.Path.is_block_device` * :meth:`~pathlib.Path.is_char_device` * :meth:`~pathlib.Path.is_dir` * :meth:`~pathlib.Path.is_fifo` * :meth:`~pathlib.Path.is_file` * :meth:`~pathlib.Path.is_mount` * :meth:`~pathlib.Path.lchmod` * :meth:`~pathlib.Path.lstat` * :meth:`~pathlib.Path.mkdir` * :meth:`~pathlib.Path.open` * :meth:`~pathlib.Path.owner` * :meth:`~pathlib.Path.read_bytes` * :meth:`~pathlib.Path.read_text` * :meth:`~pathlib.Path.readlink` * :meth:`~pathlib.Path.rename` * :meth:`~pathlib.Path.replace` * :meth:`~pathlib.Path.rmdir` * :meth:`~pathlib.Path.samefile` * :meth:`~pathlib.Path.stat` * :meth:`~pathlib.Path.touch` * :meth:`~pathlib.Path.unlink` * :meth:`~pathlib.Path.write_bytes` * :meth:`~pathlib.Path.write_text` Additionally, the following methods return an async iterator yielding :class:`~.Path` objects: * :meth:`~pathlib.Path.glob` * :meth:`~pathlib.Path.iterdir` * :meth:`~pathlib.Path.rglob` """ __slots__ = '_path', '__weakref__' __weakref__: Any def __init__(self, *args: Union[str, 'PathLike[str]']) -> None: self._path: Final[pathlib.Path] = pathlib.Path(*args) def __fspath__(self) -> str: return self._path.__fspath__() def __str__(self) -> str: return self._path.__str__() def __repr__(self) -> str: return f'{self.__class__.__name__}({self.as_posix()!r})' def __bytes__(self) -> bytes: return self._path.__bytes__() def __hash__(self) -> int: return self._path.__hash__() def __eq__(self, other: object) -> bool: target = other._path if isinstance(other, Path) else other return self._path.__eq__(target) def __lt__(self, other: 'Path') -> bool: target = other._path if isinstance(other, Path) else other return self._path.__lt__(target) def __le__(self, other: 'Path') -> bool: target = other._path if isinstance(other, Path) else other return self._path.__le__(target) def __gt__(self, other: 'Path') -> bool: target = other._path if isinstance(other, Path) else other return self._path.__gt__(target) def __ge__(self, other: 'Path') -> bool: target = other._path if isinstance(other, Path) else other return self._path.__ge__(target) def __truediv__(self, other: Any) -> 'Path': return Path(self._path / other) def __rtruediv__(self, other: Any) -> 'Path': return Path(other) / self @property def parts(self) -> Tuple[str, ...]: return self._path.parts @property def drive(self) -> str: return self._path.drive @property def root(self) -> str: return self._path.root @property def anchor(self) -> str: return self._path.anchor @property def parents(self) -> Sequence['Path']: return tuple(Path(p) for p in self._path.parents) @property def parent(self) -> 'Path': return Path(self._path.parent) @property def name(self) -> str: return self._path.name @property def suffix(self) -> str: return self._path.suffix @property def suffixes(self) -> List[str]: return self._path.suffixes @property def stem(self) -> str: return self._path.stem async def absolute(self) -> 'Path': path = await to_thread.run_sync(self._path.absolute) return Path(path) def as_posix(self) -> str: return self._path.as_posix() def as_uri(self) -> str: return self._path.as_uri() def match(self, path_pattern: str) -> bool: return self._path.match(path_pattern) def is_relative_to(self, *other: Union[str, 'PathLike[str]']) -> bool: try: 
self.relative_to(*other) return True except ValueError: return False async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None: func = partial(os.chmod, follow_symlinks=follow_symlinks) return await to_thread.run_sync(func, self._path, mode) @classmethod async def cwd(cls) -> 'Path': path = await to_thread.run_sync(pathlib.Path.cwd) return cls(path) async def exists(self) -> bool: return await to_thread.run_sync(self._path.exists, cancellable=True) async def expanduser(self) -> 'Path': return Path(await to_thread.run_sync(self._path.expanduser, cancellable=True)) def glob(self, pattern: str) -> AsyncIterator['Path']: gen = self._path.glob(pattern) return _PathIterator(gen) async def group(self) -> str: return await to_thread.run_sync(self._path.group, cancellable=True) async def hardlink_to(self, target: Union[str, pathlib.Path, 'Path']) -> None: if isinstance(target, Path): target = target._path await to_thread.run_sync(os.link, target, self) @classmethod async def home(cls) -> 'Path': home_path = await to_thread.run_sync(pathlib.Path.home) return cls(home_path) def is_absolute(self) -> bool: return self._path.is_absolute() async def is_block_device(self) -> bool: return await to_thread.run_sync(self._path.is_block_device, cancellable=True) async def is_char_device(self) -> bool: return await to_thread.run_sync(self._path.is_char_device, cancellable=True) async def is_dir(self) -> bool: return await to_thread.run_sync(self._path.is_dir, cancellable=True) async def is_fifo(self) -> bool: return await to_thread.run_sync(self._path.is_fifo, cancellable=True) async def is_file(self) -> bool: return await to_thread.run_sync(self._path.is_file, cancellable=True) async def is_mount(self) -> bool: return await to_thread.run_sync(os.path.ismount, self._path, cancellable=True) def is_reserved(self) -> bool: return self._path.is_reserved() async def is_socket(self) -> bool: return await to_thread.run_sync(self._path.is_socket, cancellable=True) async def is_symlink(self) -> bool: return await to_thread.run_sync(self._path.is_symlink, cancellable=True) def iterdir(self) -> AsyncIterator['Path']: gen = self._path.iterdir() return _PathIterator(gen) def joinpath(self, *args: Union[str, 'PathLike[str]']) -> 'Path': return Path(self._path.joinpath(*args)) async def lchmod(self, mode: int) -> None: await to_thread.run_sync(self._path.lchmod, mode) async def lstat(self) -> os.stat_result: return await to_thread.run_sync(self._path.lstat, cancellable=True) async def mkdir(self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False) -> None: await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok) @overload async def open(self, mode: OpenBinaryMode, buffering: int = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., newline: Optional[str] = ...) -> AsyncFile[bytes]: ... @overload async def open(self, mode: OpenTextMode = ..., buffering: int = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., newline: Optional[str] = ...) -> AsyncFile[str]: ... 
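# Example (illustrative, not part of the original class): the async Path mirrors
# pathlib.Path, with the I/O methods awaited:
#
#     from anyio import Path, run
#
#     async def main() -> None:
#         path = Path('example.txt')  # hypothetical file name
#         await path.write_text('hello')
#         print(await path.read_text())
#         async for entry in path.parent.iterdir():
#             print(entry)
#
#     run(main)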
async def open(self, mode: str = 'r', buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None) -> AsyncFile[Any]: fp = await to_thread.run_sync(self._path.open, mode, buffering, encoding, errors, newline) return AsyncFile(fp) async def owner(self) -> str: return await to_thread.run_sync(self._path.owner, cancellable=True) async def read_bytes(self) -> bytes: return await to_thread.run_sync(self._path.read_bytes) async def read_text(self, encoding: Optional[str] = None, errors: Optional[str] = None) -> str: return await to_thread.run_sync(self._path.read_text, encoding, errors) def relative_to(self, *other: Union[str, 'PathLike[str]']) -> 'Path': return Path(self._path.relative_to(*other)) async def readlink(self) -> 'Path': target = await to_thread.run_sync(os.readlink, self._path) return Path(cast(str, target)) async def rename(self, target: Union[str, pathlib.PurePath, 'Path']) -> 'Path': if isinstance(target, Path): target = target._path await to_thread.run_sync(self._path.rename, target) return Path(target) async def replace(self, target: Union[str, pathlib.PurePath, 'Path']) -> 'Path': if isinstance(target, Path): target = target._path await to_thread.run_sync(self._path.replace, target) return Path(target) async def resolve(self, strict: bool = False) -> 'Path': func = partial(self._path.resolve, strict=strict) return Path(await to_thread.run_sync(func, cancellable=True)) def rglob(self, pattern: str) -> AsyncIterator['Path']: gen = self._path.rglob(pattern) return _PathIterator(gen) async def rmdir(self) -> None: await to_thread.run_sync(self._path.rmdir) async def samefile(self, other_path: Union[str, bytes, int, pathlib.Path, 'Path']) -> bool: if isinstance(other_path, Path): other_path = other_path._path return await to_thread.run_sync(self._path.samefile, other_path, cancellable=True) async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result: func = partial(os.stat, follow_symlinks=follow_symlinks) return await to_thread.run_sync(func, self._path, cancellable=True) async def symlink_to(self, target: Union[str, pathlib.Path, 'Path'], target_is_directory: bool = False) -> None: if isinstance(target, Path): target = target._path await to_thread.run_sync(self._path.symlink_to, target, target_is_directory) async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None: await to_thread.run_sync(self._path.touch, mode, exist_ok) async def unlink(self, missing_ok: bool = False) -> None: try: await to_thread.run_sync(self._path.unlink) except FileNotFoundError: if not missing_ok: raise def with_name(self, name: str) -> 'Path': return Path(self._path.with_name(name)) def with_stem(self, stem: str) -> 'Path': return Path(self._path.with_name(stem + self._path.suffix)) def with_suffix(self, suffix: str) -> 'Path': return Path(self._path.with_suffix(suffix)) async def write_bytes(self, data: bytes) -> int: return await to_thread.run_sync(self._path.write_bytes, data) async def write_text(self, data: str, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None) -> int: # Path.write_text() does not support the "newline" parameter before Python 3.10 def sync_write_text() -> int: with self._path.open('w', encoding=encoding, errors=errors, newline=newline) as fp: return fp.write(data) return await to_thread.run_sync(sync_write_text) PathLike.register(Path) anyio-3.5.0/src/anyio/_core/_resources.py000066400000000000000000000006171416724134300203670ustar00rootroot00000000000000from 
..abc import AsyncResource
from ._tasks import CancelScope


async def aclose_forcefully(resource: AsyncResource) -> None:
    """
    Close an asynchronous resource in a cancelled scope.

    Doing this closes the resource without waiting on anything.

    :param resource: the resource to close

    """
    with CancelScope() as scope:
        scope.cancel()
        await resource.aclose()

anyio-3.5.0/src/anyio/_core/_signals.py

from typing import AsyncIterator

from ._compat import DeprecatedAsyncContextManager
from ._eventloop import get_asynclib


def open_signal_receiver(*signals: int) -> DeprecatedAsyncContextManager[AsyncIterator[int]]:
    """
    Start receiving operating system signals.

    :param signals: signals to receive (e.g. ``signal.SIGINT``)
    :return: an asynchronous context manager for an asynchronous iterator which yields signal
        numbers

    .. warning:: Windows does not support signals natively so it is best to avoid relying on
        this in cross-platform applications.

    .. warning:: On asyncio, this permanently replaces any previous signal handler for the given
        signals, as set via :meth:`~asyncio.loop.add_signal_handler`.

    """
    return get_asynclib().open_signal_receiver(*signals)

anyio-3.5.0/src/anyio/_core/_sockets.py

import socket
import ssl
import sys
from ipaddress import IPv6Address, ip_address
from os import PathLike, chmod
from pathlib import Path
from socket import AddressFamily, SocketKind
from typing import Awaitable, List, Optional, Tuple, Union, cast, overload

from .. import to_thread
from ..abc import (
    ConnectedUDPSocket, IPAddressType, IPSockAddrType, SocketListener, SocketStream, UDPSocket,
    UNIXSocketStream)
from ..streams.stapled import MultiListener
from ..streams.tls import TLSStream
from ._eventloop import get_asynclib
from ._resources import aclose_forcefully
from ._synchronization import Event
from ._tasks import create_task_group, move_on_after

if sys.version_info >= (3, 8):
    from typing import Literal
else:
    from typing_extensions import Literal

IPPROTO_IPV6 = getattr(socket, 'IPPROTO_IPV6', 41)  # https://bugs.python.org/issue29515

GetAddrInfoReturnType = List[Tuple[AddressFamily, SocketKind, int, str, Tuple[str, int]]]
AnyIPAddressFamily = Literal[AddressFamily.AF_UNSPEC, AddressFamily.AF_INET,
                             AddressFamily.AF_INET6]
IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6]


# tls_hostname given
@overload
async def connect_tcp(
    remote_host: IPAddressType, remote_port: int, *, local_host: Optional[IPAddressType] = ...,
    ssl_context: Optional[ssl.SSLContext] = ..., tls_standard_compatible: bool = ...,
    tls_hostname: str, happy_eyeballs_delay: float = ...
) -> TLSStream:
    ...


# ssl_context given
@overload
async def connect_tcp(
    remote_host: IPAddressType, remote_port: int, *, local_host: Optional[IPAddressType] = ...,
    ssl_context: ssl.SSLContext, tls_standard_compatible: bool = ...,
    tls_hostname: Optional[str] = ..., happy_eyeballs_delay: float = ...
) -> TLSStream:
    ...


# tls=True
@overload
async def connect_tcp(
    remote_host: IPAddressType, remote_port: int, *, local_host: Optional[IPAddressType] = ...,
    tls: Literal[True], ssl_context: Optional[ssl.SSLContext] = ...,
    tls_standard_compatible: bool = ..., tls_hostname: Optional[str] = ...,
    happy_eyeballs_delay: float = ...
) -> TLSStream:
    ...
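# Example (illustrative, not part of the original module): all of the overloads
# here funnel into the implementation below; typical TLS usage looks like this
# (example.org is a placeholder host):
#
#     import anyio
#
#     async def main() -> None:
#         async with await anyio.connect_tcp(
#                 'example.org', 443, tls=True, tls_standard_compatible=False) as stream:
#             await stream.send(b'HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n')
#             print(await stream.receive())
#
#     anyio.run(main)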
# tls=False
@overload
async def connect_tcp(
    remote_host: IPAddressType, remote_port: int, *, local_host: Optional[IPAddressType] = ...,
    tls: Literal[False], ssl_context: Optional[ssl.SSLContext] = ...,
    tls_standard_compatible: bool = ..., tls_hostname: Optional[str] = ...,
    happy_eyeballs_delay: float = ...
) -> SocketStream:
    ...


# No TLS arguments
@overload
async def connect_tcp(
    remote_host: IPAddressType, remote_port: int, *, local_host: Optional[IPAddressType] = ...,
    happy_eyeballs_delay: float = ...
) -> SocketStream:
    ...


async def connect_tcp(
    remote_host: IPAddressType, remote_port: int, *, local_host: Optional[IPAddressType] = None,
    tls: bool = False, ssl_context: Optional[ssl.SSLContext] = None,
    tls_standard_compatible: bool = True, tls_hostname: Optional[str] = None,
    happy_eyeballs_delay: float = 0.25
) -> Union[SocketStream, TLSStream]:
    """
    Connect to a host using the TCP protocol.

    This function implements the stateless version of the Happy Eyeballs algorithm (RFC 6555).
    If ``remote_host`` is a host name that resolves to multiple IP addresses, each one is tried
    until one connection attempt succeeds. If the first attempt does not connect within 250
    milliseconds, a second attempt is started using the next address in the list, and so on.
    On IPv6 enabled systems, an IPv6 address (if available) is tried first.

    When the connection has been established, a TLS handshake will be done if either
    ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.

    :param remote_host: the IP address or host name to connect to
    :param remote_port: port on the target host to connect to
    :param local_host: the interface address or name to bind the socket to before connecting
    :param tls: ``True`` to do a TLS handshake with the connected stream and return a
        :class:`~anyio.streams.tls.TLSStream` instead
    :param ssl_context: the SSL context object to use (if omitted, a default context is created)
    :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake before
        closing the stream and requires that the server does this as well. Otherwise,
        :exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
        Some protocols, such as HTTP, require this option to be ``False``.
        See :meth:`~ssl.SSLContext.wrap_socket` for details.
    :param tls_hostname: host name to check the server certificate against (defaults to the
        value of ``remote_host``)
    :param happy_eyeballs_delay: delay (in seconds) before starting the next connection attempt
    :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
    :raises OSError: if the connection attempt fails

    """
    # Placed here due to https://github.com/python/mypy/issues/7057
    connected_stream: Optional[SocketStream] = None

    async def try_connect(remote_host: str, event: Event) -> None:
        nonlocal connected_stream
        try:
            stream = await asynclib.connect_tcp(remote_host, remote_port, local_address)
        except OSError as exc:
            oserrors.append(exc)
            return
        else:
            if connected_stream is None:
                connected_stream = stream
                tg.cancel_scope.cancel()
            else:
                await stream.aclose()
        finally:
            event.set()

    asynclib = get_asynclib()
    local_address: Optional[IPSockAddrType] = None
    family = socket.AF_UNSPEC
    if local_host:
        gai_res = await getaddrinfo(str(local_host), None)
        family, *_, local_address = gai_res[0]

    target_host = str(remote_host)
    try:
        addr_obj = ip_address(remote_host)
    except ValueError:
        # getaddrinfo() will raise an exception if name resolution fails
        gai_res = await getaddrinfo(target_host, remote_port, family=family,
                                    type=socket.SOCK_STREAM)

        # Organize the list so that the first address is an IPv6 address (if available) and the
        # second one is an IPv4 address. The rest can be in whatever order.
        v6_found = v4_found = False
        target_addrs: List[Tuple[socket.AddressFamily, str]] = []
        for af, *rest, sa in gai_res:
            if af == socket.AF_INET6 and not v6_found:
                v6_found = True
                target_addrs.insert(0, (af, sa[0]))
            elif af == socket.AF_INET and not v4_found and v6_found:
                v4_found = True
                target_addrs.insert(1, (af, sa[0]))
            else:
                target_addrs.append((af, sa[0]))
    else:
        if isinstance(addr_obj, IPv6Address):
            target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
        else:
            target_addrs = [(socket.AF_INET, addr_obj.compressed)]

    oserrors: List[OSError] = []
    async with create_task_group() as tg:
        for i, (af, addr) in enumerate(target_addrs):
            event = Event()
            tg.start_soon(try_connect, addr, event)
            with move_on_after(happy_eyeballs_delay):
                await event.wait()

    if connected_stream is None:
        cause = oserrors[0] if len(oserrors) == 1 else asynclib.ExceptionGroup(oserrors)
        raise OSError('All connection attempts failed') from cause

    if tls or tls_hostname or ssl_context:
        try:
            return await TLSStream.wrap(connected_stream, server_side=False,
                                        hostname=tls_hostname or str(remote_host),
                                        ssl_context=ssl_context,
                                        standard_compatible=tls_standard_compatible)
        except BaseException:
            await aclose_forcefully(connected_stream)
            raise

    return connected_stream


async def connect_unix(path: Union[str, 'PathLike[str]']) -> UNIXSocketStream:
    """
    Connect to the given UNIX socket.

    Not available on Windows.

    :param path: path to the socket
    :return: a socket stream object

    """
    path = str(Path(path))
    return await get_asynclib().connect_unix(path)


async def create_tcp_listener(
    *, local_host: Optional[IPAddressType] = None, local_port: int = 0,
    family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC, backlog: int = 65536,
    reuse_port: bool = False
) -> MultiListener[SocketStream]:
    """
    Create a TCP socket listener.

    :param local_port: port number to listen on
    :param local_host: IP address of the interface to listen on. If omitted, listen on all IPv4
        and IPv6 interfaces. To listen on all interfaces on a specific address family, use
        ``0.0.0.0`` for IPv4 or ``::`` for IPv6.
:param family: address family (used if ``local_host`` was omitted) :param backlog: maximum number of queued incoming connections (up to a maximum of 2**16, or 65536) :param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port (not supported on Windows) :return: a multi-listener object wrapping the created listeners """ asynclib = get_asynclib() backlog = min(backlog, 65536) local_host = str(local_host) if local_host is not None else None gai_res = await getaddrinfo(local_host, local_port, family=family, # type: ignore[arg-type] type=socket.SOCK_STREAM, flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG) listeners: List[SocketListener] = [] try: # The set() is here to work around a glibc bug: # https://sourceware.org/bugzilla/show_bug.cgi?id=14969 for fam, *_, sockaddr in sorted(set(gai_res)): raw_socket = socket.socket(fam) raw_socket.setblocking(False) # For Windows, enable exclusive address use. For others, enable address reuse. if sys.platform == 'win32': raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1) else: raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if reuse_port: raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) # If only IPv6 was requested, disable dual stack operation if fam == socket.AF_INET6: raw_socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) raw_socket.bind(sockaddr) raw_socket.listen(backlog) listener = asynclib.TCPSocketListener(raw_socket) listeners.append(listener) except BaseException: for listener in listeners: await listener.aclose() raise return MultiListener(listeners) async def create_unix_listener( path: Union[str, 'PathLike[str]'], *, mode: Optional[int] = None, backlog: int = 65536) -> SocketListener: """ Create a UNIX socket listener. Not available on Windows. :param path: path of the socket :param mode: permissions to set on the socket :param backlog: maximum number of queued incoming connections (up to a maximum of 2**16, or 65536) :return: a listener object .. versionchanged:: 3.0 If a socket already exists on the file system in the given path, it will be removed first. """ path_str = str(path) path = Path(path) if path.is_socket(): path.unlink() backlog = min(backlog, 65536) raw_socket = socket.socket(socket.AF_UNIX) raw_socket.setblocking(False) try: await to_thread.run_sync(raw_socket.bind, path_str, cancellable=True) if mode is not None: await to_thread.run_sync(chmod, path_str, mode, cancellable=True) raw_socket.listen(backlog) return get_asynclib().UNIXSocketListener(raw_socket) except BaseException: raw_socket.close() raise async def create_udp_socket( family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC, *, local_host: Optional[IPAddressType] = None, local_port: int = 0, reuse_port: bool = False ) -> UDPSocket: """ Create a UDP socket. If ``local_port`` has been given, the socket will be bound to this port on the local machine, making this socket suitable for providing UDP-based services.
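For illustration, a minimal sketch of an echo-style datagram loop (the local address and port here are placeholders, not part of the API)::

    async with await create_udp_socket(local_host='127.0.0.1', local_port=9999) as udp:
        async for packet, (host, port) in udp:
            await udp.sendto(packet, host, port)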
:param family: address family (``AF_INET`` or ``AF_INET6``) – automatically determined from ``local_host`` if omitted :param local_host: IP address or host name of the local interface to bind to :param local_port: local port to bind to :param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port (not supported on Windows) :return: a UDP socket """ if family is AddressFamily.AF_UNSPEC and not local_host: raise ValueError('Either "family" or "local_host" must be given') if local_host: gai_res = await getaddrinfo(str(local_host), local_port, family=family, type=socket.SOCK_DGRAM, flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG) family = cast(AnyIPAddressFamily, gai_res[0][0]) local_address = gai_res[0][-1] elif family is AddressFamily.AF_INET6: local_address = ('::', 0) else: local_address = ('0.0.0.0', 0) return await get_asynclib().create_udp_socket(family, local_address, None, reuse_port) async def create_connected_udp_socket( remote_host: IPAddressType, remote_port: int, *, family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC, local_host: Optional[IPAddressType] = None, local_port: int = 0, reuse_port: bool = False ) -> ConnectedUDPSocket: """ Create a connected UDP socket. Connected UDP sockets can only communicate with the specified remote host/port, and any packets sent from other sources are dropped. :param remote_host: remote host to set as the default target :param remote_port: port on the remote host to set as the default target :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically determined from ``local_host`` or ``remote_host`` if omitted :param local_host: IP address or host name of the local interface to bind to :param local_port: local port to bind to :param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port (not supported on Windows) :return: a connected UDP socket """ local_address = None if local_host: gai_res = await getaddrinfo(str(local_host), local_port, family=family, type=socket.SOCK_DGRAM, flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG) family = cast(AnyIPAddressFamily, gai_res[0][0]) local_address = gai_res[0][-1] gai_res = await getaddrinfo(str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM) family = cast(AnyIPAddressFamily, gai_res[0][0]) remote_address = gai_res[0][-1] return await get_asynclib().create_udp_socket(family, local_address, remote_address, reuse_port) async def getaddrinfo(host: Union[bytearray, bytes, str], port: Union[str, int, None], *, family: Union[int, AddressFamily] = 0, type: Union[int, SocketKind] = 0, proto: int = 0, flags: int = 0) -> GetAddrInfoReturnType: """ Look up a numeric IP address given a host name. Internationalized domain names are translated according to the (non-transitional) IDNA 2008 standard. .. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of (host, port), unlike what :func:`socket.getaddrinfo` does. :param host: host name :param port: port number :param family: socket family (``AF_INET``, ...) :param type: socket type (``SOCK_STREAM``, ...) :param proto: protocol number :param flags: flags to pass to upstream ``getaddrinfo()`` :return: list of tuples containing (family, type, proto, canonname, sockaddr) ..
seealso:: :func:`socket.getaddrinfo` """ # Handle unicode hostnames if isinstance(host, str): try: encoded_host = host.encode('ascii') except UnicodeEncodeError: import idna encoded_host = idna.encode(host, uts46=True) else: encoded_host = host gai_res = await get_asynclib().getaddrinfo(encoded_host, port, family=family, type=type, proto=proto, flags=flags) return [(family, type, proto, canonname, convert_ipv6_sockaddr(sockaddr)) for family, type, proto, canonname, sockaddr in gai_res] def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[Tuple[str, str]]: """ Look up the host name of an IP address. :param sockaddr: socket address (e.g. (ipaddress, port) for IPv4) :param flags: flags to pass to upstream ``getnameinfo()`` :return: a tuple of (host name, service name) .. seealso:: :func:`socket.getnameinfo` """ return get_asynclib().getnameinfo(sockaddr, flags) def wait_socket_readable(sock: socket.socket) -> Awaitable[None]: """ Wait until the given socket has data to be read. This does **NOT** work on Windows when using the asyncio backend with a proactor event loop (default on py3.8+). .. warning:: Only use this on raw sockets that have not been wrapped by any higher level constructs like socket streams! :param sock: a socket object :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the socket to become readable :raises ~anyio.BusyResourceError: if another task is already waiting for the socket to become readable """ return get_asynclib().wait_socket_readable(sock) def wait_socket_writable(sock: socket.socket) -> Awaitable[None]: """ Wait until the given socket can be written to. This does **NOT** work on Windows when using the asyncio backend with a proactor event loop (default on py3.8+). .. warning:: Only use this on raw sockets that have not been wrapped by any higher level constructs like socket streams! :param sock: a socket object :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the socket to become writable :raises ~anyio.BusyResourceError: if another task is already waiting for the socket to become writable """ return get_asynclib().wait_socket_writable(sock) # # Private API # def convert_ipv6_sockaddr( sockaddr: Union[Tuple[str, int, int, int], Tuple[str, int]] ) -> Tuple[str, int]: """ Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format. If the scope ID is nonzero, it is added to the address, separated with ``%``. Otherwise the flow id and scope id are simply cut off from the tuple. Any other kinds of socket addresses are returned as-is. :param sockaddr: the result of :meth:`~socket.socket.getsockname` :return: the converted socket address """ # This is more complicated than it should be because of MyPy if isinstance(sockaddr, tuple) and len(sockaddr) == 4: host, port, flowinfo, scope_id = cast(Tuple[str, int, int, int], sockaddr) if scope_id: # Add scope_id to the address return f"{host}%{scope_id}", port else: return host, port else: return cast(Tuple[str, int], sockaddr) anyio-3.5.0/src/anyio/_core/_streams.py000066400000000000000000000027131416724134300200320ustar00rootroot00000000000000import math from typing import Any, Optional, Tuple, Type, TypeVar, overload from ..streams.memory import ( MemoryObjectReceiveStream, MemoryObjectSendStream, MemoryObjectStreamState) T_Item = TypeVar('T_Item') @overload def create_memory_object_stream( max_buffer_size: float, item_type: Type[T_Item] ) -> Tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]: ... 
@overload def create_memory_object_stream( max_buffer_size: float = 0 ) -> Tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]: ... def create_memory_object_stream( max_buffer_size: float = 0, item_type: Optional[Type[T_Item]] = None ) -> Tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]: """ Create a memory object stream. :param max_buffer_size: number of items held in the buffer until ``send()`` starts blocking :param item_type: type of item, for marking the streams with the right generic type for static typing (not used at run time) :return: a tuple of (send stream, receive stream) """ if max_buffer_size != math.inf and not isinstance(max_buffer_size, int): raise ValueError('max_buffer_size must be either an integer or math.inf') if max_buffer_size < 0: raise ValueError('max_buffer_size cannot be negative') state: MemoryObjectStreamState = MemoryObjectStreamState(max_buffer_size) return MemoryObjectSendStream(state), MemoryObjectReceiveStream(state) anyio-3.5.0/src/anyio/_core/_subprocesses.py000066400000000000000000000114051416724134300210720ustar00rootroot00000000000000from io import BytesIO from os import PathLike from subprocess import DEVNULL, PIPE, CalledProcessError, CompletedProcess from typing import AsyncIterable, List, Mapping, Optional, Sequence, Union, cast from ..abc import Process from ._eventloop import get_asynclib from ._tasks import create_task_group async def run_process(command: Union[str, Sequence[str]], *, input: Optional[bytes] = None, stdout: int = PIPE, stderr: int = PIPE, check: bool = True, cwd: Union[str, bytes, 'PathLike[str]', None] = None, env: Optional[Mapping[str, str]] = None, start_new_session: bool = False, ) -> 'CompletedProcess[bytes]': """ Run an external command in a subprocess and wait until it completes. .. seealso:: :func:`subprocess.run` :param command: either a string to pass to the shell, or an iterable of strings containing the executable name or path and its arguments :param input: bytes passed to the standard input of the subprocess :param stdout: either :data:`subprocess.PIPE` or :data:`subprocess.DEVNULL` :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL` or :data:`subprocess.STDOUT` :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the process terminates with a return code other than 0 :param cwd: If not ``None``, change the working directory to this before running the command :param env: if not ``None``, this mapping replaces the inherited environment variables from the parent process :param start_new_session: if ``True``, the setsid() system call will be made in the child process prior to the execution of the subprocess.
(POSIX only) :return: an object representing the completed process :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process exits with a nonzero return code """ async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None: buffer = BytesIO() async for chunk in stream: buffer.write(chunk) stream_contents[index] = buffer.getvalue() async with await open_process(command, stdin=PIPE if input else DEVNULL, stdout=stdout, stderr=stderr, cwd=cwd, env=env, start_new_session=start_new_session) as process: stream_contents: List[Optional[bytes]] = [None, None] try: async with create_task_group() as tg: if process.stdout: tg.start_soon(drain_stream, process.stdout, 0) if process.stderr: tg.start_soon(drain_stream, process.stderr, 1) if process.stdin and input: await process.stdin.send(input) await process.stdin.aclose() await process.wait() except BaseException: process.kill() raise output, errors = stream_contents if check and process.returncode != 0: raise CalledProcessError(cast(int, process.returncode), command, output, errors) return CompletedProcess(command, cast(int, process.returncode), output, errors) async def open_process(command: Union[str, Sequence[str]], *, stdin: int = PIPE, stdout: int = PIPE, stderr: int = PIPE, cwd: Union[str, bytes, 'PathLike[str]', None] = None, env: Optional[Mapping[str, str]] = None, start_new_session: bool = False) -> Process: """ Start an external command in a subprocess. .. seealso:: :class:`subprocess.Popen` :param command: either a string to pass to the shell, or an iterable of strings containing the executable name or path and its arguments :param stdin: either :data:`subprocess.PIPE` or :data:`subprocess.DEVNULL` :param stdout: either :data:`subprocess.PIPE` or :data:`subprocess.DEVNULL` :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL` or :data:`subprocess.STDOUT` :param cwd: If not ``None``, the working directory is changed before executing :param env: If env is not ``None``, it must be a mapping that defines the environment variables for the new process :param start_new_session: if ``True``, the setsid() system call will be made in the child process prior to the execution of the subprocess.
(POSIX only) :return: an asynchronous process object """ shell = isinstance(command, str) return await get_asynclib().open_process(command, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd, env=env, start_new_session=start_new_session) anyio-3.5.0/src/anyio/_core/_synchronization.py000066400000000000000000000405201416724134300216130ustar00rootroot00000000000000from collections import deque from dataclasses import dataclass from types import TracebackType from typing import Deque, Optional, Tuple, Type from warnings import warn from ..lowlevel import cancel_shielded_checkpoint, checkpoint, checkpoint_if_cancelled from ._compat import DeprecatedAwaitable from ._eventloop import get_asynclib from ._exceptions import BusyResourceError, WouldBlock from ._tasks import CancelScope from ._testing import TaskInfo, get_current_task @dataclass(frozen=True) class EventStatistics: """ :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait` """ tasks_waiting: int @dataclass(frozen=True) class CapacityLimiterStatistics: """ :ivar int borrowed_tokens: number of tokens currently borrowed by tasks :ivar float total_tokens: total number of available tokens :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from this limiter :ivar int tasks_waiting: number of tasks waiting on :meth:`~.CapacityLimiter.acquire` or :meth:`~.CapacityLimiter.acquire_on_behalf_of` """ borrowed_tokens: int total_tokens: float borrowers: Tuple[object, ...] tasks_waiting: int @dataclass(frozen=True) class LockStatistics: """ :ivar bool locked: flag indicating if this lock is locked or not :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the lock is not held by any task) :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire` """ locked: bool owner: Optional[TaskInfo] tasks_waiting: int @dataclass(frozen=True) class ConditionStatistics: """ :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait` :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying :class:`~.Lock` """ tasks_waiting: int lock_statistics: LockStatistics @dataclass(frozen=True) class SemaphoreStatistics: """ :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire` """ tasks_waiting: int class Event: def __new__(cls) -> 'Event': return get_asynclib().Event() def set(self) -> DeprecatedAwaitable: """Set the flag, notifying all listeners.""" raise NotImplementedError def is_set(self) -> bool: """Return ``True`` if the flag is set, ``False`` if not.""" raise NotImplementedError async def wait(self) -> None: """ Wait until the flag has been set. If the flag has already been set when this method is called, it returns immediately. 
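For illustration, a minimal sketch of the usual set/wait pairing between two tasks (the function names here are arbitrary)::

    from anyio import Event, create_task_group, run

    async def waiter(event: Event) -> None:
        await event.wait()  # blocks until some task calls event.set()
        print('event was set')

    async def main() -> None:
        event = Event()
        async with create_task_group() as tg:
            tg.start_soon(waiter, event)
            event.set()  # wakes the waiter

    run(main)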
""" raise NotImplementedError def statistics(self) -> EventStatistics: """Return statistics about the current state of this event.""" raise NotImplementedError class Lock: _owner_task: Optional[TaskInfo] = None def __init__(self) -> None: self._waiters: Deque[Tuple[TaskInfo, Event]] = deque() async def __aenter__(self) -> None: await self.acquire() async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None: self.release() async def acquire(self) -> None: """Acquire the lock.""" await checkpoint_if_cancelled() try: self.acquire_nowait() except WouldBlock: task = get_current_task() event = Event() token = task, event self._waiters.append(token) try: await event.wait() except BaseException: if not event.is_set(): self._waiters.remove(token) elif self._owner_task == task: self.release() raise assert self._owner_task == task else: try: await cancel_shielded_checkpoint() except BaseException: self.release() raise def acquire_nowait(self) -> None: """ Acquire the lock, without blocking. :raises ~WouldBlock: if the operation would block """ task = get_current_task() if self._owner_task == task: raise RuntimeError('Attempted to acquire an already held Lock') if self._owner_task is not None: raise WouldBlock self._owner_task = task def release(self) -> DeprecatedAwaitable: """Release the lock.""" if self._owner_task != get_current_task(): raise RuntimeError('The current task is not holding this lock') if self._waiters: self._owner_task, event = self._waiters.popleft() event.set() else: del self._owner_task return DeprecatedAwaitable(self.release) def locked(self) -> bool: """Return True if the lock is currently held.""" return self._owner_task is not None def statistics(self) -> LockStatistics: """ Return statistics about the current state of this lock. .. versionadded:: 3.0 """ return LockStatistics(self.locked(), self._owner_task, len(self._waiters)) class Condition: _owner_task: Optional[TaskInfo] = None def __init__(self, lock: Optional[Lock] = None): self._lock = lock or Lock() self._waiters: Deque[Event] = deque() async def __aenter__(self) -> None: await self.acquire() async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None: self.release() def _check_acquired(self) -> None: if self._owner_task != get_current_task(): raise RuntimeError('The current task is not holding the underlying lock') async def acquire(self) -> None: """Acquire the underlying lock.""" await self._lock.acquire() self._owner_task = get_current_task() def acquire_nowait(self) -> None: """ Acquire the underlying lock, without blocking. 
:raises ~WouldBlock: if the operation would block """ self._lock.acquire_nowait() self._owner_task = get_current_task() def release(self) -> DeprecatedAwaitable: """Release the underlying lock.""" self._lock.release() return DeprecatedAwaitable(self.release) def locked(self) -> bool: """Return True if the lock is set.""" return self._lock.locked() def notify(self, n: int = 1) -> None: """Notify exactly n listeners.""" self._check_acquired() for _ in range(n): try: event = self._waiters.popleft() except IndexError: break event.set() def notify_all(self) -> None: """Notify all the listeners.""" self._check_acquired() for event in self._waiters: event.set() self._waiters.clear() async def wait(self) -> None: """Wait for a notification.""" await checkpoint() event = Event() self._waiters.append(event) self.release() try: await event.wait() except BaseException: if not event.is_set(): self._waiters.remove(event) raise finally: with CancelScope(shield=True): await self.acquire() def statistics(self) -> ConditionStatistics: """ Return statistics about the current state of this condition. .. versionadded:: 3.0 """ return ConditionStatistics(len(self._waiters), self._lock.statistics()) class Semaphore: def __init__(self, initial_value: int, *, max_value: Optional[int] = None): if not isinstance(initial_value, int): raise TypeError('initial_value must be an integer') if initial_value < 0: raise ValueError('initial_value must be >= 0') if max_value is not None: if not isinstance(max_value, int): raise TypeError('max_value must be an integer or None') if max_value < initial_value: raise ValueError('max_value must be equal to or higher than initial_value') self._value = initial_value self._max_value = max_value self._waiters: Deque[Event] = deque() async def __aenter__(self) -> 'Semaphore': await self.acquire() return self async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None: self.release() async def acquire(self) -> None: """Decrement the semaphore value, blocking if necessary.""" await checkpoint_if_cancelled() try: self.acquire_nowait() except WouldBlock: event = Event() self._waiters.append(event) try: await event.wait() except BaseException: if not event.is_set(): self._waiters.remove(event) else: self.release() raise else: try: await cancel_shielded_checkpoint() except BaseException: self.release() raise def acquire_nowait(self) -> None: """ Acquire the underlying lock, without blocking. :raises ~WouldBlock: if the operation would block """ if self._value == 0: raise WouldBlock self._value -= 1 def release(self) -> DeprecatedAwaitable: """Increment the semaphore value.""" if self._max_value is not None and self._value == self._max_value: raise ValueError('semaphore released too many times') if self._waiters: self._waiters.popleft().set() else: self._value += 1 return DeprecatedAwaitable(self.release) @property def value(self) -> int: """The current value of the semaphore.""" return self._value @property def max_value(self) -> Optional[int]: """The maximum value of the semaphore.""" return self._max_value def statistics(self) -> SemaphoreStatistics: """ Return statistics about the current state of this semaphore. .. 
versionadded:: 3.0 """ return SemaphoreStatistics(len(self._waiters)) class CapacityLimiter: def __new__(cls, total_tokens: float) -> 'CapacityLimiter': return get_asynclib().CapacityLimiter(total_tokens) async def __aenter__(self) -> None: raise NotImplementedError async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: raise NotImplementedError @property def total_tokens(self) -> float: """ The total number of tokens available for borrowing. This is a read-write property. If the total number of tokens is increased, the proportionate number of tasks waiting on this limiter will be granted their tokens. .. versionchanged:: 3.0 The property is now writable. """ raise NotImplementedError @total_tokens.setter def total_tokens(self, value: float) -> None: raise NotImplementedError async def set_total_tokens(self, value: float) -> None: warn('CapacityLimiter.set_total_tokens has been deprecated. Set the value of the ' '"total_tokens" attribute directly.', DeprecationWarning) self.total_tokens = value @property def borrowed_tokens(self) -> int: """The number of tokens that have currently been borrowed.""" raise NotImplementedError @property def available_tokens(self) -> float: """The number of tokens currently available to be borrowed.""" raise NotImplementedError def acquire_nowait(self) -> DeprecatedAwaitable: """ Acquire a token for the current task without waiting for one to become available. :raises ~anyio.WouldBlock: if there are no tokens available for borrowing """ raise NotImplementedError def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable: """ Acquire a token without waiting for one to become available. :param borrower: the entity borrowing a token :raises ~anyio.WouldBlock: if there are no tokens available for borrowing """ raise NotImplementedError async def acquire(self) -> None: """ Acquire a token for the current task, waiting if necessary for one to become available. """ raise NotImplementedError async def acquire_on_behalf_of(self, borrower: object) -> None: """ Acquire a token, waiting if necessary for one to become available. :param borrower: the entity borrowing a token """ raise NotImplementedError def release(self) -> None: """ Release the token held by the current task. :raises RuntimeError: if the current task has not borrowed a token from this limiter. """ raise NotImplementedError def release_on_behalf_of(self, borrower: object) -> None: """ Release the token held by the given borrower. :raises RuntimeError: if the borrower has not borrowed a token from this limiter. """ raise NotImplementedError def statistics(self) -> CapacityLimiterStatistics: """ Return statistics about the current state of this limiter. .. versionadded:: 3.0 """ raise NotImplementedError def create_lock() -> Lock: """ Create an asynchronous lock. :return: a lock object .. deprecated:: 3.0 Use :class:`~Lock` directly. """ warn('create_lock() is deprecated -- use Lock() directly', DeprecationWarning) return Lock() def create_condition(lock: Optional[Lock] = None) -> Condition: """ Create an asynchronous condition. :param lock: the lock to base the condition object on :return: a condition object .. deprecated:: 3.0 Use :class:`~Condition` directly. """ warn('create_condition() is deprecated -- use Condition() directly', DeprecationWarning) return Condition(lock=lock) def create_event() -> Event: """ Create an asynchronous event object. :return: an event object ..
deprecated:: 3.0 Use :class:`~Event` directly. """ warn('create_event() is deprecated -- use Event() directly', DeprecationWarning) return get_asynclib().Event() def create_semaphore(value: int, *, max_value: Optional[int] = None) -> Semaphore: """ Create an asynchronous semaphore. :param value: the semaphore's initial value :param max_value: if set, makes this a "bounded" semaphore that raises :exc:`ValueError` if the semaphore's value would exceed this number :return: a semaphore object .. deprecated:: 3.0 Use :class:`~Semaphore` directly. """ warn('create_semaphore() is deprecated -- use Semaphore() directly', DeprecationWarning) return Semaphore(value, max_value=max_value) def create_capacity_limiter(total_tokens: float) -> CapacityLimiter: """ Create a capacity limiter. :param total_tokens: the total number of tokens available for borrowing (can be an integer or :data:`math.inf`) :return: a capacity limiter object .. deprecated:: 3.0 Use :class:`~CapacityLimiter` directly. """ warn('create_capacity_limiter() is deprecated -- use CapacityLimiter() directly', DeprecationWarning) return get_asynclib().CapacityLimiter(total_tokens) class ResourceGuard: __slots__ = 'action', '_guarded' def __init__(self, action: str): self.action = action self._guarded = False def __enter__(self) -> None: if self._guarded: raise BusyResourceError(self.action) self._guarded = True def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: self._guarded = False return None anyio-3.5.0/src/anyio/_core/_tasks.py000066400000000000000000000121171416724134300175000ustar00rootroot00000000000000import math from types import TracebackType from typing import Optional, Type from warnings import warn from ..abc._tasks import TaskGroup, TaskStatus from ._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable, DeprecatedAwaitableFloat from ._eventloop import get_asynclib class _IgnoredTaskStatus(TaskStatus): def started(self, value: object = None) -> None: pass TASK_STATUS_IGNORED = _IgnoredTaskStatus() class CancelScope(DeprecatedAsyncContextManager['CancelScope']): """ Wraps a unit of work that can be made separately cancellable. :param deadline: The time (clock value) when this scope is cancelled automatically :param shield: ``True`` to shield the cancel scope from external cancellation """ def __new__(cls, *, deadline: float = math.inf, shield: bool = False) -> 'CancelScope': return get_asynclib().CancelScope(shield=shield, deadline=deadline) def cancel(self) -> DeprecatedAwaitable: """Cancel this scope immediately.""" raise NotImplementedError @property def deadline(self) -> float: """ The time (clock value) when this scope is cancelled automatically. Will be ``float('inf')`` if no timeout has been set. """ raise NotImplementedError @deadline.setter def deadline(self, value: float) -> None: raise NotImplementedError @property def cancel_called(self) -> bool: """``True`` if :meth:`cancel` has been called.""" raise NotImplementedError @property def shield(self) -> bool: """ ``True`` if this scope is shielded from external cancellation. While a scope is shielded, it will not receive cancellations from outside. 
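For illustration, a minimal sketch of shielding cleanup code from cancellation (``do_cleanup`` is a hypothetical coroutine function)::

    from anyio import CancelScope

    async def finalize() -> None:
        with CancelScope(shield=True):
            await do_cleanup()  # runs even if the enclosing scope is cancelled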
""" raise NotImplementedError @shield.setter def shield(self, value: bool) -> None: raise NotImplementedError def __enter__(self) -> 'CancelScope': raise NotImplementedError def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: raise NotImplementedError def open_cancel_scope(*, shield: bool = False) -> CancelScope: """ Open a cancel scope. :param shield: ``True`` to shield the cancel scope from external cancellation :return: a cancel scope .. deprecated:: 3.0 Use :class:`~CancelScope` directly. """ warn('open_cancel_scope() is deprecated -- use CancelScope() directly', DeprecationWarning) return get_asynclib().CancelScope(shield=shield) class FailAfterContextManager(DeprecatedAsyncContextManager[CancelScope]): def __init__(self, cancel_scope: CancelScope): self._cancel_scope = cancel_scope def __enter__(self) -> CancelScope: return self._cancel_scope.__enter__() def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: retval = self._cancel_scope.__exit__(exc_type, exc_val, exc_tb) if self._cancel_scope.cancel_called: raise TimeoutError return retval def fail_after(delay: Optional[float], shield: bool = False) -> FailAfterContextManager: """ Create a context manager which raises a :class:`TimeoutError` if does not finish in time. :param delay: maximum allowed time (in seconds) before raising the exception, or ``None`` to disable the timeout :param shield: ``True`` to shield the cancel scope from external cancellation :return: a context manager that yields a cancel scope :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.abc.CancelScope`\\] """ deadline = (get_asynclib().current_time() + delay) if delay is not None else math.inf cancel_scope = get_asynclib().CancelScope(deadline=deadline, shield=shield) return FailAfterContextManager(cancel_scope) def move_on_after(delay: Optional[float], shield: bool = False) -> CancelScope: """ Create a cancel scope with a deadline that expires after the given delay. :param delay: maximum allowed time (in seconds) before exiting the context block, or ``None`` to disable the timeout :param shield: ``True`` to shield the cancel scope from external cancellation :return: a cancel scope """ deadline = (get_asynclib().current_time() + delay) if delay is not None else math.inf return get_asynclib().CancelScope(deadline=deadline, shield=shield) def current_effective_deadline() -> DeprecatedAwaitableFloat: """ Return the nearest deadline among all the cancel scopes effective for the current task. :return: a clock value from the event loop's internal clock (``float('inf')`` if there is no deadline in effect) :rtype: float """ return DeprecatedAwaitableFloat(get_asynclib().current_effective_deadline(), current_effective_deadline) def create_task_group() -> 'TaskGroup': """ Create a task group. :return: a task group """ return get_asynclib().TaskGroup() anyio-3.5.0/src/anyio/_core/_testing.py000066400000000000000000000041661416724134300200350ustar00rootroot00000000000000from typing import Any, Awaitable, Generator, Optional, Union from ._compat import DeprecatedAwaitableList, _warn_deprecation from ._eventloop import get_asynclib class TaskInfo: """ Represents an asynchronous task. 
:ivar int id: the unique identifier of the task :ivar parent_id: the identifier of the parent task, if any :vartype parent_id: Optional[int] :ivar str name: the description of the task (if any) :ivar ~collections.abc.Coroutine coro: the coroutine object of the task """ __slots__ = '_name', 'id', 'parent_id', 'name', 'coro' def __init__(self, id: int, parent_id: Optional[int], name: Optional[str], coro: Union[Generator, Awaitable[Any]]): func = get_current_task self._name = f'{func.__module__}.{func.__qualname__}' self.id: int = id self.parent_id: Optional[int] = parent_id self.name: Optional[str] = name self.coro: Union[Generator, Awaitable[Any]] = coro def __eq__(self, other: object) -> bool: if isinstance(other, TaskInfo): return self.id == other.id return NotImplemented def __hash__(self) -> int: return hash(self.id) def __repr__(self) -> str: return f'{self.__class__.__name__}(id={self.id!r}, name={self.name!r})' def __await__(self) -> Generator[None, None, "TaskInfo"]: _warn_deprecation(self) if False: yield return self def _unwrap(self) -> 'TaskInfo': return self def get_current_task() -> TaskInfo: """ Return the current task. :return: a representation of the current task """ return get_asynclib().get_current_task() def get_running_tasks() -> DeprecatedAwaitableList[TaskInfo]: """ Return a list of running tasks in the current event loop. :return: a list of task info objects """ tasks = get_asynclib().get_running_tasks() return DeprecatedAwaitableList(tasks, func=get_running_tasks) async def wait_all_tasks_blocked() -> None: """Wait until all other tasks are waiting for something.""" await get_asynclib().wait_all_tasks_blocked() anyio-3.5.0/src/anyio/_core/_typedattr.py000066400000000000000000000047001416724134300203720ustar00rootroot00000000000000import sys from typing import Any, Callable, Dict, Mapping, TypeVar, Union, overload from ._exceptions import TypedAttributeLookupError if sys.version_info >= (3, 8): from typing import final else: from typing_extensions import final T_Attr = TypeVar('T_Attr') T_Default = TypeVar('T_Default') undefined = object() def typed_attribute() -> Any: """Return a unique object, used to mark typed attributes.""" return object() class TypedAttributeSet: """ Superclass for typed attribute collections. Checks that every public attribute of every subclass has a type annotation. """ def __init_subclass__(cls) -> None: annotations: Dict[str, Any] = getattr(cls, '__annotations__', {}) for attrname in dir(cls): if not attrname.startswith('_') and attrname not in annotations: raise TypeError(f'Attribute {attrname!r} is missing its type annotation') super().__init_subclass__() class TypedAttributeProvider: """Base class for classes that wish to provide typed extra attributes.""" @property def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]: """ A mapping of the extra attributes to callables that return the corresponding values. If the provider wraps another provider, the attributes from that wrapper should also be included in the returned mapping (but the wrapper may override the callables from the wrapped instance). """ return {} @overload def extra(self, attribute: T_Attr) -> T_Attr: ... @overload def extra(self, attribute: T_Attr, default: T_Default) -> Union[T_Attr, T_Default]: ... @final def extra(self, attribute: Any, default: object = undefined) -> object: """ extra(attribute, default=undefined) Return the value of the given typed extra attribute. 
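For illustration, a sketch of looking up an attribute with a fallback (``stream`` stands in for any connected socket stream)::

    from anyio.abc import SocketAttribute

    remote_port = stream.extra(SocketAttribute.remote_port, None)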
:param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to look for :param default: the value that should be returned if no value is found for the attribute :raises ~anyio.TypedAttributeLookupError: if the search failed and no default value was given """ try: return self.extra_attributes[attribute]() except KeyError: if default is undefined: raise TypedAttributeLookupError('Attribute not found') from None else: return default anyio-3.5.0/src/anyio/abc/000077500000000000000000000000001416724134300152765ustar00rootroot00000000000000anyio-3.5.0/src/anyio/abc/__init__.py000066400000000000000000000036741416724134300174210ustar00rootroot00000000000000__all__ = ('AsyncResource', 'IPAddressType', 'IPSockAddrType', 'SocketAttribute', 'SocketStream', 'SocketListener', 'UDPSocket', 'UNIXSocketStream', 'UDPPacketType', 'ConnectedUDPSocket', 'UnreliableObjectReceiveStream', 'UnreliableObjectSendStream', 'UnreliableObjectStream', 'ObjectReceiveStream', 'ObjectSendStream', 'ObjectStream', 'ByteReceiveStream', 'ByteSendStream', 'ByteStream', 'AnyUnreliableByteReceiveStream', 'AnyUnreliableByteSendStream', 'AnyUnreliableByteStream', 'AnyByteReceiveStream', 'AnyByteSendStream', 'AnyByteStream', 'Listener', 'Process', 'Event', 'Condition', 'Lock', 'Semaphore', 'CapacityLimiter', 'CancelScope', 'TaskGroup', 'TaskStatus', 'TestRunner', 'BlockingPortal') from typing import Any from ._resources import AsyncResource from ._sockets import ( ConnectedUDPSocket, IPAddressType, IPSockAddrType, SocketAttribute, SocketListener, SocketStream, UDPPacketType, UDPSocket, UNIXSocketStream) from ._streams import ( AnyByteReceiveStream, AnyByteSendStream, AnyByteStream, AnyUnreliableByteReceiveStream, AnyUnreliableByteSendStream, AnyUnreliableByteStream, ByteReceiveStream, ByteSendStream, ByteStream, Listener, ObjectReceiveStream, ObjectSendStream, ObjectStream, UnreliableObjectReceiveStream, UnreliableObjectSendStream, UnreliableObjectStream) from ._subprocesses import Process from ._tasks import TaskGroup, TaskStatus from ._testing import TestRunner # Re-exported here, for backwards compatibility # isort: off from .._core._synchronization import CapacityLimiter, Condition, Event, Lock, Semaphore from .._core._tasks import CancelScope from ..from_thread import BlockingPortal # Re-export imports so they look like they live directly in this package key: str value: Any for key, value in list(locals().items()): if getattr(value, '__module__', '').startswith('anyio.abc.'): value.__module__ = __name__ anyio-3.5.0/src/anyio/abc/_resources.py000066400000000000000000000013711416724134300200230ustar00rootroot00000000000000from abc import ABCMeta, abstractmethod from types import TracebackType from typing import Optional, Type, TypeVar T = TypeVar("T") class AsyncResource(metaclass=ABCMeta): """ Abstract base class for all closeable asynchronous resources. Works as an asynchronous context manager which returns the instance itself on enter, and calls :meth:`aclose` on exit. 
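For illustration, a minimal sketch using a socket stream, one such resource (the host and port are placeholders)::

    from anyio import connect_tcp

    async with await connect_tcp('example.org', 80) as stream:
        await stream.send(b'ping')
    # aclose() has been called on the stream at this point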
""" async def __aenter__(self: T) -> T: return self async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None: await self.aclose() @abstractmethod async def aclose(self) -> None: """Close the resource.""" anyio-3.5.0/src/anyio/abc/_sockets.py000066400000000000000000000127451416724134300174730ustar00rootroot00000000000000import socket from abc import abstractmethod from io import IOBase from ipaddress import IPv4Address, IPv6Address from socket import AddressFamily from types import TracebackType from typing import ( Any, AsyncContextManager, Callable, Collection, Dict, List, Mapping, Optional, Tuple, Type, TypeVar, Union) from .._core._typedattr import TypedAttributeProvider, TypedAttributeSet, typed_attribute from ._streams import ByteStream, Listener, T_Stream, UnreliableObjectStream from ._tasks import TaskGroup IPAddressType = Union[str, IPv4Address, IPv6Address] IPSockAddrType = Tuple[str, int] SockAddrType = Union[IPSockAddrType, str] UDPPacketType = Tuple[bytes, IPSockAddrType] T_Retval = TypeVar('T_Retval') class _NullAsyncContextManager: async def __aenter__(self) -> None: pass async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: return None class SocketAttribute(TypedAttributeSet): #: the address family of the underlying socket family: AddressFamily = typed_attribute() #: the local socket address of the underlying socket local_address: SockAddrType = typed_attribute() #: for IP addresses, the local port the underlying socket is bound to local_port: int = typed_attribute() #: the underlying stdlib socket object raw_socket: socket.socket = typed_attribute() #: the remote address the underlying socket is connected to remote_address: SockAddrType = typed_attribute() #: for IP addresses, the remote port the underlying socket is connected to remote_port: int = typed_attribute() class _SocketProvider(TypedAttributeProvider): @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: from .._core._sockets import convert_ipv6_sockaddr as convert attributes: Dict[Any, Callable[[], Any]] = { SocketAttribute.family: lambda: self._raw_socket.family, SocketAttribute.local_address: lambda: convert(self._raw_socket.getsockname()), SocketAttribute.raw_socket: lambda: self._raw_socket } try: peername: Optional[Tuple[str, int]] = convert(self._raw_socket.getpeername()) except OSError: peername = None # Provide the remote address for connected sockets if peername is not None: attributes[SocketAttribute.remote_address] = lambda: peername # Provide local and remote ports for IP based sockets if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6): attributes[SocketAttribute.local_port] = lambda: self._raw_socket.getsockname()[1] if peername is not None: remote_port = peername[1] attributes[SocketAttribute.remote_port] = lambda: remote_port return attributes @property @abstractmethod def _raw_socket(self) -> socket.socket: pass class SocketStream(ByteStream, _SocketProvider): """ Transports bytes over a socket. Supports all relevant extra attributes from :class:`~SocketAttribute`. """ class UNIXSocketStream(SocketStream): @abstractmethod async def send_fds(self, message: bytes, fds: Collection[Union[int, IOBase]]) -> None: """ Send file descriptors along with a message to the peer. 
:param message: a non-empty bytestring :param fds: a collection of files (either numeric file descriptors or open file or socket objects) """ @abstractmethod async def receive_fds(self, msglen: int, maxfds: int) -> Tuple[bytes, List[int]]: """ Receive file descriptors along with a message from the peer. :param msglen: length of the message to expect from the peer :param maxfds: maximum number of file descriptors to expect from the peer :return: a tuple of (message, file descriptors) """ class SocketListener(Listener[SocketStream], _SocketProvider): """ Listens to incoming socket connections. Supports all relevant extra attributes from :class:`~SocketAttribute`. """ @abstractmethod async def accept(self) -> SocketStream: """Accept an incoming connection.""" async def serve(self, handler: Callable[[T_Stream], Any], task_group: Optional[TaskGroup] = None) -> None: from .. import create_task_group context_manager: AsyncContextManager if task_group is None: task_group = context_manager = create_task_group() else: # Can be replaced with AsyncExitStack once on py3.7+ context_manager = _NullAsyncContextManager() async with context_manager: while True: stream = await self.accept() task_group.start_soon(handler, stream) class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider): """ Represents an unconnected UDP socket. Supports all relevant extra attributes from :class:`~SocketAttribute`. """ async def sendto(self, data: bytes, host: str, port: int) -> None: """Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, (host, port))).""" return await self.send((data, (host, port))) class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider): """ Represents a connected UDP socket. Supports all relevant extra attributes from :class:`~SocketAttribute`. """ anyio-3.5.0/src/anyio/abc/_streams.py000066400000000000000000000145641416724134300174730ustar00rootroot00000000000000from abc import abstractmethod from typing import Any, Callable, Generic, Optional, TypeVar, Union from .._core._exceptions import EndOfStream from .._core._typedattr import TypedAttributeProvider from ._resources import AsyncResource from ._tasks import TaskGroup T_Item = TypeVar('T_Item') T_Stream = TypeVar('T_Stream') class UnreliableObjectReceiveStream(Generic[T_Item], AsyncResource, TypedAttributeProvider): """ An interface for receiving objects. This interface makes no guarantees that the received messages arrive in the order in which they were sent, or that no messages are missed. Asynchronously iterating over objects of this type will yield objects matching the given type parameter. """ def __aiter__(self) -> "UnreliableObjectReceiveStream[T_Item]": return self async def __anext__(self) -> T_Item: try: return await self.receive() except EndOfStream: raise StopAsyncIteration @abstractmethod async def receive(self) -> T_Item: """ Receive the next item. :raises ~anyio.ClosedResourceError: if the receive stream has been explicitly closed :raises ~anyio.EndOfStream: if this stream has been closed from the other end :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable due to external causes """ class UnreliableObjectSendStream(Generic[T_Item], AsyncResource, TypedAttributeProvider): """ An interface for sending objects. This interface makes no guarantees that the messages sent will reach the recipient(s) in the same order in which they were sent, or at all. """ @abstractmethod async def send(self, item: T_Item) -> None: """ Send an item to the peer(s).
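For illustration, a sketch using the in-memory implementation of this interface (a buffer size of 1 lets the send complete before the receive)::

    from anyio import create_memory_object_stream

    send_stream, receive_stream = create_memory_object_stream(1)
    await send_stream.send('hello')
    assert await receive_stream.receive() == 'hello'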
:param item: the item to send :raises ~anyio.ClosedResourceError: if the send stream has been explicitly closed :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable due to external causes """ class UnreliableObjectStream(UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item]): """ A bidirectional message stream which does not guarantee the order or reliability of message delivery. """ class ObjectReceiveStream(UnreliableObjectReceiveStream[T_Item]): """ A receive message stream which guarantees that messages are received in the same order in which they were sent, and that no messages are missed. """ class ObjectSendStream(UnreliableObjectSendStream[T_Item]): """ A send message stream which guarantees that messages are delivered in the same order in which they were sent, without missing any messages in the middle. """ class ObjectStream(ObjectReceiveStream[T_Item], ObjectSendStream[T_Item], UnreliableObjectStream[T_Item]): """ A bidirectional message stream which guarantees the order and reliability of message delivery. """ @abstractmethod async def send_eof(self) -> None: """ Send an end-of-file indication to the peer. You should not try to send any further data to this stream after calling this method. This method is idempotent (does nothing on successive calls). """ class ByteReceiveStream(AsyncResource, TypedAttributeProvider): """ An interface for receiving bytes from a single peer. Iterating this byte stream will yield a byte string of arbitrary length, but no more than 65536 bytes. """ def __aiter__(self) -> 'ByteReceiveStream': return self async def __anext__(self) -> bytes: try: return await self.receive() except EndOfStream: raise StopAsyncIteration @abstractmethod async def receive(self, max_bytes: int = 65536) -> bytes: """ Receive at most ``max_bytes`` bytes from the peer. .. note:: Implementors of this interface should not return an empty :class:`bytes` object, and users should ignore them. :param max_bytes: maximum number of bytes to receive :return: the received bytes :raises ~anyio.EndOfStream: if this stream has been closed from the other end """ class ByteSendStream(AsyncResource, TypedAttributeProvider): """An interface for sending bytes to a single peer.""" @abstractmethod async def send(self, item: bytes) -> None: """ Send the given bytes to the peer. :param item: the bytes to send """ class ByteStream(ByteReceiveStream, ByteSendStream): """A bidirectional byte stream.""" @abstractmethod async def send_eof(self) -> None: """ Send an end-of-file indication to the peer. You should not try to send any further data to this stream after calling this method. This method is idempotent (does nothing on successive calls). """ #: Type alias for all unreliable bytes-oriented receive streams. AnyUnreliableByteReceiveStream = Union[UnreliableObjectReceiveStream[bytes], ByteReceiveStream] #: Type alias for all unreliable bytes-oriented send streams. AnyUnreliableByteSendStream = Union[UnreliableObjectSendStream[bytes], ByteSendStream] #: Type alias for all unreliable bytes-oriented streams. AnyUnreliableByteStream = Union[UnreliableObjectStream[bytes], ByteStream] #: Type alias for all bytes-oriented receive streams. AnyByteReceiveStream = Union[ObjectReceiveStream[bytes], ByteReceiveStream] #: Type alias for all bytes-oriented send streams. AnyByteSendStream = Union[ObjectSendStream[bytes], ByteSendStream] #: Type alias for all bytes-oriented streams. 
AnyByteStream = Union[ObjectStream[bytes], ByteStream] class Listener(Generic[T_Stream], AsyncResource, TypedAttributeProvider): """An interface for objects that let you accept incoming connections.""" @abstractmethod async def serve(self, handler: Callable[[T_Stream], Any], task_group: Optional[TaskGroup] = None) -> None: """ Accept incoming connections as they come in and start tasks to handle them. :param handler: a callable that will be used to handle each accepted connection :param task_group: the task group that will be used to start tasks for handling each accepted connection (if omitted, an ad-hoc task group will be created) """ anyio-3.5.0/src/anyio/abc/_subprocesses.py000066400000000000000000000040271416724134300205320ustar00rootroot00000000000000from abc import abstractmethod from signal import Signals from typing import Optional from ._resources import AsyncResource from ._streams import ByteReceiveStream, ByteSendStream class Process(AsyncResource): """An asynchronous version of :class:`subprocess.Popen`.""" @abstractmethod async def wait(self) -> int: """ Wait until the process exits. :return: the exit code of the process """ @abstractmethod def terminate(self) -> None: """ Terminates the process, gracefully if possible. On Windows, this calls ``TerminateProcess()``. On POSIX systems, this sends ``SIGTERM`` to the process. .. seealso:: :meth:`subprocess.Popen.terminate` """ @abstractmethod def kill(self) -> None: """ Kills the process. On Windows, this calls ``TerminateProcess()``. On POSIX systems, this sends ``SIGKILL`` to the process. .. seealso:: :meth:`subprocess.Popen.kill` """ @abstractmethod def send_signal(self, signal: Signals) -> None: """ Send a signal to the subprocess. .. seealso:: :meth:`subprocess.Popen.send_signal` :param signal: the signal number (e.g. :data:`signal.SIGHUP`) """ @property @abstractmethod def pid(self) -> int: """The process ID of the process.""" @property @abstractmethod def returncode(self) -> Optional[int]: """ The return code of the process. If the process has not yet terminated, this will be ``None``. """ @property @abstractmethod def stdin(self) -> Optional[ByteSendStream]: """The stream for the standard input of the process.""" @property @abstractmethod def stdout(self) -> Optional[ByteReceiveStream]: """The stream for the standard output of the process.""" @property @abstractmethod def stderr(self) -> Optional[ByteReceiveStream]: """The stream for the standard error output of the process.""" anyio-3.5.0/src/anyio/abc/_tasks.py000066400000000000000000000057531416724134300171460ustar00rootroot00000000000000import typing from abc import ABCMeta, abstractmethod from types import TracebackType from typing import Any, Callable, Coroutine, Optional, Type, TypeVar from warnings import warn if typing.TYPE_CHECKING: from anyio._core._tasks import CancelScope T_Retval = TypeVar('T_Retval') class TaskStatus(metaclass=ABCMeta): @abstractmethod def started(self, value: object = None) -> None: """ Signal that the task has started. :param value: object passed back to the starter of the task """ class TaskGroup(metaclass=ABCMeta): """ Groups several asynchronous tasks together. :ivar cancel_scope: the cancel scope inherited by all child tasks :vartype cancel_scope: CancelScope """ cancel_scope: 'CancelScope' async def spawn(self, func: Callable[..., Coroutine[Any, Any, Any]], *args: object, name: object = None) -> None: """ Start a new task in this task group. 
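For illustration, the equivalent call with the non-deprecated :meth:`start_soon` (a minimal sketch; ``sleep`` is just a stand-in task)::

    from anyio import create_task_group, sleep

    async with create_task_group() as tg:
        tg.start_soon(sleep, 1)  # no ``await``, unlike spawn()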
:param func: a coroutine function :param args: positional arguments to call the function with :param name: name of the task, for the purposes of introspection and debugging .. deprecated:: 3.0 Use :meth:`start_soon` instead. If your code needs AnyIO 2 compatibility, you can keep using this until AnyIO 4. """ warn('spawn() is deprecated -- use start_soon() (without the "await") instead', DeprecationWarning) self.start_soon(func, *args, name=name) @abstractmethod def start_soon(self, func: Callable[..., Coroutine[Any, Any, Any]], *args: object, name: object = None) -> None: """ Start a new task in this task group. :param func: a coroutine function :param args: positional arguments to call the function with :param name: name of the task, for the purposes of introspection and debugging .. versionadded:: 3.0 """ @abstractmethod async def start(self, func: Callable[..., Coroutine[Any, Any, Any]], *args: object, name: object = None) -> object: """ Start a new task and wait until it signals for readiness. :param func: a coroutine function :param args: positional arguments to call the function with :param name: name of the task, for the purposes of introspection and debugging :return: the value passed to ``task_status.started()`` :raises RuntimeError: if the task finishes without calling ``task_status.started()`` .. versionadded:: 3.0 """ @abstractmethod async def __aenter__(self) -> 'TaskGroup': """Enter the task group context and allow starting new tasks.""" @abstractmethod async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: """Exit the task group context waiting for all tasks to finish.""" anyio-3.5.0/src/anyio/abc/_testing.py000066400000000000000000000021731416724134300174670ustar00rootroot00000000000000import types from abc import ABCMeta, abstractmethod from typing import Any, Awaitable, Callable, Dict, Optional, Type, TypeVar _T = TypeVar("_T") class TestRunner(metaclass=ABCMeta): """ Encapsulates a running event loop. Every call made through this object will use the same event loop. """ def __enter__(self) -> 'TestRunner': return self def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[types.TracebackType]) -> Optional[bool]: self.close() return None @abstractmethod def close(self) -> None: """Close the event loop.""" @abstractmethod def call(self, func: Callable[..., Awaitable[_T]], *args: object, **kwargs: Dict[str, Any]) -> _T: """ Call the given function within the backend's event loop. 
:param func: a callable returning an awaitable :param args: positional arguments to call ``func`` with :param kwargs: keyword arguments to call ``func`` with :return: the return value of ``func`` """ anyio-3.5.0/src/anyio/from_thread.py000066400000000000000000000372521416724134300174260ustar00rootroot00000000000000import threading from asyncio import iscoroutine from concurrent.futures import FIRST_COMPLETED, Future, ThreadPoolExecutor, wait from contextlib import AbstractContextManager, contextmanager from types import TracebackType from typing import ( Any, AsyncContextManager, Callable, ContextManager, Coroutine, Dict, Generator, Iterable, Optional, Tuple, Type, TypeVar, Union, cast, overload) from warnings import warn from ._core import _eventloop from ._core._eventloop import get_asynclib, get_cancelled_exc_class, threadlocals from ._core._synchronization import Event from ._core._tasks import CancelScope, create_task_group from .abc._tasks import TaskStatus T_Retval = TypeVar('T_Retval') T_co = TypeVar('T_co') def run(func: Callable[..., Coroutine[Any, Any, T_Retval]], *args: object) -> T_Retval: """ Call a coroutine function from a worker thread. :param func: a coroutine function :param args: positional arguments for the callable :return: the return value of the coroutine function """ try: asynclib = threadlocals.current_async_module except AttributeError: raise RuntimeError('This function can only be run from an AnyIO worker thread') return asynclib.run_async_from_thread(func, *args) def run_async_from_thread(func: Callable[..., Coroutine[Any, Any, T_Retval]], *args: object) -> T_Retval: warn('run_async_from_thread() has been deprecated, use anyio.from_thread.run() instead', DeprecationWarning) return run(func, *args) def run_sync(func: Callable[..., T_Retval], *args: object) -> T_Retval: """ Call a function in the event loop thread from a worker thread. :param func: a callable :param args: positional arguments for the callable :return: the return value of the callable """ try: asynclib = threadlocals.current_async_module except AttributeError: raise RuntimeError('This function can only be run from an AnyIO worker thread') return asynclib.run_sync_from_thread(func, *args) def run_sync_from_thread(func: Callable[..., T_Retval], *args: object) -> T_Retval: warn('run_sync_from_thread() has been deprecated, use anyio.from_thread.run_sync() instead', DeprecationWarning) return run_sync(func, *args) class _BlockingAsyncContextManager(AbstractContextManager): _enter_future: Future _exit_future: Future _exit_event: Event _exit_exc_info: Tuple[Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]] = (None, None, None) def __init__(self, async_cm: AsyncContextManager[T_co], portal: 'BlockingPortal'): self._async_cm = async_cm self._portal = portal async def run_async_cm(self) -> Optional[bool]: try: self._exit_event = Event() value = await self._async_cm.__aenter__() except BaseException as exc: self._enter_future.set_exception(exc) raise else: self._enter_future.set_result(value) try: # Wait for the sync context manager to exit. # This next statement can raise `get_cancelled_exc_class()` if # something went wrong in a task group in this async context # manager. await self._exit_event.wait() finally: # In case of cancellation, it could be that we end up here before # `_BlockingAsyncContextManager.__exit__` is called, and an # `_exit_exc_info` has been set. 
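# If __exit__ was never called, `_exit_exc_info` still holds its default of
# (None, None, None), which is exactly what gets forwarded below.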
result = await self._async_cm.__aexit__(*self._exit_exc_info) return result def __enter__(self) -> T_co: self._enter_future = Future() self._exit_future = self._portal.start_task_soon(self.run_async_cm) cm = self._enter_future.result() return cast(T_co, cm) def __exit__(self, __exc_type: Optional[Type[BaseException]], __exc_value: Optional[BaseException], __traceback: Optional[TracebackType]) -> Optional[bool]: self._exit_exc_info = __exc_type, __exc_value, __traceback self._portal.call(self._exit_event.set) return self._exit_future.result() class _BlockingPortalTaskStatus(TaskStatus): def __init__(self, future: Future): self._future = future def started(self, value: object = None) -> None: self._future.set_result(value) class BlockingPortal: """An object that lets external threads run code in an asynchronous event loop.""" def __new__(cls) -> 'BlockingPortal': return get_asynclib().BlockingPortal() def __init__(self) -> None: self._event_loop_thread_id: Optional[int] = threading.get_ident() self._stop_event = Event() self._task_group = create_task_group() self._cancelled_exc_class = get_cancelled_exc_class() async def __aenter__(self) -> 'BlockingPortal': await self._task_group.__aenter__() return self async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> Optional[bool]: await self.stop() return await self._task_group.__aexit__(exc_type, exc_val, exc_tb) def _check_running(self) -> None: if self._event_loop_thread_id is None: raise RuntimeError('This portal is not running') if self._event_loop_thread_id == threading.get_ident(): raise RuntimeError('This method cannot be called from the event loop thread') async def sleep_until_stopped(self) -> None: """Sleep until :meth:`stop` is called.""" await self._stop_event.wait() async def stop(self, cancel_remaining: bool = False) -> None: """ Signal the portal to shut down. This marks the portal as no longer accepting new calls and exits from :meth:`sleep_until_stopped`. :param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False`` to let them finish before returning """ self._event_loop_thread_id = None self._stop_event.set() if cancel_remaining: self._task_group.cancel_scope.cancel() async def _call_func(self, func: Callable, args: tuple, kwargs: Dict[str, Any], future: Future) -> None: def callback(f: Future) -> None: if f.cancelled() and self._event_loop_thread_id not in (None, threading.get_ident()): self.call(scope.cancel) try: retval = func(*args, **kwargs) if iscoroutine(retval): with CancelScope() as scope: if future.cancelled(): scope.cancel() else: future.add_done_callback(callback) retval = await retval except self._cancelled_exc_class: future.cancel() except BaseException as exc: if not future.cancelled(): future.set_exception(exc) # Let base exceptions fall through if not isinstance(exc, Exception): raise else: if not future.cancelled(): future.set_result(retval) finally: scope = None # type: ignore[assignment] def _spawn_task_from_thread(self, func: Callable, args: tuple, kwargs: Dict[str, Any], name: object, future: Future) -> None: """ Spawn a new task using the given callable. Implementors must ensure that the future is resolved when the task finishes. 
:param func: a callable :param args: positional arguments to be passed to the callable :param kwargs: keyword arguments to be passed to the callable :param name: name of the task (will be coerced to a string if not ``None``) :param future: a future that will resolve to the return value of the callable, or the exception raised during its execution """ raise NotImplementedError @overload def call(self, func: Callable[..., Coroutine[Any, Any, T_Retval]], *args: object) -> T_Retval: ... @overload def call(self, func: Callable[..., T_Retval], *args: object) -> T_Retval: ... def call(self, func: Callable[..., Union[Coroutine[Any, Any, T_Retval], T_Retval]], *args: object) -> T_Retval: """ Call the given function in the event loop thread. If the callable returns a coroutine object, it is awaited on. :param func: any callable :raises RuntimeError: if the portal is not running or if this method is called from within the event loop thread """ return cast(T_Retval, self.start_task_soon(func, *args).result()) @overload def spawn_task(self, func: Callable[..., Coroutine[Any, Any, T_Retval]], *args: object, name: object = None) -> "Future[T_Retval]": ... @overload def spawn_task(self, func: Callable[..., T_Retval], *args: object, name: object = None) -> "Future[T_Retval]": ... def spawn_task(self, func: Callable[..., Union[Coroutine[Any, Any, T_Retval], T_Retval]], *args: object, name: object = None) -> "Future[T_Retval]": """ Start a task in the portal's task group. :param func: the target coroutine function :param args: positional arguments passed to ``func`` :param name: name of the task (will be coerced to a string if not ``None``) :return: a future that resolves with the return value of the callable if the task completes successfully, or with the exception raised in the task :raises RuntimeError: if the portal is not running or if this method is called from within the event loop thread .. versionadded:: 2.1 .. deprecated:: 3.0 Use :meth:`start_task_soon` instead. If your code needs AnyIO 2 compatibility, you can keep using this until AnyIO 4. """ warn('spawn_task() is deprecated -- use start_task_soon() instead', DeprecationWarning) return self.start_task_soon(func, *args, name=name) # type: ignore[arg-type] @overload def start_task_soon(self, func: Callable[..., Coroutine[Any, Any, T_Retval]], *args: object, name: object = None) -> "Future[T_Retval]": ... @overload def start_task_soon(self, func: Callable[..., T_Retval], *args: object, name: object = None) -> "Future[T_Retval]": ... def start_task_soon(self, func: Callable[..., Union[Coroutine[Any, Any, T_Retval], T_Retval]], *args: object, name: object = None) -> "Future[T_Retval]": """ Start a task in the portal's task group. The task will be run inside a cancel scope which can be cancelled by cancelling the returned future. :param func: the target coroutine function :param args: positional arguments passed to ``func`` :param name: name of the task (will be coerced to a string if not ``None``) :return: a future that resolves with the return value of the callable if the task completes successfully, or with the exception raised in the task :raises RuntimeError: if the portal is not running or if this method is called from within the event loop thread .. 
versionadded:: 3.0 """ self._check_running() f: Future = Future() self._spawn_task_from_thread(func, args, {}, name, f) return f def start_task(self, func: Callable[..., Coroutine[Any, Any, Any]], *args: object, name: object = None) -> Tuple['Future[Any]', Any]: """ Start a task in the portal's task group and wait until it signals for readiness. This method works the same way as :meth:`TaskGroup.start`. :param func: the target coroutine function :param args: positional arguments passed to ``func`` :param name: name of the task (will be coerced to a string if not ``None``) :return: a tuple of (future, task_status_value) where the ``task_status_value`` is the value passed to ``task_status.started()`` from within the target function .. versionadded:: 3.0 """ def task_done(future: Future) -> None: if not task_status_future.done(): if future.cancelled(): task_status_future.cancel() elif future.exception(): task_status_future.set_exception(future.exception()) else: exc = RuntimeError('Task exited without calling task_status.started()') task_status_future.set_exception(exc) self._check_running() task_status_future: Future = Future() task_status = _BlockingPortalTaskStatus(task_status_future) f: Future = Future() f.add_done_callback(task_done) self._spawn_task_from_thread(func, args, {'task_status': task_status}, name, f) return f, task_status_future.result() def wrap_async_context_manager(self, cm: AsyncContextManager[T_co]) -> ContextManager[T_co]: """ Wrap an async context manager as a synchronous context manager via this portal. Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping in the middle until the synchronous context manager exits. :param cm: an asynchronous context manager :return: a synchronous context manager .. versionadded:: 2.1 """ return _BlockingAsyncContextManager(cm, self) def create_blocking_portal() -> BlockingPortal: """ Create a portal for running functions in the event loop thread from external threads. Use this function in asynchronous code when you need to allow external threads access to the event loop where your asynchronous code is currently running. .. deprecated:: 3.0 Use :class:`.BlockingPortal` directly. """ warn('create_blocking_portal() has been deprecated -- use anyio.from_thread.BlockingPortal() ' 'directly', DeprecationWarning) return BlockingPortal() @contextmanager def start_blocking_portal( backend: str = 'asyncio', backend_options: Optional[Dict[str, Any]] = None) -> Generator[BlockingPortal, Any, None]: """ Start a new event loop in a new thread and run a blocking portal in its main task. The parameters are the same as for :func:`~anyio.run`. :param backend: name of the backend :param backend_options: backend options :return: a context manager that yields a blocking portal .. versionchanged:: 3.0 Usage as a context manager is now required. 
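    Usage sketch (``fetch_data`` stands in for any coroutine function; it is not part
    of this API)::

        with start_blocking_portal() as portal:
            result = portal.call(fetch_data, 'https://example.org')
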
""" async def run_portal() -> None: async with BlockingPortal() as portal_: if future.set_running_or_notify_cancel(): future.set_result(portal_) await portal_.sleep_until_stopped() future: Future[BlockingPortal] = Future() with ThreadPoolExecutor(1) as executor: run_future = executor.submit(_eventloop.run, run_portal, backend=backend, backend_options=backend_options) try: wait(cast(Iterable[Future], [run_future, future]), return_when=FIRST_COMPLETED) except BaseException: future.cancel() run_future.cancel() raise if future.done(): portal = future.result() try: yield portal except BaseException: portal.call(portal.stop, True) raise portal.call(portal.stop, False) run_future.result() anyio-3.5.0/src/anyio/lowlevel.py000066400000000000000000000110041416724134300167500ustar00rootroot00000000000000import enum import sys from dataclasses import dataclass from typing import Any, Dict, Generic, Set, TypeVar, Union, overload from weakref import WeakKeyDictionary from ._core._eventloop import get_asynclib if sys.version_info >= (3, 8): from typing import Literal else: from typing_extensions import Literal T = TypeVar('T') D = TypeVar('D') async def checkpoint() -> None: """ Check for cancellation and allow the scheduler to switch to another task. Equivalent to (but more efficient than):: await checkpoint_if_cancelled() await cancel_shielded_checkpoint() .. versionadded:: 3.0 """ await get_asynclib().checkpoint() async def checkpoint_if_cancelled() -> None: """ Enter a checkpoint if the enclosing cancel scope has been cancelled. This does not allow the scheduler to switch to a different task. .. versionadded:: 3.0 """ await get_asynclib().checkpoint_if_cancelled() async def cancel_shielded_checkpoint() -> None: """ Allow the scheduler to switch to another task but without checking for cancellation. Equivalent to (but potentially more efficient than):: with CancelScope(shield=True): await checkpoint() .. versionadded:: 3.0 """ await get_asynclib().cancel_shielded_checkpoint() def current_token() -> object: """Return a backend specific token object that can be used to get back to the event loop.""" return get_asynclib().current_token() _run_vars = WeakKeyDictionary() # type: WeakKeyDictionary[Any, Dict[str, Any]] _token_wrappers: Dict[Any, '_TokenWrapper'] = {} @dataclass(frozen=True) class _TokenWrapper: __slots__ = '_token', '__weakref__' _token: object class _NoValueSet(enum.Enum): NO_VALUE_SET = enum.auto() class RunvarToken(Generic[T]): __slots__ = '_var', '_value', '_redeemed' def __init__(self, var: 'RunVar[T]', value: Union[T, Literal[_NoValueSet.NO_VALUE_SET]]): self._var = var self._value: Union[T, Literal[_NoValueSet.NO_VALUE_SET]] = value self._redeemed = False class RunVar(Generic[T]): """Like a :class:`~contextvars.ContextVar`, expect scoped to the running event loop.""" __slots__ = '_name', '_default' NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET _token_wrappers: Set[_TokenWrapper] = set() def __init__(self, name: str, default: Union[T, Literal[_NoValueSet.NO_VALUE_SET]] = NO_VALUE_SET): self._name = name self._default = default @property def _current_vars(self) -> Dict[str, T]: token = current_token() while True: try: return _run_vars[token] except TypeError: # Happens when token isn't weak referable (TrioToken). # This workaround does mean that some memory will leak on Trio until the problem # is fixed on their end. 
token = _TokenWrapper(token) self._token_wrappers.add(token) except KeyError: run_vars = _run_vars[token] = {} return run_vars @overload def get(self, default: D) -> Union[T, D]: ... @overload def get(self) -> T: ... def get( self, default: Union[D, Literal[_NoValueSet.NO_VALUE_SET]] = NO_VALUE_SET ) -> Union[T, D]: try: return self._current_vars[self._name] except KeyError: if default is not RunVar.NO_VALUE_SET: return default elif self._default is not RunVar.NO_VALUE_SET: return self._default raise LookupError(f'Run variable "{self._name}" has no value and no default set') def set(self, value: T) -> RunvarToken[T]: current_vars = self._current_vars token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET)) current_vars[self._name] = value return token def reset(self, token: RunvarToken[T]) -> None: if token._var is not self: raise ValueError('This token does not belong to this RunVar') if token._redeemed: raise ValueError('This token has already been used') if token._value is _NoValueSet.NO_VALUE_SET: try: del self._current_vars[self._name] except KeyError: pass else: self._current_vars[self._name] = token._value token._redeemed = True def __repr__(self) -> str: return f'' anyio-3.5.0/src/anyio/py.typed000066400000000000000000000000001416724134300162360ustar00rootroot00000000000000anyio-3.5.0/src/anyio/pytest_plugin.py000066400000000000000000000126501416724134300200350ustar00rootroot00000000000000from contextlib import contextmanager from inspect import isasyncgenfunction, iscoroutinefunction from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, Tuple, cast import pytest import sniffio from ._core._eventloop import get_all_backends, get_asynclib from .abc import TestRunner if TYPE_CHECKING: from _pytest.config import Config _current_runner: Optional[TestRunner] = None def extract_backend_and_options(backend: object) -> Tuple[str, Dict[str, Any]]: if isinstance(backend, str): return backend, {} elif isinstance(backend, tuple) and len(backend) == 2: if isinstance(backend[0], str) and isinstance(backend[1], dict): return cast(Tuple[str, Dict[str, Any]], backend) raise TypeError('anyio_backend must be either a string or tuple of (string, dict)') @contextmanager def get_runner(backend_name: str, backend_options: Dict[str, Any]) -> Iterator[TestRunner]: global _current_runner if _current_runner: yield _current_runner return asynclib = get_asynclib(backend_name) token = None if sniffio.current_async_library_cvar.get(None) is None: # Since we're in control of the event loop, we can cache the name of the async library token = sniffio.current_async_library_cvar.set(backend_name) try: backend_options = backend_options or {} with asynclib.TestRunner(**backend_options) as runner: _current_runner = runner yield runner finally: _current_runner = None if token: sniffio.current_async_library_cvar.reset(token) def pytest_configure(config: "Config") -> None: config.addinivalue_line('markers', 'anyio: mark the (coroutine function) test to be run ' 'asynchronously via anyio.') def pytest_fixture_setup(fixturedef: Any, request: Any) -> None: def wrapper(*args, anyio_backend, **kwargs): # type: ignore[no-untyped-def] backend_name, backend_options = extract_backend_and_options(anyio_backend) if has_backend_arg: kwargs['anyio_backend'] = anyio_backend with get_runner(backend_name, backend_options) as runner: if isasyncgenfunction(func): gen = func(*args, **kwargs) try: value = runner.call(gen.asend, None) except StopAsyncIteration: raise RuntimeError('Async generator did not yield') 
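                    # Hand the fixture value to the test, then finalize the async
                    # generator during teardown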
yield value try: runner.call(gen.asend, None) except StopAsyncIteration: pass else: runner.call(gen.aclose) raise RuntimeError('Async generator fixture did not stop') else: yield runner.call(func, *args, **kwargs) # Only apply this to coroutine functions and async generator functions in requests that involve # the anyio_backend fixture func = fixturedef.func if isasyncgenfunction(func) or iscoroutinefunction(func): if 'anyio_backend' in request.fixturenames: has_backend_arg = 'anyio_backend' in fixturedef.argnames fixturedef.func = wrapper if not has_backend_arg: fixturedef.argnames += ('anyio_backend',) @pytest.hookimpl(tryfirst=True) def pytest_pycollect_makeitem(collector: Any, name: Any, obj: Any) -> None: if collector.istestfunction(obj, name): inner_func = obj.hypothesis.inner_test if hasattr(obj, 'hypothesis') else obj if iscoroutinefunction(inner_func): marker = collector.get_closest_marker('anyio') own_markers = getattr(obj, 'pytestmark', ()) if marker or any(marker.name == 'anyio' for marker in own_markers): pytest.mark.usefixtures('anyio_backend')(obj) @pytest.hookimpl(tryfirst=True) def pytest_pyfunc_call(pyfuncitem: Any) -> Optional[bool]: def run_with_hypothesis(**kwargs: Any) -> None: with get_runner(backend_name, backend_options) as runner: runner.call(original_func, **kwargs) backend = pyfuncitem.funcargs.get('anyio_backend') if backend: backend_name, backend_options = extract_backend_and_options(backend) if hasattr(pyfuncitem.obj, 'hypothesis'): # Wrap the inner test function unless it's already wrapped original_func = pyfuncitem.obj.hypothesis.inner_test if original_func.__qualname__ != run_with_hypothesis.__qualname__: if iscoroutinefunction(original_func): pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis return None if iscoroutinefunction(pyfuncitem.obj): funcargs = pyfuncitem.funcargs testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} with get_runner(backend_name, backend_options) as runner: runner.call(pyfuncitem.obj, **testargs) return True return None @pytest.fixture(params=get_all_backends()) def anyio_backend(request: Any) -> Any: return request.param @pytest.fixture def anyio_backend_name(anyio_backend: Any) -> str: if isinstance(anyio_backend, str): return anyio_backend else: return anyio_backend[0] @pytest.fixture def anyio_backend_options(anyio_backend: Any) -> Dict[str, Any]: if isinstance(anyio_backend, str): return {} else: return anyio_backend[1] anyio-3.5.0/src/anyio/streams/000077500000000000000000000000001416724134300162275ustar00rootroot00000000000000anyio-3.5.0/src/anyio/streams/__init__.py000066400000000000000000000000001416724134300203260ustar00rootroot00000000000000anyio-3.5.0/src/anyio/streams/buffered.py000066400000000000000000000105231416724134300203640ustar00rootroot00000000000000from dataclasses import dataclass, field from typing import Any, Callable, Mapping from .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead from ..abc import AnyByteReceiveStream, ByteReceiveStream @dataclass(eq=False) class BufferedByteReceiveStream(ByteReceiveStream): """ Wraps any bytes-based receive stream and uses a buffer to provide sophisticated receiving capabilities in the form of a byte stream. 
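    Usage sketch (``receive_stream`` is any existing bytes-based receive stream)::

        buffered = BufferedByteReceiveStream(receive_stream)
        length_prefix = await buffered.receive_exactly(4)
        line = await buffered.receive_until(b'\r\n', max_bytes=65536)
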
""" receive_stream: AnyByteReceiveStream _buffer: bytearray = field(init=False, default_factory=bytearray) _closed: bool = field(init=False, default=False) async def aclose(self) -> None: await self.receive_stream.aclose() self._closed = True @property def buffer(self) -> bytes: """The bytes currently in the buffer.""" return bytes(self._buffer) @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return self.receive_stream.extra_attributes async def receive(self, max_bytes: int = 65536) -> bytes: if self._closed: raise ClosedResourceError if self._buffer: chunk = bytes(self._buffer[:max_bytes]) del self._buffer[:max_bytes] return chunk elif isinstance(self.receive_stream, ByteReceiveStream): return await self.receive_stream.receive(max_bytes) else: # With a bytes-oriented object stream, we need to handle any surplus bytes we get from # the receive() call chunk = await self.receive_stream.receive() if len(chunk) > max_bytes: # Save the surplus bytes in the buffer self._buffer.extend(chunk[max_bytes:]) return chunk[:max_bytes] else: return chunk async def receive_exactly(self, nbytes: int) -> bytes: """ Read exactly the given amount of bytes from the stream. :param nbytes: the number of bytes to read :return: the bytes read :raises ~anyio.IncompleteRead: if the stream was closed before the requested amount of bytes could be read from the stream """ while True: remaining = nbytes - len(self._buffer) if remaining <= 0: retval = self._buffer[:nbytes] del self._buffer[:nbytes] return bytes(retval) try: if isinstance(self.receive_stream, ByteReceiveStream): chunk = await self.receive_stream.receive(remaining) else: chunk = await self.receive_stream.receive() except EndOfStream as exc: raise IncompleteRead from exc self._buffer.extend(chunk) async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes: """ Read from the stream until the delimiter is found or max_bytes have been read. :param delimiter: the marker to look for in the stream :param max_bytes: maximum number of bytes that will be read before raising :exc:`~anyio.DelimiterNotFound` :return: the bytes read (not including the delimiter) :raises ~anyio.IncompleteRead: if the stream was closed before the delimiter was found :raises ~anyio.DelimiterNotFound: if the delimiter is not found within the bytes read up to the maximum allowed """ delimiter_size = len(delimiter) offset = 0 while True: # Check if the delimiter can be found in the current buffer index = self._buffer.find(delimiter, offset) if index >= 0: found = self._buffer[:index] del self._buffer[:index + len(delimiter):] return bytes(found) # Check if the buffer is already at or over the limit if len(self._buffer) >= max_bytes: raise DelimiterNotFound(max_bytes) # Read more data into the buffer from the socket try: data = await self.receive_stream.receive() except EndOfStream as exc: raise IncompleteRead from exc # Move the offset forward and add the new data to the buffer offset = max(len(self._buffer) - delimiter_size + 1, 0) self._buffer.extend(data) anyio-3.5.0/src/anyio/streams/file.py000066400000000000000000000103711416724134300175220ustar00rootroot00000000000000from io import SEEK_SET, UnsupportedOperation from os import PathLike from pathlib import Path from typing import Any, BinaryIO, Callable, Dict, Mapping, Union, cast from .. 
import ( BrokenResourceError, ClosedResourceError, EndOfStream, TypedAttributeSet, to_thread, typed_attribute) from ..abc import ByteReceiveStream, ByteSendStream class FileStreamAttribute(TypedAttributeSet): #: the open file descriptor file: BinaryIO = typed_attribute() #: the path of the file on the file system, if available (file must be a real file) path: Path = typed_attribute() #: the file number, if available (file must be a real file or a TTY) fileno: int = typed_attribute() class _BaseFileStream: def __init__(self, file: BinaryIO): self._file = file async def aclose(self) -> None: await to_thread.run_sync(self._file.close) @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: attributes: Dict[Any, Callable[[], Any]] = { FileStreamAttribute.file: lambda: self._file, } if hasattr(self._file, 'name'): attributes[FileStreamAttribute.path] = lambda: Path(self._file.name) try: self._file.fileno() except UnsupportedOperation: pass else: attributes[FileStreamAttribute.fileno] = lambda: self._file.fileno() return attributes class FileReadStream(_BaseFileStream, ByteReceiveStream): """ A byte stream that reads from a file in the file system. :param file: a file that has been opened for reading in binary mode .. versionadded:: 3.0 """ @classmethod async def from_path(cls, path: Union[str, 'PathLike[str]']) -> 'FileReadStream': """ Create a file read stream by opening the given file. :param path: path of the file to read from """ file = await to_thread.run_sync(Path(path).open, 'rb') return cls(cast(BinaryIO, file)) async def receive(self, max_bytes: int = 65536) -> bytes: try: data = await to_thread.run_sync(self._file.read, max_bytes) except ValueError: raise ClosedResourceError from None except OSError as exc: raise BrokenResourceError from exc if data: return data else: raise EndOfStream async def seek(self, position: int, whence: int = SEEK_SET) -> int: """ Seek the file to the given position. .. seealso:: :meth:`io.IOBase.seek` .. note:: Not all file descriptors are seekable. :param position: position to seek the file to :param whence: controls how ``position`` is interpreted :return: the new absolute position :raises OSError: if the file is not seekable """ return await to_thread.run_sync(self._file.seek, position, whence) async def tell(self) -> int: """ Return the current stream position. .. note:: Not all file descriptors are seekable. :return: the current absolute position :raises OSError: if the file is not seekable """ return await to_thread.run_sync(self._file.tell) class FileWriteStream(_BaseFileStream, ByteSendStream): """ A byte stream that writes to a file in the file system. :param file: a file that has been opened for writing in binary mode .. versionadded:: 3.0 """ @classmethod async def from_path(cls, path: Union[str, 'PathLike[str]'], append: bool = False) -> 'FileWriteStream': """ Create a file write stream by opening the given file for writing. 
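        A minimal sketch (the path is illustrative only)::

            async with await FileWriteStream.from_path('/tmp/output.bin') as stream:
                await stream.send(b'data')
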
:param path: path of the file to write to :param append: if ``True``, open the file for appending; if ``False``, any existing file at the given path will be truncated """ mode = 'ab' if append else 'wb' file = await to_thread.run_sync(Path(path).open, mode) return cls(cast(BinaryIO, file)) async def send(self, item: bytes) -> None: try: await to_thread.run_sync(self._file.write, item) except ValueError: raise ClosedResourceError from None except OSError as exc: raise BrokenResourceError from exc anyio-3.5.0/src/anyio/streams/memory.py000066400000000000000000000217111416724134300201130ustar00rootroot00000000000000from collections import OrderedDict, deque from dataclasses import dataclass, field from types import TracebackType from typing import Deque, Generic, List, NamedTuple, Optional, Type, TypeVar from .. import ( BrokenResourceError, ClosedResourceError, EndOfStream, WouldBlock, get_cancelled_exc_class) from .._core._compat import DeprecatedAwaitable from ..abc import Event, ObjectReceiveStream, ObjectSendStream from ..lowlevel import checkpoint T_Item = TypeVar('T_Item') class MemoryObjectStreamStatistics(NamedTuple): current_buffer_used: int #: number of items stored in the buffer #: maximum number of items that can be stored on this stream (or :data:`math.inf`) max_buffer_size: float open_send_streams: int #: number of unclosed clones of the send stream open_receive_streams: int #: number of unclosed clones of the receive stream tasks_waiting_send: int #: number of tasks blocked on :meth:`MemoryObjectSendStream.send` #: number of tasks blocked on :meth:`MemoryObjectReceiveStream.receive` tasks_waiting_receive: int @dataclass(eq=False) class MemoryObjectStreamState(Generic[T_Item]): max_buffer_size: float = field() buffer: Deque[T_Item] = field(init=False, default_factory=deque) open_send_channels: int = field(init=False, default=0) open_receive_channels: int = field(init=False, default=0) waiting_receivers: 'OrderedDict[Event, List[T_Item]]' = field(init=False, default_factory=OrderedDict) waiting_senders: 'OrderedDict[Event, T_Item]' = field(init=False, default_factory=OrderedDict) def statistics(self) -> MemoryObjectStreamStatistics: return MemoryObjectStreamStatistics( len(self.buffer), self.max_buffer_size, self.open_send_channels, self.open_receive_channels, len(self.waiting_senders), len(self.waiting_receivers)) @dataclass(eq=False) class MemoryObjectReceiveStream(Generic[T_Item], ObjectReceiveStream[T_Item]): _state: MemoryObjectStreamState[T_Item] _closed: bool = field(init=False, default=False) def __post_init__(self) -> None: self._state.open_receive_channels += 1 def receive_nowait(self) -> T_Item: """ Receive the next item if it can be done without waiting. 
:return: the received item :raises ~anyio.ClosedResourceError: if this send stream has been closed :raises ~anyio.EndOfStream: if the buffer is empty and this stream has been closed from the sending end :raises ~anyio.WouldBlock: if there are no items in the buffer and no tasks waiting to send """ if self._closed: raise ClosedResourceError if self._state.waiting_senders: # Get the item from the next sender send_event, item = self._state.waiting_senders.popitem(last=False) self._state.buffer.append(item) send_event.set() if self._state.buffer: return self._state.buffer.popleft() elif not self._state.open_send_channels: raise EndOfStream raise WouldBlock async def receive(self) -> T_Item: await checkpoint() try: return self.receive_nowait() except WouldBlock: # Add ourselves in the queue receive_event = Event() container: List[T_Item] = [] self._state.waiting_receivers[receive_event] = container try: await receive_event.wait() except get_cancelled_exc_class(): # Ignore the immediate cancellation if we already received an item, so as not to # lose it if not container: raise finally: self._state.waiting_receivers.pop(receive_event, None) if container: return container[0] else: raise EndOfStream def clone(self) -> 'MemoryObjectReceiveStream[T_Item]': """ Create a clone of this receive stream. Each clone can be closed separately. Only when all clones have been closed will the receiving end of the memory stream be considered closed by the sending ends. :return: the cloned stream """ if self._closed: raise ClosedResourceError return MemoryObjectReceiveStream(_state=self._state) def close(self) -> None: """ Close the stream. This works the exact same way as :meth:`aclose`, but is provided as a special case for the benefit of synchronous callbacks. """ if not self._closed: self._closed = True self._state.open_receive_channels -= 1 if self._state.open_receive_channels == 0: send_events = list(self._state.waiting_senders.keys()) for event in send_events: event.set() async def aclose(self) -> None: self.close() def statistics(self) -> MemoryObjectStreamStatistics: """ Return statistics about the current state of this stream. .. versionadded:: 3.0 """ return self._state.statistics() def __enter__(self) -> 'MemoryObjectReceiveStream[T_Item]': return self def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None: self.close() @dataclass(eq=False) class MemoryObjectSendStream(Generic[T_Item], ObjectSendStream[T_Item]): _state: MemoryObjectStreamState[T_Item] _closed: bool = field(init=False, default=False) def __post_init__(self) -> None: self._state.open_send_channels += 1 def send_nowait(self, item: T_Item) -> DeprecatedAwaitable: """ Send an item immediately if it can be done without waiting. 
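        For example (a sketch; a buffer size of 1 lets one item through without a
        waiting receiver)::

            send_stream, receive_stream = create_memory_object_stream(1)
            send_stream.send_nowait('hello')
            assert receive_stream.receive_nowait() == 'hello'
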
:param item: the item to send :raises ~anyio.ClosedResourceError: if this send stream has been closed :raises ~anyio.BrokenResourceError: if the stream has been closed from the receiving end :raises ~anyio.WouldBlock: if the buffer is full and there are no tasks waiting to receive """ if self._closed: raise ClosedResourceError if not self._state.open_receive_channels: raise BrokenResourceError if self._state.waiting_receivers: receive_event, container = self._state.waiting_receivers.popitem(last=False) container.append(item) receive_event.set() elif len(self._state.buffer) < self._state.max_buffer_size: self._state.buffer.append(item) else: raise WouldBlock return DeprecatedAwaitable(self.send_nowait) async def send(self, item: T_Item) -> None: await checkpoint() try: self.send_nowait(item) except WouldBlock: # Wait until there's someone on the receiving end send_event = Event() self._state.waiting_senders[send_event] = item try: await send_event.wait() except BaseException: self._state.waiting_senders.pop(send_event, None) # type: ignore[arg-type] raise if self._state.waiting_senders.pop(send_event, None): # type: ignore[arg-type] raise BrokenResourceError def clone(self) -> 'MemoryObjectSendStream[T_Item]': """ Create a clone of this send stream. Each clone can be closed separately. Only when all clones have been closed will the sending end of the memory stream be considered closed by the receiving ends. :return: the cloned stream """ if self._closed: raise ClosedResourceError return MemoryObjectSendStream(_state=self._state) def close(self) -> None: """ Close the stream. This works the exact same way as :meth:`aclose`, but is provided as a special case for the benefit of synchronous callbacks. """ if not self._closed: self._closed = True self._state.open_send_channels -= 1 if self._state.open_send_channels == 0: receive_events = list(self._state.waiting_receivers.keys()) self._state.waiting_receivers.clear() for event in receive_events: event.set() async def aclose(self) -> None: self.close() def statistics(self) -> MemoryObjectStreamStatistics: """ Return statistics about the current state of this stream. .. versionadded:: 3.0 """ return self._state.statistics() def __enter__(self) -> 'MemoryObjectSendStream[T_Item]': return self def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None: self.close() anyio-3.5.0/src/anyio/streams/stapled.py000066400000000000000000000101101416724134300202260ustar00rootroot00000000000000from dataclasses import dataclass from typing import Any, Callable, Generic, List, Mapping, Optional, Sequence, TypeVar from ..abc import ( ByteReceiveStream, ByteSendStream, ByteStream, Listener, ObjectReceiveStream, ObjectSendStream, ObjectStream, TaskGroup) T_Item = TypeVar('T_Item') T_Stream = TypeVar('T_Stream') @dataclass(eq=False) class StapledByteStream(ByteStream): """ Combines two byte streams into a single, bidirectional byte stream. Extra attributes will be provided from both streams, with the receive stream providing the values in case of a conflict. 
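    For example, a subprocess's standard streams can be stapled into one bidirectional
    stream (a sketch; ``process`` is an already opened :class:`~anyio.abc.Process`)::

        stream = StapledByteStream(process.stdin, process.stdout)
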
:param ByteSendStream send_stream: the sending byte stream :param ByteReceiveStream receive_stream: the receiving byte stream """ send_stream: ByteSendStream receive_stream: ByteReceiveStream async def receive(self, max_bytes: int = 65536) -> bytes: return await self.receive_stream.receive(max_bytes) async def send(self, item: bytes) -> None: await self.send_stream.send(item) async def send_eof(self) -> None: await self.send_stream.aclose() async def aclose(self) -> None: await self.send_stream.aclose() await self.receive_stream.aclose() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return {**self.send_stream.extra_attributes, **self.receive_stream.extra_attributes} @dataclass(eq=False) class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]): """ Combines two object streams into a single, bidirectional object stream. Extra attributes will be provided from both streams, with the receive stream providing the values in case of a conflict. :param ObjectSendStream send_stream: the sending object stream :param ObjectReceiveStream receive_stream: the receiving object stream """ send_stream: ObjectSendStream[T_Item] receive_stream: ObjectReceiveStream[T_Item] async def receive(self) -> T_Item: return await self.receive_stream.receive() async def send(self, item: T_Item) -> None: await self.send_stream.send(item) async def send_eof(self) -> None: await self.send_stream.aclose() async def aclose(self) -> None: await self.send_stream.aclose() await self.receive_stream.aclose() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return {**self.send_stream.extra_attributes, **self.receive_stream.extra_attributes} @dataclass(eq=False) class MultiListener(Generic[T_Stream], Listener[T_Stream]): """ Combines multiple listeners into one, serving connections from all of them at once. Any MultiListeners in the given collection of listeners will have their listeners moved into this one. Extra attributes are provided from each listener, with each successive listener overriding any conflicting attributes from the previous one. :param listeners: listeners to serve :type listeners: Sequence[Listener[T_Stream]] """ listeners: Sequence[Listener[T_Stream]] def __post_init__(self) -> None: listeners: List[Listener[T_Stream]] = [] for listener in self.listeners: if isinstance(listener, MultiListener): listeners.extend(listener.listeners) del listener.listeners[:] # type: ignore[attr-defined] else: listeners.append(listener) self.listeners = listeners async def serve(self, handler: Callable[[T_Stream], Any], task_group: Optional[TaskGroup] = None) -> None: from .. 
import create_task_group async with create_task_group() as tg: for listener in self.listeners: tg.start_soon(listener.serve, handler, task_group) async def aclose(self) -> None: for listener in self.listeners: await listener.aclose() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: attributes: dict = {} for listener in self.listeners: attributes.update(listener.extra_attributes) return attributes anyio-3.5.0/src/anyio/streams/text.py000066400000000000000000000115461416724134300175740ustar00rootroot00000000000000import codecs from dataclasses import InitVar, dataclass, field from typing import Any, Callable, Mapping, Tuple from ..abc import ( AnyByteReceiveStream, AnyByteSendStream, AnyByteStream, ObjectReceiveStream, ObjectSendStream, ObjectStream) @dataclass(eq=False) class TextReceiveStream(ObjectReceiveStream[str]): """ Stream wrapper that decodes bytes to strings using the given encoding. Decoding is done using :class:`~codecs.IncrementalDecoder` which returns any completely received unicode characters as soon as they come in. :param transport_stream: any bytes-based receive stream :param encoding: character encoding to use for decoding bytes to strings (defaults to ``utf-8``) :param errors: handling scheme for decoding errors (defaults to ``strict``; see the `codecs module documentation`_ for a comprehensive list of options) .. _codecs module documentation: https://docs.python.org/3/library/codecs.html#codec-objects """ transport_stream: AnyByteReceiveStream encoding: InitVar[str] = 'utf-8' errors: InitVar[str] = 'strict' _decoder: codecs.IncrementalDecoder = field(init=False) def __post_init__(self, encoding: str, errors: str) -> None: decoder_class = codecs.getincrementaldecoder(encoding) self._decoder = decoder_class(errors=errors) async def receive(self) -> str: while True: chunk = await self.transport_stream.receive() decoded = self._decoder.decode(chunk) if decoded: return decoded async def aclose(self) -> None: await self.transport_stream.aclose() self._decoder.reset() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return self.transport_stream.extra_attributes @dataclass(eq=False) class TextSendStream(ObjectSendStream[str]): """ Sends strings to the wrapped stream as bytes using the given encoding. :param AnyByteSendStream transport_stream: any bytes-based send stream :param str encoding: character encoding to use for encoding strings to bytes (defaults to ``utf-8``) :param str errors: handling scheme for encoding errors (defaults to ``strict``; see the `codecs module documentation`_ for a comprehensive list of options) .. _codecs module documentation: https://docs.python.org/3/library/codecs.html#codec-objects """ transport_stream: AnyByteSendStream encoding: InitVar[str] = 'utf-8' errors: str = 'strict' _encoder: Callable[..., Tuple[bytes, int]] = field(init=False) def __post_init__(self, encoding: str) -> None: self._encoder = codecs.getencoder(encoding) async def send(self, item: str) -> None: encoded = self._encoder(item, self.errors)[0] await self.transport_stream.send(encoded) async def aclose(self) -> None: await self.transport_stream.aclose() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return self.transport_stream.extra_attributes @dataclass(eq=False) class TextStream(ObjectStream[str]): """ A bidirectional stream that decodes bytes to strings on receive and encodes strings to bytes on send. 
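    Usage sketch (``transport`` is any existing bidirectional byte stream)::

        text_stream = TextStream(transport)
        await text_stream.send('hello')
        reply = await text_stream.receive()
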
Extra attributes will be provided from both streams, with the receive stream providing the values in case of a conflict. :param AnyByteStream transport_stream: any bytes-based stream :param str encoding: character encoding to use for encoding/decoding strings to/from bytes (defaults to ``utf-8``) :param str errors: handling scheme for encoding errors (defaults to ``strict``; see the `codecs module documentation`_ for a comprehensive list of options) .. _codecs module documentation: https://docs.python.org/3/library/codecs.html#codec-objects """ transport_stream: AnyByteStream encoding: InitVar[str] = 'utf-8' errors: InitVar[str] = 'strict' _receive_stream: TextReceiveStream = field(init=False) _send_stream: TextSendStream = field(init=False) def __post_init__(self, encoding: str, errors: str) -> None: self._receive_stream = TextReceiveStream(self.transport_stream, encoding=encoding, errors=errors) self._send_stream = TextSendStream(self.transport_stream, encoding=encoding, errors=errors) async def receive(self) -> str: return await self._receive_stream.receive() async def send(self, item: str) -> None: await self._send_stream.send(item) async def send_eof(self) -> None: await self.transport_stream.send_eof() async def aclose(self) -> None: await self._send_stream.aclose() await self._receive_stream.aclose() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return {**self._send_stream.extra_attributes, **self._receive_stream.extra_attributes} anyio-3.5.0/src/anyio/streams/tls.py000066400000000000000000000270021416724134300174040ustar00rootroot00000000000000import logging import re import ssl from dataclasses import dataclass from functools import wraps from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, TypeVar, Union from .. import BrokenResourceError, EndOfStream, aclose_forcefully, get_cancelled_exc_class from .._core._typedattr import TypedAttributeSet, typed_attribute from ..abc import AnyByteStream, ByteStream, Listener, TaskGroup T_Retval = TypeVar('T_Retval') _PCTRTT = Tuple[Tuple[str, str], ...] _PCTRTTT = Tuple[_PCTRTT, ...] class TLSAttribute(TypedAttributeSet): """Contains Transport Layer Security related attributes.""" #: the selected ALPN protocol alpn_protocol: Optional[str] = typed_attribute() #: the channel binding for type ``tls-unique`` channel_binding_tls_unique: bytes = typed_attribute() #: the selected cipher cipher: Tuple[str, str, int] = typed_attribute() #: the peer certificate in dictionary form (see :meth:`ssl.SSLSocket.getpeercert` for more #: information) peer_certificate: Optional[Dict[str, Union[str, _PCTRTTT, _PCTRTT]]] = typed_attribute() #: the peer certificate in binary form peer_certificate_binary: Optional[bytes] = typed_attribute() #: ``True`` if this is the server side of the connection server_side: bool = typed_attribute() #: ciphers shared between both ends of the TLS connection shared_ciphers: List[Tuple[str, str, int]] = typed_attribute() #: the :class:`~ssl.SSLObject` used for encryption ssl_object: ssl.SSLObject = typed_attribute() #: ``True`` if this stream does (and expects) a closing TLS handshake when the stream is being #: closed standard_compatible: bool = typed_attribute() #: the TLS protocol version (e.g. ``TLSv1.2``) tls_version: str = typed_attribute() @dataclass(eq=False) class TLSStream(ByteStream): """ A stream wrapper that encrypts all sent data and decrypts received data. This class has no public initializer; use :meth:`wrap` instead. 
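    Client-side usage sketch (``tcp_stream`` is an already established byte stream; the
    host name is illustrative)::

        tls_stream = await TLSStream.wrap(tcp_stream, hostname='example.org')
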
All extra attributes from :class:`~TLSAttribute` are supported. :var AnyByteStream transport_stream: the wrapped stream """ transport_stream: AnyByteStream standard_compatible: bool _ssl_object: ssl.SSLObject _read_bio: ssl.MemoryBIO _write_bio: ssl.MemoryBIO @classmethod async def wrap(cls, transport_stream: AnyByteStream, *, server_side: Optional[bool] = None, hostname: Optional[str] = None, ssl_context: Optional[ssl.SSLContext] = None, standard_compatible: bool = True) -> 'TLSStream': """ Wrap an existing stream with Transport Layer Security. This performs a TLS handshake with the peer. :param transport_stream: a bytes-transporting stream to wrap :param server_side: ``True`` if this is the server side of the connection, ``False`` if this is the client side (if omitted, will be set to ``False`` if ``hostname`` has been provided, ``False`` otherwise). Used only to create a default context when an explicit context has not been provided. :param hostname: host name of the peer (if host name checking is desired) :param ssl_context: the SSLContext object to use (if not provided, a secure default will be created) :param standard_compatible: if ``False``, skip the closing handshake when closing the connection, and don't raise an exception if the peer does the same :raises ~ssl.SSLError: if the TLS handshake fails """ if server_side is None: server_side = not hostname if not ssl_context: purpose = ssl.Purpose.CLIENT_AUTH if server_side else ssl.Purpose.SERVER_AUTH ssl_context = ssl.create_default_context(purpose) # Re-enable detection of unexpected EOFs if it was disabled by Python if hasattr(ssl, 'OP_IGNORE_UNEXPECTED_EOF'): ssl_context.options ^= ssl.OP_IGNORE_UNEXPECTED_EOF # type: ignore[attr-defined] bio_in = ssl.MemoryBIO() bio_out = ssl.MemoryBIO() ssl_object = ssl_context.wrap_bio(bio_in, bio_out, server_side=server_side, server_hostname=hostname) wrapper = cls(transport_stream=transport_stream, standard_compatible=standard_compatible, _ssl_object=ssl_object, _read_bio=bio_in, _write_bio=bio_out) await wrapper._call_sslobject_method(ssl_object.do_handshake) return wrapper async def _call_sslobject_method( self, func: Callable[..., T_Retval], *args: object ) -> T_Retval: while True: try: result = func(*args) except ssl.SSLWantReadError: try: # Flush any pending writes first if self._write_bio.pending: await self.transport_stream.send(self._write_bio.read()) data = await self.transport_stream.receive() except EndOfStream: self._read_bio.write_eof() except OSError as exc: self._read_bio.write_eof() self._write_bio.write_eof() raise BrokenResourceError from exc else: self._read_bio.write(data) except ssl.SSLWantWriteError: await self.transport_stream.send(self._write_bio.read()) except ssl.SSLSyscallError as exc: self._read_bio.write_eof() self._write_bio.write_eof() raise BrokenResourceError from exc except ssl.SSLError as exc: self._read_bio.write_eof() self._write_bio.write_eof() if (isinstance(exc, ssl.SSLEOFError) or 'UNEXPECTED_EOF_WHILE_READING' in exc.strerror): if self.standard_compatible: raise BrokenResourceError from exc else: raise EndOfStream from None raise else: # Flush any pending writes first if self._write_bio.pending: await self.transport_stream.send(self._write_bio.read()) return result async def unwrap(self) -> Tuple[AnyByteStream, bytes]: """ Does the TLS closing handshake. 
:return: a tuple of (wrapped byte stream, bytes left in the read buffer) """ await self._call_sslobject_method(self._ssl_object.unwrap) self._read_bio.write_eof() self._write_bio.write_eof() return self.transport_stream, self._read_bio.read() async def aclose(self) -> None: if self.standard_compatible: try: await self.unwrap() except BaseException: await aclose_forcefully(self.transport_stream) raise await self.transport_stream.aclose() async def receive(self, max_bytes: int = 65536) -> bytes: data = await self._call_sslobject_method(self._ssl_object.read, max_bytes) if not data: raise EndOfStream return data async def send(self, item: bytes) -> None: await self._call_sslobject_method(self._ssl_object.write, item) async def send_eof(self) -> None: tls_version = self.extra(TLSAttribute.tls_version) match = re.match(r'TLSv(\d+)(?:\.(\d+))?', tls_version) if match: major, minor = int(match.group(1)), int(match.group(2) or 0) if (major, minor) < (1, 3): raise NotImplementedError(f'send_eof() requires at least TLSv1.3; current ' f'session uses {tls_version}') raise NotImplementedError('send_eof() has not yet been implemented for TLS streams') @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return { **self.transport_stream.extra_attributes, TLSAttribute.alpn_protocol: self._ssl_object.selected_alpn_protocol, TLSAttribute.channel_binding_tls_unique: self._ssl_object.get_channel_binding, TLSAttribute.cipher: self._ssl_object.cipher, TLSAttribute.peer_certificate: lambda: self._ssl_object.getpeercert(False), TLSAttribute.peer_certificate_binary: lambda: self._ssl_object.getpeercert(True), TLSAttribute.server_side: lambda: self._ssl_object.server_side, TLSAttribute.shared_ciphers: lambda: self._ssl_object.shared_ciphers(), TLSAttribute.standard_compatible: lambda: self.standard_compatible, TLSAttribute.ssl_object: lambda: self._ssl_object, TLSAttribute.tls_version: self._ssl_object.version } @dataclass(eq=False) class TLSListener(Listener[TLSStream]): """ A convenience listener that wraps another listener and auto-negotiates a TLS session on every accepted connection. If the TLS handshake times out or raises an exception, :meth:`handle_handshake_error` is called to do whatever post-mortem processing is deemed necessary. Supports only the :attr:`~TLSAttribute.standard_compatible` extra attribute. :param Listener listener: the listener to wrap :param ssl_context: the SSL context object :param standard_compatible: a flag passed through to :meth:`TLSStream.wrap` :param handshake_timeout: time limit for the TLS handshake (passed to :func:`~anyio.fail_after`) """ listener: Listener[Any] ssl_context: ssl.SSLContext standard_compatible: bool = True handshake_timeout: float = 30 @staticmethod async def handle_handshake_error(exc: BaseException, stream: AnyByteStream) -> None: f""" Handle an exception raised during the TLS handshake. This method does 3 things: #. Forcefully closes the original stream #. Logs the exception (unless it was a cancellation exception) using the ``{__name__}`` logger #. 
Reraises the exception if it was a base exception or a cancellation exception :param exc: the exception :param stream: the original stream """ await aclose_forcefully(stream) # Log all except cancellation exceptions if not isinstance(exc, get_cancelled_exc_class()): logging.getLogger(__name__).exception('Error during TLS handshake') # Only reraise base exceptions and cancellation exceptions if not isinstance(exc, Exception) or isinstance(exc, get_cancelled_exc_class()): raise async def serve(self, handler: Callable[[TLSStream], Any], task_group: Optional[TaskGroup] = None) -> None: @wraps(handler) async def handler_wrapper(stream: AnyByteStream) -> None: from .. import fail_after try: with fail_after(self.handshake_timeout): wrapped_stream = await TLSStream.wrap( stream, ssl_context=self.ssl_context, standard_compatible=self.standard_compatible) except BaseException as exc: await self.handle_handshake_error(exc, stream) else: await handler(wrapped_stream) await self.listener.serve(handler_wrapper, task_group) async def aclose(self) -> None: await self.listener.aclose() @property def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: return { TLSAttribute.standard_compatible: lambda: self.standard_compatible, } anyio-3.5.0/src/anyio/to_process.py000066400000000000000000000214741416724134300173130ustar00rootroot00000000000000import os import pickle import subprocess import sys from collections import deque from importlib.util import module_from_spec, spec_from_file_location from typing import Callable, Deque, List, Optional, Set, Tuple, TypeVar, cast from ._core._eventloop import current_time, get_asynclib, get_cancelled_exc_class from ._core._exceptions import BrokenWorkerProcess from ._core._subprocesses import open_process from ._core._synchronization import CapacityLimiter from ._core._tasks import CancelScope, fail_after from .abc import ByteReceiveStream, ByteSendStream, Process from .lowlevel import RunVar, checkpoint_if_cancelled from .streams.buffered import BufferedByteReceiveStream WORKER_MAX_IDLE_TIME = 300 # 5 minutes T_Retval = TypeVar('T_Retval') _process_pool_workers: RunVar[Set[Process]] = RunVar('_process_pool_workers') _process_pool_idle_workers: RunVar[Deque[Tuple[Process, float]]] = RunVar( '_process_pool_idle_workers') _default_process_limiter: RunVar[CapacityLimiter] = RunVar('_default_process_limiter') async def run_sync( func: Callable[..., T_Retval], *args: object, cancellable: bool = False, limiter: Optional[CapacityLimiter] = None) -> T_Retval: """ Call the given function with the given arguments in a worker process. If the ``cancellable`` option is enabled and the task waiting for its completion is cancelled, the worker process running it will be abruptly terminated using SIGKILL (or ``terminateProcess()`` on Windows). :param func: a callable :param args: positional arguments for the callable :param cancellable: ``True`` to allow cancellation of the operation while it's running :param limiter: capacity limiter to use to limit the total amount of processes running (if omitted, the default limiter is used) :return: an awaitable that yields the return value of the function. 
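    Usage sketch (any picklable top-level callable works; :func:`math.factorial` is
    merely illustrative)::

        import math

        from anyio import to_process

        result = await to_process.run_sync(math.factorial, 10)
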
""" async def send_raw_command(pickled_cmd: bytes) -> object: try: await stdin.send(pickled_cmd) response = await buffered.receive_until(b'\n', 50) status, length = response.split(b' ') if status not in (b'RETURN', b'EXCEPTION'): raise RuntimeError(f'Worker process returned unexpected response: {response!r}') pickled_response = await buffered.receive_exactly(int(length)) except BaseException as exc: workers.discard(process) try: process.kill() with CancelScope(shield=True): await process.aclose() except ProcessLookupError: pass if isinstance(exc, get_cancelled_exc_class()): raise else: raise BrokenWorkerProcess from exc retval = pickle.loads(pickled_response) if status == b'EXCEPTION': assert isinstance(retval, BaseException) raise retval else: return retval # First pickle the request before trying to reserve a worker process await checkpoint_if_cancelled() request = pickle.dumps(('run', func, args), protocol=pickle.HIGHEST_PROTOCOL) # If this is the first run in this event loop thread, set up the necessary variables try: workers = _process_pool_workers.get() idle_workers = _process_pool_idle_workers.get() except LookupError: workers = set() idle_workers = deque() _process_pool_workers.set(workers) _process_pool_idle_workers.set(idle_workers) get_asynclib().setup_process_pool_exit_at_shutdown(workers) async with (limiter or current_default_process_limiter()): # Pop processes from the pool (starting from the most recently used) until we find one that # hasn't exited yet process: Process while idle_workers: process, idle_since = idle_workers.pop() if process.returncode is None: stdin = cast(ByteSendStream, process.stdin) buffered = BufferedByteReceiveStream(cast(ByteReceiveStream, process.stdout)) # Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME seconds or # longer now = current_time() killed_processes: List[Process] = [] while idle_workers: if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME: break process, idle_since = idle_workers.popleft() process.kill() workers.remove(process) killed_processes.append(process) with CancelScope(shield=True): for process in killed_processes: await process.aclose() break workers.remove(process) else: command = [sys.executable, '-u', '-m', __name__] process = await open_process(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) try: stdin = cast(ByteSendStream, process.stdin) buffered = BufferedByteReceiveStream(cast(ByteReceiveStream, process.stdout)) with fail_after(20): message = await buffered.receive(6) if message != b'READY\n': raise BrokenWorkerProcess( f'Worker process returned unexpected response: {message!r}') main_module_path = getattr(sys.modules['__main__'], '__file__', None) pickled = pickle.dumps(('init', sys.path, main_module_path), protocol=pickle.HIGHEST_PROTOCOL) await send_raw_command(pickled) except (BrokenWorkerProcess, get_cancelled_exc_class()): raise except BaseException as exc: process.kill() raise BrokenWorkerProcess('Error during worker process initialization') from exc workers.add(process) with CancelScope(shield=not cancellable): try: return cast(T_Retval, await send_raw_command(request)) finally: if process in workers: idle_workers.append((process, current_time())) def current_default_process_limiter() -> CapacityLimiter: """ Return the capacity limiter that is used by default to limit the number of worker processes. 
:return: a capacity limiter object """ try: return _default_process_limiter.get() except LookupError: limiter = CapacityLimiter(os.cpu_count() or 2) _default_process_limiter.set(limiter) return limiter def process_worker() -> None: # Redirect standard streams to os.devnull so that user code won't interfere with the # parent-worker communication stdin = sys.stdin stdout = sys.stdout sys.stdin = open(os.devnull) sys.stdout = open(os.devnull, 'w') stdout.buffer.write(b'READY\n') while True: retval = exception = None try: command, *args = pickle.load(stdin.buffer) except EOFError: return except BaseException as exc: exception = exc else: if command == 'run': func, args = args try: retval = func(*args) except BaseException as exc: exception = exc elif command == 'init': main_module_path: Optional[str] sys.path, main_module_path = args del sys.modules['__main__'] if main_module_path: # Load the parent's main module but as __mp_main__ instead of __main__ # (like multiprocessing does) to avoid infinite recursion try: spec = spec_from_file_location('__mp_main__', main_module_path) if spec and spec.loader: main = module_from_spec(spec) spec.loader.exec_module(main) sys.modules['__main__'] = main except BaseException as exc: exception = exc try: if exception is not None: status = b'EXCEPTION' pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL) else: status = b'RETURN' pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL) except BaseException as exc: exception = exc status = b'EXCEPTION' pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL) stdout.buffer.write(b'%s %d\n' % (status, len(pickled))) stdout.buffer.write(pickled) # Respect SIGTERM if isinstance(exception, SystemExit): raise exception if __name__ == '__main__': process_worker() anyio-3.5.0/src/anyio/to_thread.py000066400000000000000000000041331416724134300170750ustar00rootroot00000000000000from typing import Callable, Optional, TypeVar from warnings import warn from ._core._eventloop import get_asynclib from .abc import CapacityLimiter T_Retval = TypeVar('T_Retval') async def run_sync( func: Callable[..., T_Retval], *args: object, cancellable: bool = False, limiter: Optional[CapacityLimiter] = None) -> T_Retval: """ Call the given function with the given arguments in a worker thread. If the ``cancellable`` option is enabled and the task waiting for its completion is cancelled, the thread will still run its course but its return value (or any raised exception) will be ignored. :param func: a callable :param args: positional arguments for the callable :param cancellable: ``True`` to allow cancellation of the operation :param limiter: capacity limiter to use to limit the total amount of threads running (if omitted, the default limiter is used) :return: an awaitable that yields the return value of the function. """ return await get_asynclib().run_sync_in_worker_thread(func, *args, cancellable=cancellable, limiter=limiter) async def run_sync_in_worker_thread( func: Callable[..., T_Retval], *args: object, cancellable: bool = False, limiter: Optional[CapacityLimiter] = None) -> T_Retval: warn('run_sync_in_worker_thread() has been deprecated, use anyio.to_thread.run_sync() instead', DeprecationWarning) return await run_sync(func, *args, cancellable=cancellable, limiter=limiter) def current_default_thread_limiter() -> CapacityLimiter: """ Return the capacity limiter that is used by default to limit the number of concurrent threads. 
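    A sketch of raising the limit (``60`` is an arbitrary example value; assigning
    to ``total_tokens`` is the non-deprecated spelling of ``set_total_tokens()``)::

        from anyio.to_thread import current_default_thread_limiter

        async def raise_thread_limit() -> None:
            current_default_thread_limiter().total_tokens = 60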
:return: a capacity limiter object """ return get_asynclib().current_default_thread_limiter() def current_default_worker_thread_limiter() -> CapacityLimiter: warn('current_default_worker_thread_limiter() has been deprecated, ' 'use anyio.to_thread.current_default_thread_limiter() instead', DeprecationWarning) return current_default_thread_limiter() anyio-3.5.0/tests/000077500000000000000000000000001416724134300140055ustar00rootroot00000000000000anyio-3.5.0/tests/__init__.py000066400000000000000000000000001416724134300161040ustar00rootroot00000000000000anyio-3.5.0/tests/conftest.py000066400000000000000000000042251416724134300162070ustar00rootroot00000000000000import asyncio import ssl from ssl import SSLContext from typing import Any, Dict, Generator, Tuple import pytest import trustme from _pytest.fixtures import SubRequest from trustme import CA uvloop_marks = [] uvloop_policy = None try: import uvloop except ImportError: uvloop_marks.append(pytest.mark.skip(reason='uvloop not available')) else: if (hasattr(asyncio.AbstractEventLoop, 'shutdown_default_executor') and not hasattr(uvloop.loop.Loop, 'shutdown_default_executor')): uvloop_marks.append( pytest.mark.skip(reason='uvloop is missing shutdown_default_executor()')) else: uvloop_policy = uvloop.EventLoopPolicy() pytest_plugins = ['pytester', 'pytest_mock'] @pytest.fixture(params=[ pytest.param(('asyncio', {'debug': True, 'policy': asyncio.DefaultEventLoopPolicy()}), id='asyncio'), pytest.param(('asyncio', {'debug': True, 'policy': uvloop_policy}), marks=uvloop_marks, id='asyncio+uvloop'), pytest.param('trio') ]) def anyio_backend(request: SubRequest) -> Tuple[str, Dict[str, Any]]: return request.param @pytest.fixture(scope='session') def ca() -> CA: return trustme.CA() @pytest.fixture(scope='session') def server_context(ca: CA) -> SSLContext: server_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) if hasattr(ssl, 'OP_IGNORE_UNEXPECTED_EOF'): server_context.options ^= ssl.OP_IGNORE_UNEXPECTED_EOF # type: ignore[attr-defined] ca.issue_cert('localhost').configure_cert(server_context) return server_context @pytest.fixture(scope='session') def client_context(ca: CA) -> SSLContext: client_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if hasattr(ssl, 'OP_IGNORE_UNEXPECTED_EOF'): client_context.options ^= ssl.OP_IGNORE_UNEXPECTED_EOF # type: ignore[attr-defined] ca.configure_trust(client_context) return client_context @pytest.fixture def asyncio_event_loop() -> Generator[asyncio.AbstractEventLoop, None, None]: loop = asyncio.DefaultEventLoopPolicy().new_event_loop() asyncio.set_event_loop(loop) yield loop asyncio.set_event_loop(None) loop.close() anyio-3.5.0/tests/streams/000077500000000000000000000000001416724134300154635ustar00rootroot00000000000000anyio-3.5.0/tests/streams/__init__.py000066400000000000000000000000001416724134300175620ustar00rootroot00000000000000anyio-3.5.0/tests/streams/test_buffered.py000066400000000000000000000033511416724134300206600ustar00rootroot00000000000000import pytest from anyio import IncompleteRead, create_memory_object_stream from anyio.streams.buffered import BufferedByteReceiveStream pytestmark = pytest.mark.anyio async def test_receive_exactly() -> None: send_stream, receive_stream = create_memory_object_stream(2) buffered_stream = BufferedByteReceiveStream(receive_stream) await send_stream.send(b'abcd') await send_stream.send(b'efgh') result = await buffered_stream.receive_exactly(8) assert result == b'abcdefgh' assert isinstance(result, bytes) async def 
test_receive_exactly_incomplete() -> None: send_stream, receive_stream = create_memory_object_stream(1) buffered_stream = BufferedByteReceiveStream(receive_stream) await send_stream.send(b'abcd') await send_stream.aclose() with pytest.raises(IncompleteRead): await buffered_stream.receive_exactly(8) async def test_receive_until() -> None: send_stream, receive_stream = create_memory_object_stream(2) buffered_stream = BufferedByteReceiveStream(receive_stream) await send_stream.send(b'abcd') await send_stream.send(b'efgh') result = await buffered_stream.receive_until(b'de', 10) assert result == b'abc' assert isinstance(result, bytes) result = await buffered_stream.receive_until(b'h', 10) assert result == b'fg' assert isinstance(result, bytes) async def test_receive_until_incomplete() -> None: send_stream, receive_stream = create_memory_object_stream(1) buffered_stream = BufferedByteReceiveStream(receive_stream) await send_stream.send(b'abcd') await send_stream.aclose() with pytest.raises(IncompleteRead): assert await buffered_stream.receive_until(b'de', 10) assert buffered_stream.buffer == b'abcd' anyio-3.5.0/tests/streams/test_file.py000066400000000000000000000075571416724134300200310ustar00rootroot00000000000000from pathlib import Path from typing import Union import pytest from _pytest.fixtures import SubRequest from _pytest.tmpdir import TempPathFactory from anyio import ClosedResourceError, EndOfStream from anyio.abc import ByteReceiveStream from anyio.streams.file import FileReadStream, FileStreamAttribute, FileWriteStream pytestmark = pytest.mark.anyio class TestFileReadStream: @pytest.fixture(scope='class') def file_path(self, tmp_path_factory: TempPathFactory) -> Path: path = tmp_path_factory.mktemp('filestream') / 'data.txt' path.write_text('Hello') return path @pytest.fixture(params=[False, True], ids=["str", "path"]) def file_path_or_str(self, request: SubRequest, file_path: Path) -> Union[Path, str]: return file_path if request.param else str(file_path) async def _run_filestream_test(self, stream: ByteReceiveStream) -> None: assert await stream.receive(3) == b'Hel' assert await stream.receive(3) == b'lo' with pytest.raises(EndOfStream): await stream.receive(1) async def test_read_file_as_path(self, file_path_or_str: Union[Path, str]) -> None: async with await FileReadStream.from_path(file_path_or_str) as stream: await self._run_filestream_test(stream) async def test_read_file(self, file_path: Path) -> None: with file_path.open('rb') as file: async with FileReadStream(file) as stream: await self._run_filestream_test(stream) async def test_read_after_close(self, file_path: Path) -> None: async with await FileReadStream.from_path(file_path) as stream: pass with pytest.raises(ClosedResourceError): await stream.receive() async def test_seek(self, file_path: Path) -> None: with file_path.open('rb') as file: async with FileReadStream(file) as stream: await stream.seek(2) assert await stream.tell() == 2 data = await stream.receive() assert data == b'llo' assert await stream.tell() == 5 async def test_extra_attributes(self, file_path: Path) -> None: async with await FileReadStream.from_path(file_path) as stream: path = stream.extra(FileStreamAttribute.path) assert path == file_path fileno = stream.extra(FileStreamAttribute.fileno) assert fileno > 2 file = stream.extra(FileStreamAttribute.file) assert file.fileno() == fileno class TestFileWriteStream: @pytest.fixture def file_path(self, tmp_path: Path) -> Path: return tmp_path / 'written_data.txt' async def test_write_file(self, file_path: 
Path) -> None: async with await FileWriteStream.from_path(file_path) as stream: await stream.send(b'Hel') await stream.send(b'lo') assert file_path.read_text() == 'Hello' async def test_append_file(self, file_path: Path) -> None: file_path.write_text('Hello') async with await FileWriteStream.from_path(file_path, True) as stream: await stream.send(b', World!') assert file_path.read_text() == 'Hello, World!' async def test_write_after_close(self, file_path: Path) -> None: async with await FileWriteStream.from_path(file_path, True) as stream: pass with pytest.raises(ClosedResourceError): await stream.send(b'foo') async def test_extra_attributes(self, file_path: Path) -> None: async with await FileWriteStream.from_path(file_path) as stream: path = stream.extra(FileStreamAttribute.path) assert path == file_path fileno = stream.extra(FileStreamAttribute.fileno) assert fileno > 2 file = stream.extra(FileStreamAttribute.file) assert file.fileno() == fileno anyio-3.5.0/tests/streams/test_memory.py000066400000000000000000000251211416724134300204050ustar00rootroot00000000000000from typing import List, Union import pytest from anyio import ( BrokenResourceError, CancelScope, ClosedResourceError, EndOfStream, WouldBlock, create_memory_object_stream, create_task_group, fail_after, wait_all_tasks_blocked) from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream pytestmark = pytest.mark.anyio def test_invalid_max_buffer() -> None: pytest.raises(ValueError, create_memory_object_stream, 1.0).\ match('max_buffer_size must be either an integer or math.inf') def test_negative_max_buffer() -> None: pytest.raises(ValueError, create_memory_object_stream, -1).\ match('max_buffer_size cannot be negative') async def test_receive_then_send() -> None: async def receiver() -> None: received_objects.append(await receive.receive()) received_objects.append(await receive.receive()) send, receive = create_memory_object_stream(0) received_objects: List[str] = [] async with create_task_group() as tg: tg.start_soon(receiver) await wait_all_tasks_blocked() await send.send('hello') await send.send('anyio') assert received_objects == ['hello', 'anyio'] async def test_receive_then_send_nowait() -> None: async def receiver() -> None: received_objects.append(await receive.receive()) send, receive = create_memory_object_stream(0) received_objects: List[str] = [] async with create_task_group() as tg: tg.start_soon(receiver) tg.start_soon(receiver) await wait_all_tasks_blocked() send.send_nowait('hello') send.send_nowait('anyio') assert sorted(received_objects, reverse=True) == ['hello', 'anyio'] async def test_send_then_receive_nowait() -> None: send, receive = create_memory_object_stream(0) async with create_task_group() as tg: tg.start_soon(send.send, 'hello') await wait_all_tasks_blocked() assert receive.receive_nowait() == 'hello' async def test_send_is_unblocked_after_receive_nowait() -> None: send, receive = create_memory_object_stream(1) send.send_nowait('hello') with fail_after(1): async with create_task_group() as tg: tg.start_soon(send.send, 'anyio') await wait_all_tasks_blocked() assert receive.receive_nowait() == 'hello' assert receive.receive_nowait() == 'anyio' async def test_send_nowait_then_receive_nowait() -> None: send, receive = create_memory_object_stream(2) send.send_nowait('hello') send.send_nowait('anyio') assert receive.receive_nowait() == 'hello' assert receive.receive_nowait() == 'anyio' async def test_iterate() -> None: async def receiver() -> None: async for item in receive: 
received_objects.append(item) send, receive = create_memory_object_stream() received_objects: List[str] = [] async with create_task_group() as tg: tg.start_soon(receiver) await send.send('hello') await send.send('anyio') await send.aclose() assert received_objects == ['hello', 'anyio'] async def test_receive_send_closed_send_stream() -> None: send, receive = create_memory_object_stream() await send.aclose() with pytest.raises(EndOfStream): receive.receive_nowait() with pytest.raises(ClosedResourceError): await send.send(None) async def test_receive_send_closed_receive_stream() -> None: send, receive = create_memory_object_stream() await receive.aclose() with pytest.raises(ClosedResourceError): receive.receive_nowait() with pytest.raises(BrokenResourceError): await send.send(None) async def test_cancel_receive() -> None: send, receive = create_memory_object_stream() async with create_task_group() as tg: tg.start_soon(receive.receive) await wait_all_tasks_blocked() tg.cancel_scope.cancel() with pytest.raises(WouldBlock): send.send_nowait('hello') async def test_cancel_send() -> None: send, receive = create_memory_object_stream() async with create_task_group() as tg: tg.start_soon(send.send, 'hello') await wait_all_tasks_blocked() tg.cancel_scope.cancel() with pytest.raises(WouldBlock): receive.receive_nowait() async def test_clone() -> None: send1, receive1 = create_memory_object_stream(1) send2 = send1.clone() receive2 = receive1.clone() await send1.aclose() await receive1.aclose() send2.send_nowait('hello') assert receive2.receive_nowait() == 'hello' async def test_clone_closed() -> None: send, receive = create_memory_object_stream(1) await send.aclose() await receive.aclose() pytest.raises(ClosedResourceError, send.clone) pytest.raises(ClosedResourceError, receive.clone) async def test_close_send_while_receiving() -> None: send, receive = create_memory_object_stream(1) with pytest.raises(EndOfStream): async with create_task_group() as tg: tg.start_soon(receive.receive) await wait_all_tasks_blocked() await send.aclose() async def test_close_receive_while_sending() -> None: send, receive = create_memory_object_stream(0) with pytest.raises(BrokenResourceError): async with create_task_group() as tg: tg.start_soon(send.send, 'hello') await wait_all_tasks_blocked() await receive.aclose() async def test_receive_after_send_closed() -> None: send, receive = create_memory_object_stream(1) await send.send('hello') await send.aclose() assert await receive.receive() == 'hello' async def test_receive_when_cancelled() -> None: """ Test that calling receive() in a cancelled scope prevents it from going through with the operation. """ send, receive = create_memory_object_stream() async with create_task_group() as tg: tg.start_soon(send.send, 'hello') await wait_all_tasks_blocked() tg.start_soon(send.send, 'world') await wait_all_tasks_blocked() with CancelScope() as scope: scope.cancel() await receive.receive() assert await receive.receive() == 'hello' assert await receive.receive() == 'world' async def test_send_when_cancelled() -> None: """ Test that calling send() in a cancelled scope prevents it from going through with the operation. 
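    Only the send() performed outside the cancelled scope should deliver its item,
    so the receiver is expected to end up with ['world'] alone.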
""" async def receiver() -> None: received.append(await receive.receive()) received: List[str] = [] send, receive = create_memory_object_stream() async with create_task_group() as tg: tg.start_soon(receiver) with CancelScope() as scope: scope.cancel() await send.send('hello') await send.send('world') assert received == ['world'] async def test_cancel_during_receive() -> None: """ Test that cancelling a pending receive() operation does not cause an item in the stream to be lost. """ receiver_scope = None async def scoped_receiver() -> None: nonlocal receiver_scope with CancelScope() as receiver_scope: received.append(await receive.receive()) assert receiver_scope.cancel_called received: List[str] = [] send, receive = create_memory_object_stream() async with create_task_group() as tg: tg.start_soon(scoped_receiver) await wait_all_tasks_blocked() send.send_nowait('hello') assert receiver_scope is not None receiver_scope.cancel() assert received == ['hello'] async def test_close_receive_after_send() -> None: async def send() -> None: async with send_stream: await send_stream.send('test') async def receive() -> None: async with receive_stream: assert await receive_stream.receive() == 'test' send_stream, receive_stream = create_memory_object_stream() async with create_task_group() as tg: tg.start_soon(send) tg.start_soon(receive) async def test_statistics() -> None: send_stream, receive_stream = create_memory_object_stream(1) streams: List[Union[MemoryObjectReceiveStream[int], MemoryObjectSendStream[int]]] = [ send_stream, receive_stream] for stream in streams: statistics = stream.statistics() assert statistics.max_buffer_size == 1 assert statistics.current_buffer_used == 0 assert statistics.open_send_streams == 1 assert statistics.open_receive_streams == 1 assert statistics.tasks_waiting_send == 0 assert statistics.tasks_waiting_receive == 0 for stream in streams: async with create_task_group() as tg: # Test tasks_waiting_send send_stream.send_nowait(None) assert stream.statistics().current_buffer_used == 1 tg.start_soon(send_stream.send, None) await wait_all_tasks_blocked() assert stream.statistics().current_buffer_used == 1 assert stream.statistics().tasks_waiting_send == 1 receive_stream.receive_nowait() assert stream.statistics().current_buffer_used == 1 assert stream.statistics().tasks_waiting_send == 0 receive_stream.receive_nowait() assert stream.statistics().current_buffer_used == 0 # Test tasks_waiting_receive tg.start_soon(receive_stream.receive) await wait_all_tasks_blocked() assert stream.statistics().tasks_waiting_receive == 1 send_stream.send_nowait(None) assert stream.statistics().tasks_waiting_receive == 0 async with create_task_group() as tg: # Test tasks_waiting_send send_stream.send_nowait(None) assert stream.statistics().tasks_waiting_send == 0 for _ in range(3): tg.start_soon(send_stream.send, None) await wait_all_tasks_blocked() assert stream.statistics().tasks_waiting_send == 3 for i in range(2, -1, -1): receive_stream.receive_nowait() assert stream.statistics().tasks_waiting_send == i receive_stream.receive_nowait() assert stream.statistics().current_buffer_used == 0 assert stream.statistics().tasks_waiting_send == 0 assert stream.statistics().tasks_waiting_receive == 0 async def test_sync_close() -> None: send_stream, receive_stream = create_memory_object_stream(1) with send_stream, receive_stream: pass with pytest.raises(ClosedResourceError): send_stream.send_nowait(None) with pytest.raises(ClosedResourceError): receive_stream.receive_nowait() 
anyio-3.5.0/tests/streams/test_stapled.py000066400000000000000000000127251416724134300205370ustar00rootroot00000000000000from collections import deque from dataclasses import InitVar, dataclass, field from typing import Deque, Iterable, List, TypeVar import pytest from anyio import ClosedResourceError, EndOfStream from anyio.abc import ByteReceiveStream, ByteSendStream, ObjectReceiveStream, ObjectSendStream from anyio.streams.stapled import StapledByteStream, StapledObjectStream pytestmark = pytest.mark.anyio @dataclass class DummyByteReceiveStream(ByteReceiveStream): data: InitVar[bytes] buffer: bytearray = field(init=False) _closed: bool = field(init=False, default=False) def __post_init__(self, data: bytes) -> None: self.buffer = bytearray(data) async def receive(self, max_bytes: int = 65536) -> bytes: if self._closed: raise ClosedResourceError data = bytes(self.buffer[:max_bytes]) del self.buffer[:max_bytes] return data async def aclose(self) -> None: self._closed = True @dataclass class DummyByteSendStream(ByteSendStream): buffer: bytearray = field(init=False, default_factory=bytearray) _closed: bool = field(init=False, default=False) async def send(self, item: bytes) -> None: if self._closed: raise ClosedResourceError self.buffer.extend(item) async def aclose(self) -> None: self._closed = True class TestStapledByteStream: @pytest.fixture def send_stream(self) -> DummyByteSendStream: return DummyByteSendStream() @pytest.fixture def receive_stream(self) -> DummyByteReceiveStream: return DummyByteReceiveStream(b'hello, world') @pytest.fixture def stapled(self, send_stream: DummyByteSendStream, receive_stream: DummyByteReceiveStream) -> StapledByteStream: return StapledByteStream(send_stream, receive_stream) async def test_receive_send(self, stapled: StapledByteStream, send_stream: DummyByteSendStream) -> None: assert await stapled.receive(3) == b'hel' assert await stapled.receive() == b'lo, world' assert await stapled.receive() == b'' await stapled.send(b'how are you ') await stapled.send(b'today?') assert stapled.send_stream is send_stream assert bytes(send_stream.buffer) == b'how are you today?' 
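    # An illustrative sketch (not part of the original suite) showing that
    # aclose() on the stapled stream closes both component streams, not just
    # one half: afterwards, the dummies reject further use.
    async def test_aclose_closes_both_halves(
            self, stapled: StapledByteStream, send_stream: DummyByteSendStream,
            receive_stream: DummyByteReceiveStream) -> None:
        await stapled.aclose()
        with pytest.raises(ClosedResourceError):
            await send_stream.send(b'x')
        with pytest.raises(ClosedResourceError):
            await receive_stream.receive()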
async def test_send_eof(self, stapled: StapledByteStream) -> None: await stapled.send_eof() await stapled.send_eof() with pytest.raises(ClosedResourceError): await stapled.send(b'world') assert await stapled.receive() == b'hello, world' async def test_aclose(self, stapled: StapledByteStream) -> None: await stapled.aclose() with pytest.raises(ClosedResourceError): await stapled.receive() with pytest.raises(ClosedResourceError): await stapled.send(b'') T_Item = TypeVar('T_Item') @dataclass class DummyObjectReceiveStream(ObjectReceiveStream[T_Item]): data: InitVar[Iterable[T_Item]] buffer: Deque[T_Item] = field(init=False) _closed: bool = field(init=False, default=False) def __post_init__(self, data: Iterable[T_Item]) -> None: self.buffer = deque(data) async def receive(self) -> T_Item: if self._closed: raise ClosedResourceError if not self.buffer: raise EndOfStream return self.buffer.popleft() async def aclose(self) -> None: self._closed = True @dataclass class DummyObjectSendStream(ObjectSendStream[T_Item]): buffer: List[T_Item] = field(init=False, default_factory=list) _closed: bool = field(init=False, default=False) async def send(self, item: T_Item) -> None: if self._closed: raise ClosedResourceError self.buffer.append(item) async def aclose(self) -> None: self._closed = True class TestStapledObjectStream: @pytest.fixture def receive_stream(self) -> DummyObjectReceiveStream[str]: return DummyObjectReceiveStream(['hello', 'world']) @pytest.fixture def send_stream(self) -> DummyObjectSendStream[str]: return DummyObjectSendStream[str]() @pytest.fixture def stapled(self, receive_stream: DummyObjectReceiveStream[str], send_stream: DummyObjectSendStream[str]) -> StapledObjectStream[str]: return StapledObjectStream(send_stream, receive_stream) async def test_receive_send(self, stapled: StapledObjectStream[str], send_stream: DummyObjectSendStream[str]) -> None: assert await stapled.receive() == 'hello' assert await stapled.receive() == 'world' with pytest.raises(EndOfStream): await stapled.receive() await stapled.send('how are you ') await stapled.send('today?') assert stapled.send_stream is send_stream assert send_stream.buffer == ['how are you ', 'today?'] async def test_send_eof(self, stapled: StapledObjectStream[str]) -> None: await stapled.send_eof() await stapled.send_eof() with pytest.raises(ClosedResourceError): await stapled.send('world') assert await stapled.receive() == 'hello' assert await stapled.receive() == 'world' async def test_aclose(self, stapled: StapledObjectStream[str]) -> None: await stapled.aclose() with pytest.raises(ClosedResourceError): await stapled.receive() with pytest.raises(ClosedResourceError): await stapled.send(b'') # type: ignore[arg-type] anyio-3.5.0/tests/streams/test_text.py000066400000000000000000000042641416724134300200660ustar00rootroot00000000000000import platform import sys import pytest from anyio import create_memory_object_stream from anyio.streams.stapled import StapledObjectStream from anyio.streams.text import TextReceiveStream, TextSendStream, TextStream pytestmark = pytest.mark.anyio async def test_receive() -> None: send_stream, receive_stream = create_memory_object_stream(1) text_stream = TextReceiveStream(receive_stream) await send_stream.send(b'\xc3\xa5\xc3\xa4\xc3') # ends with half of the "ö" letter assert await text_stream.receive() == 'åä' # Send the missing byte for "ö" await send_stream.send(b'\xb6') assert await text_stream.receive() == 'ö' async def test_send() -> None: send_stream, receive_stream = create_memory_object_stream(1) 
text_stream = TextSendStream(send_stream) await text_stream.send('åäö') assert await receive_stream.receive() == b'\xc3\xa5\xc3\xa4\xc3\xb6' @pytest.mark.xfail(platform.python_implementation() == 'PyPy' and sys.pypy_version_info < (7, 3, 2), # type: ignore[attr-defined] reason='PyPy has a bug in its incremental UTF-8 decoder (#3274)') async def test_receive_encoding_error() -> None: send_stream, receive_stream = create_memory_object_stream(1) text_stream = TextReceiveStream(receive_stream, errors='replace') await send_stream.send(b'\xe5\xe4\xf6') # "åäö" in latin-1 assert await text_stream.receive() == '���' async def test_send_encoding_error() -> None: send_stream, receive_stream = create_memory_object_stream(1) text_stream = TextSendStream(send_stream, encoding='iso-8859-1', errors='replace') await text_stream.send('€') assert await receive_stream.receive() == b'?' async def test_bidirectional_stream() -> None: send_stream, receive_stream = create_memory_object_stream(1) stapled_stream = StapledObjectStream(send_stream, receive_stream) text_stream = TextStream(stapled_stream) await text_stream.send('åäö') assert await receive_stream.receive() == b'\xc3\xa5\xc3\xa4\xc3\xb6' await send_stream.send(b'\xc3\xa6\xc3\xb8') assert await text_stream.receive() == 'æø' assert text_stream.extra_attributes == {} anyio-3.5.0/tests/streams/test_tls.py000066400000000000000000000342701416724134300177040ustar00rootroot00000000000000import socket import ssl from contextlib import ExitStack from threading import Thread from typing import ContextManager, NoReturn import pytest from trustme import CA from anyio import ( BrokenResourceError, EndOfStream, Event, connect_tcp, create_task_group, create_tcp_listener) from anyio.abc import AnyByteStream, SocketAttribute, SocketStream from anyio.streams.tls import TLSAttribute, TLSListener, TLSStream pytestmark = pytest.mark.anyio class TestTLSStream: async def test_send_receive(self, server_context: ssl.SSLContext, client_context: ssl.SSLContext) -> None: def serve_sync() -> None: conn, addr = server_sock.accept() conn.settimeout(1) data = conn.recv(10) conn.send(data[::-1]) conn.close() server_sock = server_context.wrap_socket(socket.socket(), server_side=True, suppress_ragged_eofs=False) server_sock.settimeout(1) server_sock.bind(('127.0.0.1', 0)) server_sock.listen() server_thread = Thread(target=serve_sync) server_thread.start() async with await connect_tcp(*server_sock.getsockname()) as stream: wrapper = await TLSStream.wrap(stream, hostname='localhost', ssl_context=client_context) await wrapper.send(b'hello') response = await wrapper.receive() server_thread.join() server_sock.close() assert response == b'olleh' async def test_extra_attributes(self, server_context: ssl.SSLContext, client_context: ssl.SSLContext) -> None: def serve_sync() -> None: conn, addr = server_sock.accept() with conn: conn.settimeout(1) conn.recv(1) server_context.set_alpn_protocols(['h2']) client_context.set_alpn_protocols(['h2']) server_sock = server_context.wrap_socket(socket.socket(), server_side=True, suppress_ragged_eofs=True) server_sock.settimeout(1) server_sock.bind(('127.0.0.1', 0)) server_sock.listen() server_thread = Thread(target=serve_sync) server_thread.start() async with await connect_tcp(*server_sock.getsockname()) as stream: wrapper = await TLSStream.wrap(stream, hostname='localhost', ssl_context=client_context, standard_compatible=False) async with wrapper: for name, attribute in SocketAttribute.__dict__.items(): if not name.startswith('_'): assert 
wrapper.extra(attribute) == stream.extra(attribute) assert wrapper.extra(TLSAttribute.alpn_protocol) == 'h2' assert isinstance(wrapper.extra(TLSAttribute.channel_binding_tls_unique), bytes) assert isinstance(wrapper.extra(TLSAttribute.cipher), tuple) assert isinstance(wrapper.extra(TLSAttribute.peer_certificate), dict) assert isinstance(wrapper.extra(TLSAttribute.peer_certificate_binary), bytes) assert wrapper.extra(TLSAttribute.server_side) is False assert isinstance(wrapper.extra(TLSAttribute.shared_ciphers), list) assert isinstance(wrapper.extra(TLSAttribute.ssl_object), ssl.SSLObject) assert wrapper.extra(TLSAttribute.standard_compatible) is False assert wrapper.extra(TLSAttribute.tls_version).startswith('TLSv') await wrapper.send(b'\x00') server_thread.join() server_sock.close() async def test_unwrap(self, server_context: ssl.SSLContext, client_context: ssl.SSLContext) -> None: def serve_sync() -> None: conn, addr = server_sock.accept() conn.settimeout(1) conn.send(b'encrypted') unencrypted = conn.unwrap() unencrypted.send(b'unencrypted') unencrypted.close() server_sock = server_context.wrap_socket(socket.socket(), server_side=True, suppress_ragged_eofs=False) server_sock.settimeout(1) server_sock.bind(('127.0.0.1', 0)) server_sock.listen() server_thread = Thread(target=serve_sync) server_thread.start() async with await connect_tcp(*server_sock.getsockname()) as stream: wrapper = await TLSStream.wrap(stream, hostname='localhost', ssl_context=client_context) msg1 = await wrapper.receive() unwrapped_stream, msg2 = await wrapper.unwrap() if msg2 != b'unencrypted': msg2 += await unwrapped_stream.receive() server_thread.join() server_sock.close() assert msg1 == b'encrypted' assert msg2 == b'unencrypted' @pytest.mark.skipif(not ssl.HAS_ALPN, reason='ALPN support not available') async def test_alpn_negotiation(self, server_context: ssl.SSLContext, client_context: ssl.SSLContext) -> None: def serve_sync() -> None: conn, addr = server_sock.accept() conn.settimeout(1) selected_alpn_protocol = conn.selected_alpn_protocol() assert selected_alpn_protocol is not None conn.send(selected_alpn_protocol.encode()) conn.close() server_context.set_alpn_protocols(['dummy1', 'dummy2']) client_context.set_alpn_protocols(['dummy2', 'dummy3']) server_sock = server_context.wrap_socket(socket.socket(), server_side=True, suppress_ragged_eofs=False) server_sock.settimeout(1) server_sock.bind(('127.0.0.1', 0)) server_sock.listen() server_thread = Thread(target=serve_sync) server_thread.start() async with await connect_tcp(*server_sock.getsockname()) as stream: wrapper = await TLSStream.wrap(stream, hostname='localhost', ssl_context=client_context) assert wrapper.extra(TLSAttribute.alpn_protocol) == 'dummy2' server_alpn_protocol = await wrapper.receive() server_thread.join() server_sock.close() assert server_alpn_protocol == b'dummy2' @pytest.mark.parametrize('server_compatible, client_compatible', [ pytest.param(True, True, id='both_standard'), pytest.param(True, False, id='server_standard'), pytest.param(False, True, id='client_standard'), pytest.param(False, False, id='neither_standard') ]) async def test_ragged_eofs(self, server_context: ssl.SSLContext, client_context: ssl.SSLContext, server_compatible: bool, client_compatible: bool) -> None: server_exc = None def serve_sync() -> None: nonlocal server_exc conn, addr = server_sock.accept() try: conn.settimeout(1) conn.sendall(b'hello') if server_compatible: conn.unwrap() except BaseException as exc: server_exc = exc finally: conn.close() client_cm: ContextManager 
= ExitStack() if client_compatible and not server_compatible: client_cm = pytest.raises(BrokenResourceError) server_sock = server_context.wrap_socket(socket.socket(), server_side=True, suppress_ragged_eofs=not server_compatible) server_sock.settimeout(1) server_sock.bind(('127.0.0.1', 0)) server_sock.listen() server_thread = Thread(target=serve_sync, daemon=True) server_thread.start() async with await connect_tcp(*server_sock.getsockname()) as stream: wrapper = await TLSStream.wrap(stream, hostname='localhost', ssl_context=client_context, standard_compatible=client_compatible) with client_cm: assert await wrapper.receive() == b'hello' await wrapper.aclose() server_thread.join() server_sock.close() if not client_compatible and server_compatible: assert isinstance(server_exc, OSError) assert not isinstance(server_exc, socket.timeout) else: assert server_exc is None async def test_ragged_eof_on_receive(self, server_context: ssl.SSLContext, client_context: ssl.SSLContext) -> None: server_exc = None def serve_sync() -> None: nonlocal server_exc conn, addr = server_sock.accept() try: conn.settimeout(1) conn.sendall(b'hello') except BaseException as exc: server_exc = exc finally: conn.close() server_sock = server_context.wrap_socket(socket.socket(), server_side=True, suppress_ragged_eofs=True) server_sock.settimeout(1) server_sock.bind(('127.0.0.1', 0)) server_sock.listen() server_thread = Thread(target=serve_sync, daemon=True) server_thread.start() try: async with await connect_tcp(*server_sock.getsockname()) as stream: wrapper = await TLSStream.wrap(stream, hostname='localhost', ssl_context=client_context, standard_compatible=False) assert await wrapper.receive() == b'hello' with pytest.raises(EndOfStream): await wrapper.receive() finally: server_thread.join() server_sock.close() assert server_exc is None async def test_receive_send_after_eof(self, server_context: ssl.SSLContext, client_context: ssl.SSLContext) -> None: def serve_sync() -> None: conn, addr = server_sock.accept() conn.sendall(b'hello') conn.unwrap() conn.close() server_sock = server_context.wrap_socket(socket.socket(), server_side=True, suppress_ragged_eofs=False) server_sock.settimeout(1) server_sock.bind(('127.0.0.1', 0)) server_sock.listen() server_thread = Thread(target=serve_sync, daemon=True) server_thread.start() stream = await connect_tcp(*server_sock.getsockname()) async with await TLSStream.wrap(stream, hostname='localhost', ssl_context=client_context) as wrapper: assert await wrapper.receive() == b'hello' with pytest.raises(EndOfStream): await wrapper.receive() server_thread.join() server_sock.close() @pytest.mark.parametrize('force_tlsv12', [ pytest.param(False, marks=[pytest.mark.skipif(not getattr(ssl, 'HAS_TLSv1_3', False), reason='No TLS 1.3 support')]), pytest.param(True) ], ids=['tlsv13', 'tlsv12']) async def test_send_eof_not_implemented(self, server_context: ssl.SSLContext, ca: CA, force_tlsv12: bool) -> None: def serve_sync() -> None: conn, addr = server_sock.accept() conn.sendall(b'hello') conn.unwrap() conn.close() client_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) ca.configure_trust(client_context) if force_tlsv12: expected_pattern = r'send_eof\(\) requires at least TLSv1.3' if hasattr(ssl, 'TLSVersion'): client_context.maximum_version = ssl.TLSVersion.TLSv1_2 else: # Python 3.6 client_context.options |= ssl.OP_NO_TLSv1_3 else: expected_pattern = r'send_eof\(\) has not yet been implemented for TLS streams' server_sock = server_context.wrap_socket(socket.socket(), server_side=True, 
suppress_ragged_eofs=False) server_sock.settimeout(1) server_sock.bind(('127.0.0.1', 0)) server_sock.listen() server_thread = Thread(target=serve_sync, daemon=True) server_thread.start() stream = await connect_tcp(*server_sock.getsockname()) async with await TLSStream.wrap(stream, hostname='localhost', ssl_context=client_context) as wrapper: assert await wrapper.receive() == b'hello' with pytest.raises(NotImplementedError) as exc: await wrapper.send_eof() exc.match(expected_pattern) server_thread.join() server_sock.close() class TestTLSListener: async def test_handshake_fail(self, server_context: ssl.SSLContext) -> None: def handler(stream: object) -> NoReturn: pytest.fail('This function should never be called in this scenario') exception = None class CustomTLSListener(TLSListener): @staticmethod async def handle_handshake_error(exc: BaseException, stream: AnyByteStream) -> None: nonlocal exception await TLSListener.handle_handshake_error(exc, stream) assert isinstance(stream, SocketStream) exception = exc event.set() event = Event() listener = await create_tcp_listener(local_host='127.0.0.1') tls_listener = CustomTLSListener(listener, server_context) async with tls_listener, create_task_group() as tg: tg.start_soon(tls_listener.serve, handler) sock = socket.socket() sock.connect(listener.extra(SocketAttribute.local_address)) sock.close() await event.wait() tg.cancel_scope.cancel() assert isinstance(exception, BrokenResourceError) anyio-3.5.0/tests/test_compat.py000066400000000000000000000155711416724134300167100ustar00rootroot00000000000000import pickle import signal import sys import threading from typing import Union import pytest from anyio import ( CancelScope, CapacityLimiter, Condition, Event, Lock, Semaphore, TaskInfo, create_blocking_portal, create_memory_object_stream, create_task_group, current_default_worker_thread_limiter, current_effective_deadline, current_time, fail_after, get_current_task, get_running_tasks, maybe_async, maybe_async_cm, move_on_after, open_signal_receiver, run_async_from_thread, run_sync_from_thread, run_sync_in_worker_thread, to_thread) from anyio._core._compat import ( DeprecatedAwaitable, DeprecatedAwaitableFloat, DeprecatedAwaitableList) pytestmark = pytest.mark.anyio class TestMaybeAsync: async def test_cancel_scope(self) -> None: with CancelScope() as scope: await maybe_async(scope.cancel()) async def test_current_time(self) -> None: value = await maybe_async(current_time()) assert type(value) is float async def test_current_effective_deadline(self) -> None: value = await maybe_async(current_effective_deadline()) assert type(value) is float async def test_get_running_tasks(self) -> None: tasks = await maybe_async(get_running_tasks()) assert type(tasks) is list async def test_get_current_task(self) -> None: task = await maybe_async(get_current_task()) assert type(task) is TaskInfo async def test_maybe_async_cm() -> None: async with maybe_async_cm(CancelScope()): pass class TestDeprecations: async def test_current_effective_deadline(self) -> None: with pytest.deprecated_call(): deadline = await current_effective_deadline() assert isinstance(deadline, float) async def test_current_time(self) -> None: with pytest.deprecated_call(): timestamp = await current_time() assert isinstance(timestamp, float) async def test_get_current_task(self) -> None: with pytest.deprecated_call(): task = await get_current_task() assert isinstance(task, TaskInfo) async def test_running_tasks(self) -> None: with pytest.deprecated_call(): tasks = await get_running_tasks()
assert tasks assert all(isinstance(task, TaskInfo) for task in tasks) @pytest.mark.skipif(sys.platform == 'win32', reason='Signal delivery cannot be tested on Windows') async def test_open_signal_receiver(self) -> None: with pytest.deprecated_call(): async with open_signal_receiver(signal.SIGINT): pass async def test_cancelscope_cancel(self) -> None: with CancelScope() as scope: with pytest.deprecated_call(): await scope.cancel() async def test_taskgroup_cancel(self) -> None: async with create_task_group() as tg: with pytest.deprecated_call(): await tg.cancel_scope.cancel() async def test_capacitylimiter_acquire_nowait(self) -> None: limiter = CapacityLimiter(1) with pytest.deprecated_call(): await limiter.acquire_nowait() async def test_capacitylimiter_acquire_on_behalf_of_nowait(self) -> None: limiter = CapacityLimiter(1) with pytest.deprecated_call(): await limiter.acquire_on_behalf_of_nowait(object()) async def test_capacitylimiter_set_total_tokens(self) -> None: limiter = CapacityLimiter(1) with pytest.deprecated_call(): await limiter.set_total_tokens(3) assert limiter.total_tokens == 3 async def test_condition_release(self) -> None: condition = Condition() condition.acquire_nowait() with pytest.deprecated_call(): await condition.release() async def test_event_set(self) -> None: event = Event() with pytest.deprecated_call(): await event.set() async def test_lock_release(self) -> None: lock = Lock() lock.acquire_nowait() with pytest.deprecated_call(): await lock.release() async def test_memory_object_stream_send_nowait(self) -> None: send, receive = create_memory_object_stream(1) with pytest.deprecated_call(): await send.send_nowait(None) async def test_semaphore_release(self) -> None: semaphore = Semaphore(1) semaphore.acquire_nowait() with pytest.deprecated_call(): await semaphore.release() async def test_move_on_after(self) -> None: with pytest.deprecated_call(): async with move_on_after(0): pass async def test_fail_after(self) -> None: with pytest.raises(TimeoutError), pytest.deprecated_call(): async with fail_after(0): pass async def test_run_sync_in_worker_thread(self) -> None: with pytest.deprecated_call(): thread_id = await run_sync_in_worker_thread(threading.get_ident) assert thread_id != threading.get_ident() async def test_run_async_from_thread(self) -> None: async def get_ident() -> int: return threading.get_ident() def thread_func() -> int: with pytest.deprecated_call(): return run_async_from_thread(get_ident) assert await to_thread.run_sync(thread_func) == threading.get_ident() async def test_run_sync_from_thread(self) -> None: def thread_func() -> int: with pytest.deprecated_call(): return run_sync_from_thread(threading.get_ident) assert await to_thread.run_sync(thread_func) == threading.get_ident() async def test_current_default_worker_thread_limiter(self) -> None: with pytest.deprecated_call(): default_limiter = to_thread.current_default_thread_limiter() assert current_default_worker_thread_limiter() is default_limiter async def test_create_blocking_portal(self) -> None: with pytest.deprecated_call(): async with create_blocking_portal(): pass class TestPickle: def test_deprecated_awaitable_none(self) -> None: def fn() -> DeprecatedAwaitable: return DeprecatedAwaitable(fn) obj = fn() result = pickle.loads(pickle.dumps(obj)) assert result is None def test_deprecated_awaitable_float(self) -> None: def fn() -> DeprecatedAwaitableFloat: return DeprecatedAwaitableFloat(2.3, fn) obj = fn() result = pickle.loads(pickle.dumps(obj)) assert type(result) is float assert result == 
2.3 def test_deprecated_awaitable_list(self) -> None: def fn() -> DeprecatedAwaitableList[Union[str, int]]: return DeprecatedAwaitableList([1, 'a'], func=fn) obj = fn() result = pickle.loads(pickle.dumps(obj)) assert type(result) is list assert result == [1, 'a'] anyio-3.5.0/tests/test_debugging.py000066400000000000000000000106751416724134300173620ustar00rootroot00000000000000import asyncio import sys from typing import Any, AsyncGenerator, Coroutine, Dict, Generator, List, Optional, Union, cast import pytest import anyio from anyio import ( Event, TaskInfo, create_task_group, get_current_task, get_running_tasks, move_on_after, wait_all_tasks_blocked) from anyio.abc import TaskStatus pytestmark = pytest.mark.anyio if sys.version_info >= (3, 8): get_coro = asyncio.Task.get_coro else: def get_coro(self: asyncio.Task) -> Any: return self._coro def test_main_task_name(anyio_backend_name: str, anyio_backend_options: Dict[str, Any]) -> None: task_name = None async def main() -> None: nonlocal task_name task_name = get_current_task().name anyio.run(main, backend=anyio_backend_name, backend_options=anyio_backend_options) assert task_name == 'tests.test_debugging.test_main_task_name..main' # Work around sniffio/asyncio bug that leaves behind an unclosed event loop if anyio_backend_name == 'asyncio': import asyncio import gc for loop in [obj for obj in gc.get_objects() if isinstance(obj, asyncio.AbstractEventLoop)]: loop.close() @pytest.mark.parametrize( "name_input,expected", [ (None, 'tests.test_debugging.test_non_main_task_name..non_main'), (b'name', "b'name'"), ("name", "name"), ("", ""), ], ) async def test_non_main_task_name(name_input: Optional[Union[bytes, str]], expected: str) -> None: async def non_main(*, task_status: TaskStatus) -> None: task_status.started(anyio.get_current_task().name) async with anyio.create_task_group() as tg: name = await tg.start(non_main, name=name_input) assert name == expected async def test_get_running_tasks() -> None: async def inspect() -> None: await wait_all_tasks_blocked() new_tasks = set(get_running_tasks()) - existing_tasks task_infos[:] = sorted(new_tasks, key=lambda info: info.name or '') event.set() event = Event() task_infos: List[TaskInfo] = [] host_task = get_current_task() async with create_task_group() as tg: existing_tasks = set(get_running_tasks()) tg.start_soon(event.wait, name='task1') tg.start_soon(event.wait, name='task2') tg.start_soon(inspect) assert len(task_infos) == 3 expected_names = ['task1', 'task2', 'tests.test_debugging.test_get_running_tasks..inspect'] for task, expected_name in zip(task_infos, expected_names): assert task.parent_id == host_task.id assert task.name == expected_name assert repr(task) == f'TaskInfo(id={task.id}, name={expected_name!r})' @pytest.mark.filterwarnings('ignore:"@coroutine" decorator is deprecated:DeprecationWarning') def test_wait_generator_based_task_blocked(asyncio_event_loop: asyncio.AbstractEventLoop) -> None: async def native_coro_part() -> None: await wait_all_tasks_blocked() gen = cast(Generator, get_coro(gen_task)) assert not gen.gi_running if sys.version_info < (3, 7): assert gen.gi_yieldfrom.gi_code.co_name == 'wait' else: coro = cast(Coroutine, gen.gi_yieldfrom) assert coro.cr_code.co_name == 'wait' event.set() @asyncio.coroutine def generator_part() -> Generator[object, BaseException, None]: yield from event.wait() event = asyncio.Event() gen_task = asyncio_event_loop.create_task(generator_part()) asyncio_event_loop.run_until_complete(native_coro_part()) 
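# An illustrative sketch (not part of the original suite) of the
# wait_all_tasks_blocked() guarantee relied on above: once it returns, every other
# task is parked on an await, so the event can be set without racing the waiter.
async def test_wait_all_tasks_blocked_sketch() -> None:
    event = Event()
    async with create_task_group() as tg:
        tg.start_soon(event.wait)
        await wait_all_tasks_blocked()
        event.set()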
@pytest.mark.parametrize('anyio_backend', ['asyncio']) async def test_wait_all_tasks_blocked_asend(anyio_backend: str) -> None: """Test that wait_all_tasks_blocked() does not crash on an `asend()` object.""" async def agen_func() -> AsyncGenerator[None, None]: yield agen = agen_func() coro = agen.asend(None) loop = asyncio.get_event_loop() task = loop.create_task(coro) await wait_all_tasks_blocked() await task await agen.aclose() async def test_wait_all_tasks_blocked_cancelled_task() -> None: done = False async def self_cancel(*, task_status: TaskStatus) -> None: nonlocal done task_status.started() with move_on_after(-1): await Event().wait() done = True async with create_task_group() as tg: await tg.start(self_cancel) await wait_all_tasks_blocked() assert done anyio-3.5.0/tests/test_eventloop.py000066400000000000000000000020501416724134300174260ustar00rootroot00000000000000import math import sys import pytest from pytest_mock.plugin import MockerFixture from anyio import sleep_forever, sleep_until if sys.version_info < (3, 8): from mock import AsyncMock else: from unittest.mock import AsyncMock pytestmark = pytest.mark.anyio fake_current_time = 1620581544.0 @pytest.fixture def fake_sleep(mocker: MockerFixture) -> AsyncMock: mocker.patch('anyio._core._eventloop.current_time', return_value=fake_current_time) return mocker.patch('anyio._core._eventloop.sleep', AsyncMock()) async def test_sleep_until(fake_sleep: AsyncMock) -> None: deadline = fake_current_time + 500.102352 await sleep_until(deadline) fake_sleep.assert_called_once_with(deadline - fake_current_time) async def test_sleep_until_in_past(fake_sleep: AsyncMock) -> None: deadline = fake_current_time - 500.102352 await sleep_until(deadline) fake_sleep.assert_called_once_with(0) async def test_sleep_forever(fake_sleep: AsyncMock) -> None: await sleep_forever() fake_sleep.assert_called_once_with(math.inf) anyio-3.5.0/tests/test_fileio.py000066400000000000000000000432061416724134300166720ustar00rootroot00000000000000import os import pathlib import platform import socket import stat from typing import Tuple import pytest from _pytest.tmpdir import TempPathFactory from anyio import AsyncFile, Path, open_file, wrap_file pytestmark = pytest.mark.anyio class TestAsyncFile: @pytest.fixture(scope='class') def testdata(cls) -> bytes: return b''.join(bytes([i] * 1000) for i in range(10)) @pytest.fixture def testdatafile(self, tmp_path_factory: TempPathFactory, testdata: bytes) -> pathlib.Path: file = tmp_path_factory.mktemp('file').joinpath('testdata') file.write_bytes(testdata) return file async def test_open_close(self, testdatafile: pathlib.Path) -> None: f = await open_file(testdatafile) await f.aclose() async def test_read(self, testdatafile: pathlib.Path, testdata: bytes) -> None: async with await open_file(testdatafile, 'rb') as f: data = await f.read() assert f.closed assert data == testdata async def test_write(self, testdatafile: pathlib.Path, testdata: bytes) -> None: async with await open_file(testdatafile, 'ab') as f: await f.write(b'f' * 1000) assert testdatafile.stat().st_size == len(testdata) + 1000 async def test_async_iteration(self, tmp_path: pathlib.Path) -> None: lines = ['blah blah\n', 'foo foo\n', 'bar bar'] testpath = tmp_path.joinpath('testfile') testpath.write_text(''.join(lines), 'ascii') async with await open_file(str(testpath)) as f: lines_i = iter(lines) async for line in f: assert line == next(lines_i) async def test_wrap_file(self, tmp_path: pathlib.Path) -> None: path = tmp_path / 'testdata' with path.open('w') as 
fp:
            wrapped = wrap_file(fp)
            await wrapped.write('dummydata')

        assert path.read_text() == 'dummydata'


class TestPath:
    @pytest.fixture
    def populated_tmpdir(self, tmp_path: pathlib.Path) -> pathlib.Path:
        tmp_path.joinpath('testfile').touch()
        tmp_path.joinpath('testfile2').touch()
        subdir = tmp_path / 'subdir'
        subdir.mkdir()
        subdir.joinpath('dummyfile1.txt').touch()
        subdir.joinpath('dummyfile2.txt').touch()
        return tmp_path

    async def test_properties(self) -> None:
        """Ensure that all public properties and methods are available on the async Path class."""
        path = pathlib.Path('/test/path/another/part')
        stdlib_properties = {p for p in dir(path)
                             if p.startswith('__') or not p.startswith('_')}
        stdlib_properties.discard('link_to')
        stdlib_properties.discard('__class_getitem__')
        stdlib_properties.discard('__enter__')
        stdlib_properties.discard('__exit__')

        async_path = Path(path)
        anyio_properties = {p for p in dir(async_path)
                            if p.startswith('__') or not p.startswith('_')}

        missing = stdlib_properties - anyio_properties
        assert not missing

    def test_repr(self) -> None:
        assert repr(Path('/foo')) == "Path('/foo')"

    def test_bytes(self) -> None:
        assert bytes(Path('/foo-åäö')) == os.fsencode(f'{os.path.sep}foo-åäö')

    def test_hash(self) -> None:
        assert hash(Path('/foo')) == hash(pathlib.Path('/foo'))

    def test_comparison(self) -> None:
        path1 = Path('/foo1')
        path2 = Path('/foo2')
        assert path1 < path2
        assert path1 <= path2
        assert path2 > path1
        assert path2 >= path1

    def test_truediv(self) -> None:
        result = Path('/foo') / 'bar'
        assert isinstance(result, Path)
        assert result == pathlib.Path('/foo/bar')

    def test_rtruediv(self) -> None:
        result = '/foo' / Path('bar')
        assert isinstance(result, Path)
        assert result == pathlib.Path('/foo/bar')

    def test_parts_property(self) -> None:
        assert Path('/abc/xyz/foo.txt').parts == (os.path.sep, 'abc', 'xyz', 'foo.txt')

    @pytest.mark.skipif(platform.system() != 'Windows',
                        reason='Drive only makes sense on Windows')
    def test_drive_property(self) -> None:
        assert Path('c:\\abc\\xyz').drive == 'c:'

    def test_root_property(self) -> None:
        assert Path('/abc/xyz/foo.txt').root == os.path.sep

    def test_anchor_property(self) -> None:
        assert Path('/abc/xyz/foo.txt.zip').anchor == os.path.sep

    def test_parents_property(self) -> None:
        parents = Path('/abc/xyz/foo.txt').parents
        assert len(parents) == 3
        assert all(isinstance(parent, Path) for parent in parents)
        assert str(parents[0]) == f'{os.path.sep}abc{os.path.sep}xyz'
        assert str(parents[1]) == f'{os.path.sep}abc'
        assert str(parents[2]) == os.path.sep

    def test_parent_property(self) -> None:
        parent = Path('/abc/xyz/foo.txt').parent
        assert isinstance(parent, Path)
        assert str(parent) == f'{os.path.sep}abc{os.path.sep}xyz'

    def test_name_property(self) -> None:
        assert Path('/abc/xyz/foo.txt.zip').name == 'foo.txt.zip'

    def test_suffix_property(self) -> None:
        assert Path('/abc/xyz/foo.txt.zip').suffix == '.zip'

    def test_suffixes_property(self) -> None:
        assert Path('/abc/xyz/foo.tar.gz').suffixes == ['.tar', '.gz']

    def test_stem_property(self) -> None:
        assert Path('/abc/xyz/foo.txt.zip').stem == 'foo.txt'

    async def test_absolute(self) -> None:
        result = await Path('../foo/bar').absolute()
        assert isinstance(result, Path)
        assert result == pathlib.Path.cwd() / '../foo/bar'

    @pytest.mark.skipif(platform.system() != 'Windows',
                        reason='Only makes sense on Windows')
    def test_as_posix(self) -> None:
        assert Path('c:\\foo\\bar').as_posix() == 'c:/foo/bar'

    def test_as_uri(self) -> None:
        if platform.system() == 'Windows':
            assert Path('c:\\foo\\bar').as_uri() == 'file:///c:/foo/bar'
        else:
            assert Path('/foo/bar').as_uri() == 'file:///foo/bar'

    async def test_cwd(self) -> None:
        result = await Path.cwd()
        assert isinstance(result, Path)
        assert result == pathlib.Path.cwd()

    async def test_exists(self, tmp_path: pathlib.Path) -> None:
        assert not await Path('~/btelkbee').exists()
        assert await Path(tmp_path).exists()

    async def test_expanduser(self) -> None:
        result = await Path('~/btelkbee').expanduser()
        assert isinstance(result, Path)
        assert str(result) == os.path.expanduser(f'~{os.path.sep}btelkbee')

    async def test_home(self) -> None:
        result = await Path.home()
        assert isinstance(result, Path)
        assert result == pathlib.Path.home()

    @pytest.mark.parametrize('arg, result', [
        ('c:/xyz' if platform.system() == 'Windows' else '/xyz', True),
        ('../xyz', False)
    ])
    def test_is_absolute(self, arg: str, result: bool) -> None:
        assert Path(arg).is_absolute() == result

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='Block devices are not available on Windows')
    async def test_is_block_device(self) -> None:
        assert not await Path('/btelkbee').is_block_device()
        with os.scandir('/dev') as iterator:
            for entry in iterator:
                if stat.S_ISBLK(entry.stat().st_mode):
                    assert await Path(entry.path).is_block_device()
                    break
            else:
                pytest.skip('Could not find a suitable block device')

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='Character devices are not available on Windows')
    async def test_is_char_device(self) -> None:
        assert not await Path('/btelkbee').is_char_device()
        assert await Path('/dev/random').is_char_device()

    async def test_is_dir(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'somedir'
        assert not await Path(path).is_dir()
        path.mkdir()
        assert await Path(path).is_dir()

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='mkfifo() is not available on Windows')
    async def test_is_fifo(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'somefifo'
        assert not await Path(path).is_fifo()
        os.mkfifo(path)
        assert await Path(path).is_fifo()

    async def test_is_file(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'somefile'
        assert not await Path(path).is_file()
        path.touch()
        assert await Path(path).is_file()

    async def test_is_mount(self) -> None:
        assert not await Path('/gfobj4ewiotj').is_mount()
        assert await Path('/').is_mount()

    def test_is_reserved(self) -> None:
        expected_result = platform.system() == 'Windows'
        assert Path('nul').is_reserved() == expected_result

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='UNIX sockets are not available on Windows')
    async def test_is_socket(self, tmp_path_factory: TempPathFactory) -> None:
        path = tmp_path_factory.mktemp('unix').joinpath('socket')
        assert not await Path(path).is_socket()
        with socket.socket(socket.AF_UNIX) as sock:
            sock.bind(str(path))
            assert await Path(path).is_socket()

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='symbolic links are not supported on Windows')
    async def test_is_symlink(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        assert not await Path(path).is_symlink()
        path.symlink_to('/foo')
        assert await Path(path).is_symlink()

    @pytest.mark.parametrize('args, result', [
        (('/xyz', 'abc'), True),
        (('/xyz', 'baz'), False)
    ])
    def test_is_relative_to(self, args: Tuple[str], result: bool) -> None:
        assert Path('/xyz/abc/foo').is_relative_to(*args) == result

    async def test_glob(self, populated_tmpdir: pathlib.Path) -> None:
        all_paths = []
        async for path in Path(populated_tmpdir).glob('**/*.txt'):
            assert isinstance(path, Path)
            all_paths.append(path.name)

        all_paths.sort()
        assert all_paths == ['dummyfile1.txt', 'dummyfile2.txt']

    async def test_rglob(self, populated_tmpdir: pathlib.Path) -> None:
        all_paths = []
        async for path in Path(populated_tmpdir).rglob('*.txt'):
            assert isinstance(path, Path)
            all_paths.append(path.name)

        all_paths.sort()
        assert all_paths == ['dummyfile1.txt', 'dummyfile2.txt']

    async def test_iterdir(self, populated_tmpdir: pathlib.Path) -> None:
        all_paths = []
        async for path in Path(populated_tmpdir).iterdir():
            assert isinstance(path, Path)
            all_paths.append(path.name)

        all_paths.sort()
        assert all_paths == ['subdir', 'testfile', 'testfile2']

    def test_joinpath(self) -> None:
        path = Path('/foo').joinpath('bar')
        assert path == Path('/foo/bar')

    def test_match(self) -> None:
        assert Path('/foo/bar').match('/foo/*')
        assert not Path('/foo/bar').match('/baz/*')

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='chmod() is not available on Windows')
    async def test_chmod(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        path.touch(0o666)
        await Path(path).chmod(0o444)
        assert path.stat().st_mode & 0o777 == 0o444

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='hard links are not supported on Windows')
    async def test_hardlink_to(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        target = tmp_path / 'link'
        target.touch()
        await Path(path).hardlink_to(Path(target))
        assert path.stat().st_nlink == 2
        assert target.stat().st_nlink == 2

    @pytest.mark.skipif(not hasattr(os, 'lchmod'), reason='os.lchmod() is not available')
    async def test_lchmod(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        path.symlink_to('/foo/bar/baz')
        await Path(path).lchmod(0o600)
        assert path.lstat().st_mode & 0o777 == 0o600

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='symbolic links are not supported on Windows')
    async def test_lstat(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path.joinpath('testfile')
        path.symlink_to('/foo/bar/baz')
        result = await Path(path).lstat()
        assert isinstance(result, os.stat_result)

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='owner and group are not supported on Windows')
    async def test_group(self, tmp_path: pathlib.Path) -> None:
        import grp
        group_name = grp.getgrgid(os.getegid()).gr_name
        assert await Path(tmp_path).group() == group_name

    async def test_mkdir(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testdir'
        await Path(path).mkdir()
        assert path.is_dir()

    async def test_open(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        path.write_bytes(b'bibbitibobbitiboo')
        fp = await Path(path).open('rb')
        assert isinstance(fp, AsyncFile)
        assert fp.name == str(path)
        await fp.aclose()

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='owner and group are not supported on Windows')
    async def test_owner(self, tmp_path: pathlib.Path) -> None:
        import pwd
        user_name = pwd.getpwuid(os.geteuid()).pw_name
        assert await Path(tmp_path).owner() == user_name

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='symbolic links are not supported on Windows')
    async def test_readlink(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path.joinpath('testfile')
        path.symlink_to('/foo/bar/baz')
        link_target = await Path(path).readlink()
        assert isinstance(link_target, Path)
        assert str(link_target) == '/foo/bar/baz'

    async def test_read_bytes(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        path.write_bytes(b'bibbitibobbitiboo')
        assert await Path(path).read_bytes() == b'bibbitibobbitiboo'

    async def test_read_text(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        path.write_text('some text åäö', encoding='utf-8')
        assert await Path(path).read_text(encoding='utf-8') == 'some text åäö'

    async def test_rename(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'somefile'
        path.touch()
        target = tmp_path / 'anotherfile'
        result = await Path(path).rename(Path(target))
        assert isinstance(result, Path)
        assert result == target

    async def test_replace(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'somefile'
        path.write_text('hello')
        target = tmp_path / 'anotherfile'
        target.write_text('world')
        result = await Path(path).replace(Path(target))
        assert isinstance(result, Path)
        assert result == target
        assert target.read_text() == 'hello'

    async def test_resolve(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'somedir' / '..' / 'somefile'
        result = await Path(path).resolve()
        assert result == tmp_path / 'somefile'

    async def test_rmdir(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'somedir'
        path.mkdir()
        await Path(path).rmdir()
        assert not path.exists()

    async def test_samefile(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'somefile'
        path.touch()
        assert await Path(tmp_path / 'somefile').samefile(Path(path))

    async def test_stat(self, tmp_path: pathlib.Path) -> None:
        result = await Path(tmp_path).stat()
        assert isinstance(result, os.stat_result)

    async def test_touch(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        await Path(path).touch()
        assert path.is_file()

    @pytest.mark.skipif(platform.system() == 'Windows',
                        reason='symbolic links are not supported on Windows')
    async def test_symlink_to(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        target = tmp_path / 'link'
        await Path(path).symlink_to(Path(target))
        assert path.is_symlink()

    async def test_unlink(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        path.touch()
        await Path(path).unlink()
        assert not path.exists()

    async def test_unlink_missing_file(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        await Path(path).unlink(missing_ok=True)
        with pytest.raises(FileNotFoundError):
            await Path(path).unlink(missing_ok=False)

    def test_with_name(self) -> None:
        assert Path('/xyz/foo.txt').with_name('bar').name == 'bar'

    def test_with_stem(self) -> None:
        assert Path('/xyz/foo.txt').with_stem('bar').name == 'bar.txt'

    def test_with_suffix(self) -> None:
        assert Path('/xyz/foo.txt.gz').with_suffix('.zip').name == 'foo.txt.zip'

    async def test_write_bytes(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        await Path(path).write_bytes(b'bibbitibobbitiboo')
        assert path.read_bytes() == b'bibbitibobbitiboo'

    async def test_write_text(self, tmp_path: pathlib.Path) -> None:
        path = tmp_path / 'testfile'
        await Path(path).write_text('some text åäö', encoding='utf-8')
        assert path.read_text(encoding='utf-8') == 'some text åäö'
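# --- Illustrative example (editor's sketch, not part of the original sdist) ---
# The TestPath suite above exercises anyio.Path, an async facade over
# pathlib.Path that runs blocking filesystem calls in a worker thread.
# A minimal usage sketch, assuming only the public API shown in these tests
# (the file name is a placeholder):

import anyio
from anyio import Path


async def path_demo() -> None:
    path = Path('example.txt')          # hypothetical file name
    await path.write_text('hello')      # blocking write runs in a worker thread
    assert await path.read_text() == 'hello'
    await path.unlink()


if __name__ == '__main__':
    anyio.run(path_demo)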
anyio-3.5.0/tests/test_from_thread.py

import sys
import threading
import time
from concurrent.futures import CancelledError
from contextlib import suppress
from contextvars import ContextVar
from typing import Any, AsyncGenerator, Dict, List, NoReturn, Optional

import pytest
from _pytest.logging import LogCaptureFixture

from anyio import (
    Event, create_task_group, from_thread, get_cancelled_exc_class, get_current_task, run, sleep,
    to_thread, wait_all_tasks_blocked)
from anyio.abc import TaskStatus
from anyio.from_thread import BlockingPortal, start_blocking_portal
from anyio.lowlevel import checkpoint

if sys.version_info >= (3, 8):
    from typing import Literal
else:
    from typing_extensions import Literal

if sys.version_info >= (3, 7):
    from contextlib import asynccontextmanager
else:
    from contextlib2 import asynccontextmanager

pytestmark = pytest.mark.anyio


class TestRunAsyncFromThread:
    async def test_run_async_from_thread(self) -> None:
        async def add(a: int, b: int) -> int:
            assert threading.get_ident() == event_loop_thread_id
            return a + b

        def worker(a: int, b: int) -> int:
            assert threading.get_ident() != event_loop_thread_id
            return from_thread.run(add, a, b)

        event_loop_thread_id = threading.get_ident()
        result = await to_thread.run_sync(worker, 1, 2)
        assert result == 3

    async def test_run_sync_from_thread(self) -> None:
        def add(a: int, b: int) -> int:
            assert threading.get_ident() == event_loop_thread_id
            return a + b

        def worker(a: int, b: int) -> int:
            assert threading.get_ident() != event_loop_thread_id
            return from_thread.run_sync(add, a, b)

        event_loop_thread_id = threading.get_ident()
        result = await to_thread.run_sync(worker, 1, 2)
        assert result == 3

    def test_run_sync_from_thread_pooling(self) -> None:
        async def main() -> None:
            thread_ids = set()
            for _ in range(5):
                thread_ids.add(await to_thread.run_sync(threading.get_ident))

            # Expects that all the work has been done in the same worker thread
            assert len(thread_ids) == 1
            assert thread_ids.pop() != threading.get_ident()
            assert threading.active_count() == initial_count + 1

        # The thread should not exist after the event loop has been closed
        initial_count = threading.active_count()
        run(main, backend='asyncio')

        for _ in range(10):
            if threading.active_count() == initial_count:
                return

            time.sleep(0.1)

        pytest.fail('Worker thread did not exit within 1 second')

    async def test_run_async_from_thread_exception(self) -> None:
        async def add(a: int, b: int) -> int:
            assert threading.get_ident() == event_loop_thread_id
            return a + b

        def worker(a: int, b: int) -> int:
            assert threading.get_ident() != event_loop_thread_id
            return from_thread.run(add, a, b)

        event_loop_thread_id = threading.get_ident()
        with pytest.raises(TypeError) as exc:
            await to_thread.run_sync(worker, 1, 'foo')

        exc.match("unsupported operand type")

    async def test_run_sync_from_thread_exception(self) -> None:
        def add(a: int, b: int) -> int:
            assert threading.get_ident() == event_loop_thread_id
            return a + b

        def worker(a: int, b: int) -> int:
            assert threading.get_ident() != event_loop_thread_id
            return from_thread.run_sync(add, a, b)

        event_loop_thread_id = threading.get_ident()
        with pytest.raises(TypeError) as exc:
            await to_thread.run_sync(worker, 1, 'foo')

        exc.match("unsupported operand type")

    async def test_run_anyio_async_func_from_thread(self) -> None:
        def worker(*args: int) -> Literal[True]:
            from_thread.run(sleep, *args)
            return True

        assert await to_thread.run_sync(worker, 0)

    def test_run_async_from_unclaimed_thread(self) -> None:
        async def foo() -> None:
            pass

        exc = pytest.raises(RuntimeError, from_thread.run, foo)
        exc.match('This function can only be run from an AnyIO worker thread')

    async def test_contextvar_propagation(self, anyio_backend_name: str) -> None:
        if anyio_backend_name == 'asyncio' and sys.version_info < (3, 7):
            pytest.skip('Asyncio does not propagate context before Python 3.7')

        var = ContextVar('var', default=1)

        async def async_func() -> int:
            await checkpoint()
            return var.get()

        def worker() -> int:
            var.set(6)
            return from_thread.run(async_func)

        assert await to_thread.run_sync(worker) == 6


class TestRunSyncFromThread:
    def test_run_sync_from_unclaimed_thread(self) -> None:
        def foo() -> None:
            pass

        exc = pytest.raises(RuntimeError, from_thread.run_sync, foo)
        exc.match('This function can only be run from an AnyIO worker thread')

    async def test_contextvar_propagation(self) -> None:
        var = ContextVar('var', default=1)

        def worker() -> int:
            var.set(6)
            return from_thread.run_sync(var.get)

        assert await to_thread.run_sync(worker) == 6


class TestBlockingPortal:
    class AsyncCM:
        def __init__(self, ignore_error: bool):
            self.ignore_error = ignore_error

        async def __aenter__(self) -> Literal["test"]:
            return 'test'

        async def __aexit__(self, exc_type: object, exc_val: object, exc_tb: object) -> bool:
            return self.ignore_error

    async def test_successful_call(self) -> None:
        async def async_get_thread_id() -> int:
            return threading.get_ident()

        def external_thread() -> None:
            thread_ids.append(portal.call(threading.get_ident))
            thread_ids.append(portal.call(async_get_thread_id))

        thread_ids: List[int] = []
        async with BlockingPortal() as portal:
            thread = threading.Thread(target=external_thread)
            thread.start()
            await to_thread.run_sync(thread.join)

        for thread_id in thread_ids:
            assert thread_id == threading.get_ident()

    async def test_aexit_with_exception(self) -> None:
        """Test that when the portal exits with an exception, all tasks are cancelled."""
        def external_thread() -> None:
            try:
                portal.call(sleep, 3)
            except BaseException as exc:
                results.append(exc)
            else:
                results.append(None)

        results: List[Optional[BaseException]] = []
        with suppress(Exception):
            async with BlockingPortal() as portal:
                thread1 = threading.Thread(target=external_thread)
                thread1.start()
                thread2 = threading.Thread(target=external_thread)
                thread2.start()
                await sleep(0.1)
                assert not results
                raise Exception

        await to_thread.run_sync(thread1.join)
        await to_thread.run_sync(thread2.join)

        assert len(results) == 2
        assert isinstance(results[0], CancelledError)
        assert isinstance(results[1], CancelledError)

    async def test_aexit_without_exception(self) -> None:
        """Test that when the portal exits, it waits for all tasks to finish."""
        def external_thread() -> None:
            try:
                portal.call(sleep, 0.2)
            except BaseException as exc:
                results.append(exc)
            else:
                results.append(None)

        results: List[Optional[BaseException]] = []
        async with BlockingPortal() as portal:
            thread1 = threading.Thread(target=external_thread)
            thread1.start()
            thread2 = threading.Thread(target=external_thread)
            thread2.start()
            await sleep(0.1)
            assert not results

        await to_thread.run_sync(thread1.join)
        await to_thread.run_sync(thread2.join)

        assert results == [None, None]

    async def test_call_portal_from_event_loop_thread(self) -> None:
        async with BlockingPortal() as portal:
            exc = pytest.raises(RuntimeError, portal.call, threading.get_ident)
            exc.match('This method cannot be called from the event loop thread')

    def test_start_with_new_event_loop(self, anyio_backend_name: str,
                                       anyio_backend_options: Dict[str, Any]) -> None:
        async def async_get_thread_id() -> int:
            return threading.get_ident()

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            thread_id = portal.call(async_get_thread_id)

        assert isinstance(thread_id, int)
        assert thread_id != threading.get_ident()

    def test_start_with_nonexistent_backend(self) -> None:
        with pytest.raises(LookupError) as exc:
            with start_blocking_portal('foo'):
                pass

        exc.match('No such backend: foo')

    def test_call_stopped_portal(self, anyio_backend_name: str,
                                 anyio_backend_options: Dict[str, Any]) -> None:
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            pass

        pytest.raises(RuntimeError, portal.call, threading.get_ident).\
            match('This portal is not running')

    def test_start_task_soon(self, anyio_backend_name: str,
                             anyio_backend_options: Dict[str, Any]) -> None:
        async def event_waiter() -> Literal["test"]:
            await event1.wait()
            event2.set()
            return 'test'

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            event1 = portal.call(Event)
            event2 = portal.call(Event)
            future = portal.start_task_soon(event_waiter)
            portal.call(event1.set)
            portal.call(event2.wait)

        assert future.result() == 'test'

    def test_start_task_soon_cancel_later(self, anyio_backend_name: str,
                                          anyio_backend_options: Dict[str, Any]) -> None:
        async def noop() -> None:
            await sleep(2)

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            future = portal.start_task_soon(noop)
            portal.call(wait_all_tasks_blocked)
            future.cancel()

        assert future.cancelled()

    def test_start_task_soon_cancel_immediately(self, anyio_backend_name: str,
                                                anyio_backend_options: Dict[str, Any]) -> None:
        cancelled = False

        async def event_waiter() -> None:
            nonlocal cancelled
            try:
                await sleep(3)
            except get_cancelled_exc_class():
                cancelled = True

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            future = portal.start_task_soon(event_waiter)
            future.cancel()

        assert cancelled

    def test_start_task_soon_with_name(self, anyio_backend_name: str,
                                       anyio_backend_options: Dict[str, Any]) -> None:
        task_name = None

        async def taskfunc() -> None:
            nonlocal task_name
            task_name = get_current_task().name

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            portal.start_task_soon(taskfunc, name='testname')

        assert task_name == 'testname'

    def test_async_context_manager_success(self, anyio_backend_name: str,
                                           anyio_backend_options: Dict[str, Any]) -> None:
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            with portal.wrap_async_context_manager(TestBlockingPortal.AsyncCM(False)) as cm:
                assert cm == 'test'

    def test_async_context_manager_error(self, anyio_backend_name: str,
                                         anyio_backend_options: Dict[str, Any]) -> None:
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            with pytest.raises(Exception) as exc:
                with portal.wrap_async_context_manager(TestBlockingPortal.AsyncCM(False)) as cm:
                    assert cm == 'test'
                    raise Exception('should NOT be ignored')

            exc.match('should NOT be ignored')

    def test_async_context_manager_error_ignore(self, anyio_backend_name: str,
                                                anyio_backend_options: Dict[str, Any]) -> None:
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            with portal.wrap_async_context_manager(TestBlockingPortal.AsyncCM(True)) as cm:
                assert cm == 'test'
                raise Exception('should be ignored')

    def test_async_context_manager_exception_in_task_group(
            self, anyio_backend_name: str, anyio_backend_options: Dict[str, Any]) -> None:
        """Regression test for #381."""
        async def failing_func() -> None:
            0 / 0

        @asynccontextmanager
        async def run_in_context() -> AsyncGenerator[None, None]:
            async with create_task_group() as tg:
                tg.start_soon(failing_func)
                yield

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            with pytest.raises(ZeroDivisionError):
                with portal.wrap_async_context_manager(run_in_context()):
                    pass

    def test_start_no_value(self, anyio_backend_name: str,
                            anyio_backend_options: Dict[str, Any]) -> None:
        def taskfunc(*, task_status: TaskStatus) -> None:
            task_status.started()

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            future, value = portal.start_task(taskfunc)  # type: ignore[arg-type]
            assert value is None
            assert future.result() is None

    def test_start_with_value(self, anyio_backend_name: str,
                              anyio_backend_options: Dict[str, Any]) -> None:
        def taskfunc(*, task_status: TaskStatus) -> None:
            task_status.started('foo')

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            future, value = portal.start_task(taskfunc)  # type: ignore[arg-type]
            assert value == 'foo'
            assert future.result() is None

    def test_start_crash_before_started_call(self, anyio_backend_name: str,
                                             anyio_backend_options: Dict[str, Any]) -> None:
        def taskfunc(*, task_status: object) -> NoReturn:
            raise Exception('foo')

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            with pytest.raises(Exception, match='foo'):
                portal.start_task(taskfunc)

    def test_start_crash_after_started_call(self, anyio_backend_name: str,
                                            anyio_backend_options: Dict[str, Any]) -> None:
        def taskfunc(*, task_status: TaskStatus) -> NoReturn:
            task_status.started(2)
            raise Exception('foo')

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            future, value = portal.start_task(taskfunc)
            assert value == 2
            with pytest.raises(Exception, match='foo'):
                future.result()

    def test_start_no_started_call(self, anyio_backend_name: str,
                                   anyio_backend_options: Dict[str, Any]) -> None:
        def taskfunc(*, task_status: TaskStatus) -> None:
            pass

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            with pytest.raises(RuntimeError, match='Task exited'):
                portal.start_task(taskfunc)  # type: ignore[arg-type]

    def test_start_with_name(self, anyio_backend_name: str,
                             anyio_backend_options: Dict[str, Any]) -> None:
        def taskfunc(*, task_status: TaskStatus) -> None:
            task_status.started(get_current_task().name)

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            future, start_value = portal.start_task(
                taskfunc, name='testname')  # type: ignore[arg-type]
            assert start_value == 'testname'

    def test_contextvar_propagation_sync(self, anyio_backend_name: str,
                                         anyio_backend_options: Dict[str, Any]) -> None:
        if anyio_backend_name == 'asyncio' and sys.version_info < (3, 7):
            pytest.skip('Asyncio does not propagate context before Python 3.7')

        var = ContextVar('var', default=1)
        var.set(6)
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            propagated_value = portal.call(var.get)

        assert propagated_value == 6

    def test_contextvar_propagation_async(self, anyio_backend_name: str,
                                          anyio_backend_options: Dict[str, Any]) -> None:
        if anyio_backend_name == 'asyncio' and sys.version_info < (3, 7):
            pytest.skip('Asyncio does not propagate context before Python 3.7')

        var = ContextVar('var', default=1)
        var.set(6)

        async def get_var() -> int:
            await checkpoint()
            return var.get()

        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            propagated_value = portal.call(get_var)

        assert propagated_value == 6

    @pytest.mark.parametrize('anyio_backend', ['asyncio'])
    async def test_asyncio_run_sync_called(self, caplog: LogCaptureFixture) -> None:
        """Regression test for #357."""
        async def in_loop() -> None:
            raise CancelledError

        async with BlockingPortal() as portal:
            await to_thread.run_sync(portal.start_task_soon, in_loop)

        assert not caplog.text
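# --- Illustrative example (editor's sketch, not part of the original sdist) ---
# The portal tests above verify that plain synchronous threads can drive async
# code. A minimal sketch of the user-facing pattern, assuming the default
# backend and only the API exercised in these tests:

import anyio
from anyio.from_thread import start_blocking_portal


async def compute() -> int:
    await anyio.sleep(0)
    return 42


def sync_caller() -> int:
    # start_blocking_portal() runs an event loop in a background thread;
    # portal.call() blocks the calling thread until the coroutine finishes.
    with start_blocking_portal() as portal:
        return portal.call(compute)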
anyio-3.5.0/tests/test_lowlevel.py

from typing import Any, Dict

import pytest

from anyio import create_task_group, run
from anyio.lowlevel import RunVar, cancel_shielded_checkpoint, checkpoint, checkpoint_if_cancelled

pytestmark = pytest.mark.anyio


@pytest.mark.parametrize('cancel', [False, True])
async def test_checkpoint_if_cancelled(cancel: bool) -> None:
    finished = second_finished = False

    async def func() -> None:
        nonlocal finished
        tg.start_soon(second_func)
        if cancel:
            tg.cancel_scope.cancel()

        await checkpoint_if_cancelled()
        finished = True

    async def second_func() -> None:
        nonlocal second_finished
        assert finished != cancel
        second_finished = True

    async with create_task_group() as tg:
        tg.start_soon(func)

    assert finished != cancel
    assert second_finished


@pytest.mark.parametrize('cancel', [False, True])
async def test_cancel_shielded_checkpoint(cancel: bool) -> None:
    finished = second_finished = False

    async def func() -> None:
        nonlocal finished
        await cancel_shielded_checkpoint()
        finished = True

    async def second_func() -> None:
        nonlocal second_finished
        assert not finished
        second_finished = True

    async with create_task_group() as tg:
        tg.start_soon(func)
        tg.start_soon(second_func)
        if cancel:
            tg.cancel_scope.cancel()

    assert finished
    assert second_finished


@pytest.mark.parametrize('cancel', [False, True])
async def test_checkpoint(cancel: bool) -> None:
    finished = second_finished = False

    async def func() -> None:
        nonlocal finished
        await checkpoint()
        finished = True

    async def second_func() -> None:
        nonlocal second_finished
        assert not finished
        second_finished = True

    async with create_task_group() as tg:
        tg.start_soon(func)
        tg.start_soon(second_func)
        if cancel:
            tg.cancel_scope.cancel()

    assert finished != cancel
    assert second_finished


class TestRunVar:
    def test_get_set(self, anyio_backend_name: str,
                     anyio_backend_options: Dict[str, Any]) -> None:
        async def taskfunc(index: int) -> None:
            assert var.get() == index
            var.set(index + 1)

        async def main() -> None:
            pytest.raises(LookupError, var.get)
            for i in range(2):
                var.set(i)
                async with create_task_group() as tg:
                    tg.start_soon(taskfunc, i)

                assert var.get() == i + 1

        var = RunVar[int]('var')
        for _ in range(2):
            run(main, backend=anyio_backend_name, backend_options=anyio_backend_options)

    async def test_reset_token_used_on_wrong_runvar(self) -> None:
        var1 = RunVar[str]('var1')
        var2 = RunVar[str]('var2')
        token = var1.set('blah')
        with pytest.raises(ValueError, match='This token does not belong to this RunVar'):
            var2.reset(token)

    async def test_reset_token_used_twice(self) -> None:
        var = RunVar[str]('var')
        token = var.set('blah')
        var.reset(token)
        with pytest.raises(ValueError, match='This token has already been used'):
            var.reset(token)
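# --- Illustrative example (editor's sketch, not part of the original sdist) ---
# RunVar (tested above) scopes a value to a single event loop run, unlike
# contextvars, which follow the task context. A hedged sketch using only the
# public anyio.lowlevel API; the variable name is hypothetical:

import anyio
from anyio.lowlevel import RunVar

_counter = RunVar[int]('counter')


async def bump() -> int:
    try:
        value = _counter.get()
    except LookupError:  # each run() starts with the RunVar unset
        value = 0

    _counter.set(value + 1)
    return _counter.get()


assert anyio.run(bump) == 1
assert anyio.run(bump) == 1  # a fresh run does not see the previous value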
anyio-3.5.0/tests/test_pytest_plugin.py

import pytest
from _pytest.pytester import Testdir

from anyio import get_all_backends

pytestmark = pytest.mark.filterwarnings(
    'ignore:The TerminalReporter.writer attribute is deprecated:pytest.PytestDeprecationWarning:')
pytest_args = '-v', '-p', 'anyio', '-p', 'no:asyncio'


def test_plugin(testdir: Testdir) -> None:
    testdir.makeconftest(
        """
        import sniffio
        import pytest

        from anyio import sleep


        @pytest.fixture
        async def async_fixture():
            await sleep(0)
            return sniffio.current_async_library()


        @pytest.fixture
        async def some_feature():
            yield None
            await sleep(0)
        """
    )

    testdir.makepyfile(
        """
        import asyncio

        import pytest
        import sniffio
        from hypothesis import strategies, given

        from anyio import get_all_backends, sleep


        @pytest.mark.anyio
        async def test_marked_test() -> None:
            # Test that tests marked with @pytest.mark.anyio are run
            pass


        @pytest.mark.anyio
        async def test_async_fixture_from_marked_test(async_fixture):
            # Test that async functions can use async fixtures
            assert async_fixture in get_all_backends()


        def test_async_fixture_from_sync_test(anyio_backend_name, async_fixture):
            # Test that regular functions can use async fixtures too
            assert async_fixture == anyio_backend_name


        @pytest.mark.anyio
        async def test_skip_inline(some_feature):
            # Test for github #214
            pytest.skip("Test that skipping works")
        """
    )

    result = testdir.runpytest(*pytest_args)
    result.assert_outcomes(passed=3 * len(get_all_backends()), skipped=len(get_all_backends()))


def test_asyncio(testdir: Testdir) -> None:
    testdir.makeconftest(
        """
        import asyncio

        import pytest


        @pytest.fixture(scope='class')
        def anyio_backend():
            return 'asyncio'


        @pytest.fixture
        async def setup_fail_fixture():
            def callback():
                raise RuntimeError('failing fixture setup')

            asyncio.get_event_loop().call_soon(callback)
            await asyncio.sleep(0)
            yield None


        @pytest.fixture
        async def teardown_fail_fixture():
            def callback():
                raise RuntimeError('failing fixture teardown')

            yield None
            asyncio.get_event_loop().call_soon(callback)
            await asyncio.sleep(0)
        """
    )

    testdir.makepyfile(
        """
        import asyncio

        import pytest

        pytestmark = pytest.mark.anyio


        class TestClassFixtures:
            @pytest.fixture(scope='class')
            async def async_class_fixture(self, anyio_backend):
                await asyncio.sleep(0)
                return anyio_backend

            def test_class_fixture_in_test_method(self, async_class_fixture, anyio_backend_name):
                assert anyio_backend_name == 'asyncio'
                assert async_class_fixture == 'asyncio'


        async def test_callback_exception_during_test() -> None:
            def callback():
                nonlocal started
                started = True
                raise Exception('foo')

            started = False
            asyncio.get_event_loop().call_soon(callback)
            await asyncio.sleep(0)
            assert started


        async def test_callback_exception_during_setup(setup_fail_fixture):
            pass


        async def test_callback_exception_during_teardown(teardown_fail_fixture):
            pass
        """
    )

    result = testdir.runpytest(*pytest_args)
    result.assert_outcomes(passed=2, failed=1, errors=2)


def test_autouse_async_fixture(testdir: Testdir) -> None:
    testdir.makeconftest(
        """
        import pytest

        autouse_backend = None


        @pytest.fixture(autouse=True)
        async def autouse_async_fixture(anyio_backend_name):
            global autouse_backend
            autouse_backend = anyio_backend_name


        @pytest.fixture
        def autouse_backend_name():
            return autouse_backend
        """
    )

    testdir.makepyfile(
        """
        import pytest
        import sniffio

        from anyio import get_all_backends, sleep


        def test_autouse_backend(autouse_backend_name):
            # Test that async autouse fixtures are triggered
            assert autouse_backend_name in get_all_backends()
        """
    )

    result = testdir.runpytest_subprocess(*pytest_args)
    result.assert_outcomes(passed=len(get_all_backends()))


def test_cancel_scope_in_asyncgen_fixture(testdir: Testdir) -> None:
    testdir.makepyfile(
        """
        import pytest

        from anyio import create_task_group, sleep


        @pytest.fixture
        async def asyncgen_fixture():
            async with create_task_group() as tg:
                tg.cancel_scope.cancel()
                await sleep(1)

            yield 1


        @pytest.mark.anyio
        async def test_cancel_in_asyncgen_fixture(asyncgen_fixture):
            assert asyncgen_fixture == 1
        """
    )

    result = testdir.runpytest_subprocess(*pytest_args)
    result.assert_outcomes(passed=len(get_all_backends()))


def test_hypothesis_module_mark(testdir: Testdir) -> None:
    testdir.makepyfile(
        """
        import pytest
        from hypothesis import given
        from hypothesis.strategies import just

        pytestmark = pytest.mark.anyio


        @given(x=just(1))
        async def test_hypothesis_wrapper(x):
            assert isinstance(x, int)


        @given(x=just(1))
        def test_hypothesis_wrapper_regular(x):
            assert isinstance(x, int)


        @pytest.mark.xfail(strict=True)
        @given(x=just(1))
        async def test_hypothesis_wrapper_failing(x):
            pytest.fail('This test failed successfully')
        """
    )

    result = testdir.runpytest(*pytest_args)
    result.assert_outcomes(passed=len(get_all_backends()) + 1,
                           xfailed=len(get_all_backends()))


def test_hypothesis_function_mark(testdir: Testdir) -> None:
    testdir.makepyfile(
        """
        import pytest
        from hypothesis import given
        from hypothesis.strategies import just


        @pytest.mark.anyio
        @given(x=just(1))
        async def test_anyio_mark_first(x):
            assert isinstance(x, int)


        @given(x=just(1))
        @pytest.mark.anyio
        async def test_anyio_mark_last(x):
            assert isinstance(x, int)


        @pytest.mark.xfail(strict=True)
        @pytest.mark.anyio
        @given(x=just(1))
        async def test_anyio_mark_first_fail(x):
            pytest.fail('This test failed successfully')


        @given(x=just(1))
        @pytest.mark.xfail(strict=True)
        @pytest.mark.anyio
        async def test_anyio_mark_last_fail(x):
            pytest.fail('This test failed successfully')
        """
    )

    result = testdir.runpytest(*pytest_args)
    result.assert_outcomes(passed=2 * len(get_all_backends()),
                           xfailed=2 * len(get_all_backends()))


anyio-3.5.0/tests/test_signals.py

import os
import signal
import sys
from typing import AsyncIterable

import pytest

from anyio import create_task_group, fail_after, open_signal_receiver, to_thread

pytestmark = [
    pytest.mark.anyio,
    pytest.mark.skipif(
        sys.platform == 'win32',
        reason='Signal delivery cannot be tested on Windows',
    ),
]


async def test_receive_signals() -> None:
    with open_signal_receiver(signal.SIGUSR1, signal.SIGUSR2) as sigiter:
        await to_thread.run_sync(os.kill, os.getpid(), signal.SIGUSR1)
        await to_thread.run_sync(os.kill, os.getpid(), signal.SIGUSR2)
        with fail_after(1):
            assert await sigiter.__anext__() == signal.SIGUSR1
            assert await sigiter.__anext__() == signal.SIGUSR2


async def test_task_group_cancellation_open() -> None:
    async def signal_handler() -> None:
        with open_signal_receiver(signal.SIGUSR1) as sigiter:
            async for v in sigiter:
                pytest.fail("SIGUSR1 should not be sent")

            pytest.fail("signal_handler should have been cancelled")

        pytest.fail("open_signal_receiver should not suppress cancellation")

    async with create_task_group() as tg:
        tg.start_soon(signal_handler)
        tg.cancel_scope.cancel()


async def test_task_group_cancellation_consume() -> None:
    async def consume(sigiter: AsyncIterable[int]) -> None:
        async for v in sigiter:
            pytest.fail("SIGUSR1 should not be sent")

        pytest.fail("consume should have been cancelled")

    with open_signal_receiver(signal.SIGUSR1) as sigiter:
        async with create_task_group() as tg:
            tg.start_soon(consume, sigiter)
            tg.cancel_scope.cancel()
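# --- Illustrative example (editor's sketch, not part of the original sdist) ---
# The signal tests above cover open_signal_receiver(); a typical graceful
# shutdown sketch mirroring the pattern under test (POSIX only):

import signal

from anyio import open_signal_receiver


async def wait_for_shutdown() -> int:
    # The receiver yields each delivered signal number until the scope exits.
    with open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
        async for signum in signals:
            return signum

    raise RuntimeError('signal receiver exited without delivering a signal')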
anyio-3.5.0/tests/test_sockets.py

import array
import gc
import io
import os
import platform
import socket
import sys
import threading
import time
from contextlib import suppress
from pathlib import Path
from socket import AddressFamily
from ssl import SSLContext, SSLError
from threading import Thread
from typing import Any, Iterable, Iterator, List, NoReturn, Tuple, Type, TypeVar, Union, cast

import pytest
from _pytest.fixtures import SubRequest
from _pytest.logging import LogCaptureFixture
from _pytest.monkeypatch import MonkeyPatch
from _pytest.tmpdir import TempPathFactory

from anyio import (
    BrokenResourceError, BusyResourceError, ClosedResourceError, Event, ExceptionGroup,
    TypedAttributeLookupError, connect_tcp, connect_unix, create_connected_udp_socket,
    create_task_group, create_tcp_listener, create_udp_socket, create_unix_listener, fail_after,
    getaddrinfo, getnameinfo, move_on_after, sleep, wait_all_tasks_blocked)
from anyio.abc import IPSockAddrType, Listener, SocketAttribute, SocketListener, SocketStream
from anyio.streams.stapled import MultiListener

if sys.version_info >= (3, 8):
    from typing import Literal
else:
    from typing_extensions import Literal

AnyIPAddressFamily = Literal[AddressFamily.AF_UNSPEC, AddressFamily.AF_INET,
                             AddressFamily.AF_INET6]

pytestmark = pytest.mark.anyio

# If a socket can bind to ::1, the current environment has IPv6 properly configured
has_ipv6 = False
if socket.has_ipv6:
    try:
        s = socket.socket(AddressFamily.AF_INET6)
        try:
            s.bind(('::1', 0))
        finally:
            s.close()
            del s
    except OSError:
        pass
    else:
        has_ipv6 = True


@pytest.fixture
def fake_localhost_dns(monkeypatch: MonkeyPatch) -> None:
    def fake_getaddrinfo(*args: Any, **kwargs: Any) -> object:
        # Make it return IPv4 addresses first so we can test the IPv6 preference
        results = real_getaddrinfo(*args, **kwargs)
        return sorted(results, key=lambda item: item[0])

    real_getaddrinfo = socket.getaddrinfo
    monkeypatch.setattr('socket.getaddrinfo', fake_getaddrinfo)


@pytest.fixture(params=[
    pytest.param(AddressFamily.AF_INET, id='ipv4'),
    pytest.param(AddressFamily.AF_INET6, id='ipv6',
                 marks=[pytest.mark.skipif(not has_ipv6, reason='no IPv6 support')])
])
def family(request: SubRequest) -> AnyIPAddressFamily:
    return request.param


@pytest.fixture
def check_asyncio_bug(anyio_backend_name: str, family: AnyIPAddressFamily) -> None:
    if (
        anyio_backend_name == 'asyncio'
        and sys.platform == 'win32'
        and family == AddressFamily.AF_INET6
    ):
        import asyncio
        policy = asyncio.get_event_loop_policy()
        if policy.__class__.__name__ == 'WindowsProactorEventLoopPolicy':
            pytest.skip('Does not work due to a known bug (39148)')


_T = TypeVar("_T")


def _identity(v: _T) -> _T:
    return v


# _ProactorBasePipeTransport.abort() after _ProactorBasePipeTransport.close()
# does not cancel writes: https://bugs.python.org/issue44428
# NOTE: the filterwarnings arguments below were partially lost in extraction
# (text inside angle brackets was stripped); they are reconstructed here from
# the surrounding context and should be treated as a best-effort restoration.
_ignore_win32_resource_warnings = pytest.mark.filterwarnings(
    'ignore:unclosed <socket.socket:ResourceWarning',
    'ignore:unclosed transport <_ProactorSocketTransport:ResourceWarning'
) if sys.platform == 'win32' else _identity


@_ignore_win32_resource_warnings
class TestTCPStream:
    @pytest.fixture
    def server_sock(self, family: AnyIPAddressFamily) -> Iterator[socket.socket]:
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.settimeout(1)
        sock.bind(('localhost', 0))
        sock.listen()
        yield sock
        sock.close()

    @pytest.fixture
    def server_addr(self, server_sock: socket.socket) -> Tuple[str, int]:
        return server_sock.getsockname()[:2]

    async def test_extra_attributes(self, server_sock: socket.socket,
                                    server_addr: Tuple[str, int],
                                    family: AnyIPAddressFamily) -> None:
        async with await connect_tcp(*server_addr) as stream:
            raw_socket = stream.extra(SocketAttribute.raw_socket)
            assert stream.extra(SocketAttribute.family) == family
            assert stream.extra(SocketAttribute.local_address) == raw_socket.getsockname()[:2]
            assert stream.extra(SocketAttribute.local_port) == raw_socket.getsockname()[1]
            assert stream.extra(SocketAttribute.remote_address) == server_addr
            assert stream.extra(SocketAttribute.remote_port) == server_addr[1]

    async def test_send_receive(self, server_sock: socket.socket,
                                server_addr: Tuple[str, int]) -> None:
        async with await connect_tcp(*server_addr) as stream:
            client, _ = server_sock.accept()
            await stream.send(b'blah')
            request = client.recv(100)
            client.sendall(request[::-1])
            response = await stream.receive()
            client.close()

        assert response == b'halb'

    async def test_send_large_buffer(self, server_sock: socket.socket,
                                     server_addr: Tuple[str, int]) -> None:
        def serve() -> None:
            client, _ = server_sock.accept()
            client.sendall(buffer)
            client.close()

        buffer = b'\xff' * 1024 * 1024  # should exceed the maximum kernel send buffer size
        async with await connect_tcp(*server_addr) as stream:
            thread = Thread(target=serve, daemon=True)
            thread.start()
            response = b''
            while len(response) < len(buffer):
                response += await stream.receive()

        thread.join()
        assert response == buffer

    async def test_send_eof(self, server_sock: socket.socket,
                            server_addr: Tuple[str, int]) -> None:
        def serve() -> None:
            client, _ = server_sock.accept()
            request = b''
            while True:
                data = client.recv(100)
                request += data
                if not data:
                    break

            client.sendall(request[::-1])
            client.close()

        async with await connect_tcp(*server_addr) as stream:
            thread = Thread(target=serve, daemon=True)
            thread.start()
            await stream.send(b'hello, ')
            await stream.send(b'world\n')
            await stream.send_eof()
            response = await stream.receive()

        thread.join()
        assert response == b'\ndlrow ,olleh'

    async def test_iterate(self, server_sock: socket.socket,
                           server_addr: Tuple[str, int]) -> None:
        def serve() -> None:
            client, _ = server_sock.accept()
            client.sendall(b'bl')
            event.wait(1)
            client.sendall(b'ah')
            client.close()

        event = threading.Event()
        thread = Thread(target=serve, daemon=True)
        thread.start()
        chunks = []
        async with await connect_tcp(*server_addr) as stream:
            async for chunk in stream:
                chunks.append(chunk)
                event.set()

        thread.join()
        assert chunks == [b'bl', b'ah']

    async def test_socket_options(self, family: AnyIPAddressFamily,
                                  server_addr: Tuple[str, int]) -> None:
        async with await connect_tcp(*server_addr) as stream:
            raw_socket = stream.extra(SocketAttribute.raw_socket)
            assert raw_socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) != 0

    @pytest.mark.skipif(not has_ipv6, reason='IPv6 is not available')
    @pytest.mark.parametrize('local_addr, expected_client_addr', [
        pytest.param('', '::1', id='dualstack'),
        pytest.param('127.0.0.1', '127.0.0.1', id='ipv4'),
        pytest.param('::1', '::1', id='ipv6')
    ])
    async def test_happy_eyeballs(self, local_addr: str, expected_client_addr: str,
                                  fake_localhost_dns: None) -> None:
        client_addr = None, None

        def serve() -> None:
            nonlocal client_addr
            client, client_addr = server_sock.accept()
            client.close()

        family = AddressFamily.AF_INET if local_addr == '127.0.0.1' else AddressFamily.AF_INET6
        server_sock = socket.socket(family)
        server_sock.bind((local_addr, 0))
        server_sock.listen()
        port = server_sock.getsockname()[1]
        thread = Thread(target=serve, daemon=True)
        thread.start()

        async with await connect_tcp('localhost', port):
            pass

        thread.join()
        server_sock.close()
        assert client_addr[0] == expected_client_addr

    @pytest.mark.parametrize('target, exception_class', [
        pytest.param(
            'localhost', ExceptionGroup, id='multi',
            marks=[pytest.mark.skipif(not has_ipv6, reason='IPv6 is not available')]
        ),
        pytest.param('127.0.0.1', ConnectionRefusedError, id='single')
    ])
    async def test_connection_refused(
        self, target: str,
        exception_class: Union[Type[ExceptionGroup], Type[ConnectionRefusedError]],
        fake_localhost_dns: None,
    ) -> None:
        dummy_socket = socket.socket(AddressFamily.AF_INET6)
        dummy_socket.bind(('::', 0))
        free_port = dummy_socket.getsockname()[1]
        dummy_socket.close()

        with pytest.raises(OSError) as exc:
            await connect_tcp(target, free_port)

        assert exc.match('All connection attempts failed')
        assert isinstance(exc.value.__cause__, exception_class)
        if isinstance(exc.value.__cause__, ExceptionGroup):
            for exception in exc.value.__cause__.exceptions:
                assert isinstance(exception, ConnectionRefusedError)

    async def test_receive_timeout(self, server_sock: socket.socket,
                                   server_addr: Tuple[str, int]) -> None:
        def serve() -> None:
            conn, _ = server_sock.accept()
            time.sleep(1)
            conn.close()

        thread = Thread(target=serve, daemon=True)
        thread.start()
        async with await connect_tcp(*server_addr) as stream:
            start_time = time.monotonic()
            with move_on_after(0.1):
                while time.monotonic() - start_time < 0.3:
                    await stream.receive(1)

                pytest.fail('The timeout was not respected')

    async def test_concurrent_send(self, server_addr: Tuple[str, int]) -> None:
        async def send_data() -> NoReturn:
            while True:
                await stream.send(b'\x00' * 4096)

        async with await connect_tcp(*server_addr) as stream:
            async with create_task_group() as tg:
                tg.start_soon(send_data)
                await wait_all_tasks_blocked()
                with pytest.raises(BusyResourceError) as exc:
                    await stream.send(b'foo')

                exc.match('already writing to')
                tg.cancel_scope.cancel()

    async def test_concurrent_receive(self, server_addr: Tuple[str, int]) -> None:
        async with await connect_tcp(*server_addr) as client:
            async with create_task_group() as tg:
                tg.start_soon(client.receive)
                await wait_all_tasks_blocked()
                try:
                    with pytest.raises(BusyResourceError) as exc:
                        await client.receive()

                    exc.match('already reading from')
                finally:
                    tg.cancel_scope.cancel()

    async def test_close_during_receive(self, server_addr: Tuple[str, int]) -> None:
        async def interrupt() -> None:
            await wait_all_tasks_blocked()
            await stream.aclose()

        async with await connect_tcp(*server_addr) as stream:
            async with create_task_group() as tg:
                tg.start_soon(interrupt)
                with pytest.raises(ClosedResourceError):
                    await stream.receive()

    async def test_receive_after_close(self, server_addr: Tuple[str, int]) -> None:
        stream = await connect_tcp(*server_addr)
        await stream.aclose()
        with pytest.raises(ClosedResourceError):
            await stream.receive()

    async def test_send_after_close(self, server_addr: Tuple[str, int]) -> None:
        stream = await connect_tcp(*server_addr)
        await stream.aclose()
        with pytest.raises(ClosedResourceError):
            await stream.send(b'foo')

    async def test_send_after_peer_closed(self, family: AnyIPAddressFamily) -> None:
        def serve_once() -> None:
            client_sock, _ = server_sock.accept()
            client_sock.close()
            server_sock.close()

        server_sock = socket.socket(family, socket.SOCK_STREAM)
        server_sock.settimeout(1)
        server_sock.bind(('localhost', 0))
        server_addr = server_sock.getsockname()[:2]
        server_sock.listen()
        thread = Thread(target=serve_once, daemon=True)
        thread.start()

        with pytest.raises(BrokenResourceError):
            async with await connect_tcp(*server_addr) as stream:
                for _ in range(1000):
                    await stream.send(b'foo')

        thread.join()

    async def test_connect_tcp_with_tls(self, server_context: SSLContext,
                                        client_context: SSLContext,
                                        server_sock: socket.socket,
                                        server_addr: Tuple[str, int]) -> None:
        def serve() -> None:
            with suppress(socket.timeout):
                client, addr = server_sock.accept()
                client.settimeout(1)
                client = server_context.wrap_socket(client, server_side=True)
                data = client.recv(100)
                client.sendall(data[::-1])
                client.unwrap()
                client.close()

        # The TLSStream tests are more comprehensive than this one!
        thread = Thread(target=serve, daemon=True)
        thread.start()
        async with await connect_tcp(*server_addr, tls_hostname='localhost',
                                     ssl_context=client_context) as stream:
            await stream.send(b'hello')
            response = await stream.receive()

        assert response == b'olleh'
        thread.join()

    async def test_connect_tcp_with_tls_cert_check_fail(self, server_context: SSLContext,
                                                        server_sock: socket.socket,
                                                        server_addr: Tuple[str, int]) -> None:
        thread_exception = None

        def serve() -> None:
            nonlocal thread_exception
            client, addr = server_sock.accept()
            with client:
                client.settimeout(1)
                try:
                    server_context.wrap_socket(client, server_side=True)
                except OSError:
                    pass
                except BaseException as exc:
                    thread_exception = exc

        thread = Thread(target=serve, daemon=True)
        thread.start()
        with pytest.raises(SSLError):
            await connect_tcp(*server_addr, tls_hostname='localhost')

        thread.join()
        assert thread_exception is None

    @pytest.mark.parametrize('anyio_backend', ['asyncio'])
    async def test_unretrieved_future_exception_server_crash(
            self, family: AnyIPAddressFamily, caplog: LogCaptureFixture) -> None:
        """
        Tests that there won't be any leftover Futures that don't get their exceptions retrieved.

        See https://github.com/encode/httpcore/issues/382 for details.

        """
        def serve() -> None:
            sock, addr = server_sock.accept()
            event.wait(3)
            del sock
            gc.collect()

        server_sock = socket.socket(family, socket.SOCK_STREAM)
        server_sock.settimeout(1)
        server_sock.bind(('localhost', 0))
        server_sock.listen()
        server_addr = server_sock.getsockname()[:2]
        event = threading.Event()
        thread = Thread(target=serve)
        thread.start()
        async with await connect_tcp(*server_addr) as stream:
            await stream.send(b'GET')
            event.set()
            with pytest.raises(BrokenResourceError):
                await stream.receive()

        thread.join()
        gc.collect()
        assert not caplog.text
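# --- Illustrative example (editor's sketch, not part of the original sdist) ---
# TestTCPStream above exercises the client-side API; a minimal round trip
# sketch using only calls shown in the tests (host and port are placeholders):

from anyio import connect_tcp


async def tcp_ping(host: str, port: int) -> bytes:
    # connect_tcp() returns a SocketStream; leaving the context closes it.
    async with await connect_tcp(host, port) as stream:
        await stream.send(b'ping')
        return await stream.receive()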
class TestTCPListener:
    async def test_extra_attributes(self, family: AnyIPAddressFamily) -> None:
        async with await create_tcp_listener(local_host='localhost', family=family) as multi:
            assert multi.extra(SocketAttribute.family) == family
            for listener in multi.listeners:
                raw_socket = listener.extra(SocketAttribute.raw_socket)
                assert listener.extra(SocketAttribute.family) == family
                assert listener.extra(SocketAttribute.local_address) == \
                    raw_socket.getsockname()[:2]
                assert listener.extra(SocketAttribute.local_port) == raw_socket.getsockname()[1]
                pytest.raises(TypedAttributeLookupError, listener.extra,
                              SocketAttribute.remote_address)
                pytest.raises(TypedAttributeLookupError, listener.extra,
                              SocketAttribute.remote_port)

    @pytest.mark.parametrize('family', [
        pytest.param(AddressFamily.AF_INET, id='ipv4'),
        pytest.param(AddressFamily.AF_INET6, id='ipv6',
                     marks=[pytest.mark.skipif(not has_ipv6, reason='no IPv6 support')]),
        pytest.param(socket.AF_UNSPEC, id='both',
                     marks=[pytest.mark.skipif(not has_ipv6, reason='no IPv6 support')])
    ])
    async def test_accept(self, family: AnyIPAddressFamily) -> None:
        async with await create_tcp_listener(local_host='localhost', family=family) as multi:
            for listener in multi.listeners:
                client = socket.socket(listener.extra(SocketAttribute.family))
                client.settimeout(1)
                client.connect(listener.extra(SocketAttribute.local_address))
                assert isinstance(listener, SocketListener)
                stream = await listener.accept()
                client.sendall(b'blah')
                request = await stream.receive()
                await stream.send(request[::-1])
                assert client.recv(100) == b'halb'
                client.close()
                await stream.aclose()

    async def test_accept_after_close(self, family: AnyIPAddressFamily) -> None:
        async with await create_tcp_listener(local_host='localhost', family=family) as multi:
            for listener in multi.listeners:
                await listener.aclose()
                assert isinstance(listener, SocketListener)
                with pytest.raises(ClosedResourceError):
                    await listener.accept()

    async def test_socket_options(self, family: AnyIPAddressFamily) -> None:
        async with await create_tcp_listener(local_host='localhost', family=family) as multi:
            for listener in multi.listeners:
                raw_socket = listener.extra(SocketAttribute.raw_socket)
                if sys.platform == 'win32':
                    assert raw_socket.getsockopt(socket.SOL_SOCKET,
                                                 socket.SO_EXCLUSIVEADDRUSE) != 0
                else:
                    assert raw_socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) != 0

                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 80000)
                assert raw_socket.getsockopt(socket.SOL_SOCKET,
                                             socket.SO_RCVBUF) in (80000, 160000)

                client = socket.socket(raw_socket.family)
                client.settimeout(1)
                client.connect(raw_socket.getsockname())

                assert isinstance(listener, SocketListener)
                async with await listener.accept() as stream:
                    raw_socket = stream.extra(SocketAttribute.raw_socket)
                    assert raw_socket.gettimeout() == 0
                    assert raw_socket.family == listener.extra(SocketAttribute.family)
                    assert raw_socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) != 0

                client.close()

    @pytest.mark.skipif(not hasattr(socket, "SO_REUSEPORT"),
                        reason='SO_REUSEPORT option not supported')
    async def test_reuse_port(self, family: AnyIPAddressFamily) -> None:
        multi1 = await create_tcp_listener(local_host='localhost', family=family,
                                           reuse_port=True)
        assert len(multi1.listeners) == 1

        multi2 = await create_tcp_listener(
            local_host='localhost',
            local_port=multi1.listeners[0].extra(SocketAttribute.local_port),
            family=family, reuse_port=True)
        assert len(multi2.listeners) == 1

        assert multi1.listeners[0].extra(SocketAttribute.local_address) == \
            multi2.listeners[0].extra(SocketAttribute.local_address)
        await multi1.aclose()
        await multi2.aclose()

    async def test_close_from_other_task(self, family: AnyIPAddressFamily) -> None:
        listener = await create_tcp_listener(local_host='localhost', family=family)
        with pytest.raises(ClosedResourceError):
            async with create_task_group() as tg:
                tg.start_soon(listener.serve, lambda stream: None)
                await wait_all_tasks_blocked()
                await listener.aclose()
                tg.cancel_scope.cancel()

    async def test_send_after_eof(self, family: AnyIPAddressFamily) -> None:
        async def handle(stream: SocketStream) -> None:
            async with stream:
                await stream.send(b'Hello\n')

        multi = await create_tcp_listener(family=family, local_host='localhost')
        async with multi, create_task_group() as tg:
            tg.start_soon(multi.serve, handle)
            await wait_all_tasks_blocked()

            with socket.socket(family) as client:
                client.connect(multi.extra(SocketAttribute.local_address))
                client.shutdown(socket.SHUT_WR)
                client.setblocking(False)
                with fail_after(1):
                    while True:
                        try:
                            message = client.recv(10)
                        except BlockingIOError:
                            await sleep(0)
                        else:
                            assert message == b'Hello\n'
                            break

            tg.cancel_scope.cancel()


@pytest.mark.skipif(sys.platform == 'win32',
                    reason='UNIX sockets are not available on Windows')
class TestUNIXStream:
    @pytest.fixture
    def socket_path(self, tmp_path_factory: TempPathFactory) -> Path:
        return tmp_path_factory.mktemp('unix').joinpath('socket')

    @pytest.fixture(params=[False, True], ids=["str", "path"])
    def socket_path_or_str(self, request: SubRequest, socket_path: Path) -> Union[Path, str]:
        return socket_path if request.param else str(socket_path)

    @pytest.fixture
    def server_sock(self, socket_path: Path) -> Iterable[socket.socket]:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(1)
        sock.bind(str(socket_path))
        sock.listen()
        yield sock
        sock.close()

    async def test_extra_attributes(self, server_sock: socket.socket,
                                    socket_path: Path) -> None:
        async with await connect_unix(socket_path) as stream:
            raw_socket = stream.extra(SocketAttribute.raw_socket)
            assert stream.extra(SocketAttribute.family) == socket.AF_UNIX
            assert stream.extra(SocketAttribute.local_address) == raw_socket.getsockname()
            assert stream.extra(SocketAttribute.remote_address) == str(socket_path)
            pytest.raises(TypedAttributeLookupError, stream.extra, SocketAttribute.local_port)
            pytest.raises(TypedAttributeLookupError, stream.extra, SocketAttribute.remote_port)

    async def test_send_receive(self, server_sock: socket.socket,
                                socket_path_or_str: Union[Path, str]) -> None:
        async with await connect_unix(socket_path_or_str) as stream:
            client, _ = server_sock.accept()
            await stream.send(b'blah')
            request = client.recv(100)
            client.sendall(request[::-1])
            response = await stream.receive()
            client.close()

        assert response == b'halb'

    async def test_send_large_buffer(self, server_sock: socket.socket,
                                     socket_path: Path) -> None:
        def serve() -> None:
            client, _ = server_sock.accept()
            client.sendall(buffer)
            client.close()

        buffer = b'\xff' * 1024 * 1024  # should exceed the maximum kernel send buffer size
        async with await connect_unix(socket_path) as stream:
            thread = Thread(target=serve, daemon=True)
            thread.start()
            response = b''
            while len(response) < len(buffer):
                response += await stream.receive()

        thread.join()
        assert response == buffer

    async def test_receive_fds(self, server_sock: socket.socket, socket_path: Path,
                               tmp_path: Path) -> None:
        def serve() -> None:
            path1 = tmp_path / 'file1'
            path2 = tmp_path / 'file2'
            path1.write_text('Hello, ')
            path2.write_text('World!')
            with path1.open() as file1, path2.open() as file2:
                fdarray = array.array('i', [file1.fileno(), file2.fileno()])
                client, _ = server_sock.accept()
                cmsg = (socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)
                with client:
                    client.sendmsg([b'test'], [cmsg])

        async with await connect_unix(socket_path) as stream:
            thread = Thread(target=serve, daemon=True)
            thread.start()
            message, fds = await stream.receive_fds(10, 2)
            thread.join()

        text = ''
        for fd in fds:
            with os.fdopen(fd) as file:
                text += file.read()

        assert message == b'test'
        assert text == 'Hello, World!'

    async def test_receive_fds_bad_args(self, server_sock: socket.socket,
                                        socket_path: Path) -> None:
        async with await connect_unix(socket_path) as stream:
            for msglen in (-1, 'foo'):
                with pytest.raises(ValueError, match='msglen must be a non-negative integer'):
                    await stream.receive_fds(msglen, 0)  # type: ignore[arg-type]

            for maxfds in (0, 'foo'):
                with pytest.raises(ValueError, match='maxfds must be a positive integer'):
                    await stream.receive_fds(0, maxfds)  # type: ignore[arg-type]

    async def test_send_fds(self, server_sock: socket.socket, socket_path: Path,
                            tmp_path: Path) -> None:
        def serve() -> None:
            fds = array.array('i')
            client, _ = server_sock.accept()
            msg, ancdata, *_ = client.recvmsg(10, socket.CMSG_LEN(2 * fds.itemsize))
            client.close()
            assert msg == b'test'
            for cmsg_level, cmsg_type, cmsg_data in ancdata:
                assert cmsg_level == socket.SOL_SOCKET
                assert cmsg_type == socket.SCM_RIGHTS
                fds.frombytes(cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])

            text = ''
            for fd in fds:
                with os.fdopen(fd) as file:
                    text += file.read()

            assert text == 'Hello, World!'

        path1 = tmp_path / 'file1'
        path2 = tmp_path / 'file2'
        path1.write_text('Hello, ')
        path2.write_text('World!')
        with path1.open() as file1, path2.open() as file2, fail_after(2):
            assert isinstance(file1, io.TextIOWrapper)
            assert isinstance(file2, io.TextIOWrapper)
            async with await connect_unix(socket_path) as stream:
                thread = Thread(target=serve, daemon=True)
                thread.start()
                await stream.send_fds(b'test', [file1, file2])
                thread.join()

    async def test_send_eof(self, server_sock: socket.socket, socket_path: Path) -> None:
        def serve() -> None:
            client, _ = server_sock.accept()
            request = b''
            while True:
                data = client.recv(100)
                request += data
                if not data:
                    break

            client.sendall(request[::-1])
            client.close()

        async with await connect_unix(socket_path) as stream:
            thread = Thread(target=serve, daemon=True)
            thread.start()
            await stream.send(b'hello, ')
            await stream.send(b'world\n')
            await stream.send_eof()
            response = await stream.receive()

        thread.join()
        assert response == b'\ndlrow ,olleh'

    async def test_iterate(self, server_sock: socket.socket, socket_path: Path) -> None:
        def serve() -> None:
            client, _ = server_sock.accept()
            client.sendall(b'bl')
            time.sleep(0.05)
            client.sendall(b'ah')
            client.close()

        thread = Thread(target=serve, daemon=True)
        thread.start()
        chunks = []
        async with await connect_unix(socket_path) as stream:
            async for chunk in stream:
                chunks.append(chunk)

        thread.join()
        assert chunks == [b'bl', b'ah']

    async def test_send_fds_bad_args(self, server_sock: socket.socket,
                                     socket_path: Path) -> None:
        async with await connect_unix(socket_path) as stream:
            with pytest.raises(ValueError, match='message must not be empty'):
                await stream.send_fds(b'', [0])

            with pytest.raises(ValueError, match='fds must not be empty'):
                await stream.send_fds(b'test', [])

    async def test_concurrent_send(self, server_sock: socket.socket,
                                   socket_path: Path) -> None:
        async def send_data() -> NoReturn:
            while True:
                await client.send(b'\x00' * 4096)

        async with await connect_unix(socket_path) as client:
            async with create_task_group() as tg:
                tg.start_soon(send_data)
                await wait_all_tasks_blocked()
                with pytest.raises(BusyResourceError) as exc:
                    await client.send(b'foo')

                exc.match('already writing to')
                tg.cancel_scope.cancel()

    async def test_concurrent_receive(self, server_sock: socket.socket,
                                      socket_path: Path) -> None:
        async with await connect_unix(socket_path) as client:
            async with create_task_group() as tg:
                tg.start_soon(client.receive)
                await wait_all_tasks_blocked()
                try:
                    with pytest.raises(BusyResourceError) as exc:
                        await client.receive()

                    exc.match('already reading from')
                finally:
                    tg.cancel_scope.cancel()

    async def test_close_during_receive(self, server_sock: socket.socket,
                                        socket_path: Path) -> None:
        async def interrupt() -> None:
            await wait_all_tasks_blocked()
            await stream.aclose()

        async with await connect_unix(socket_path) as stream:
            async with create_task_group() as tg:
                tg.start_soon(interrupt)
                with pytest.raises(ClosedResourceError):
                    await stream.receive()

    async def test_receive_after_close(self, server_sock: socket.socket,
                                       socket_path: Path) -> None:
        stream = await connect_unix(socket_path)
        await stream.aclose()
        with pytest.raises(ClosedResourceError):
            await stream.receive()

    async def test_send_after_close(self, server_sock: socket.socket,
                                    socket_path: Path) -> None:
        stream = await connect_unix(socket_path)
        await stream.aclose()
        with pytest.raises(ClosedResourceError):
            await stream.send(b'foo')

    async def test_cannot_connect(self, socket_path: Path) -> None:
        with pytest.raises(FileNotFoundError):
            await connect_unix(socket_path)
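# --- Illustrative example (editor's sketch, not part of the original sdist) ---
# The UNIX stream tests above drive a threaded stdlib server; the anyio
# server-side counterpart looks like this (POSIX only, the socket path is a
# placeholder; serve() runs until the surrounding scope is cancelled):

from anyio import create_unix_listener
from anyio.abc import SocketStream


async def serve_echo(path: str) -> None:
    async def handle(stream: SocketStream) -> None:
        async with stream:
            async for chunk in stream:  # iterate until the peer sends EOF
                await stream.send(chunk)

    listener = await create_unix_listener(path)
    await listener.serve(handle)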
@pytest.mark.skipif(sys.platform == 'win32',
                    reason='UNIX sockets are not available on Windows')
class TestUNIXListener:
    @pytest.fixture
    def socket_path(self, tmp_path_factory: TempPathFactory) -> Path:
        return tmp_path_factory.mktemp('unix').joinpath('socket')

    @pytest.fixture(params=[False, True], ids=["str", "path"])
    def socket_path_or_str(self, request: SubRequest, socket_path: Path) -> Union[Path, str]:
        return socket_path if request.param else str(socket_path)

    async def test_extra_attributes(self, socket_path: Path) -> None:
        async with await create_unix_listener(socket_path) as listener:
            raw_socket = listener.extra(SocketAttribute.raw_socket)
            assert listener.extra(SocketAttribute.family) == socket.AF_UNIX
            assert listener.extra(SocketAttribute.local_address) == raw_socket.getsockname()
            pytest.raises(TypedAttributeLookupError, listener.extra, SocketAttribute.local_port)
            pytest.raises(TypedAttributeLookupError, listener.extra,
                          SocketAttribute.remote_address)
            pytest.raises(TypedAttributeLookupError, listener.extra,
                          SocketAttribute.remote_port)

    async def test_accept(self, socket_path_or_str: Union[Path, str]) -> None:
        async with await create_unix_listener(socket_path_or_str) as listener:
            client = socket.socket(socket.AF_UNIX)
            client.settimeout(1)
            client.connect(str(socket_path_or_str))
            stream = await listener.accept()
            client.sendall(b'blah')
            request = await stream.receive()
            await stream.send(request[::-1])
            assert client.recv(100) == b'halb'
            client.close()
            await stream.aclose()

    async def test_socket_options(self, socket_path: Path) -> None:
        async with await create_unix_listener(socket_path) as listener:
            listener_socket = listener.extra(SocketAttribute.raw_socket)
            assert listener_socket.family == socket.AddressFamily.AF_UNIX
            listener_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 80000)
            assert listener_socket.getsockopt(socket.SOL_SOCKET,
                                              socket.SO_RCVBUF) in (80000, 160000)

            client = socket.socket(listener_socket.family)
            client.settimeout(1)
            client.connect(listener_socket.getsockname())

            async with await listener.accept() as stream:
                assert stream.extra(SocketAttribute.raw_socket).gettimeout() == 0
                assert stream.extra(SocketAttribute.family) == listener_socket.family

            client.close()

    async def test_send_after_eof(self, socket_path: Path) -> None:
        async def handle(stream: SocketStream) -> None:
            async with stream:
                await stream.send(b'Hello\n')

        async with await create_unix_listener(socket_path) as listener, \
                create_task_group() as tg:
            tg.start_soon(listener.serve, handle)
            await wait_all_tasks_blocked()

            with socket.socket(socket.AF_UNIX) as client:
                client.connect(str(socket_path))
                client.shutdown(socket.SHUT_WR)
                client.setblocking(False)
                with fail_after(1):
                    while True:
                        try:
                            message = client.recv(10)
                        except BlockingIOError:
                            await sleep(0)
                        else:
                            assert message == b'Hello\n'
                            break

            tg.cancel_scope.cancel()

    async def test_bind_twice(self, socket_path: Path) -> None:
        """Test that the previous socket is removed before binding to the path."""
        for _ in range(2):
            async with await create_unix_listener(socket_path):
                pass


async def test_multi_listener(tmp_path_factory: TempPathFactory) -> None:
    async def handle(stream: SocketStream) -> None:
        client_addresses.append(stream.extra(SocketAttribute.remote_address))
        event.set()
        await stream.aclose()

    client_addresses: List[Union[str, IPSockAddrType]] = []
    listeners: List[Listener] = [await create_tcp_listener(local_host='localhost')]
    if sys.platform != 'win32':
        socket_path = tmp_path_factory.mktemp('unix').joinpath('socket')
        listeners.append(await create_unix_listener(socket_path))

    expected_addresses: List[Union[str, IPSockAddrType]] = []
    async with MultiListener(listeners) as multi_listener:
        async with create_task_group() as tg:
            tg.start_soon(multi_listener.serve, handle)
            for listener in multi_listener.listeners:
                event = Event()
                local_address = listener.extra(SocketAttribute.local_address)
                if sys.platform != 'win32' and listener.extra(SocketAttribute.family) == \
                        socket.AddressFamily.AF_UNIX:
                    assert isinstance(local_address, str)
                    stream: SocketStream = await connect_unix(local_address)
                else:
                    assert isinstance(local_address, tuple)
                    stream = await connect_tcp(*local_address)

                expected_addresses.append(stream.extra(SocketAttribute.local_address))
                await event.wait()
                await stream.aclose()

            tg.cancel_scope.cancel()

    assert client_addresses == expected_addresses
exc.match('already reading from') finally: tg.cancel_scope.cancel() async def test_close_during_receive(self) -> None: async def close_when_blocked() -> None: await wait_all_tasks_blocked() await udp.aclose() async with await create_udp_socket(family=AddressFamily.AF_INET, local_host='localhost') as udp: async with create_task_group() as tg: tg.start_soon(close_when_blocked) with pytest.raises(ClosedResourceError): await udp.receive() async def test_receive_after_close(self) -> None: udp = await create_udp_socket(family=AddressFamily.AF_INET, local_host='localhost') await udp.aclose() with pytest.raises(ClosedResourceError): await udp.receive() async def test_send_after_close(self) -> None: udp = await create_udp_socket(family=AddressFamily.AF_INET, local_host='localhost') host, port = udp.extra(SocketAttribute.local_address) # type: ignore[misc] await udp.aclose() with pytest.raises(ClosedResourceError): await udp.sendto(b'foo', host, port) async def test_create_unbound_socket(self, family: AnyIPAddressFamily) -> None: """Regression test for #360.""" async with await create_udp_socket(family=family) as udp: local_address = cast(IPSockAddrType, udp.extra(SocketAttribute.local_address)) assert local_address[1] > 0 @pytest.mark.usefixtures('check_asyncio_bug') class TestConnectedUDPSocket: async def test_extra_attributes(self, family: AnyIPAddressFamily) -> None: async with await create_connected_udp_socket('localhost', 5000, family=family) as udp: raw_socket = udp.extra(SocketAttribute.raw_socket) assert udp.extra(SocketAttribute.family) == family assert udp.extra(SocketAttribute.local_address) == raw_socket.getsockname()[:2] assert udp.extra(SocketAttribute.local_port) == raw_socket.getsockname()[1] assert udp.extra(SocketAttribute.remote_address) == raw_socket.getpeername()[:2] assert udp.extra(SocketAttribute.remote_port) == 5000 async def test_send_receive(self, family: AnyIPAddressFamily) -> None: async with await create_udp_socket(family=family, local_host='localhost') as udp1: host, port = udp1.extra(SocketAttribute.local_address) # type: ignore[misc] async with await create_connected_udp_socket( host, port, local_host='localhost', family=family) as udp2: host, port = udp2.extra(SocketAttribute.local_address) # type: ignore[misc] await udp2.send(b'blah') request = await udp1.receive() assert request == (b'blah', (host, port)) await udp1.sendto(b'halb', host, port) response = await udp2.receive() assert response == b'halb' async def test_iterate(self, family: AnyIPAddressFamily) -> None: async def serve() -> None: async for packet in udp2: await udp2.send(packet[::-1]) async with await create_udp_socket(family=family, local_host='localhost') as udp1: host, port = udp1.extra(SocketAttribute.local_address) # type: ignore[misc] async with await create_connected_udp_socket(host, port) as udp2: host, port = udp2.extra(SocketAttribute.local_address) # type: ignore[misc] async with create_task_group() as tg: tg.start_soon(serve) await udp1.sendto(b'FOOBAR', host, port) assert await udp1.receive() == (b'RABOOF', (host, port)) await udp1.sendto(b'123456', host, port) assert await udp1.receive() == (b'654321', (host, port)) tg.cancel_scope.cancel() @pytest.mark.skipif(not hasattr(socket, "SO_REUSEPORT"), reason='SO_REUSEPORT option not supported') async def test_reuse_port(self, family: AnyIPAddressFamily) -> None: async with await create_connected_udp_socket( 'localhost', 6000, family=family, local_host='localhost', reuse_port=True) as udp: port = udp.extra(SocketAttribute.local_port) assert 
port != 0 async with await create_connected_udp_socket( 'localhost', 6001, family=family, local_host='localhost', local_port=port, reuse_port=True) as udp2: assert port == udp2.extra(SocketAttribute.local_port) async def test_concurrent_receive(self) -> None: async with await create_connected_udp_socket( 'localhost', 5000, local_host='localhost', family=AddressFamily.AF_INET) as udp: async with create_task_group() as tg: tg.start_soon(udp.receive) await wait_all_tasks_blocked() try: with pytest.raises(BusyResourceError) as exc: await udp.receive() exc.match('already reading from') finally: tg.cancel_scope.cancel() async def test_close_during_receive(self) -> None: async def close_when_blocked() -> None: await wait_all_tasks_blocked() await udp.aclose() async with await create_connected_udp_socket( 'localhost', 5000, local_host='localhost', family=AddressFamily.AF_INET) as udp: async with create_task_group() as tg: tg.start_soon(close_when_blocked) with pytest.raises(ClosedResourceError): await udp.receive() async def test_receive_after_close(self, family: AnyIPAddressFamily) -> None: udp = await create_connected_udp_socket('localhost', 5000, local_host='localhost', family=family) await udp.aclose() with pytest.raises(ClosedResourceError): await udp.receive() async def test_send_after_close(self, family: AnyIPAddressFamily) -> None: udp = await create_connected_udp_socket('localhost', 5000, local_host='localhost', family=family) await udp.aclose() with pytest.raises(ClosedResourceError): await udp.send(b'foo') @pytest.mark.network async def test_getaddrinfo() -> None: # IDNA 2003 gets this wrong correct = await getaddrinfo('faß.de', 0) wrong = await getaddrinfo('fass.de', 0) assert correct != wrong @pytest.mark.parametrize('sock_type', [socket.SOCK_STREAM, socket.SocketKind.SOCK_STREAM]) async def test_getaddrinfo_ipv6addr(sock_type: Literal[socket.SocketKind.SOCK_STREAM]) -> None: # IDNA trips up over raw IPv6 addresses proto = 0 if platform.system() == 'Windows' else 6 assert await getaddrinfo('::1', 0, type=sock_type) == [ (socket.AddressFamily.AF_INET6, socket.SocketKind.SOCK_STREAM, proto, '', ('::1', 0)) ] async def test_getnameinfo() -> None: expected_result = socket.getnameinfo(('127.0.0.1', 6666), 0) result = await getnameinfo(('127.0.0.1', 6666)) assert result == expected_result anyio-3.5.0/tests/test_subprocesses.py000066400000000000000000000073061416724134300201440ustar00rootroot00000000000000import os import platform import sys from pathlib import Path from subprocess import CalledProcessError from textwrap import dedent from typing import List, Union import pytest from anyio import open_process, run_process from anyio.streams.buffered import BufferedByteReceiveStream pytestmark = pytest.mark.anyio @pytest.fixture(autouse=True) def check_compatibility(anyio_backend_name: str) -> None: if anyio_backend_name == 'asyncio': if platform.system() == 'Windows' and sys.version_info < (3, 8): pytest.skip('Python < 3.8 uses SelectorEventLoop by default and it does not support ' 'subprocesses') @pytest.mark.parametrize('shell, command', [ pytest.param(True, f'{sys.executable} -c "import sys; print(sys.stdin.read()[::-1])"', id='shell'), pytest.param(False, [sys.executable, '-c', 'import sys; print(sys.stdin.read()[::-1])'], id='exec') ]) async def test_run_process(shell: bool, command: Union[str, List[str]], anyio_backend_name: str) -> None: process = await run_process(command, input=b'abc') assert process.returncode == 0 assert process.stdout.rstrip() == b'cba' async def 
test_run_process_checked() -> None: with pytest.raises(CalledProcessError) as exc: await run_process([sys.executable, '-c', 'import sys; print("stderr-text", file=sys.stderr); ' 'print("stdout-text"); sys.exit(1)'], check=True) assert exc.value.returncode == 1 assert exc.value.stdout.rstrip() == b'stdout-text' assert exc.value.stderr.rstrip() == b'stderr-text' @pytest.mark.skipif(platform.system() == 'Windows', reason='process.terminate() kills the process instantly on Windows') async def test_terminate(tmp_path: Path) -> None: script_path = tmp_path / 'script.py' script_path.write_text(dedent("""\ import signal, sys, time def terminate(signum, frame): sys.exit(2) signal.signal(signal.SIGTERM, terminate) print('ready', flush=True) time.sleep(5) """)) async with await open_process([sys.executable, str(script_path)]) as process: stdout = process.stdout assert stdout is not None buffered_stdout = BufferedByteReceiveStream(stdout) line = await buffered_stdout.receive_until(b'\n', 100) assert line.rstrip() == b'ready' process.terminate() assert await process.wait() == 2 async def test_process_cwd(tmp_path: Path) -> None: """Test that `cwd` is successfully passed to the subprocess implementation""" cmd = [sys.executable, "-c", "import os; print(os.getcwd())"] result = await run_process(cmd, cwd=tmp_path) assert result.stdout.decode().strip() == str(tmp_path) async def test_process_env() -> None: """Test that `env` is successfully passed to the subprocess implementation""" env = os.environ.copy() env.update({"foo": "bar"}) cmd = [sys.executable, "-c", "import os; print(os.environ['foo'])"] result = await run_process(cmd, env=env) assert result.stdout.decode().strip() == env["foo"] @pytest.mark.skipif(platform.system() == 'Windows', reason='Windows does not have os.getsid()') async def test_process_new_session_sid() -> None: """Test that start_new_session is successfully passed to the subprocess implementation""" sid = os.getsid(os.getpid()) cmd = [sys.executable, "-c", "import os; print(os.getsid(os.getpid()))"] result = await run_process(cmd) assert result.stdout.decode().strip() == str(sid) result = await run_process(cmd, start_new_session=True) assert result.stdout.decode().strip() != str(sid) anyio-3.5.0/tests/test_synchronization.py000066400000000000000000000407571416724134300206740ustar00rootroot00000000000000import asyncio from typing import Optional import pytest from anyio import ( CancelScope, Condition, Event, Lock, Semaphore, WouldBlock, create_task_group, to_thread, wait_all_tasks_blocked) from anyio.abc import CapacityLimiter, TaskStatus pytestmark = pytest.mark.anyio class TestLock: async def test_contextmanager(self) -> None: async def task() -> None: assert lock.locked() async with lock: results.append('2') results = [] lock = Lock() async with create_task_group() as tg: async with lock: tg.start_soon(task) await wait_all_tasks_blocked() results.append('1') assert not lock.locked() assert results == ['1', '2'] async def test_manual_acquire(self) -> None: async def task() -> None: assert lock.locked() await lock.acquire() try: results.append('2') finally: lock.release() results = [] lock = Lock() async with create_task_group() as tg: await lock.acquire() try: tg.start_soon(task) await wait_all_tasks_blocked() results.append('1') finally: lock.release() assert not lock.locked() assert results == ['1', '2'] async def test_acquire_nowait(self) -> None: lock = Lock() lock.acquire_nowait() assert lock.locked() async def test_acquire_nowait_wouldblock(self) -> None: async def try_lock() 
-> None: pytest.raises(WouldBlock, lock.acquire_nowait) lock = Lock() async with lock, create_task_group() as tg: assert lock.locked() tg.start_soon(try_lock) @pytest.mark.parametrize('release_first', [ pytest.param(False, id='releaselast'), pytest.param(True, id='releasefirst') ]) async def test_cancel_during_acquire(self, release_first: bool) -> None: acquired = False async def task(*, task_status: TaskStatus) -> None: nonlocal acquired task_status.started() async with lock: acquired = True lock = Lock() async with create_task_group() as tg: await lock.acquire() await tg.start(task) tg.cancel_scope.cancel() with CancelScope(shield=True): if release_first: lock.release() await wait_all_tasks_blocked() else: await wait_all_tasks_blocked() lock.release() assert not acquired assert not lock.locked() async def test_statistics(self) -> None: async def waiter() -> None: async with lock: pass lock = Lock() async with create_task_group() as tg: assert not lock.statistics().locked assert lock.statistics().tasks_waiting == 0 async with lock: assert lock.statistics().locked assert lock.statistics().tasks_waiting == 0 for i in range(1, 3): tg.start_soon(waiter) await wait_all_tasks_blocked() assert lock.statistics().tasks_waiting == i assert not lock.statistics().locked assert lock.statistics().tasks_waiting == 0 @pytest.mark.parametrize('anyio_backend', ['asyncio']) async def test_asyncio_deadlock(self) -> None: """Regression test for #398.""" lock = Lock() async def acquire() -> None: async with lock: await asyncio.sleep(0) loop = asyncio.get_event_loop() task1 = loop.create_task(acquire()) task2 = loop.create_task(acquire()) await asyncio.sleep(0) task1.cancel() await asyncio.wait_for(task2, 1) class TestEvent: async def test_event(self) -> None: async def setter() -> None: assert not event.is_set() event.set() event = Event() async with create_task_group() as tg: tg.start_soon(setter) await event.wait() assert event.is_set() async def test_event_cancel(self) -> None: task_started = event_set = False async def task() -> None: nonlocal task_started, event_set task_started = True await event.wait() event_set = True event = Event() async with create_task_group() as tg: tg.start_soon(task) tg.cancel_scope.cancel() event.set() assert task_started assert not event_set async def test_statistics(self) -> None: async def waiter() -> None: await event.wait() event = Event() async with create_task_group() as tg: assert event.statistics().tasks_waiting == 0 for i in range(1, 3): tg.start_soon(waiter) await wait_all_tasks_blocked() assert event.statistics().tasks_waiting == i event.set() assert event.statistics().tasks_waiting == 0 class TestCondition: async def test_contextmanager(self) -> None: async def notifier() -> None: async with condition: condition.notify_all() condition = Condition() async with create_task_group() as tg: async with condition: assert condition.locked() tg.start_soon(notifier) await condition.wait() async def test_manual_acquire(self) -> None: async def notifier() -> None: await condition.acquire() try: condition.notify_all() finally: condition.release() condition = Condition() async with create_task_group() as tg: await condition.acquire() try: assert condition.locked() tg.start_soon(notifier) await condition.wait() finally: condition.release() async def test_acquire_nowait(self) -> None: condition = Condition() condition.acquire_nowait() assert condition.locked() async def test_acquire_nowait_wouldblock(self) -> None: async def try_lock() -> None: pytest.raises(WouldBlock, 
condition.acquire_nowait) condition = Condition() async with condition, create_task_group() as tg: assert condition.locked() tg.start_soon(try_lock) async def test_wait_cancel(self) -> None: task_started = notified = False async def task() -> None: nonlocal task_started, notified task_started = True async with condition: event.set() await condition.wait() notified = True event = Event() condition = Condition() async with create_task_group() as tg: tg.start_soon(task) await event.wait() await wait_all_tasks_blocked() tg.cancel_scope.cancel() assert task_started assert not notified async def test_statistics(self) -> None: async def waiter() -> None: async with condition: await condition.wait() condition = Condition() async with create_task_group() as tg: assert not condition.statistics().lock_statistics.locked assert condition.statistics().tasks_waiting == 0 async with condition: assert condition.statistics().lock_statistics.locked assert condition.statistics().tasks_waiting == 0 for i in range(1, 3): tg.start_soon(waiter) await wait_all_tasks_blocked() assert condition.statistics().tasks_waiting == i for i in range(1, -1, -1): async with condition: condition.notify(1) await wait_all_tasks_blocked() assert condition.statistics().tasks_waiting == i assert not condition.statistics().lock_statistics.locked assert condition.statistics().tasks_waiting == 0 class TestSemaphore: async def test_contextmanager(self) -> None: async def acquire() -> None: async with semaphore: assert semaphore.value in (0, 1) semaphore = Semaphore(2) async with create_task_group() as tg: tg.start_soon(acquire, name='task 1') tg.start_soon(acquire, name='task 2') assert semaphore.value == 2 async def test_manual_acquire(self) -> None: async def acquire() -> None: await semaphore.acquire() try: assert semaphore.value in (0, 1) finally: semaphore.release() semaphore = Semaphore(2) async with create_task_group() as tg: tg.start_soon(acquire, name='task 1') tg.start_soon(acquire, name='task 2') assert semaphore.value == 2 async def test_acquire_nowait(self) -> None: semaphore = Semaphore(1) semaphore.acquire_nowait() assert semaphore.value == 0 pytest.raises(WouldBlock, semaphore.acquire_nowait) @pytest.mark.parametrize('release_first', [ pytest.param(False, id='releaselast'), pytest.param(True, id='releasefirst') ]) async def test_cancel_during_acquire(self, release_first: bool) -> None: acquired = False async def task(*, task_status: TaskStatus) -> None: nonlocal acquired task_status.started() async with semaphore: acquired = True semaphore = Semaphore(1) async with create_task_group() as tg: await semaphore.acquire() await tg.start(task) tg.cancel_scope.cancel() with CancelScope(shield=True): if release_first: semaphore.release() await wait_all_tasks_blocked() else: await wait_all_tasks_blocked() semaphore.release() assert not acquired assert semaphore.value == 1 @pytest.mark.parametrize('max_value', [2, None]) async def test_max_value(self, max_value: Optional[int]) -> None: semaphore = Semaphore(0, max_value=max_value) assert semaphore.max_value == max_value async def test_max_value_exceeded(self) -> None: semaphore = Semaphore(1, max_value=2) semaphore.release() pytest.raises(ValueError, semaphore.release) async def test_statistics(self) -> None: async def waiter() -> None: async with semaphore: pass semaphore = Semaphore(1) async with create_task_group() as tg: assert semaphore.statistics().tasks_waiting == 0 async with semaphore: assert semaphore.statistics().tasks_waiting == 0 for i in range(1, 3): 
tg.start_soon(waiter) await wait_all_tasks_blocked() assert semaphore.statistics().tasks_waiting == i assert semaphore.statistics().tasks_waiting == 0 async def test_acquire_race(self) -> None: """ Test against a race condition: when a task waiting on acquire() is rescheduled but another task snatches the last available slot, the task should not raise WouldBlock. """ semaphore = Semaphore(1) async with create_task_group() as tg: semaphore.acquire_nowait() tg.start_soon(semaphore.acquire) await wait_all_tasks_blocked() semaphore.release() pytest.raises(WouldBlock, semaphore.acquire_nowait) @pytest.mark.parametrize('anyio_backend', ['asyncio']) async def test_asyncio_deadlock(self) -> None: """Regression test for #398.""" semaphore = Semaphore(1) async def acquire() -> None: async with semaphore: await asyncio.sleep(0) loop = asyncio.get_event_loop() task1 = loop.create_task(acquire()) task2 = loop.create_task(acquire()) await asyncio.sleep(0) task1.cancel() await asyncio.wait_for(task2, 1) class TestCapacityLimiter: async def test_bad_init_type(self) -> None: pytest.raises(TypeError, CapacityLimiter, 1.0).\ match('total_tokens must be an int or math.inf') async def test_bad_init_value(self) -> None: pytest.raises(ValueError, CapacityLimiter, 0).\ match('total_tokens must be >= 1') async def test_borrow(self) -> None: limiter = CapacityLimiter(2) assert limiter.total_tokens == 2 assert limiter.available_tokens == 2 assert limiter.borrowed_tokens == 0 async with limiter: assert limiter.total_tokens == 2 assert limiter.available_tokens == 1 assert limiter.borrowed_tokens == 1 async def test_limit(self) -> None: value = 0 async def taskfunc() -> None: nonlocal value for _ in range(5): async with limiter: assert value == 0 value = 1 await wait_all_tasks_blocked() value = 0 limiter = CapacityLimiter(1) async with create_task_group() as tg: for _ in range(3): tg.start_soon(taskfunc) async def test_borrow_twice(self) -> None: limiter = CapacityLimiter(1) await limiter.acquire() with pytest.raises(RuntimeError) as exc: await limiter.acquire() exc.match("this borrower is already holding one of this CapacityLimiter's tokens") async def test_bad_release(self) -> None: limiter = CapacityLimiter(1) with pytest.raises(RuntimeError) as exc: limiter.release() exc.match("this borrower isn't holding any of this CapacityLimiter's tokens") async def test_increase_tokens(self) -> None: async def setter() -> None: # Wait until waiter() is inside the limiter block await event1.wait() async with limiter: # This can only happen when total_tokens has been increased event2.set() async def waiter() -> None: async with limiter: event1.set() await event2.wait() limiter = CapacityLimiter(1) event1, event2 = Event(), Event() async with create_task_group() as tg: tg.start_soon(setter) tg.start_soon(waiter) await wait_all_tasks_blocked() assert event1.is_set() assert not event2.is_set() limiter.total_tokens = 2 assert event2.is_set() async def test_current_default_thread_limiter(self) -> None: limiter = to_thread.current_default_thread_limiter() assert isinstance(limiter, CapacityLimiter) assert limiter.total_tokens == 40 async def test_statistics(self) -> None: async def waiter() -> None: async with limiter: pass limiter = CapacityLimiter(1) assert limiter.statistics().total_tokens == 1 assert limiter.statistics().borrowed_tokens == 0 assert limiter.statistics().tasks_waiting == 0 async with create_task_group() as tg: async with limiter: assert limiter.statistics().borrowed_tokens == 1 assert 
limiter.statistics().tasks_waiting == 0 for i in range(1, 3): tg.start_soon(waiter) await wait_all_tasks_blocked() assert limiter.statistics().tasks_waiting == i assert limiter.statistics().tasks_waiting == 0 assert limiter.statistics().borrowed_tokens == 0 @pytest.mark.parametrize('anyio_backend', ['asyncio']) async def test_asyncio_deadlock(self) -> None: """Regression test for #398.""" limiter = CapacityLimiter(1) async def acquire() -> None: async with limiter: await asyncio.sleep(0) loop = asyncio.get_event_loop() task1 = loop.create_task(acquire()) task2 = loop.create_task(acquire()) await asyncio.sleep(0) task1.cancel() await asyncio.wait_for(task2, 1) anyio-3.5.0/tests/test_taskgroups.py000066400000000000000000000734541416724134300176350ustar00rootroot00000000000000import asyncio import re import sys import time from typing import Any, AsyncGenerator, Coroutine, Dict, Generator, NoReturn, Optional, Set import pytest import anyio from anyio import ( CancelScope, ExceptionGroup, create_task_group, current_effective_deadline, current_time, fail_after, get_cancelled_exc_class, get_current_task, move_on_after, sleep, wait_all_tasks_blocked) from anyio.abc import TaskGroup, TaskStatus from anyio.lowlevel import checkpoint if sys.version_info < (3, 7): current_task = asyncio.Task.current_task else: current_task = asyncio.current_task pytestmark = pytest.mark.anyio async def async_error(text: str, delay: float = 0.1) -> NoReturn: try: if delay: await sleep(delay) finally: raise Exception(text) async def test_already_closed() -> None: async with create_task_group() as tg: pass with pytest.raises(RuntimeError) as exc: tg.start_soon(async_error, 'fail') exc.match('This task group is not active; no new tasks can be started') async def test_success() -> None: async def async_add(value: str) -> None: results.add(value) results: Set[str] = set() async with create_task_group() as tg: tg.start_soon(async_add, 'a') tg.start_soon(async_add, 'b') assert results == {'a', 'b'} @pytest.mark.parametrize('module', [ pytest.param(asyncio, id='asyncio'), pytest.param(pytest.importorskip('trio'), id='trio') ]) def test_run_natively(module: Any) -> None: async def testfunc() -> None: async with create_task_group() as tg: tg.start_soon(sleep, 0) if module is asyncio: from anyio._backends._asyncio import native_run # type: ignore[attr-defined] try: native_run(testfunc()) finally: asyncio.set_event_loop(None) else: module.run(testfunc) async def test_start_soon_while_running() -> None: async def task_func() -> None: tg.start_soon(sleep, 0) async with create_task_group() as tg: tg.start_soon(task_func) async def test_start_soon_after_error() -> None: with pytest.raises(ZeroDivisionError): async with create_task_group() as tg: a = 1 / 0 # noqa: F841 with pytest.raises(RuntimeError) as exc: tg.start_soon(sleep, 0) exc.match('This task group is not active; no new tasks can be started') async def test_start_no_value() -> None: async def taskfunc(*, task_status: TaskStatus) -> None: task_status.started() async with create_task_group() as tg: value = await tg.start(taskfunc) assert value is None async def test_start_called_twice() -> None: async def taskfunc(*, task_status: TaskStatus) -> None: task_status.started() with pytest.raises(RuntimeError, match="called 'started' twice on the same task status"): task_status.started() async with create_task_group() as tg: value = await tg.start(taskfunc) assert value is None async def test_start_with_value() -> None: async def taskfunc(*, task_status: TaskStatus) -> None: 
task_status.started('foo') async with create_task_group() as tg: value = await tg.start(taskfunc) assert value == 'foo' async def test_start_crash_before_started_call() -> None: async def taskfunc(*, task_status: TaskStatus) -> NoReturn: raise Exception('foo') async with create_task_group() as tg: with pytest.raises(Exception) as exc: await tg.start(taskfunc) exc.match('foo') async def test_start_crash_after_started_call() -> None: async def taskfunc(*, task_status: TaskStatus) -> NoReturn: task_status.started(2) raise Exception('foo') with pytest.raises(Exception) as exc: async with create_task_group() as tg: value = await tg.start(taskfunc) exc.match('foo') assert value == 2 async def test_start_no_started_call() -> None: async def taskfunc(*, task_status: TaskStatus) -> None: pass async with create_task_group() as tg: with pytest.raises(RuntimeError) as exc: await tg.start(taskfunc) exc.match('hild exited') async def test_start_cancelled() -> None: started = finished = False async def taskfunc(*, task_status: TaskStatus) -> None: nonlocal started, finished started = True await sleep(2) finished = True async with create_task_group() as tg: tg.cancel_scope.cancel() await tg.start(taskfunc) assert started assert not finished @pytest.mark.parametrize('anyio_backend', ['asyncio']) async def test_start_native_host_cancelled() -> None: started = finished = False async def taskfunc(*, task_status: TaskStatus) -> None: nonlocal started, finished started = True await sleep(2) finished = True async def start_another() -> None: async with create_task_group() as tg: await tg.start(taskfunc) task = asyncio.get_event_loop().create_task(start_another()) await wait_all_tasks_blocked() task.cancel() with pytest.raises(asyncio.CancelledError): await task assert started assert not finished @pytest.mark.parametrize('anyio_backend', ['asyncio']) async def test_start_native_child_cancelled() -> None: task = None finished = False async def taskfunc(*, task_status: TaskStatus) -> None: nonlocal task, finished task = current_task() await sleep(2) finished = True async def start_another() -> None: async with create_task_group() as tg2: await tg2.start(taskfunc) async with create_task_group() as tg: tg.start_soon(start_another) await wait_all_tasks_blocked() assert task is not None task.cancel() assert not finished async def test_start_exception_delivery() -> None: def task_fn(*, task_status: TaskStatus) -> None: task_status.started("hello") async with anyio.create_task_group() as tg: with pytest.raises(TypeError, match='to be synchronous$'): await tg.start(task_fn) # type: ignore[arg-type] async def test_host_exception() -> None: result = None async def set_result(value: str) -> None: nonlocal result await sleep(3) result = value with pytest.raises(Exception) as exc: async with create_task_group() as tg: tg.start_soon(set_result, 'a') raise Exception('dummy error') exc.match('dummy error') assert result is None async def test_level_cancellation() -> None: marker = None async def dummy() -> None: nonlocal marker marker = 1 # At this point the task has been cancelled so sleep() will raise an exception await sleep(0) # Execution should never get this far marker = 2 async with create_task_group() as tg: tg.start_soon(dummy) assert marker is None tg.cancel_scope.cancel() assert marker == 1 async def test_failing_child_task_cancels_host() -> None: async def child() -> NoReturn: await wait_all_tasks_blocked() raise Exception('foo') sleep_completed = False with pytest.raises(Exception) as exc: async with 
create_task_group() as tg: tg.start_soon(child) await sleep(0.5) sleep_completed = True exc.match('foo') assert not sleep_completed async def test_failing_host_task_cancels_children() -> None: sleep_completed = False async def child() -> None: nonlocal sleep_completed await sleep(1) sleep_completed = True with pytest.raises(Exception) as exc: async with create_task_group() as tg: tg.start_soon(child) await wait_all_tasks_blocked() raise Exception('foo') exc.match('foo') assert not sleep_completed async def test_cancel_scope_in_another_task() -> None: local_scope = None result = False async def child() -> None: nonlocal result, local_scope with CancelScope() as local_scope: await sleep(2) result = True async with create_task_group() as tg: tg.start_soon(child) while local_scope is None: await sleep(0) local_scope.cancel() assert not result async def test_cancel_propagation() -> None: async def g() -> NoReturn: async with create_task_group(): await sleep(1) assert False async with create_task_group() as tg: tg.start_soon(g) await sleep(0) tg.cancel_scope.cancel() async def test_cancel_twice() -> None: """Test that the same task can receive two cancellations.""" async def cancel_group() -> None: await wait_all_tasks_blocked() tg.cancel_scope.cancel() for _ in range(2): async with create_task_group() as tg: tg.start_soon(cancel_group) await sleep(1) pytest.fail('Execution should not reach this point') async def test_cancel_exiting_task_group() -> None: """ Test that if a task group is waiting for subtasks to finish and it receives a cancellation, the subtasks are also cancelled and the waiting continues. """ cancel_received = False async def waiter() -> None: nonlocal cancel_received try: await sleep(5) finally: cancel_received = True async def subgroup() -> None: async with create_task_group() as tg2: tg2.start_soon(waiter) async with create_task_group() as tg: tg.start_soon(subgroup) await wait_all_tasks_blocked() tg.cancel_scope.cancel() assert cancel_received async def test_exception_group_children() -> None: with pytest.raises(ExceptionGroup) as exc: async with create_task_group() as tg: tg.start_soon(async_error, 'task1') tg.start_soon(async_error, 'task2', 0.15) assert len(exc.value.exceptions) == 2 assert sorted(str(e) for e in exc.value.exceptions) == ['task1', 'task2'] assert exc.match('^2 exceptions were raised in the task group:\n') assert exc.match(r'Exception: task\d\n----') assert re.fullmatch( r"", repr(exc.value)) async def test_exception_group_host() -> None: with pytest.raises(ExceptionGroup) as exc: async with create_task_group() as tg: tg.start_soon(async_error, 'child', 2) await wait_all_tasks_blocked() raise Exception('host') assert len(exc.value.exceptions) == 2 assert sorted(str(e) for e in exc.value.exceptions) == ['child', 'host'] assert exc.match('^2 exceptions were raised in the task group:\n') assert exc.match(r'Exception: host\n----') async def test_escaping_cancelled_exception() -> None: async with create_task_group() as tg: tg.cancel_scope.cancel() await sleep(0) async def test_cancel_scope_cleared() -> None: with move_on_after(0.1): await sleep(1) await sleep(0) @pytest.mark.parametrize('delay', [0, 0.1], ids=['instant', 'delayed']) async def test_fail_after(delay: float) -> None: with pytest.raises(TimeoutError): with fail_after(delay) as scope: await sleep(1) assert scope.cancel_called async def test_fail_after_no_timeout() -> None: with fail_after(None) as scope: assert scope.deadline == float('inf') await sleep(0.1) assert not scope.cancel_called async def 
test_fail_after_after_cancellation() -> None: event = anyio.Event() async with anyio.create_task_group() as tg: tg.cancel_scope.cancel() await event.wait() block_complete = False with pytest.raises(TimeoutError): with fail_after(0.1): await anyio.sleep(0.5) block_complete = True assert not block_complete @pytest.mark.parametrize('delay', [0, 0.1], ids=['instant', 'delayed']) async def test_move_on_after(delay: float) -> None: result = False with move_on_after(delay) as scope: await sleep(1) result = True assert not result assert scope.cancel_called async def test_move_on_after_no_timeout() -> None: result = False with move_on_after(None) as scope: assert scope.deadline == float('inf') await sleep(0.1) result = True assert result assert not scope.cancel_called async def test_nested_move_on_after() -> None: sleep_completed = inner_scope_completed = False with move_on_after(0.1) as outer_scope: assert current_effective_deadline() == outer_scope.deadline with move_on_after(1) as inner_scope: assert current_effective_deadline() == outer_scope.deadline await sleep(2) sleep_completed = True inner_scope_completed = True assert not sleep_completed assert not inner_scope_completed assert outer_scope.cancel_called assert not inner_scope.cancel_called async def test_shielding() -> None: async def cancel_when_ready() -> None: await wait_all_tasks_blocked() tg.cancel_scope.cancel() inner_sleep_completed = outer_sleep_completed = False async with create_task_group() as tg: tg.start_soon(cancel_when_ready) with move_on_after(10, shield=True) as inner_scope: assert inner_scope.shield await sleep(0.1) inner_sleep_completed = True await sleep(1) outer_sleep_completed = True assert inner_sleep_completed assert not outer_sleep_completed assert tg.cancel_scope.cancel_called assert not inner_scope.cancel_called async def test_cancel_from_shielded_scope() -> None: async with create_task_group() as tg: with CancelScope(shield=True) as inner_scope: assert inner_scope.shield tg.cancel_scope.cancel() with pytest.raises(get_cancelled_exc_class()): await sleep(0.01) with pytest.raises(get_cancelled_exc_class()): await sleep(0.01) @pytest.mark.parametrize('anyio_backend', ['asyncio']) async def test_cancel_host_asyncgen() -> None: done = False async def host_task() -> None: nonlocal done async with create_task_group() as tg: with CancelScope(shield=True) as inner_scope: assert inner_scope.shield tg.cancel_scope.cancel() with pytest.raises(get_cancelled_exc_class()): await sleep(0) with pytest.raises(get_cancelled_exc_class()): await sleep(0) done = True async def host_agen_fn() -> AsyncGenerator[None, None]: await host_task() yield pytest.fail("host_agen_fn should only be __anext__ed once") host_agen = host_agen_fn() try: await asyncio.get_event_loop().create_task(host_agen.__anext__()) finally: await host_agen.aclose() assert done async def test_shielding_immediate_scope_cancelled() -> None: async def cancel_when_ready() -> None: await wait_all_tasks_blocked() scope.cancel() sleep_completed = False async with create_task_group() as tg: with CancelScope(shield=True) as scope: tg.start_soon(cancel_when_ready) await sleep(0.5) sleep_completed = True assert not sleep_completed async def test_shielding_mutate() -> None: completed = False async def task(task_status: TaskStatus) -> NoReturn: nonlocal completed with CancelScope() as scope: # Enable the shield a little after the scope starts to make this test # general, even though it has no bearing on the current implementation. 
await sleep(.1) scope.shield = True task_status.started() await sleep(.1) completed = True scope.shield = False await sleep(1) pytest.fail('Execution should not reach this point') async with create_task_group() as tg: await tg.start(task) tg.cancel_scope.cancel() assert completed async def test_cancel_scope_in_child_task() -> None: child_scope = None async def child() -> None: nonlocal child_scope with CancelScope() as child_scope: await sleep(2) host_done = False async with create_task_group() as tg: tg.start_soon(child) await wait_all_tasks_blocked() assert child_scope is not None child_scope.cancel() await sleep(0.1) host_done = True assert host_done assert not tg.cancel_scope.cancel_called async def test_exception_cancels_siblings() -> None: sleep_completed = False async def child(fail: bool) -> None: if fail: raise Exception('foo') else: nonlocal sleep_completed await sleep(1) sleep_completed = True with pytest.raises(Exception) as exc: async with create_task_group() as tg: tg.start_soon(child, False) await wait_all_tasks_blocked() tg.start_soon(child, True) exc.match('foo') assert not sleep_completed async def test_cancel_cascade() -> None: async def do_something() -> NoReturn: async with create_task_group() as tg2: tg2.start_soon(sleep, 1) raise Exception('foo') async with create_task_group() as tg: tg.start_soon(do_something) await wait_all_tasks_blocked() tg.cancel_scope.cancel() async def test_cancelled_parent() -> None: async def child() -> NoReturn: with CancelScope(): await sleep(1) raise Exception('foo') async def parent(tg: TaskGroup) -> None: await wait_all_tasks_blocked() tg.start_soon(child) async with create_task_group() as tg: tg.start_soon(parent, tg) tg.cancel_scope.cancel() async def test_shielded_deadline() -> None: with move_on_after(10): with CancelScope(shield=True): with move_on_after(1000): assert current_effective_deadline() - current_time() > 900 async def test_deadline_reached_on_start() -> None: with move_on_after(0): await sleep(0) pytest.fail('Execution should not reach this point') async def test_deadline_moved() -> None: with fail_after(0.1) as scope: scope.deadline += 0.3 await sleep(0.2) async def test_timeout_error_with_multiple_cancellations() -> None: with pytest.raises(TimeoutError): with fail_after(0.1): async with create_task_group() as tg: tg.start_soon(sleep, 2) await sleep(2) async def test_nested_fail_after() -> None: async def killer(scope: CancelScope) -> None: await wait_all_tasks_blocked() scope.cancel() async with create_task_group() as tg: with CancelScope() as scope: with CancelScope(): tg.start_soon(killer, scope) with fail_after(1): await sleep(2) pytest.fail('Execution should not reach this point') pytest.fail('Execution should not reach this point either') pytest.fail('Execution should also not reach this point') assert scope.cancel_called async def test_nested_shield() -> None: async def killer(scope: CancelScope) -> None: await wait_all_tasks_blocked() scope.cancel() with pytest.raises(TimeoutError): async with create_task_group() as tg: with CancelScope() as scope: with CancelScope(shield=True): tg.start_soon(killer, scope) with fail_after(0.2): await sleep(2) async def test_triple_nested_shield() -> None: """Regression test for #370.""" got_past_checkpoint = False async def taskfunc() -> None: nonlocal got_past_checkpoint with CancelScope() as scope1: with CancelScope() as scope2: with CancelScope(shield=True): scope1.cancel() scope2.cancel() await checkpoint() got_past_checkpoint = True async with create_task_group() as tg: 
tg.start_soon(taskfunc) assert not got_past_checkpoint def test_task_group_in_generator(anyio_backend_name: str, anyio_backend_options: Dict[str, Any]) -> None: async def task_group_generator() -> AsyncGenerator[None, None]: async with create_task_group(): yield gen = task_group_generator() anyio.run(gen.__anext__, backend=anyio_backend_name, # type: ignore[arg-type] backend_options=anyio_backend_options) pytest.raises(StopAsyncIteration, anyio.run, gen.__anext__, backend=anyio_backend_name, backend_options=anyio_backend_options) async def test_exception_group_filtering() -> None: """Test that CancelledErrors are filtered out of nested exception groups.""" async def fail(name: str) -> NoReturn: try: await anyio.sleep(.1) finally: raise Exception('%s task failed' % name) async def fn() -> None: async with anyio.create_task_group() as tg: tg.start_soon(fail, 'parent') async with anyio.create_task_group() as tg2: tg2.start_soon(fail, 'child') await anyio.sleep(1) with pytest.raises(ExceptionGroup) as exc: await fn() assert len(exc.value.exceptions) == 2 assert str(exc.value.exceptions[0]) == 'parent task failed' assert str(exc.value.exceptions[1]) == 'child task failed' async def test_cancel_propagation_with_inner_spawn() -> None: async def g() -> NoReturn: async with anyio.create_task_group() as tg2: tg2.start_soon(anyio.sleep, 10) await anyio.sleep(1) assert False async with anyio.create_task_group() as tg: tg.start_soon(g) await wait_all_tasks_blocked() tg.cancel_scope.cancel() async def test_escaping_cancelled_error_from_cancelled_task() -> None: """Regression test for issue #88. No CancelledError should escape the outer scope.""" with CancelScope() as scope: with move_on_after(0.1): await sleep(1) scope.cancel() @pytest.mark.filterwarnings('ignore:"@coroutine" decorator is deprecated:DeprecationWarning') def test_cancel_generator_based_task() -> None: from asyncio import coroutine async def native_coro_part() -> None: with CancelScope() as scope: scope.cancel() @coroutine def generator_part() -> Generator[object, BaseException, None]: yield from native_coro_part() anyio.run(generator_part, backend='asyncio') # type: ignore[arg-type] async def test_suppress_exception_context() -> None: """ Test that the __context__ attribute has been cleared when the exception is re-raised in the exception group. This prevents recursive tracebacks. 
""" with pytest.raises(ValueError) as exc: async with create_task_group() as tg: tg.cancel_scope.cancel() async with create_task_group() as tg2: tg2.start_soon(sleep, 1) raise ValueError assert exc.value.__context__ is None @pytest.mark.parametrize('anyio_backend', ['asyncio']) async def test_cancel_native_future_tasks() -> None: async def wait_native_future() -> None: loop = asyncio.get_event_loop() await loop.create_future() async with anyio.create_task_group() as tg: tg.start_soon(wait_native_future) tg.cancel_scope.cancel() @pytest.mark.parametrize('anyio_backend', ['asyncio']) async def test_cancel_native_future_tasks_cancel_scope() -> None: async def wait_native_future() -> None: with anyio.CancelScope(): loop = asyncio.get_event_loop() await loop.create_future() async with anyio.create_task_group() as tg: tg.start_soon(wait_native_future) tg.cancel_scope.cancel() @pytest.mark.parametrize('anyio_backend', ['asyncio']) async def test_cancel_completed_task() -> None: loop = asyncio.get_event_loop() old_exception_handler = loop.get_exception_handler() exceptions = [] def exception_handler(*args: object, **kwargs: object) -> None: exceptions.append((args, kwargs)) loop.set_exception_handler(exception_handler) try: async def noop() -> None: pass async with anyio.create_task_group() as tg: tg.start_soon(noop) tg.cancel_scope.cancel() assert exceptions == [] finally: loop.set_exception_handler(old_exception_handler) async def test_task_in_sync_spawn_callback() -> None: outer_task_id = anyio.get_current_task().id inner_task_id = None def task_wrap() -> Coroutine[object, object, None]: assert anyio.get_current_task().id == outer_task_id async def corofn() -> None: nonlocal inner_task_id inner_task_id = anyio.get_current_task().id return corofn() async with create_task_group() as tg: tg.start_soon(task_wrap) assert inner_task_id is not None assert inner_task_id != outer_task_id async def test_shielded_cancel_sleep_time() -> None: """Test that cancelling a shielded tasks spends more time sleeping than cancelling.""" event = anyio.Event() hang_time = 0.2 async def set_event() -> None: await sleep(hang_time) event.set() async def never_cancel_task() -> None: with CancelScope(shield=True): await sleep(0.2) await event.wait() async with create_task_group() as tg: tg.start_soon(set_event) async with create_task_group() as tg: tg.start_soon(never_cancel_task) tg.cancel_scope.cancel() process_time = time.process_time() assert (time.process_time() - process_time) < hang_time async def test_cancelscope_wrong_exit_order() -> None: """ Test that a RuntimeError is raised if the task tries to exit cancel scopes in the wrong order. 
""" scope1 = CancelScope() scope2 = CancelScope() scope1.__enter__() scope2.__enter__() pytest.raises(RuntimeError, scope1.__exit__, None, None, None) async def test_cancelscope_exit_before_enter() -> None: """Test that a RuntimeError is raised if one tries to exit a cancel scope before entering.""" scope = CancelScope() pytest.raises(RuntimeError, scope.__exit__, None, None, None) @pytest.mark.parametrize('anyio_backend', ['asyncio']) # trio does not check for this yet async def test_cancelscope_exit_in_wrong_task() -> None: async def enter_scope(scope: CancelScope) -> None: scope.__enter__() async def exit_scope(scope: CancelScope) -> None: scope.__exit__(None, None, None) scope = CancelScope() async with create_task_group() as tg: tg.start_soon(enter_scope, scope) with pytest.raises(RuntimeError): async with create_task_group() as tg: tg.start_soon(exit_scope, scope) def test_unhandled_exception_group(caplog: pytest.LogCaptureFixture) -> None: def crash() -> NoReturn: raise KeyboardInterrupt async def nested() -> None: async with anyio.create_task_group() as tg: tg.start_soon(anyio.sleep, 5) await anyio.sleep(5) async def main() -> NoReturn: async with anyio.create_task_group() as tg: tg.start_soon(nested) await wait_all_tasks_blocked() asyncio.get_event_loop().call_soon(crash) await anyio.sleep(5) pytest.fail('Execution should never reach this point') with pytest.raises(KeyboardInterrupt): anyio.run(main, backend='asyncio') assert not caplog.messages @pytest.mark.skipif(sys.version_info < (3, 9), reason='Cancel messages are only supported on py3.9+') @pytest.mark.parametrize('anyio_backend', ['asyncio']) async def test_cancellederror_combination_with_message() -> None: async def taskfunc(*, task_status: TaskStatus) -> NoReturn: task_status.started(asyncio.current_task()) await sleep(5) pytest.fail('Execution should never reach this point') with pytest.raises(asyncio.CancelledError, match='blah'): async with create_task_group() as tg: task = await tg.start(taskfunc) tg.start_soon(sleep, 5) await wait_all_tasks_blocked() assert isinstance(task, asyncio.Task) task.cancel('blah') async def test_start_soon_parent_id() -> None: root_task_id = get_current_task().id parent_id: Optional[int] = None async def subtask() -> None: nonlocal parent_id parent_id = get_current_task().parent_id async def starter_task() -> None: tg.start_soon(subtask) async with anyio.create_task_group() as tg: tg.start_soon(starter_task) assert parent_id == root_task_id async def test_start_parent_id() -> None: root_task_id = get_current_task().id starter_task_id: Optional[int] = None initial_parent_id: Optional[int] = None permanent_parent_id: Optional[int] = None async def subtask(*, task_status: TaskStatus) -> None: nonlocal initial_parent_id, permanent_parent_id initial_parent_id = get_current_task().parent_id task_status.started() permanent_parent_id = get_current_task().parent_id async def starter_task() -> None: nonlocal starter_task_id starter_task_id = get_current_task().id await tg.start(subtask) async with anyio.create_task_group() as tg: tg.start_soon(starter_task) assert initial_parent_id != permanent_parent_id assert initial_parent_id == starter_task_id assert permanent_parent_id == root_task_id anyio-3.5.0/tests/test_to_process.py000066400000000000000000000052371416724134300176050ustar00rootroot00000000000000import os import platform import sys import time from functools import partial import pytest from anyio import CancelScope, create_task_group, fail_after, to_process, wait_all_tasks_blocked pytestmark 
= pytest.mark.anyio


@pytest.fixture(autouse=True)
def check_compatibility(anyio_backend_name: str) -> None:
    if anyio_backend_name == 'asyncio':
        if platform.system() == 'Windows' and sys.version_info < (3, 8):
            pytest.skip('Python < 3.8 uses SelectorEventLoop by default and it does not support '
                        'subprocesses')


async def test_run_sync_in_process_pool() -> None:
    """
    Test that the function runs in a different process, and the same process in both calls.

    """
    worker_pid = await to_process.run_sync(os.getpid)
    assert worker_pid != os.getpid()
    assert await to_process.run_sync(os.getpid) == worker_pid


async def test_identical_sys_path() -> None:
    """Test that the worker process sees the same sys.path as the parent."""
    assert await to_process.run_sync(eval, 'sys.path') == sys.path


async def test_partial() -> None:
    """Test that partial() can be used to pass keyword arguments."""
    assert await to_process.run_sync(partial(sorted, reverse=True), ['a', 'b']) == ['b', 'a']


async def test_exception() -> None:
    """Test that exceptions are delivered properly."""
    with pytest.raises(ValueError, match='invalid literal for int'):
        assert await to_process.run_sync(int, 'a')


async def test_print() -> None:
    """Test that print() won't interfere with parent-worker communication."""
    worker_pid = await to_process.run_sync(os.getpid)
    await to_process.run_sync(print, 'hello')
    await to_process.run_sync(print, 'world')
    assert await to_process.run_sync(os.getpid) == worker_pid


async def test_cancel_before() -> None:
    """
    Test that starting to_process.run_sync() in a cancelled scope does not cause a worker process
    to be reserved.

    """
    with CancelScope() as scope:
        scope.cancel()
        await to_process.run_sync(os.getpid)

    pytest.raises(LookupError, to_process._process_pool_workers.get)


async def test_cancel_during() -> None:
    """
    Test that cancelling an operation on the worker process causes the process to be killed.
""" worker_pid = await to_process.run_sync(os.getpid) with fail_after(4): async with create_task_group() as tg: tg.start_soon(partial(to_process.run_sync, cancellable=True), time.sleep, 5) await wait_all_tasks_blocked() tg.cancel_scope.cancel() # The previous worker was killed so we should get a new one now assert await to_process.run_sync(os.getpid) != worker_pid anyio-3.5.0/tests/test_to_thread.py000066400000000000000000000167361416724134300174040ustar00rootroot00000000000000import asyncio import sys import threading import time from concurrent.futures import Future from contextvars import ContextVar from functools import partial from typing import Any, List, NoReturn, Optional import pytest import sniffio import anyio.to_thread from anyio import ( CapacityLimiter, Event, create_task_group, from_thread, sleep, to_thread, wait_all_tasks_blocked) if sys.version_info < (3, 7): current_task = asyncio.Task.current_task else: current_task = asyncio.current_task pytestmark = pytest.mark.anyio async def test_run_in_thread_cancelled() -> None: state = 0 def thread_worker() -> None: nonlocal state state = 2 async def worker() -> None: nonlocal state state = 1 await to_thread.run_sync(thread_worker) state = 3 async with create_task_group() as tg: tg.start_soon(worker) tg.cancel_scope.cancel() assert state == 1 async def test_run_in_thread_exception() -> None: def thread_worker() -> NoReturn: raise ValueError('foo') with pytest.raises(ValueError) as exc: await to_thread.run_sync(thread_worker) exc.match('^foo$') async def test_run_in_custom_limiter() -> None: num_active_threads = max_active_threads = 0 def thread_worker() -> None: nonlocal num_active_threads, max_active_threads num_active_threads += 1 max_active_threads = max(num_active_threads, max_active_threads) event.wait(1) num_active_threads -= 1 async def task_worker() -> None: await to_thread.run_sync(thread_worker, limiter=limiter) event = threading.Event() limiter = CapacityLimiter(3) async with create_task_group() as tg: for _ in range(4): tg.start_soon(task_worker) await sleep(0.1) assert num_active_threads == 3 assert limiter.borrowed_tokens == 3 event.set() assert num_active_threads == 0 assert max_active_threads == 3 @pytest.mark.parametrize('cancellable, expected_last_active', [ (False, 'task'), (True, 'thread') ], ids=['uncancellable', 'cancellable']) async def test_cancel_worker_thread(cancellable: bool, expected_last_active: str) -> None: """ Test that when a task running a worker thread is cancelled, the cancellation is not acted on until the thread finishes. 
""" last_active: Optional[str] = None def thread_worker() -> None: nonlocal last_active from_thread.run_sync(sleep_event.set) time.sleep(0.2) last_active = 'thread' from_thread.run_sync(finish_event.set) async def task_worker() -> None: nonlocal last_active try: await to_thread.run_sync(thread_worker, cancellable=cancellable) finally: last_active = 'task' sleep_event = Event() finish_event = Event() async with create_task_group() as tg: tg.start_soon(task_worker) await sleep_event.wait() tg.cancel_scope.cancel() await finish_event.wait() assert last_active == expected_last_active async def test_cancel_wait_on_thread() -> None: event = threading.Event() future: Future[bool] = Future() def wait_event() -> None: future.set_result(event.wait(1)) async with create_task_group() as tg: tg.start_soon(partial(to_thread.run_sync, cancellable=True), wait_event) await wait_all_tasks_blocked() tg.cancel_scope.cancel() await to_thread.run_sync(event.set) assert future.result(1) async def test_contextvar_propagation() -> None: var = ContextVar('var', default=1) var.set(6) assert await to_thread.run_sync(var.get) == 6 async def test_asynclib_detection() -> None: with pytest.raises(sniffio.AsyncLibraryNotFoundError): await to_thread.run_sync(sniffio.current_async_library) @pytest.mark.parametrize('anyio_backend', ['asyncio']) async def test_asyncio_cancel_native_task() -> None: task: "Optional[asyncio.Task[None]]" = None async def run_in_thread() -> None: nonlocal task task = current_task() await to_thread.run_sync(time.sleep, 0.2, cancellable=True) async with create_task_group() as tg: tg.start_soon(run_in_thread) await wait_all_tasks_blocked() assert task is not None task.cancel() def test_asyncio_no_root_task(asyncio_event_loop: asyncio.AbstractEventLoop) -> None: """ Regression test for #264. Ensures that to_thread.run_sync() does not raise an error when there is no root task, but instead tries to find the top most parent task by traversing the cancel scope tree, or failing that, uses the current task to set up a shutdown callback. """ async def run_in_thread() -> None: try: await to_thread.run_sync(time.sleep, 0) finally: asyncio_event_loop.call_soon(asyncio_event_loop.stop) task = asyncio_event_loop.create_task(run_in_thread()) asyncio_event_loop.run_forever() task.result() # Wait for worker threads to exit for t in threading.enumerate(): if t.name == 'AnyIO worker thread': t.join(2) assert not t.is_alive() def test_asyncio_future_callback_partial(asyncio_event_loop: asyncio.AbstractEventLoop) -> None: """ Regression test for #272. Ensures that futures with partial callbacks are handled correctly when the root task cannot be determined. 
""" def func(future: object) -> None: pass async def sleep_sync() -> None: return await to_thread.run_sync(time.sleep, 0) task = asyncio_event_loop.create_task(sleep_sync()) task.add_done_callback(partial(func)) asyncio_event_loop.run_until_complete(task) def test_asyncio_run_sync_no_asyncio_run(asyncio_event_loop: asyncio.AbstractEventLoop) -> None: """Test that the thread pool shutdown callback does not raise an exception.""" def exception_handler(loop: object, context: Any = None) -> None: exceptions.append(context['exception']) exceptions: List[BaseException] = [] asyncio_event_loop.set_exception_handler(exception_handler) asyncio_event_loop.run_until_complete(to_thread.run_sync(time.sleep, 0)) assert not exceptions def test_asyncio_run_sync_multiple(asyncio_event_loop: asyncio.AbstractEventLoop) -> None: """Regression test for #304.""" asyncio_event_loop.call_later(0.5, asyncio_event_loop.stop) for _ in range(3): asyncio_event_loop.run_until_complete(to_thread.run_sync(time.sleep, 0)) for t in threading.enumerate(): if t.name == 'AnyIO worker thread': t.join(2) assert not t.is_alive() def test_asyncio_no_recycle_stopping_worker(asyncio_event_loop: asyncio.AbstractEventLoop) -> None: """Regression test for #323.""" async def taskfunc1() -> None: await anyio.to_thread.run_sync(time.sleep, 0) event1.set() await event2.wait() async def taskfunc2() -> None: await event1.wait() asyncio_event_loop.call_soon(event2.set) await anyio.to_thread.run_sync(time.sleep, 0) # At this point, the worker would be stopped but still in the idle workers pool, so the # following would hang prior to the fix await anyio.to_thread.run_sync(time.sleep, 0) event1 = asyncio.Event() event2 = asyncio.Event() task1 = asyncio_event_loop.create_task(taskfunc1()) task2 = asyncio_event_loop.create_task(taskfunc2()) asyncio_event_loop.run_until_complete(asyncio.gather(task1, task2)) anyio-3.5.0/tox.ini000066400000000000000000000012701416724134300141560ustar00rootroot00000000000000# Tox (http://tox.testrun.org/) is a tool for running tests # in multiple virtualenvs. This configuration file will run the # test suite on all supported python versions. To use it, "pip install tox" # and then run "tox" from this directory. [tox] minversion = 3.7.0 envlist = lint, py36, py37, py38, py39, py310, pypy3 skip_missing_interpreters = true isolated_build = true [testenv] depends = lint commands = coverage run -m pytest {posargs} extras = test trio [testenv:lint] depends = basepython = python3 deps = pre-commit commands = pre-commit run --all-files --show-diff-on-failure skip_install = true [testenv:docs] depends = extras = doc commands = sphinx-build docs build/sphinx