python-aio-pika-9.5.5/.coafile
[Default]
bears = PEP8Bear, PyUnusedCodeBear, FilenameBear, InvalidLinkBear
files = aio_pika/**/*.py
max_line_length = 120
python-aio-pika-9.5.5/.coveragerc
[run]
omit = aio_pika/compat.py
branch = True
[report]
exclude_lines =
    pragma: no cover
    raise NotImplementedError
python-aio-pika-9.5.5/.deepsource.toml
version = 1
[[analyzers]]
name = "python"
enabled = true
runtime_version = "3.x.x"
python-aio-pika-9.5.5/.drone.yml
---
kind: pipeline
name: default

steps:
  - name: prepare toxenv
    image: snakepacker/python:all
    group: tests
    pull: always
    commands:
      - tox --notest
    volumes:
      - name: cache
        path: /drone/src/.tox

  - name: linter
    image: snakepacker/python:all
    commands:
      - tox
    environment:
      TOXENV: lint
    volumes:
      - name: cache
        path: /drone/src/.tox

  - name: mypy
    image: snakepacker/python:all
    group: tests
    pull: always
    commands:
      - tox
    environment:
      TOXENV: mypy
    volumes:
      - name: cache
        path: /drone/src/.tox

  - name: checkdoc
    image: snakepacker/python:all
    group: tests
    pull: always
    commands:
      - tox
    environment:
      TOXENV: checkdoc
    volumes:
      - name: cache
        path: /drone/src/.tox

  - name: python 3.8
    image: snakepacker/python:all
    commands:
      - tox
    environment:
      AMQP_URL: amqp://guest:guest@rabbitmq
      TOXENV: py38
      COVERALLS_REPO_TOKEN:
        from_secret: COVERALLS_TOKEN
    volumes:
      - name: cache
        path: /drone/src/.tox

  - name: python 3.8 uvloop
    image: snakepacker/python:all
    commands:
      - tox
    environment:
      AMQP_URL: amqp://guest:guest@rabbitmq
      TOXENV: py38-uvloop
      COVERALLS_REPO_TOKEN:
        from_secret: COVERALLS_TOKEN
    volumes:
      - name: cache
        path: /drone/src/.tox

  - name: python 3.7
    image: snakepacker/python:all
    commands:
      - tox
    environment:
      AMQP_URL: amqp://guest:guest@rabbitmq
      TOXENV: py37
      COVERALLS_REPO_TOKEN:
        from_secret: COVERALLS_TOKEN
    volumes:
      - name: cache
        path: /drone/src/.tox

  - name: python 3.7 uvloop
    image: snakepacker/python:all
    commands:
      - tox
    environment:
      AMQP_URL: amqp://guest:guest@rabbitmq
      TOXENV: py37-uvloop
      COVERALLS_REPO_TOKEN:
        from_secret: COVERALLS_TOKEN
    volumes:
      - name: cache
        path: /drone/src/.tox

  - name: python 3.6
    image: snakepacker/python:all
    commands:
      - tox
    environment:
      AMQP_URL: amqp://guest:guest@rabbitmq
      TOXENV: py36
      COVERALLS_REPO_TOKEN:
        from_secret: COVERALLS_TOKEN
    volumes:
      - name: cache
        path: /drone/src/.tox

  - name: python 3.6 uvloop
    image: snakepacker/python:all
    commands:
      - tox
    environment:
      AMQP_URL: amqp://guest:guest@rabbitmq
      TOXENV: py36-uvloop
      COVERALLS_REPO_TOKEN:
        from_secret: COVERALLS_TOKEN
    volumes:
      - name: cache
        path: /drone/src/.tox

  - name: python 3.5
    image: snakepacker/python:all
    commands:
      - tox
    environment:
      AMQP_URL: amqp://guest:guest@rabbitmq
      TOXENV: py35
      COVERALLS_REPO_TOKEN:
        from_secret: COVERALLS_TOKEN
    volumes:
      - name: cache
        path: /drone/src/.tox

  - name: python 3.5 uvloop
    image: snakepacker/python:all
    commands:
      - tox
    environment:
      AMQP_URL: amqp://guest:guest@rabbitmq
      TOXENV: py35-uvloop
      COVERALLS_REPO_TOKEN:
        from_secret: COVERALLS_TOKEN
    volumes:
      - name: cache
        path: /drone/src/.tox

  - name: notify
    image: drillster/drone-email
    settings:
      host:
        from_secret: SMTP_HOST
      username:
        from_secret: SMTP_USERNAME
      password:
        from_secret: SMTP_PASSWORD
      from:
        from_secret: SMTP_USERNAME
    when:
      status:
        - changed
        - failure

volumes:
  - name: cache
    temp: {}

services:
  - name: rabbitmq
    image: rabbitmq:3-alpine

---
kind: signature
hmac: 32a7f019710b16f795a6531ef6fab89d2ab24f50aaee729c3a7379a0dda472b0

...
python-aio-pika-9.5.5/.editorconfig
root = true
[*]
end_of_line = lf
insert_final_newline = true
charset = utf-8
trim_trailing_whitespace = true
[*.{py,yml}]
indent_style = space
max_line_length = 79
[*.py]
indent_size = 4
[*.rst]
indent_size = 3
[Makefile]
indent_style = tab
[*.yml]
indent_size = 2
python-aio-pika-9.5.5/.github/workflows/docs.yml
name: Deploy Documentation
on:
  push:
    branches:
      - master

jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Setup Python 3.12
        uses: actions/setup-python@v2
        with:
          python-version: "3.12"

      - name: Cache virtualenv
        id: venv-cache
        uses: actions/cache@v3
        with:
          path: .venv
          key: venv-${{ runner.os }}-${{ github.job }}-${{ github.ref }}-3.12
          restore-keys: |
            venv-${{ runner.os }}-${{ github.job }}-${{ github.ref }}-
            venv-${{ runner.os }}-${{ github.job }}-
            venv-${{ runner.os }}-

      - name: Install Poetry
        run: python -m pip install poetry

      - name: Cache Poetry and pip
        uses: actions/cache@v3
        with:
          path: |
            ~/.cache/pypoetry
            ~/.cache/pip
          key: poetry-pip-${{ runner.os }}-${{ hashFiles('**/poetry.lock') }}
          restore-keys: |
            poetry-pip-${{ runner.os }}-

      - name: Install Dependencies with Poetry
        run: poetry install --no-interaction --no-ansi

      - name: Build Documentation
        run: |
          cd docs
          poetry -C .. run make html

      - name: Install AWS CLI
        run: |
          sudo apt update
          sudo apt install -y awscli

      - name: Configure AWS CLI for Cloudflare R2
        run: |
          aws configure set aws_access_key_id ${{ secrets.CF_R2_ACCESS_KEY_ID }}
          aws configure set aws_secret_access_key ${{ secrets.CF_R2_SECRET_ACCESS_KEY }}
          aws configure set default.region us-east-1  # R2 uses us-east-1 by default
          aws configure set default.output json

      - name: Sync to Cloudflare R2
        env:
          CF_R2_ENDPOINT: ${{ secrets.CF_R2_ENDPOINT }}
          CF_R2_BUCKET_NAME: ${{ secrets.CF_R2_BUCKET_NAME }}
        run: |
          aws s3 sync docs/build/html s3://$CF_R2_BUCKET_NAME \
            --delete \
            --acl public-read \
            --endpoint-url $CF_R2_ENDPOINT
python-aio-pika-9.5.5/.github/workflows/tests.yml
name: tests
on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  pylama:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Setup python3.10
        uses: actions/setup-python@v2
        with:
          python-version: "3.10"
      - name: Cache virtualenv
        id: venv-cache
        uses: actions/cache@v3
        with:
          path: .venv
          key: venv-${{ runner.os }}-${{ github.job }}-${{ github.ref }}
      - run: python -m pip install poetry
      - run: poetry install
      - run: poetry run pylama
        env:
          FORCE_COLOR: 1

  mypy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Setup python3.10
        uses: actions/setup-python@v2
        with:
          python-version: "3.10"
      - name: Cache virtualenv
        id: venv-cache
        uses: actions/cache@v3
        with:
          path: .venv
          key: venv-${{ runner.os }}-${{ github.job }}-${{ github.ref }}
      - run: python -m pip install poetry
      - run: poetry install
      - run: poetry run mypy
        env:
          FORCE_COLOR: 1

  tests:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python:
          - '3.9'
          - '3.10'
          - '3.11'
          - '3.12'
    steps:
      - uses: actions/checkout@v2
      - name: Setup python${{ matrix.python }}
        uses: actions/setup-python@v2
        with:
          python-version: "${{ matrix.python }}"
      - name: Cache virtualenv
        id: venv-cache
        uses: actions/cache@v3
        with:
          path: .venv
          key: venv-${{ runner.os }}-${{ github.job }}-${{ github.ref }}-${{ matrix.python }}
      - run: python -m pip install poetry
      - run: poetry install --with=uvloop
      - name: pytest
        run: >-
          poetry run pytest \
            -vv \
            --cov=aio_pika \
            --cov-report=term-missing \
            --doctest-modules \
            --aiomisc-test-timeout=120 \
            tests
        env:
          FORCE_COLOR: 1
      - run: poetry run coveralls
        env:
          COVERALLS_PARALLEL: 'true'
          COVERALLS_SERVICE_NAME: github
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  finish:
    needs:
      - tests
    runs-on: ubuntu-latest
    steps:
      - name: Coveralls Finished
        uses: coverallsapp/github-action@master
        with:
          github-token: ${{ secrets.github_token }}
          parallel-finished: true
python-aio-pika-9.5.5/.gitignore
# Created by .ignore support plugin (hsz.mobi)
### VirtualEnv template
# Virtualenv
# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
.Python
[Bb]in
[Ii]nclude
[Ll]ib
[Ll]ib64
[Ll]ocal
[Ss]cripts
pyvenv.cfg
.venv
pip-selfcheck.json
### IPythonNotebook template
# Temporary data
.ipynb_checkpoints/
### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
docs/source/apidoc
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
# pyenv
.python-version
# pytest
.pytest_cache
# celery beat schedule file
celerybeat-schedule
# dotenv
.env
# virtualenv
venv/
ENV/
# Spyder project settings
.spyderproject
# Rope project settings
.ropeproject
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff:
.idea/
.vscode/
## File-based project format:
*.iws
## Plugin-specific files:
# IntelliJ
/out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
/htmlcov
/temp
.DS_Store
.*cache
.nox
python-aio-pika-9.5.5/CHANGELOG.md
9.5.5
-----
* Replace WeakSet with set for robust channels tracking #666 by shushpanov
9.5.4
-----
* fix: RobustChannel should not reopen after close() call #658
9.5.3
-----
* python3.8-eol #657
* self-hosted docs
9.5.2
-----
* Fix documentation links
9.5.1
-----
* Fix documentation links
9.5.0
-----
* Fix two bugs by adding more type hints to `CallbackCollection`. @Darsstar
* Dropped python 3.7 @Darsstar
* QueueIterator raises StopAsyncIteration when channel is closed. @Darsstar
9.4.3
-----
* fix: raise ChannelInvalidStateError at exchange.publish with closed channel #637
9.4.2
-----
* Only nack messages upon cancellation of a consumer subscription … #634
9.4.1
-----
* Prevent deadlock in RobustChannel.reopen() #622
* Python 3.12 tests #603
9.4.0
-----
* Support aiormq 6.8.0 #614
9.3.1
-----
* Define empty __slots__ in base classes #598
9.3.0
-----
* new: add custom exchanges to rpc pattern #377 by @cloud-rocket
9.2.3
-----
* Fix restore bug of RobustChannel #578 by @aozupek
9.2.2
-----
* Fix bug with RPC when handling `on_close` with a RobustConnection #573 by @CodeCorrupt
9.2.1
-----
* Fix reopen of robust channel after close #571 by @decaz. Fixes #570
9.2.0
-----
* URL params passing to aiormq #569
* `Connection.KWARGS_TYPES` renamed to `Connection.PARAMETERS` and reworked
  into a `dataclass`
* `Connection._parse_kwargs` renamed to `Connection._parse_parameters`
* [AMQP URL parameters](https://docs.aio-pika.com/#amqp-url-parameters)
  documentation article (a usage sketch follows)
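
A minimal usage sketch for URL parameter passing (the `heartbeat` query
parameter is an illustrative aiormq connection option described in the
documentation article above, not something introduced by this release):

```python
import aio_pika

async def connect():
    # Query-string parameters are forwarded to aiormq when connecting.
    return await aio_pika.connect_robust(
        "amqp://guest:guest@localhost/?heartbeat=30",
    )
```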
9.1.5
-----
* Fix race condition in RobustChannel in reopen/ready #566 by @isra17
9.1.4
-----
* use a fork-friendly way of generating random robust queue names #560
9.1.3
-----
* Ported publisher confirms tutorial by @MaPePeR #550
* Fixed errored response when `aio_pika.patterns.RPC`
can not serialize the result #552
9.1.2
-----
* Fix badges in docs
9.1.1
-----
* Fix readthedocs build file
9.1.0
-----
The bulk of the changes are related to how the library entities are now
interconnected. In previous versions, `aio_pika.Channel` instances did not
contain a link to the `aio_pika.Connection` instance; now they do.
While I don't want custom code to work directly with the `aiormq.Channel`
instance, this was a public API and I should warn you about the change here.
The `aio_pika.Channel.channel` property is deprecated. Use
`aio_pika.Channel.get_underlay_channel()` instead.
Now all library entities already use this method.
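
A minimal migration sketch (assuming an already opened `aio_pika.Channel`
named `channel`; in 9.x `get_underlay_channel()` is a coroutine):

```python
# Deprecated since 9.1.0:
raw_channel = channel.channel

# Preferred from 9.1.0 on:
raw_channel = await channel.get_underlay_channel()
```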
9.0.7
-----
* Update aiormq version
9.0.6
-----
* Amend Exchange.__repr__ to include class name #527
Also switch to f-strings rather than %-formatting, modelled after
Queue.__repr__.
* Update example code of rpc tutorial #530
* bugfix: kwargs not working in `aio_pika.connect_robust` #531
* Improve type hints for `queue.get()` #542
9.0.5
-----
* Prevent 'Task exception was never retrieved' #524
If future.exception() is not called (even on cancelled futures), it seems Python
will then log 'Task exception was never retrieved'. Rewriting this logic
slightly should hopefully achieve the same functionality while
preventing the Python errors.
* Avoid implicitly depending on setuptools #526
9.0.4
-----
* fix README badge
* upgrade requirements
9.0.3
-----
* RPCs: Show exceptions on Host (remote side) #503
* Fixed queue_name was set as channel_name for `patterns/master.py` #523
9.0.2
-----
* Do not nack if consumer is no_ack in QueueIterator #521
9.0.1
-----
* change classifiers in pyproject.toml
9.0.0
-----
The main goal of this release is the migration to `poetry` and stronger type
checking with mypy.
User code should remain compatible; just check it with mypy. The tests still
pass without public API changes, which indicates that your code should work
without changes, but does not prove it.
### Deprecations
* `aio_pika.message.HeaderProxy` - removed
* `aio_pika.message.header_converter` - removed
* `aio_pika.message.format_headers` - removed
* `aio_pika.message.Message.headers_raw` - prints deprecation warning
* `aio_pika.abc.AbstractMessage.headers_raw` - removed
8.3.0
-----
* Update `aiormq~=6.6.3` #512
* Fix getting futures exceptions #509
8.2.4
-----
* Fix memory leaks around channel close callbacks #496
* Fastest way to reject all messages when queue iterator is closing #498
8.2.3
-----
* Fix memory leak when callback collections is chaining #495
8.2.2
-----
* Prevent "Task exception was never retrieved" on timeout #492
8.2.1
-----
* Fix memory leaks on channel close #491
8.2.0
-----
* allow passing ssl_context to the connection #474. A default parameter has
  been added to the public API; this does not break anything unless your
  code relies on the order of the arguments.
8.1.1
-----
* Generated anonymous queue name may conflict #486
* improve typing in multiple library actors #478
8.1.0
-----
* Bump `aiormq~=6.4.0` with `connection blocking` feature
* `Connection.update_secret` method (#481)
8.0.3
-----
* cannot use client_properties issue #469
8.0.2
-----
* linter fixes in `aio_pika.rpc.__all__`
8.0.1
-----
* aio_pika.rpc fix for `TypeError: invalid exception object` for future
8.0.0
-----
***Release notes***
In this release, there are many changes to the internal API and bug fixes
related to sudden disconnection and correct recovery after reconnection.
Unfortunately, the behavior that was in version 7.x was slightly affected.
It's the reason the major version has been updated.
The entire set of existing tests passes with minimal changes, therefore,
except for some minor changes in behavior, the user code should
work either without any modifications or with minimal changes,
such as replacing removed deprecated functions with alternatives.
This release has been already tested in a working environment, and now it seems
that we have completely resolved all the known issues related to
recovery after network failures.
***Changes***:

* Added tests for unexpected network connection resets and fixed
  many related problems.
* Added `UnderlayChannel` and `UnderlayConnection`: these are `NamedTuple`s
  containing all connection- and channel-related properties.
  The `aiormq.Connection` and `aiormq.Channel` objects
  are now packaged in these `NamedTuple`s and can be atomically assigned
  to `aio_pika.Connection` and `aio_pika.Channel` objects.
  The main benefit is that no locks are needed during the connection:
  in the best case, the container object is assigned to the callee as usual;
  however, if something goes wrong during the connection, there is no need to
  clean anything up in `aio_pika.RobustConnection` or `aio_pika.RobustChannel`.
* An `__init__` method is now a part of abstract classes for most
  `aio_pika` entities.
* Removed explicit relations between `aio_pika.Channel`
  and `aio_pika.Connection`. Now you can't get an `aio_pika.Connection`
  instance from an `aio_pika.Channel` instance.
* Fixed a bug that caused the whole connection to be closed when a timeout
  occurred in one of the channels, in case the channel was waiting for a
  response frame to an amqp-rpc call.
* Removed deprecated `add_close_callback` and `remove_close_callback` methods
  in `aio_pika.Channel`.
  Use `aio_pika.Channel.close_callbacks.add(callback, ...)` and
  `aio_pika.Channel.close_callbacks.remove(callback, ...)` instead
  (see the sketch after this list).
* Fixed a bug in `aio_pika.RobustChannel` that caused `default_exchange`
  to be broken after reconnecting.
* The `publisher_confirms` property of `aio_pika.Channel` is public now.
* Function `get_exchange_name` is public now.
* Fixed an error in which the queue iterator could enter a deadlock state on
  a sudden disconnection.
* The new entity `OneShotCallback` helps, for example, to call all the closing
  callbacks on the channel if the `Connection` was unexpectedly closed and
  the channel closing frame did not arrive explicitly.
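
A minimal migration sketch for the removed callback methods (the `on_close`
handler and its signature here are illustrative, not prescribed by this
changelog):

```python
def on_close(channel, exc=None):
    # Called when the channel is closed; `exc` carries the close reason.
    print("channel closed:", exc)

# Removed in 8.0.0:
# channel.add_close_callback(on_close)

# Use instead:
channel.close_callbacks.add(on_close)
```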
7.2.0
-----
* Make `aio_pika.patterns.rpc` more extendable.
7.1.0
-----
* Fixes in documentation
7.0.0
-----
This release brings support for a new version of `aiormq`, which is used as
a low-level driver for working with AMQP.
The release contains a huge number of changes in the internal structure of the
library, mainly related to type inheritance and abstract types, as well
as typehints checking via mypy.
The biggest change to the user API is the change to the inheritance order,
due to the introduction of abstract types, so this release is a major one.
### Changes

* There are a lot of changes in the structure of the library,
  due to the widespread use of typing.
* `aio_pika.abc` module now contains all types and abstract class prototypes.
* Modern `aiormq~=6.1.1` used.
* Complete type checks coverage via mypy.
* The interface of `aio_pika`'s classes has undergone minimal changes,
  but you should double-check your code before migrating, at least because
  almost all types are now in `aio_pika.abc`. Module `aio_pika.types`
  still exists, but will produce a `DeprecationWarning`.
* Default value for argument `weak` is changed to `False` in
  `CallbackCollection.add(func, weak=False)`.

### Known 6.x to 7.x migration issues

* `pamqp.specification` module didn't exist in `pamqp==3.0.1` so you have to
  change your imports (see the sketch after this list):
    * `pamqp.commands` for AMQP-RPC-related classes
    * `pamqp.base` for the `Frame` class
    * `pamqp.body` for the `ContentBody` class
    * `pamqp.commands` for `Basic`, `Channel`, `Confirm`, `Exchange`,
      `Queue`, `Tx` classes.
    * `pamqp.common` for `FieldArray`, `FieldTable`, `FieldValue` classes
    * `pamqp.constants` for constants like `REPLY_SUCCESS`.
    * `pamqp.header` for the `ContentHeader` class.
    * `pamqp.heartbeat` for the `Heartbeat` class.
* Type definitions related to imports from `aio_pika` might throw warnings
  like `'SomeType' is not declared in __all__`. This is a normal situation,
  since now it is necessary to import types from `aio_pika.abc`. In this
  release these are just warnings, but in the next major release this will
  stop working, so you should take care of changes in your code.
  Just use `aio_pika.abc` in your imports.
  The list of deprecated imports:
    * `from aio_pika.message import ReturnCallback`
    * `from aio_pika.patterns.rpc import RPCMessageType` - renamed to
      `RPCMessageTypes`
    * `import aio_pika.types` - module deprecated, use `aio_pika.abc` instead
    * `from aio_pika.connection import ConnectionType`
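
A minimal before/after sketch of the `pamqp` import changes listed above:

```python
# pamqp < 3 (aio-pika 6.x):
# from pamqp.specification import Basic, REPLY_SUCCESS

# pamqp >= 3 (required by aio-pika 7.x):
from pamqp.commands import Basic
from pamqp.constants import REPLY_SUCCESS
```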
6.8.2
-----
* explicit `Channel.is_user_closed` property
* user-friendly exception when channel has been closed
* reopen channels which are closed from the broker side
6.8.1
-----
* Fix flapping test test_robust_duplicate_queue #424
* Fixed callback on_close for rpc #424
6.8.0
-----
* fix: master deserialize types #366
* fix: add missing type hint on exchange publish method #370
* Return self instead of select result in `__aenter__` #373
* fix: call remove_close_callback #374
6.7.1
-----
* Fix breaking change in callback definition #344
6.7.0
-----
* Reworked tests and finally applied PR #311
* Improve documentation examples and snippets #339
* Restore RobustChannel.default_exchange on reconnect #340
* Improve the docs a bit #335
6.6.1
-----
* Add generics to Pool and PoolItemContextManager #321
* Fix Docs for ``DeliveryError`` #322
6.6.0
-----
* message.reject called inside ProcessContext.__exit__ fails when channel is closed #302
6.5.3
-----
* Add docs and github links to setup.py #304
6.5.2
-----
* Type annotation fixes
* Add documentation
6.5.1
-----
* Test fixes
* Add reopen method for channel #263
6.5.0
-----
* Add get methods for exchange and queue #282
* fix type annotation and documentation for Connection.add_close_callback #290
6.4.3
-----
* log channel close status
* add OSError to `CONNECTION_EXCEPTIONS`
6.4.2
-----
* [fix] heartbeat_last to heartbeat_last_received #274
* Fix memory leak #285
* Fix type hint #287
* Pass loop when connecting to aiormq #294
6.4.1
-----
* RobustConnection cleanup fixes #273
6.4.0
-----
* aiormq updates:
    * Fixes for python 3.8
      [#69](https://github.com/mosquito/aiormq/pull/69)
      [#67](https://github.com/mosquito/aiormq/pull/67)
    * [passing ``name=`` query parameter](https://github.com/mosquito/aiormq/pull/69/commits/a967502e6dbdf5de422cfb183932bcec134250ad)
      from URL to user defined connection name (RabbitMQ 3.8+)
    * Fix connection drain [#68](https://github.com/mosquito/aiormq/pull/68)
    * Remove ``loop=`` argument from asyncio entities [#67](https://github.com/mosquito/aiormq/pull/67)
    * ChannelInvalidStateError exceptions instead of RuntimeError
      [#65](https://github.com/mosquito/aiormq/pull/65)
* Update tests for python 3.8
* ``Pool.close()`` method and allow to use ``Pool`` as a context manager
  [#269](https://github.com/mosquito/aio-pika/pull/269)
* Fix ``RobustConnection`` getting stuck when exclusive queues are still
  locked on the server side [#267](https://github.com/mosquito/aio-pika/pull/267)
* Add ``global_`` parameter to ``Channel.set_qos`` method
  [#266](https://github.com/mosquito/aio-pika/pull/266)
  (see the sketch after this list)
* Fix ``Connection.drain()`` is ``None``
  [Fix connection drain](https://github.com/mosquito/aiormq/pull/68)
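
A minimal hedged sketch of the new ``global_`` parameter (`channel` is an
already opened `aio_pika.Channel`):

```python
# global_=True maps to AMQP basic.qos(global=True), applying the
# prefetch limit per channel rather than per consumer.
await channel.set_qos(prefetch_count=10, global_=True)
```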
6.3.0
-----
* passing `client_properties`
6.2.0
-----
* Allow str as an exchange type #260
6.1.2
-----
* Added typing on process method #252
6.1.1
-----
* Documentation fixes
* Missed timeout parameter on `connect()` #245
6.1.0
-----
* Unified `CallbackCollection`s for channels and connections
* Make RobustConnection more robust
* `JsonRPC` and `JsonMaster` adapters
* Improve patterns documentation
6.0.1
-----
* Extended ExchangeType #237. Added `x-modulus-hash` exchange type.
6.0.0
-----
* `RobustConnection` logic changes (see #234).
Thanks to @decaz for analysis and fixes.
5.6.3
-----
* add more type annotations
* consistent setting headers for message #233
5.6.2
-----
* Fixes: set header value on HeaderProxy #232
5.5.3
-----
* Fixed #218. How to properly close RobustConnection?
5.5.2
-----
* Fixed #216. Exception in Queue.consume callback isn't propagated properly.
5.5.1
-----
* Allow to specify `requeue=` and `reject_on_redelivered=` in Master pattern #212
5.5.0
-----
* Fixed #209 int values for headers
5.4.1
-----
* update aiormq version
* use `AMQPError` instead of `AMQPException`. `AMQPException` is now alias for `AMQPError`
5.4.0
-----
* Fix routing key handling (#206 @decaz)
* Fix URL building (#207 @decaz)
* Test suite for `connect` function
5.3.2
-----
* Fix tests for `Pool`
5.3.1
-----
* no duplicate call message when exception
* add robust classes to apidoc
5.3.0
-----
* use None instead of Ellipsis for initial state (@chibby0ne)
* `Pool`: enable arguments for pool constructor (@chibby0ne)
* Create py.typed (#176 @zarybnicky)
5.2.4
-----
* Fix encode timestamp error on copy (#198 @tzoiker)
* Bump `aiormq`
5.2.2
-----
* Fix HeaderProxy bug (#195 @tzoiker)
5.2.1
-----
* remove non-initialized channels when reconnect
5.2.0
-----
* robust connection close only when unclosed
* `heartbeat_last` property
5.1.1
-----
* Simple test suite for testing robust connection via tcp proxy
5.0.1
-----
* robust connection initialization hotfix
5.0.0
-----
* Connector is now `aiormq` and not `pika`
* Remove vendored `pika`
* Compatibility changes:
    * **[HIGH]** Exceptions hierarchy completely changed:
        * ``UnroutableError`` removed. Use ``DeliveryError`` instead.
        * ``ConnectionRefusedError`` is now standard ``ConnectionError``
        * Each error code has a separate exception type.
    * **[LOW]** ``Connection.close`` method requires an exception instead
      of a ``code``/``reason`` pair or ``None``
    * **[MEDIUM]** ``IncomingMessage.ack``, ``IncomingMessage.nack`` and
      ``IncomingMessage.reject`` return coroutines. The old usage is still
      compatible, but the event loop might throw warnings.
    * **[HIGH]** ``Message.timestamp`` property is now ``datetime.datetime``
    * **[LOW]** Tracking of ``publisher confirms`` removed; a
      similar feature from ``aiormq`` is used instead.
    * **[LOW]** The non-async context manager ``IncomingMessage.process()``
      is deprecated. Use ``async with message.process():`` instead
      (see the sketch below).
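
A minimal sketch of the new-style processing context (`handle` is a
hypothetical user function, `message` an `IncomingMessage`):

```python
# Deprecated since 5.0.0:
# with message.process():
#     handle(message.body)

# Use instead:
async with message.process():
    handle(message.body)
```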
4.9.1
-----
* Fix race condition on callback timeout #180
4.9.0
-----
* Add abstract pool #174
* Fixed Deprecation Warnings in Python 3.7 #153
4.8.1
-----
* Migrate from travis to drone.io
* Use pylava instead of pylama
4.8.0
-----
* save passive flag on reconnect #170
4.7.0
-----
* fixed inconsistent argument type for connection.connect #136
* fixed conditions for creating SSL connection. #135
4.6.4
-----
* Fix UnboundLocalError exception #163
4.6.3
-----
* RobustConnection fixes #162
* Fix code examples in the README.rst
4.6.1
-----
* Close connection in examples
4.6.0
-----
* Add content_type for all patterns
4.5.0
-----
* Add special exceptions for Worker
4.4.0
-----
* More extendable Master
4.3.0
-----
* Fix #112
* Fix #155
4.2.0
-----
* Add default params for RPC.create()
4.1.0
-----
* Fix InvalidStateError when connection lost
4.0.1
-----
* Fix: RPC stuck when response deserialization error
4.0.0
-----
* Drop python 3.4 support
2.9.0
-----
* prevent `set_results` on cancelled future #133
* Added asynchronous context manager support for channels #130
2.8.3
-----
* BUGFIX: ChannelClosed exception was never retrieved
2.8.2
-----
* BUGFIX: handle coroutine double wrapping for Python 3.4
2.8.1
-----
* added example for URL which contains ssl required options.
2.8.0
-----
* `ssl_options` for coonect and connect_robust
* default ports for `amqp` and `amqps`
2.7.1
-----
* python 3.4 fix
2.7.0
-----
* Add `message_kwargs` for worker pattern
2.6.0
-----
* Added `timeout` parameter for `Exchange.declare`
* QueueEmpty exception public added to the module `__all__`
2.5.0
-----
* Ability to reconnect on Channel.Close
* Ability to reconnect on Channel.Cancel
2.4.0
-----
* Rollback to pika==0.10 because the new one had issues.
2.3.0
-----
* Feature: ability to use ExternalCredentials with blank login.
2.2.2
-----
* Bugfix: _on_getempty should delete _on_getok_callback #110.
  (thanks to @dhontecillas)
2.2.1
-----
* Fixes for pyflakes
2.2.0
-----
* Rework transactions
2.1.0
-----
* Use pika's asyncio adapter
2.0.0
-----
* Rework robust connector
1.9.0
-----
* Ability to disable robustness for a single queue in `connect_robust` mode.
* Ability to pass an exchange by name.
1.8.1
-----
* Added `python_requires=">3.4.*, <4",` instead of `if sys.version_info` in the `setup.py`
1.8.0
-----
* Change `TimeoutError` to the `asyncio.TimeoutError`
* Allow to bind queue by exchange name
* Added `extras_require = {':python_version': 'typing >= 3.5.3',` to the `setup.py`
1.7.0
-----
* `aio_pika.patterns` submodule
* `aio_pika.patterns.RPC` - RPC pattern
* `aio_pika.patterns.Master` - Master/Worker pattern
1.5.1
-----
* `passive` argument for exchange
1.5.0
-----
* `Channel.is_closed` property
* `Channel.close` just returns `None` when the channel is already closed
* `Connection` might be used in `async with` expression
* `Queue` might be used in `async with` and returns `QueueIterator`
* Changing examples
* `Queue.iterator()` method
* `QueueIterator.close()` returns `asyncio.Future` instead of `asyncio.Task`
* Ability to use `QueueIterator` in `async for` expression
* `connect_robust` is a `coroutine` instead of a function which returns a coroutine
  (PyCharm's type checker displayed a warning otherwise)
* add tests
1.4.2
-----
* Improve documentation. Add examples for connection and channel
* `Connection.close` returns `asyncio.Task` instead of a coroutine.
* `connect_robust` now is a function instead of a `partial`.
python-aio-pika-9.5.5/COPYING
Apache License
==============
_Version 2.0, January 2004_
### Terms and Conditions for use, reproduction, and distribution
#### 1. Definitions
“License” shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
“Licensor” shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
“Legal Entity” shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, “control” means **(i)** the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the
outstanding shares, or **(iii)** beneficial ownership of such entity.
“You” (or “Your”) shall mean an individual or Legal Entity exercising
permissions granted by this License.
“Source” form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
“Object” form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
“Work” shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
“Derivative Works” shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
“Contribution” shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
“submitted” means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as “Not a Contribution.”
“Contributor” shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
#### 2. Grant of Copyright License
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
#### 3. Grant of Patent License
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
#### 4. Redistribution
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
* **(a)** You must give any other recipients of the Work or Derivative Works a copy of
this License; and
* **(b)** You must cause any modified files to carry prominent notices stating that You
changed the files; and
* **(c)** You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
#### 5. Submission of Contributions
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
#### 6. Trademarks
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
#### 7. Disclaimer of Warranty
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an “AS IS” BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
#### 8. Limitation of Liability
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
#### 9. Accepting Warranty or Additional Liability
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
_END OF TERMS AND CONDITIONS_
### APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets `[]` replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same “printed page” as the copyright notice for easier identification within
third-party archives.
Copyright 2023 Dmitry Orlov
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
python-aio-pika-9.5.5/MANIFEST.in
recursive-exclude tests *
recursive-exclude __pycache__ *
exclude .*
include README.rst
python-aio-pika-9.5.5/Makefile
all: test
RABBITMQ_IMAGE:=mosquito/aiormq-rabbitmq

test:
	find . -name "*.pyc" -type f -delete
	tox

rabbitmq:
	docker kill $$(docker ps -f label=aio-pika.rabbitmq -q) || true
	docker pull $(RABBITMQ_IMAGE)
	docker run --rm -d \
		-l aio-pika.rabbitmq \
		-p 5671:5671 \
		-p 5672:5672 \
		-p 15671:15671 \
		-p 15672:15672 \
		$(RABBITMQ_IMAGE)

upload:
	python3.7 setup.py sdist bdist_wheel
	twine upload dist/*$(shell python3 setup.py --version)*
python-aio-pika-9.5.5/README.rst
.. _documentation: https://docs.aio-pika.com/
.. _adopted official RabbitMQ tutorial: https://docs.aio-pika.com/rabbitmq-tutorial/index.html
aio-pika
========
.. image:: https://coveralls.io/repos/github/mosquito/aio-pika/badge.svg?branch=master
   :target: https://coveralls.io/github/mosquito/aio-pika
   :alt: Coveralls

.. image:: https://github.com/mosquito/aio-pika/workflows/tests/badge.svg
   :target: https://github.com/mosquito/aio-pika/actions?query=workflow%3Atests
   :alt: Github Actions

.. image:: https://img.shields.io/pypi/v/aio-pika.svg
   :target: https://pypi.python.org/pypi/aio-pika/
   :alt: Latest Version

.. image:: https://img.shields.io/pypi/wheel/aio-pika.svg
   :target: https://pypi.python.org/pypi/aio-pika/

.. image:: https://img.shields.io/pypi/pyversions/aio-pika.svg
   :target: https://pypi.python.org/pypi/aio-pika/

.. image:: https://img.shields.io/pypi/l/aio-pika.svg
   :target: https://pypi.python.org/pypi/aio-pika/
A wrapper around `aiormq`_ for asyncio and humans.
Check out the examples and the tutorial in the `documentation`_.
If you are a newcomer to RabbitMQ, please start with the `adopted official RabbitMQ tutorial`_.
.. _aiormq: http://github.com/mosquito/aiormq/
.. note::

   Since version ``5.0.0`` this library doesn't use ``pika`` as an AMQP
   connector. Versions below ``5.0.0`` contain or require ``pika``'s
   source code.

.. note::

   Version 7.0.0 has breaking API changes; see CHANGELOG.md
   for migration hints.
Features
--------
* Completely asynchronous API.
* Object oriented API.
* Transparent auto-reconnects with complete state recovery with `connect_robust`
  (e.g. declared queues or exchanges, consuming state and bindings).
* Python 3.7+ compatible.
* For python 3.5 users, aio-pika is available via `aio-pika<7`.
* Transparent `publisher confirms`_ support.
* `Transactions`_ support.
* Complete type-hints coverage.
.. _Transactions: https://www.rabbitmq.com/semantics.html
.. _publisher confirms: https://www.rabbitmq.com/confirms.html
Installation
------------
.. code-block:: shell

    pip install aio-pika
Usage example
-------------
Simple consumer:
.. code-block:: python

    import asyncio
    import aio_pika
    import aio_pika.abc


    async def main(loop):
        # Connecting with the given parameters is also possible.
        # aio_pika.connect_robust(host="host", login="login", password="password")
        # You can only choose one option to create a connection, url or kw-based params.
        connection = await aio_pika.connect_robust(
            "amqp://guest:guest@127.0.0.1/", loop=loop
        )

        async with connection:
            queue_name = "test_queue"

            # Creating channel
            channel: aio_pika.abc.AbstractChannel = await connection.channel()

            # Declaring queue
            queue: aio_pika.abc.AbstractQueue = await channel.declare_queue(
                queue_name,
                auto_delete=True
            )

            async with queue.iterator() as queue_iter:
                # Cancel consuming after __aexit__
                async for message in queue_iter:
                    async with message.process():
                        print(message.body)

                        if queue.name in message.body.decode():
                            break


    if __name__ == "__main__":
        loop = asyncio.get_event_loop()
        loop.run_until_complete(main(loop))
        loop.close()
Simple publisher:
.. code-block:: python

    import asyncio
    import aio_pika
    import aio_pika.abc


    async def main(loop):
        # Explicit type annotation
        connection: aio_pika.RobustConnection = await aio_pika.connect_robust(
            "amqp://guest:guest@127.0.0.1/", loop=loop
        )

        routing_key = "test_queue"

        channel: aio_pika.abc.AbstractChannel = await connection.channel()

        await channel.default_exchange.publish(
            aio_pika.Message(
                body='Hello {}'.format(routing_key).encode()
            ),
            routing_key=routing_key
        )

        await connection.close()


    if __name__ == "__main__":
        loop = asyncio.get_event_loop()
        loop.run_until_complete(main(loop))
        loop.close()
Get single message example:
.. code-block:: python

    import asyncio
    from aio_pika import connect_robust, Message


    async def main(loop):
        connection = await connect_robust(
            "amqp://guest:guest@127.0.0.1/",
            loop=loop
        )

        queue_name = "test_queue"
        routing_key = "test_queue"

        # Creating channel
        channel = await connection.channel()

        # Declaring exchange
        exchange = await channel.declare_exchange('direct', auto_delete=True)

        # Declaring queue
        queue = await channel.declare_queue(queue_name, auto_delete=True)

        # Binding queue
        await queue.bind(exchange, routing_key)

        await exchange.publish(
            Message(
                bytes('Hello', 'utf-8'),
                content_type='text/plain',
                headers={'foo': 'bar'}
            ),
            routing_key
        )

        # Receiving message
        incoming_message = await queue.get(timeout=5)

        # Confirm message
        await incoming_message.ack()

        await queue.unbind(exchange, routing_key)
        await queue.delete()
        await connection.close()


    if __name__ == "__main__":
        loop = asyncio.get_event_loop()
        loop.run_until_complete(main(loop))
There are more examples and the RabbitMQ tutorial in the `documentation`_.
See also
==========
`aiormq`_
---------
`aiormq` is a pure python AMQP client library. It is used under the hood of
**aio-pika** and might be used when you really love working with the protocol
at a low level.

The following examples demonstrate the user API.
Simple consumer:
.. code-block:: python

    import asyncio
    import aiormq

    async def on_message(message):
        """
        on_message doesn't necessarily have to be defined as async.
        Here it is to show that it's possible.
        """
        print(f" [x] Received message {message!r}")
        print(f"Message body is: {message.body!r}")
        print("Before sleep!")
        await asyncio.sleep(5)   # Represents async I/O operations
        print("After sleep!")


    async def main():
        # Perform connection
        connection = await aiormq.connect("amqp://guest:guest@localhost/")

        # Creating a channel
        channel = await connection.channel()

        # Declaring queue
        declare_ok = await channel.queue_declare('helo')
        consume_ok = await channel.basic_consume(
            declare_ok.queue, on_message, no_ack=True
        )


    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    loop.run_forever()
Simple publisher:
.. code-block:: python

    import asyncio
    from typing import Optional

    import aiormq
    from aiormq.abc import DeliveredMessage

    MESSAGE: Optional[DeliveredMessage] = None


    async def main():
        global MESSAGE
        body = b'Hello World!'

        # Perform connection
        connection = await aiormq.connect("amqp://guest:guest@localhost//")

        # Creating a channel
        channel = await connection.channel()
        declare_ok = await channel.queue_declare("hello", auto_delete=True)

        # Sending the message
        await channel.basic_publish(body, routing_key='hello')
        print(f" [x] Sent {body}")

        MESSAGE = await channel.basic_get(declare_ok.queue)
        print(f" [x] Received message from {declare_ok.queue!r}")


    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())

    assert MESSAGE is not None
    assert MESSAGE.routing_key == "hello"
    assert MESSAGE.body == b'Hello World!'
The `patio`_ and the `patio-rabbitmq`_
--------------------------------------
**PATIO** is an acronym for Python Asynchronous Tasks for AsyncIO: an easily
extensible library for distributed task execution, like Celery, but targeting
asyncio as the main design approach.

**patio-rabbitmq** provides you with the ability to use *RPC over RabbitMQ*
services with an extremely simple implementation:
.. code-block:: python

    from patio import Registry, ThreadPoolExecutor
    from patio_rabbitmq import RabbitMQBroker

    rpc = Registry(project="patio-rabbitmq", auto_naming=False)


    # Registered under the name "sum"; the function itself is named sum_
    # so that the call to the builtin sum() below does not recurse.
    @rpc("sum")
    def sum_(*args):
        return sum(args)


    async def main():
        async with ThreadPoolExecutor(rpc, max_workers=16) as executor:
            async with RabbitMQBroker(
                executor, amqp_url="amqp://guest:guest@localhost/",
            ) as broker:
                await broker.join()
And the caller side might be written like this:
.. code-block:: python

    import asyncio
    from patio import NullExecutor, Registry
    from patio_rabbitmq import RabbitMQBroker


    async def main():
        async with NullExecutor(Registry(project="patio-rabbitmq")) as executor:
            async with RabbitMQBroker(
                executor, amqp_url="amqp://guest:guest@localhost/",
            ) as broker:
                print(await asyncio.gather(
                    *[
                        broker.call("sum", i, i, timeout=1) for i in range(10)
                    ]
                ))
`FastStream`_
---------------
**FastStream** is a powerful and easy-to-use Python library for building
asynchronous services that interact with event streams.

If you don't need a deep dive into **RabbitMQ** details, you can use the
higher-level **FastStream** interfaces:
.. code-block:: python

    from faststream import FastStream
    from faststream.rabbit import RabbitBroker

    broker = RabbitBroker("amqp://guest:guest@localhost:5672/")
    app = FastStream(broker)


    @broker.subscriber("user")
    async def user_created(user_id: int):
        assert isinstance(user_id, int)
        return f"user-{user_id}: created"


    @app.after_startup
    async def pub_smth():
        assert (
            await broker.publish(1, "user", rpc=True)
        ) == "user-1: created"
Also, **FastStream** validates messages by **pydantic**, generates your project **AsyncAPI** spec, supports In-Memory testing, RPC calls, and more.
In fact, it is a high-level wrapper on top of **aio-pika**, so you can use both of these libraries' advantages at the same time.
`python-socketio`_
------------------
`Socket.IO`_ is a transport protocol that enables real-time bidirectional event-based communication between clients (typically, though not always, web browsers) and a server. This package provides Python implementations of both, each with standard and asyncio variants.
Also this package is suitable for building messaging services over **RabbitMQ** via **aio-pika** adapter:
.. code-block:: python

    import socketio
    from aiohttp import web

    sio = socketio.AsyncServer(client_manager=socketio.AsyncAioPikaManager())

    app = web.Application()
    sio.attach(app)


    @sio.event
    async def chat_message(sid, data):
        print("message ", data)


    if __name__ == '__main__':
        web.run_app(app)
And a client is able to call `chat_message` the following way:
.. code-block:: python

    import asyncio
    import socketio

    sio = socketio.AsyncClient()


    async def main():
        await sio.connect('http://localhost:8080')
        await sio.emit('chat_message', {'response': 'my response'})


    if __name__ == '__main__':
        asyncio.run(main())
The `taskiq`_ and the `taskiq-aio-pika`_
----------------------------------------
**Taskiq** is an asynchronous distributed task queue for python. The project
takes inspiration from big projects such as Celery and Dramatiq. But taskiq
can send and run both sync and async functions.

The library also provides an **aio-pika** broker for running tasks:
.. code-block:: python

    from taskiq_aio_pika import AioPikaBroker

    broker = AioPikaBroker()


    @broker.task
    async def test() -> None:
        print("nothing")


    async def main():
        await broker.startup()
        await test.kiq()
`Rasa`_
-------
With over 25 million downloads, Rasa Open Source is the most popular open source framework for building chat and voice-based AI assistants.
With **Rasa**, you can build contextual assistants on:
* Facebook Messenger
* Slack
* Google Hangouts
* Webex Teams
* Microsoft Bot Framework
* Rocket.Chat
* Mattermost
* Telegram
* Twilio
Your own custom conversational channels or voice assistants as:
* Alexa Skills
* Google Home Actions
**Rasa** helps you build contextual assistants capable of having layered conversations with lots of back-and-forth. In order for a human to have a meaningful exchange with a contextual assistant, the assistant needs to be able to use context to build on things that were previously discussed – **Rasa** enables you to build assistants that can do this in a scalable way.
And it also uses **aio-pika** to interact with **RabbitMQ** deep inside!
Versioning
==========
This software follows `Semantic Versioning`_
For contributors
----------------
Setting up development environment
__________________________________
Clone the project:
.. code-block:: shell

    git clone https://github.com/mosquito/aio-pika.git
    cd aio-pika
Create a new virtualenv for `aio-pika`_:
.. code-block:: shell

    python3 -m venv env
    source env/bin/activate
Install all requirements for `aio-pika`_:
.. code-block:: shell

    pip install -e '.[develop]'
Running Tests
_____________
**NOTE: In order to run the tests locally you need to run a RabbitMQ instance with default user/password (guest/guest) and port (5672).**
The Makefile provides a command to run an appropriate RabbitMQ Docker image:
.. code-block:: bash

    make rabbitmq
To test just run:
.. code-block:: bash

    make test
Editing Documentation
_____________________
To iterate quickly on the documentation live in your browser, try:
.. code-block:: bash

    nox -s docs -- serve
Creating Pull Requests
______________________
Please feel free to create pull requests, but you should describe your use cases and add some examples.
Changes should follow a few simple rules:

* When your changes break the public API, you must increase the major version.
* When your changes are safe for the public API (e.g. you added an argument
  with a default value), increase the minor version.
* You have to add test cases (see the `tests/` folder)
* You must add docstrings
* Feel free to add yourself to the `"thanks to" section`_

.. _"thanks to" section: https://github.com/mosquito/aio-pika/blob/master/docs/source/index.rst#thanks-for-contributing
.. _Semantic Versioning: http://semver.org/
.. _aio-pika: https://github.com/mosquito/aio-pika/
.. _faststream: https://github.com/airtai/faststream
.. _patio: https://github.com/patio-python/patio
.. _patio-rabbitmq: https://github.com/patio-python/patio-rabbitmq
.. _Socket.IO: https://socket.io/
.. _python-socketio: https://python-socketio.readthedocs.io/en/latest/intro.html
.. _taskiq: https://github.com/taskiq-python/taskiq
.. _taskiq-aio-pika: https://github.com/taskiq-python/taskiq-aio-pika
.. _Rasa: https://rasa.com/docs/rasa/
python-aio-pika-9.5.5/aio_pika/__init__.py
from . import abc, patterns, pool
from .abc import DeliveryMode
from .channel import Channel
from .connection import Connection, connect
from .exceptions import AMQPException, MessageProcessError
from .exchange import Exchange, ExchangeType
from .log import logger
from .message import IncomingMessage, Message
from .queue import Queue
from .robust_channel import RobustChannel
from .robust_connection import RobustConnection, connect_robust
from .robust_exchange import RobustExchange
from .robust_queue import RobustQueue
from importlib.metadata import Distribution
__version__ = Distribution.from_name("aio-pika").version
__all__ = (
    "AMQPException",
    "Channel",
    "Connection",
    "DeliveryMode",
    "Exchange",
    "ExchangeType",
    "IncomingMessage",
    "Message",
    "MessageProcessError",
    "Queue",
    "RobustChannel",
    "RobustConnection",
    "RobustExchange",
    "RobustQueue",
    "__version__",
    "abc",
    "connect",
    "connect_robust",
    "logger",
    "patterns",
    "pool",
)
python-aio-pika-9.5.5/aio_pika/abc.py
from __future__ import annotations
import asyncio
import dataclasses
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum, IntEnum, unique
from functools import singledispatch
from types import TracebackType
from typing import (
Any, AsyncContextManager, AsyncIterable, Awaitable, Callable, Dict,
Generator, Iterator, Literal, Mapping, Optional, Tuple, Type, TypedDict,
TypeVar, Union, overload,
)
import aiormq.abc
from aiormq.abc import ExceptionType
from pamqp.common import Arguments, FieldValue
from yarl import URL
from .pool import PoolInstance
from .tools import (
CallbackCollection, CallbackSetType, CallbackType, OneShotCallback,
)
TimeoutType = Optional[Union[int, float]]
NoneType = type(None)
DateType = Optional[Union[int, datetime, float, timedelta]]
ExchangeParamType = Union["AbstractExchange", str]
ConsumerTag = str
MILLISECONDS = 1000
class SSLOptions(TypedDict, total=False):
cafile: str
capath: str
cadata: str
keyfile: str
certfile: str
no_verify_ssl: int
@unique
class ExchangeType(str, Enum):
FANOUT = "fanout"
DIRECT = "direct"
TOPIC = "topic"
HEADERS = "headers"
X_DELAYED_MESSAGE = "x-delayed-message"
X_CONSISTENT_HASH = "x-consistent-hash"
X_MODULUS_HASH = "x-modulus-hash"
@unique
class DeliveryMode(IntEnum):
NOT_PERSISTENT = 1
PERSISTENT = 2
@unique
class TransactionState(str, Enum):
CREATED = "created"
COMMITED = "commited"
ROLLED_BACK = "rolled back"
STARTED = "started"
@dataclasses.dataclass(frozen=True)
class DeclarationResult:
message_count: int
consumer_count: int
class AbstractTransaction:
state: TransactionState
@abstractmethod
async def select(
self, timeout: TimeoutType = None,
) -> aiormq.spec.Tx.SelectOk:
raise NotImplementedError
@abstractmethod
async def rollback(
self, timeout: TimeoutType = None,
) -> aiormq.spec.Tx.RollbackOk:
raise NotImplementedError
    @abstractmethod
    async def commit(
self, timeout: TimeoutType = None,
) -> aiormq.spec.Tx.CommitOk:
raise NotImplementedError
async def __aenter__(self) -> "AbstractTransaction":
raise NotImplementedError
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
raise NotImplementedError
HeadersType = Dict[str, FieldValue]
class MessageInfo(TypedDict, total=False):
app_id: Optional[str]
body_size: int
cluster_id: Optional[str]
consumer_tag: Optional[str]
content_encoding: Optional[str]
content_type: Optional[str]
correlation_id: Optional[str]
delivery_mode: DeliveryMode
delivery_tag: Optional[int]
exchange: Optional[str]
expiration: Optional[DateType]
headers: HeadersType
message_id: Optional[str]
priority: Optional[int]
redelivered: Optional[bool]
routing_key: Optional[str]
reply_to: Optional[str]
timestamp: Optional[datetime]
type: str
user_id: Optional[str]
class AbstractMessage(ABC):
__slots__ = ()
body: bytes
body_size: int
headers: HeadersType
content_type: Optional[str]
content_encoding: Optional[str]
delivery_mode: DeliveryMode
priority: Optional[int]
correlation_id: Optional[str]
reply_to: Optional[str]
expiration: Optional[DateType]
message_id: Optional[str]
timestamp: Optional[datetime]
type: Optional[str]
user_id: Optional[str]
app_id: Optional[str]
@abstractmethod
def info(self) -> MessageInfo:
raise NotImplementedError
@property
@abstractmethod
def locked(self) -> bool:
raise NotImplementedError
@property
@abstractmethod
def properties(self) -> aiormq.spec.Basic.Properties:
raise NotImplementedError
@abstractmethod
def __iter__(self) -> Iterator[int]:
raise NotImplementedError
@abstractmethod
def lock(self) -> None:
raise NotImplementedError
def __copy__(self) -> "AbstractMessage":
raise NotImplementedError
class AbstractIncomingMessage(AbstractMessage, ABC):
__slots__ = ()
cluster_id: Optional[str]
consumer_tag: Optional["ConsumerTag"]
delivery_tag: Optional[int]
redelivered: Optional[bool]
message_count: Optional[int]
routing_key: Optional[str]
exchange: Optional[str]
@property
@abstractmethod
def channel(self) -> aiormq.abc.AbstractChannel:
raise NotImplementedError
@abstractmethod
def process(
self,
requeue: bool = False,
reject_on_redelivered: bool = False,
ignore_processed: bool = False,
) -> "AbstractProcessContext":
raise NotImplementedError
@abstractmethod
async def ack(self, multiple: bool = False) -> None:
raise NotImplementedError
@abstractmethod
async def reject(self, requeue: bool = False) -> None:
raise NotImplementedError
@abstractmethod
async def nack(self, multiple: bool = False, requeue: bool = True) -> None:
raise NotImplementedError
    @abstractmethod
    def info(self) -> MessageInfo:
raise NotImplementedError
@property
@abstractmethod
def processed(self) -> bool:
raise NotImplementedError
class AbstractProcessContext(AsyncContextManager):
@abstractmethod
async def __aenter__(self) -> AbstractIncomingMessage:
raise NotImplementedError
@abstractmethod
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
raise NotImplementedError
class AbstractQueue:
__slots__ = ()
channel: "AbstractChannel"
name: str
durable: bool
exclusive: bool
auto_delete: bool
arguments: Arguments
passive: bool
declaration_result: aiormq.spec.Queue.DeclareOk
close_callbacks: CallbackCollection[
AbstractQueue,
[Optional[BaseException]],
]
@abstractmethod
def __init__(
self,
channel: aiormq.abc.AbstractChannel,
name: Optional[str],
durable: bool,
exclusive: bool,
auto_delete: bool,
arguments: Arguments,
passive: bool = False,
):
raise NotImplementedError(
dict(
channel=channel,
name=name,
durable=durable,
exclusive=exclusive,
auto_delete=auto_delete,
arguments=arguments,
passive=passive,
),
)
@abstractmethod
async def declare(
self, timeout: TimeoutType = None,
) -> aiormq.spec.Queue.DeclareOk:
raise NotImplementedError
@abstractmethod
async def bind(
self,
exchange: ExchangeParamType,
routing_key: Optional[str] = None,
*,
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> aiormq.spec.Queue.BindOk:
raise NotImplementedError
@abstractmethod
async def unbind(
self,
exchange: ExchangeParamType,
routing_key: Optional[str] = None,
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> aiormq.spec.Queue.UnbindOk:
raise NotImplementedError
@abstractmethod
async def consume(
self,
callback: Callable[[AbstractIncomingMessage], Awaitable[Any]],
no_ack: bool = False,
exclusive: bool = False,
arguments: Arguments = None,
consumer_tag: Optional[ConsumerTag] = None,
timeout: TimeoutType = None,
) -> ConsumerTag:
raise NotImplementedError
@abstractmethod
async def cancel(
self, consumer_tag: ConsumerTag,
timeout: TimeoutType = None,
nowait: bool = False,
) -> aiormq.spec.Basic.CancelOk:
raise NotImplementedError
@overload
async def get(
self, *, no_ack: bool = False,
fail: Literal[True] = ..., timeout: TimeoutType = ...,
) -> AbstractIncomingMessage:
...
@overload
async def get(
self, *, no_ack: bool = False,
fail: Literal[False] = ..., timeout: TimeoutType = ...,
) -> Optional[AbstractIncomingMessage]:
...
@abstractmethod
async def get(
self, *, no_ack: bool = False,
fail: bool = True, timeout: TimeoutType = 5,
) -> Optional[AbstractIncomingMessage]:
raise NotImplementedError
@abstractmethod
async def purge(
self, no_wait: bool = False, timeout: TimeoutType = None,
) -> aiormq.spec.Queue.PurgeOk:
raise NotImplementedError
@abstractmethod
async def delete(
self, *, if_unused: bool = True, if_empty: bool = True,
timeout: TimeoutType = None,
) -> aiormq.spec.Queue.DeleteOk:
raise NotImplementedError
@abstractmethod
def iterator(self, **kwargs: Any) -> "AbstractQueueIterator":
raise NotImplementedError
class AbstractQueueIterator(AsyncIterable[AbstractIncomingMessage]):
_amqp_queue: AbstractQueue
_queue: asyncio.Queue
_consumer_tag: ConsumerTag
_consume_kwargs: Dict[str, Any]
@abstractmethod
def close(self) -> Awaitable[Any]:
raise NotImplementedError
@abstractmethod
async def on_message(self, message: AbstractIncomingMessage) -> None:
raise NotImplementedError
@abstractmethod
async def consume(self) -> None:
raise NotImplementedError
@abstractmethod
def __aiter__(self) -> "AbstractQueueIterator":
raise NotImplementedError
@abstractmethod
def __aenter__(self) -> Awaitable["AbstractQueueIterator"]:
raise NotImplementedError
@abstractmethod
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
raise NotImplementedError
@abstractmethod
async def __anext__(self) -> AbstractIncomingMessage:
raise NotImplementedError
class AbstractExchange(ABC):
name: str
@abstractmethod
def __init__(
self,
channel: "AbstractChannel",
name: str,
type: Union[ExchangeType, str] = ExchangeType.DIRECT,
*,
auto_delete: bool = False,
durable: bool = False,
internal: bool = False,
passive: bool = False,
arguments: Arguments = None,
):
raise NotImplementedError
@abstractmethod
async def declare(
self, timeout: TimeoutType = None,
) -> aiormq.spec.Exchange.DeclareOk:
raise NotImplementedError
@abstractmethod
async def bind(
self,
exchange: ExchangeParamType,
routing_key: str = "",
*,
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> aiormq.spec.Exchange.BindOk:
raise NotImplementedError
@abstractmethod
async def unbind(
self,
exchange: ExchangeParamType,
routing_key: str = "",
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> aiormq.spec.Exchange.UnbindOk:
raise NotImplementedError
@abstractmethod
async def publish(
self,
message: "AbstractMessage",
routing_key: str,
*,
mandatory: bool = True,
immediate: bool = False,
timeout: TimeoutType = None,
) -> Optional[aiormq.abc.ConfirmationFrameType]:
raise NotImplementedError
@abstractmethod
async def delete(
self, if_unused: bool = False, timeout: TimeoutType = None,
) -> aiormq.spec.Exchange.DeleteOk:
raise NotImplementedError
@dataclasses.dataclass(frozen=True)
class UnderlayChannel:
channel: aiormq.abc.AbstractChannel
close_callback: OneShotCallback
@classmethod
async def create(
cls, connection: aiormq.abc.AbstractConnection,
close_callback: Callable[..., Awaitable[Any]], **kwargs: Any,
) -> "UnderlayChannel":
close_callback = OneShotCallback(close_callback)
await connection.ready()
connection.closing.add_done_callback(close_callback)
channel = await connection.channel(**kwargs)
channel.closing.add_done_callback(close_callback)
return cls(
channel=channel,
close_callback=close_callback,
)
async def close(self, exc: Optional[ExceptionType] = None) -> Any:
if self.close_callback.finished.is_set():
return
# close callbacks must be fired when closing
# and should be deleted later to prevent memory leaks
await self.channel.close(exc)
await self.close_callback.wait()
self.channel.closing.remove_done_callback(self.close_callback)
self.channel.connection.closing.remove_done_callback(
self.close_callback,
)
class AbstractChannel(PoolInstance, ABC):
QUEUE_CLASS: Type[AbstractQueue]
EXCHANGE_CLASS: Type[AbstractExchange]
close_callbacks: CallbackCollection[
AbstractChannel,
[Optional[BaseException]],
]
return_callbacks: CallbackCollection[
AbstractChannel,
[AbstractIncomingMessage],
]
default_exchange: AbstractExchange
publisher_confirms: bool
@property
@abstractmethod
def is_initialized(self) -> bool:
return hasattr(self, "_channel")
@property
@abstractmethod
def is_closed(self) -> bool:
raise NotImplementedError
@abstractmethod
def close(self, exc: Optional[ExceptionType] = None) -> Awaitable[None]:
raise NotImplementedError
@abstractmethod
def closed(self) -> Awaitable[Literal[True]]:
raise NotImplementedError
@abstractmethod
async def get_underlay_channel(self) -> aiormq.abc.AbstractChannel:
raise NotImplementedError
@property
@abstractmethod
def number(self) -> Optional[int]:
raise NotImplementedError
@abstractmethod
async def __aenter__(self) -> "AbstractChannel":
raise NotImplementedError
@abstractmethod
def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Awaitable[None]:
raise NotImplementedError
@abstractmethod
async def initialize(self, timeout: TimeoutType = None) -> None:
raise NotImplementedError
@abstractmethod
def reopen(self) -> Awaitable[None]:
raise NotImplementedError
@abstractmethod
async def declare_exchange(
self,
name: str,
type: Union[ExchangeType, str] = ExchangeType.DIRECT,
*,
durable: bool = False,
auto_delete: bool = False,
internal: bool = False,
passive: bool = False,
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> AbstractExchange:
raise NotImplementedError
@abstractmethod
async def get_exchange(
self, name: str, *, ensure: bool = True,
) -> AbstractExchange:
raise NotImplementedError
@abstractmethod
async def declare_queue(
self,
name: Optional[str] = None,
*,
durable: bool = False,
exclusive: bool = False,
passive: bool = False,
auto_delete: bool = False,
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> AbstractQueue:
raise NotImplementedError
@abstractmethod
async def get_queue(
self, name: str, *, ensure: bool = True,
) -> AbstractQueue:
raise NotImplementedError
@abstractmethod
async def set_qos(
self,
prefetch_count: int = 0,
prefetch_size: int = 0,
global_: bool = False,
timeout: TimeoutType = None,
all_channels: Optional[bool] = None,
) -> aiormq.spec.Basic.QosOk:
raise NotImplementedError
@abstractmethod
async def queue_delete(
self,
queue_name: str,
timeout: TimeoutType = None,
if_unused: bool = False,
if_empty: bool = False,
nowait: bool = False,
) -> aiormq.spec.Queue.DeleteOk:
raise NotImplementedError
@abstractmethod
async def exchange_delete(
self,
exchange_name: str,
timeout: TimeoutType = None,
if_unused: bool = False,
nowait: bool = False,
) -> aiormq.spec.Exchange.DeleteOk:
raise NotImplementedError
@abstractmethod
def transaction(self) -> AbstractTransaction:
raise NotImplementedError
@abstractmethod
async def flow(self, active: bool = True) -> aiormq.spec.Channel.FlowOk:
raise NotImplementedError
@abstractmethod
def __await__(self) -> Generator[Any, Any, "AbstractChannel"]:
raise NotImplementedError
@dataclasses.dataclass(frozen=True)
class UnderlayConnection:
connection: aiormq.abc.AbstractConnection
close_callback: OneShotCallback
@classmethod
async def make_connection(
cls, url: URL, timeout: TimeoutType = None, **kwargs: Any,
) -> aiormq.abc.AbstractConnection:
connection: aiormq.abc.AbstractConnection = await asyncio.wait_for(
aiormq.connect(url, **kwargs), timeout=timeout,
)
await connection.ready()
return connection
@classmethod
async def connect(
cls, url: URL, close_callback: Callable[..., Awaitable[Any]],
timeout: TimeoutType = None, **kwargs: Any,
) -> "UnderlayConnection":
try:
connection = await cls.make_connection(
url, timeout=timeout, **kwargs,
)
close_callback = OneShotCallback(close_callback)
connection.closing.add_done_callback(close_callback)
        except Exception as e:
            # The connection attempt failed: build a future already holding
            # the exception so the close callback still sees the failure.
            closing = asyncio.get_event_loop().create_future()
            closing.set_exception(e)
            await close_callback(closing)
raise
await connection.ready()
return cls(
connection=connection,
close_callback=close_callback,
)
def ready(self) -> Awaitable[Any]:
return self.connection.ready()
async def close(self, exc: Optional[aiormq.abc.ExceptionType]) -> Any:
if self.close_callback.finished.is_set():
return
try:
return await self.connection.close(exc)
except asyncio.CancelledError:
raise
finally:
await self.close_callback.wait()
@dataclass
class ConnectionParameter:
name: str
parser: Callable[[str], Any]
default: Optional[str] = None
is_kwarg: bool = False
def parse(self, value: Optional[str]) -> Any:
if value is None:
return self.default
try:
return self.parser(value)
except ValueError:
return self.default
class AbstractConnection(PoolInstance, ABC):
PARAMETERS: Tuple[ConnectionParameter, ...]
close_callbacks: CallbackCollection[
AbstractConnection,
[Optional[BaseException]],
]
connected: asyncio.Event
transport: Optional[UnderlayConnection]
kwargs: Mapping[str, Any]
@abstractmethod
def __init__(
self, url: URL, loop: Optional[asyncio.AbstractEventLoop] = None,
**kwargs: Any,
):
raise NotImplementedError(
f"Method not implemented, passed: url={url}, loop={loop!r}",
)
@property
@abstractmethod
def is_closed(self) -> bool:
raise NotImplementedError
@abstractmethod
async def close(self, exc: ExceptionType = asyncio.CancelledError) -> None:
raise NotImplementedError
@abstractmethod
def closed(self) -> Awaitable[Literal[True]]:
raise NotImplementedError
@abstractmethod
async def connect(self, timeout: TimeoutType = None) -> None:
raise NotImplementedError
@abstractmethod
def channel(
self,
channel_number: Optional[int] = None,
publisher_confirms: bool = True,
on_return_raises: bool = False,
) -> AbstractChannel:
raise NotImplementedError
@abstractmethod
async def ready(self) -> None:
raise NotImplementedError
@abstractmethod
async def __aenter__(self) -> "AbstractConnection":
raise NotImplementedError
@abstractmethod
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
raise NotImplementedError
@abstractmethod
async def update_secret(
self, new_secret: str, *,
reason: str = "", timeout: TimeoutType = None,
) -> aiormq.spec.Connection.UpdateSecretOk:
raise NotImplementedError
class AbstractRobustQueue(AbstractQueue):
__slots__ = ()
@abstractmethod
def restore(self) -> Awaitable[None]:
raise NotImplementedError
@abstractmethod
async def bind(
self,
exchange: ExchangeParamType,
routing_key: Optional[str] = None,
*,
arguments: Arguments = None,
timeout: TimeoutType = None,
robust: bool = True,
) -> aiormq.spec.Queue.BindOk:
raise NotImplementedError
@abstractmethod
async def consume(
self,
callback: Callable[[AbstractIncomingMessage], Any],
no_ack: bool = False,
exclusive: bool = False,
arguments: Arguments = None,
consumer_tag: Optional[ConsumerTag] = None,
timeout: TimeoutType = None,
robust: bool = True,
) -> ConsumerTag:
raise NotImplementedError
class AbstractRobustExchange(AbstractExchange):
@abstractmethod
def restore(self) -> Awaitable[None]:
raise NotImplementedError
@abstractmethod
async def bind(
self,
exchange: ExchangeParamType,
routing_key: str = "",
*,
arguments: Arguments = None,
timeout: TimeoutType = None,
robust: bool = True,
) -> aiormq.spec.Exchange.BindOk:
raise NotImplementedError
class AbstractRobustChannel(AbstractChannel):
reopen_callbacks: CallbackCollection[AbstractRobustChannel, []]
@abstractmethod
def reopen(self) -> Awaitable[None]:
raise NotImplementedError
@abstractmethod
async def restore(self) -> None:
raise NotImplementedError
@abstractmethod
async def declare_exchange(
self,
name: str,
type: Union[ExchangeType, str] = ExchangeType.DIRECT,
*,
durable: bool = False,
auto_delete: bool = False,
internal: bool = False,
passive: bool = False,
arguments: Arguments = None,
timeout: TimeoutType = None,
robust: bool = True,
) -> AbstractRobustExchange:
raise NotImplementedError
@abstractmethod
async def declare_queue(
self,
name: Optional[str] = None,
*,
durable: bool = False,
exclusive: bool = False,
passive: bool = False,
auto_delete: bool = False,
arguments: Optional[Dict[str, Any]] = None,
timeout: TimeoutType = None,
robust: bool = True,
) -> AbstractRobustQueue:
raise NotImplementedError
class AbstractRobustConnection(AbstractConnection):
reconnect_callbacks: CallbackCollection[AbstractRobustConnection, []]
@property
@abstractmethod
def reconnecting(self) -> bool:
raise NotImplementedError
@abstractmethod
def reconnect(self) -> Awaitable[None]:
raise NotImplementedError
@abstractmethod
def channel(
self,
channel_number: Optional[int] = None,
publisher_confirms: bool = True,
on_return_raises: bool = False,
) -> AbstractRobustChannel:
raise NotImplementedError
ChannelCloseCallback = Callable[
[Optional[AbstractChannel], Optional[BaseException]], Any,
]
ConnectionCloseCallback = Callable[
[Optional[AbstractConnection], Optional[BaseException]], Any,
]
ConnectionType = TypeVar("ConnectionType", bound=AbstractConnection)
@singledispatch
def get_exchange_name(value: Any) -> str:
raise ValueError(
"exchange argument must be an exchange "
f"instance or str not {value!r}",
)
@get_exchange_name.register(AbstractExchange)
def _get_exchange_name_from_exchange(value: AbstractExchange) -> str:
return value.name
@get_exchange_name.register(str)
def _get_exchange_name_from_str(value: str) -> str:
return value
__all__ = (
"AbstractChannel",
"AbstractConnection",
"AbstractExchange",
"AbstractIncomingMessage",
"AbstractMessage",
"AbstractProcessContext",
"AbstractQueue",
"AbstractQueueIterator",
"AbstractRobustChannel",
"AbstractRobustConnection",
"AbstractRobustExchange",
"AbstractRobustQueue",
"AbstractTransaction",
"CallbackSetType",
"CallbackType",
"ChannelCloseCallback",
"ConnectionCloseCallback",
"ConnectionParameter",
"ConsumerTag",
"DateType",
"DeclarationResult",
"DeliveryMode",
"ExchangeParamType",
"ExchangeType",
"FieldValue",
"HeadersType",
"MILLISECONDS",
"MessageInfo",
"NoneType",
"SSLOptions",
"TimeoutType",
"TransactionState",
"UnderlayChannel",
"UnderlayConnection",
"get_exchange_name",
)
python-aio-pika-9.5.5/aio_pika/channel.py 0000664 0000000 0000000 00000036202 14761646711 0020321 0 ustar 00root root 0000000 0000000 import asyncio
import contextlib
import warnings
from abc import ABC
from types import TracebackType
from typing import (
Any, AsyncContextManager, Awaitable, Generator, Literal, Optional, Type,
Union,
)
from warnings import warn
import aiormq
import aiormq.abc
from pamqp.common import Arguments
from .abc import (
AbstractChannel, AbstractConnection, AbstractExchange, AbstractQueue,
TimeoutType, UnderlayChannel,
)
from .exceptions import ChannelInvalidStateError
from .exchange import Exchange, ExchangeType
from .log import get_logger
from .message import IncomingMessage
from .queue import Queue
from .tools import CallbackCollection
from .transaction import Transaction
log = get_logger(__name__)
class ChannelContext(AsyncContextManager, AbstractChannel, ABC):
async def __aenter__(self) -> "AbstractChannel":
if not self.is_initialized:
await self.initialize()
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
return await self.close(exc_val)
def __await__(self) -> Generator[Any, Any, AbstractChannel]:
yield from self.initialize().__await__()
return self
class Channel(ChannelContext):
"""Channel abstraction"""
QUEUE_CLASS = Queue
EXCHANGE_CLASS = Exchange
_channel: Optional[UnderlayChannel]
def __init__(
self,
connection: AbstractConnection,
channel_number: Optional[int] = None,
publisher_confirms: bool = True,
on_return_raises: bool = False,
):
"""
        :param connection: :class:`aio_pika.connection.Connection` instance
        :param channel_number: specify the channel number explicitly
        :param publisher_confirms: False if you don't need delivery
            confirmations (in pursuit of performance)
        :param on_return_raises: raise
            :class:`aio_pika.exceptions.DeliveryError` when a mandatory
            message is returned
"""
if not publisher_confirms and on_return_raises:
raise RuntimeError(
'"on_return_raises" not applicable '
'without "publisher_confirms"',
)
self._connection: AbstractConnection = connection
self._closed: asyncio.Future = (
asyncio.get_running_loop().create_future()
)
self._channel: Optional[UnderlayChannel] = None
self._channel_number = channel_number
self.close_callbacks = CallbackCollection(self)
self.return_callbacks = CallbackCollection(self)
self.publisher_confirms = publisher_confirms
self.on_return_raises = on_return_raises
self.close_callbacks.add(self._set_closed_callback)
@property
def is_initialized(self) -> bool:
"""Returns True when the channel has been opened
and ready for interaction"""
return self._channel is not None
@property
def is_closed(self) -> bool:
"""Returns True when the channel has been closed from the broker
side or after the close() method has been called."""
if not self.is_initialized or self._closed.done():
return True
channel = self._channel
if channel is None:
return True
return channel.channel.is_closed
async def close(
self,
exc: Optional[aiormq.abc.ExceptionType] = None,
) -> None:
if not self.is_initialized:
log.warning("Channel not opened")
return
if not self._channel:
log.warning("Transport is not ready")
return
log.debug("Closing channel %r", self)
await self._channel.close()
if not self._closed.done():
self._closed.set_result(True)
def closed(self) -> Awaitable[Literal[True]]:
return self._closed
async def get_underlay_channel(self) -> aiormq.abc.AbstractChannel:
if not self.is_initialized or not self._channel:
raise aiormq.exceptions.ChannelInvalidStateError(
"Channel was not opened",
)
return self._channel.channel
@property
def channel(self) -> aiormq.abc.AbstractChannel:
warnings.warn(
"This property is deprecated, do not use this anymore.",
DeprecationWarning,
)
if self._channel is None:
raise aiormq.exceptions.ChannelInvalidStateError
return self._channel.channel
@property
def number(self) -> Optional[int]:
if self._channel is None:
return self._channel_number
underlay_channel: UnderlayChannel = self._channel
return underlay_channel.channel.number
def __str__(self) -> str:
return "{}".format(self.number or "Not initialized channel")
async def _open(self) -> None:
transport = self._connection.transport
if transport is None:
raise ChannelInvalidStateError("No active transport in channel")
await transport.ready()
channel = await UnderlayChannel.create(
transport.connection,
self._on_close,
publisher_confirms=self.publisher_confirms,
on_return_raises=self.on_return_raises,
channel_number=self._channel_number,
)
self._channel = channel
try:
await self._on_open()
except BaseException as e:
await channel.close(e)
self._channel = None
raise
if self._closed.done():
self._closed = asyncio.get_running_loop().create_future()
async def initialize(self, timeout: TimeoutType = None) -> None:
if self.is_initialized:
raise RuntimeError("Already initialized")
elif self._closed.done():
raise RuntimeError("Can't initialize closed channel")
await self._open()
await self._on_initialized()
async def _on_open(self) -> None:
self.default_exchange: Exchange = self.EXCHANGE_CLASS(
channel=self,
arguments=None,
auto_delete=False,
durable=False,
internal=False,
name="",
passive=False,
type=ExchangeType.DIRECT,
)
async def _on_close(
self,
closing: asyncio.Future
) -> Optional[BaseException]:
try:
exc = closing.exception()
except asyncio.CancelledError as e:
exc = e
await self.close_callbacks(exc)
if self._channel and self._channel.channel:
self._channel.channel.on_return_callbacks.discard(self._on_return)
return exc
async def _set_closed_callback(
self,
_: Optional[AbstractChannel],
exc: Optional[BaseException],
) -> None:
if not self._closed.done():
self._closed.set_result(True)
async def _on_initialized(self) -> None:
channel = await self.get_underlay_channel()
channel.on_return_callbacks.add(self._on_return)
def _on_return(self, message: aiormq.abc.DeliveredMessage) -> None:
self.return_callbacks(IncomingMessage(message, no_ack=True))
async def reopen(self) -> None:
log.debug("Start reopening channel %r", self)
await self._open()
def __del__(self) -> None:
with contextlib.suppress(AttributeError):
# might raise because an Exception was raised in __init__
if not self._closed.done():
self._closed.set_result(True)
self._channel = None
async def declare_exchange(
self,
name: str,
type: Union[ExchangeType, str] = ExchangeType.DIRECT,
*,
durable: bool = False,
auto_delete: bool = False,
internal: bool = False,
passive: bool = False,
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> AbstractExchange:
"""
Declare an exchange.
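
        A minimal usage sketch (the exchange name here is illustrative):

        .. code-block:: python

            exchange = await channel.declare_exchange(
                "events", ExchangeType.TOPIC, durable=True,
            )
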
:param name: string with exchange name or
:class:`aio_pika.exchange.Exchange` instance
        :param type: Exchange type. Enum ExchangeType value or string.
            String values must be one of 'fanout', 'direct', 'topic',
            'headers', 'x-delayed-message', 'x-consistent-hash' or
            'x-modulus-hash'.
        :param durable: Durability (the exchange survives a broker restart)
        :param auto_delete: Delete the exchange when all queues and
            exchanges bound to it are unbound.
        :param internal: The exchange can not be published to directly by
            clients; it only receives messages from other exchanges bound
            to it.
        :param passive: Do not fail if the entity was previously declared
            with different parameters. Raises
            :class:`aio_pika.exceptions.ChannelClosed` when the exchange
            doesn't exist.
:param arguments: additional arguments
:param timeout: execution timeout
:return: :class:`aio_pika.exchange.Exchange` instance
"""
if auto_delete and durable is None:
durable = False
exchange = self.EXCHANGE_CLASS(
channel=self,
name=name,
type=type,
durable=durable,
auto_delete=auto_delete,
internal=internal,
passive=passive,
arguments=arguments,
)
await exchange.declare(timeout=timeout)
log.debug("Exchange declared %r", exchange)
return exchange
async def get_exchange(
self, name: str, *, ensure: bool = True,
) -> AbstractExchange:
"""
With ``ensure=True``, it's a shortcut for
``.declare_exchange(..., passive=True)``; otherwise, it returns an
exchange instance without checking its existence.
When the exchange does not exist, if ``ensure=True``, will raise
:class:`aio_pika.exceptions.ChannelClosed`.
        Use this method in a separate channel (or as soon as the channel is
        created). This is the only way to get an exchange without declaring
        a new one.
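
        A minimal sketch (assumes the exchange was declared elsewhere):

        .. code-block:: python

            exchange = await channel.get_exchange("events")
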
:param name: exchange name
:param ensure: ensure that the exchange exists
:return: :class:`aio_pika.exchange.Exchange` instance
:raises: :class:`aio_pika.exceptions.ChannelClosed` instance
"""
if ensure:
return await self.declare_exchange(name=name, passive=True)
else:
return self.EXCHANGE_CLASS(
channel=self,
name=name,
durable=False,
auto_delete=False,
internal=False,
passive=True,
arguments=None,
)
async def declare_queue(
self,
name: Optional[str] = None,
*,
durable: bool = False,
exclusive: bool = False,
passive: bool = False,
auto_delete: bool = False,
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> AbstractQueue:
"""
:param name: queue name
        :param durable: Durability (the queue survives a broker restart)
        :param exclusive: Makes this queue exclusive. Exclusive queues may
            only be accessed by the current connection, and are deleted when
            that connection closes. Passive declaration of an exclusive
            queue by other connections is not allowed.
        :param passive: Do not fail if the entity was previously declared
            with different parameters. Raises
            :class:`aio_pika.exceptions.ChannelClosed` when the queue
            doesn't exist.
        :param auto_delete: Delete the queue when the last consumer
            unsubscribes.
:param arguments: additional arguments
:param timeout: execution timeout
:return: :class:`aio_pika.queue.Queue` instance
:raises: :class:`aio_pika.exceptions.ChannelClosed` instance
"""
queue: AbstractQueue = self.QUEUE_CLASS(
channel=self,
name=name,
durable=durable,
exclusive=exclusive,
auto_delete=auto_delete,
arguments=arguments,
passive=passive,
)
await queue.declare(timeout=timeout)
self.close_callbacks.add(queue.close_callbacks, weak=True)
return queue
async def get_queue(
self, name: str, *, ensure: bool = True,
) -> AbstractQueue:
"""
With ``ensure=True``, it's a shortcut for
``.declare_queue(..., passive=True)``; otherwise, it returns a
queue instance without checking its existence.
When the queue does not exist, if ``ensure=True``, will raise
:class:`aio_pika.exceptions.ChannelClosed`.
        Use this method in a separate channel (or as soon as the channel is
        created). This is the only way to get a queue without declaring
        a new one.
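
        A minimal sketch (assumes the queue was declared elsewhere):

        .. code-block:: python

            queue = await channel.get_queue("tasks")
            message = await queue.get(timeout=5)
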
:param name: queue name
:param ensure: ensure that the queue exists
:return: :class:`aio_pika.queue.Queue` instance
:raises: :class:`aio_pika.exceptions.ChannelClosed` instance
"""
if ensure:
return await self.declare_queue(name=name, passive=True)
else:
return self.QUEUE_CLASS(
channel=self,
name=name,
durable=False,
exclusive=False,
auto_delete=False,
arguments=None,
passive=True,
)
async def set_qos(
self,
prefetch_count: int = 0,
prefetch_size: int = 0,
global_: bool = False,
timeout: TimeoutType = None,
all_channels: Optional[bool] = None,
) -> aiormq.spec.Basic.QosOk:
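        """ Specify the client-requested quality of service for this channel.

        A minimal sketch: limit the number of unacknowledged messages
        delivered to this channel to ten.

        .. code-block:: python

            await channel.set_qos(prefetch_count=10)
        """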
if all_channels is not None:
warn('Use "global_" instead of "all_channels"', DeprecationWarning)
global_ = all_channels
channel = await self.get_underlay_channel()
return await channel.basic_qos(
prefetch_count=prefetch_count,
prefetch_size=prefetch_size,
global_=global_,
timeout=timeout,
)
async def queue_delete(
self,
queue_name: str,
timeout: TimeoutType = None,
if_unused: bool = False,
if_empty: bool = False,
nowait: bool = False,
) -> aiormq.spec.Queue.DeleteOk:
channel = await self.get_underlay_channel()
return await channel.queue_delete(
queue=queue_name,
if_unused=if_unused,
if_empty=if_empty,
nowait=nowait,
timeout=timeout,
)
async def exchange_delete(
self,
exchange_name: str,
timeout: TimeoutType = None,
if_unused: bool = False,
nowait: bool = False,
) -> aiormq.spec.Exchange.DeleteOk:
channel = await self.get_underlay_channel()
return await channel.exchange_delete(
exchange=exchange_name,
if_unused=if_unused,
nowait=nowait,
timeout=timeout,
)
def transaction(self) -> Transaction:
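        """ Create a :class:`Transaction` for this channel.

        Transactions are mutually exclusive with publisher confirms, so the
        channel must be created with ``publisher_confirms=False``.
        A minimal sketch (message construction is illustrative):

        .. code-block:: python

            channel = await connection.channel(publisher_confirms=False)
            async with channel.transaction():
                await channel.default_exchange.publish(
                    Message(b"hello"), routing_key="test",
                )
        """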
if self.publisher_confirms:
raise RuntimeError(
"Cannot create transaction when publisher "
"confirms are enabled",
)
return Transaction(self)
async def flow(self, active: bool = True) -> aiormq.spec.Channel.FlowOk:
channel = await self.get_underlay_channel()
return await channel.flow(active=active)
__all__ = ("Channel",)
python-aio-pika-9.5.5/aio_pika/connection.py 0000664 0000000 0000000 00000026401 14761646711 0021050 0 ustar 00root root 0000000 0000000 import asyncio
from ssl import SSLContext
from types import TracebackType
from typing import (
Any, Awaitable, Dict, Literal, Optional, Tuple, Type, TypeVar, Union
)
import aiormq.abc
from aiormq.connection import parse_int
from pamqp.common import FieldTable
from yarl import URL
from .abc import (
AbstractChannel, AbstractConnection, ConnectionParameter, SSLOptions,
TimeoutType, UnderlayConnection,
)
from .channel import Channel
from .exceptions import ConnectionClosed
from .log import get_logger
from .tools import CallbackCollection
log = get_logger(__name__)
T = TypeVar("T")
class Connection(AbstractConnection):
""" Connection abstraction """
CHANNEL_CLASS: Type[Channel] = Channel
PARAMETERS: Tuple[ConnectionParameter, ...] = (
ConnectionParameter(
name="interleave",
parser=parse_int,
is_kwarg=True,
),
ConnectionParameter(
name="happy_eyeballs_delay",
parser=float,
is_kwarg=True,
),
)
_closed: asyncio.Future
@property
def is_closed(self) -> bool:
return self._closed.done()
async def close(
self, exc: Optional[aiormq.abc.ExceptionType] = ConnectionClosed,
) -> None:
transport, self.transport = self.transport, None
self._close_called = True
if not transport:
return
await transport.close(exc)
if not self._closed.done():
self._closed.set_result(True)
def closed(self) -> Awaitable[Literal[True]]:
return self._closed
@classmethod
def _parse_parameters(cls, kwargs: Dict[str, Any]) -> Dict[str, Any]:
result = {}
for parameter in cls.PARAMETERS:
value = kwargs.get(parameter.name, parameter.default)
if parameter.is_kwarg and value is None:
# skip optional value
continue
result[parameter.name] = parameter.parse(value)
return result
def __init__(
self, url: URL, loop: Optional[asyncio.AbstractEventLoop] = None,
ssl_context: Optional[SSLContext] = None, **kwargs: Any,
):
self.loop = loop or asyncio.get_event_loop()
self.transport = None
self._closed = self.loop.create_future()
self._close_called = False
self.url = URL(url)
self.kwargs: Dict[str, Any] = self._parse_parameters(
kwargs or dict(self.url.query),
)
self.kwargs["context"] = ssl_context
self.close_callbacks = CallbackCollection(self)
self.connected: asyncio.Event = asyncio.Event()
def __str__(self) -> str:
url = self.url
if url.password:
url = url.with_password("******")
return str(url)
def __repr__(self) -> str:
return f'<{self.__class__.__name__}: "{self}">'
async def _on_connection_close(self, closing: asyncio.Future) -> None:
try:
exc = closing.exception()
except asyncio.CancelledError as e:
exc = e
self.connected.clear()
await self.close_callbacks(exc)
async def _on_connected(self) -> None:
self.connected.set()
async def connect(self, timeout: TimeoutType = None) -> None:
""" Connect to AMQP server. This method should be called after
:func:`aio_pika.connection.Connection.__init__`
.. note::
This method is called by :func:`connect`.
You shouldn't call it explicitly.
"""
self.transport = await UnderlayConnection.connect(
self.url, self._on_connection_close,
timeout=timeout, **self.kwargs,
)
await self._on_connected()
def channel(
self,
channel_number: Optional[int] = None,
publisher_confirms: bool = True,
on_return_raises: bool = False,
) -> AbstractChannel:
""" Coroutine which returns new instance of :class:`Channel`.
Example:
.. code-block:: python
import aio_pika
async def main(loop):
connection = await aio_pika.connect(
"amqp://guest:guest@127.0.0.1/"
)
channel1 = connection.channel()
await channel1.close()
# Creates channel with specific channel number
channel42 = connection.channel(42)
await channel42.close()
# For working with transactions
channel_no_confirms = await connection.channel(
publisher_confirms=False
)
await channel_no_confirms.close()
Also available as an asynchronous context manager:
.. code-block:: python
import aio_pika
async def main(loop):
connection = await aio_pika.connect(
"amqp://guest:guest@127.0.0.1/"
)
async with connection.channel() as channel:
# channel is open and available
# channel is now closed
        :param channel_number: specify the channel number explicitly
        :param publisher_confirms:
            if `True` the :func:`aio_pika.Exchange.publish` method will
            return :class:`bool` after the publish is complete. Otherwise
            the :func:`aio_pika.Exchange.publish` method will return
            :class:`None`
        :param on_return_raises:
            raise an :class:`aio_pika.exceptions.DeliveryError`
            when a mandatory message is returned
"""
if not self.transport:
raise RuntimeError("Connection was not opened")
log.debug("Creating AMQP channel for connection: %r", self)
channel = self.CHANNEL_CLASS(
connection=self,
channel_number=channel_number,
publisher_confirms=publisher_confirms,
on_return_raises=on_return_raises,
)
log.debug("Channel created: %r", channel)
return channel
async def ready(self) -> None:
await self.connected.wait()
def __del__(self) -> None:
if (
self.is_closed or
self.loop.is_closed()
):
return
asyncio.ensure_future(self.close())
async def __aenter__(self) -> "Connection":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
await self.close()
async def update_secret(
self, new_secret: str, *,
reason: str = "", timeout: TimeoutType = None,
) -> aiormq.spec.Connection.UpdateSecretOk:
if self.transport is None:
raise RuntimeError("Connection is not ready")
result = await self.transport.connection.update_secret(
new_secret=new_secret, reason=reason, timeout=timeout,
)
self.url = self.url.with_password(new_secret)
return result
def make_url(
url: Union[str, URL, None] = None,
*,
host: str = "localhost",
port: int = 5672,
login: str = "guest",
password: str = "guest",
virtualhost: str = "/",
ssl: bool = False,
ssl_options: Optional[SSLOptions] = None,
client_properties: Optional[FieldTable] = None,
**kwargs: Any,
) -> URL:
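    """ Build a broker URL from keyword arguments, or normalize an
    explicitly given ``url`` into a :class:`yarl.URL`.

    A minimal sketch (the output shown in the comment is illustrative):

    .. code-block:: python

        url = make_url(host="localhost", login="guest", password="guest")
        # e.g. amqp://guest:guest@localhost:5672// (kwargs become the query)
    """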
if url is not None:
if not isinstance(url, URL):
return URL(url)
return url
kw = kwargs
kw.update(ssl_options or {})
kw.update(client_properties or {})
# sanitize keywords
kw = {k: v for k, v in kw.items() if v is not None}
return URL.build(
scheme="amqps" if ssl else "amqp",
host=host,
port=port,
user=login,
password=password,
# yarl >= 1.3.0 requires path beginning with slash
path="/" + virtualhost,
query=kw,
)
async def connect(
url: Union[str, URL, None] = None,
*,
host: str = "localhost",
port: int = 5672,
login: str = "guest",
password: str = "guest",
virtualhost: str = "/",
ssl: bool = False,
loop: Optional[asyncio.AbstractEventLoop] = None,
ssl_options: Optional[SSLOptions] = None,
ssl_context: Optional[SSLContext] = None,
timeout: TimeoutType = None,
client_properties: Optional[FieldTable] = None,
connection_class: Type[AbstractConnection] = Connection,
**kwargs: Any,
) -> AbstractConnection:
""" Make connection to the broker.
Example:
.. code-block:: python
import aio_pika
async def main():
connection = await aio_pika.connect(
"amqp://guest:guest@127.0.0.1/"
)
Connect to localhost with default credentials:
.. code-block:: python
import aio_pika
async def main():
connection = await aio_pika.connect()
.. note::
The available keys for ssl_options parameter are:
* cert_reqs
* certfile
* keyfile
* ssl_version
    For information on what the ssl_options can be set to, refer to the
    `official Python documentation`_.
Set connection name for RabbitMQ admin panel:
.. code-block:: python
# As URL parameter method
read_connection = await connect(
"amqp://guest:guest@localhost/?name=Read%20connection"
)
write_connection = await connect(
client_properties={
'connection_name': 'Write connection'
}
)
    .. note::

        ``client_properties`` argument requires ``aiormq>=2.9``

    The URL string may contain ssl parameters, e.g.
    `amqps://user:pass@host//?ca_certs=ca.pem&certfile=crt.pem&keyfile=key.pem`
:param client_properties: add custom client capability.
    :param url:
        RFC3986_ formatted broker address. When :class:`None`,
        keyword arguments are used instead.
:param host: hostname of the broker
:param port: broker port 5672 by default
:param login: username string. `'guest'` by default.
:param password: password string. `'guest'` by default.
:param virtualhost: virtualhost parameter. `'/'` by default
    :param ssl: use SSL for connection. Should be used with additional kwargs.
:param ssl_options: A dict of values for the SSL connection.
:param timeout: connection timeout in seconds
:param loop:
Event loop (:func:`asyncio.get_event_loop()` when :class:`None`)
:param ssl_context: ssl.SSLContext instance
:param connection_class: Factory of a new connection
    :param kwargs: additional parameters which will be passed to the connection.
:return: :class:`aio_pika.connection.Connection`
.. _RFC3986: https://goo.gl/MzgYAs
.. _official Python documentation: https://goo.gl/pty9xA
"""
connection: AbstractConnection = connection_class(
make_url(
url,
host=host,
port=port,
login=login,
password=password,
virtualhost=virtualhost,
ssl=ssl,
ssl_options=ssl_options,
client_properties=client_properties,
**kwargs,
),
loop=loop,
ssl_context=ssl_context,
**kwargs,
)
await connection.connect(timeout=timeout)
return connection
__all__ = ("Connection", "connect", "make_url")
python-aio-pika-9.5.5/aio_pika/exceptions.py 0000664 0000000 0000000 00000002430 14761646711 0021066 0 ustar 00root root 0000000 0000000 import asyncio
import pamqp.exceptions
from aiormq.exceptions import (
AMQPChannelError, AMQPConnectionError, AMQPError, AMQPException,
AuthenticationError, ChannelClosed, ChannelInvalidStateError,
ChannelNotFoundEntity, ChannelPreconditionFailed, ConnectionClosed,
DeliveryError, DuplicateConsumerTag, IncompatibleProtocolError,
InvalidFrameError, MethodNotImplemented, ProbableAuthenticationError,
ProtocolSyntaxError, PublishError,
)
CONNECTION_EXCEPTIONS = (
AMQPError,
ConnectionError,
OSError,
RuntimeError,
StopAsyncIteration,
pamqp.exceptions.PAMQPException,
)
class MessageProcessError(AMQPError):
reason = "%s: %r"
class QueueEmpty(AMQPError, asyncio.QueueEmpty):
pass
__all__ = (
"AMQPChannelError",
"AMQPConnectionError",
"AMQPError",
"AMQPException",
"AuthenticationError",
"CONNECTION_EXCEPTIONS",
"ChannelClosed",
"ChannelInvalidStateError",
"ChannelNotFoundEntity",
"ChannelPreconditionFailed",
"ConnectionClosed",
"DeliveryError",
"DuplicateConsumerTag",
"IncompatibleProtocolError",
"InvalidFrameError",
"MessageProcessError",
"MethodNotImplemented",
"ProbableAuthenticationError",
"ProtocolSyntaxError",
"PublishError",
"QueueEmpty",
)
python-aio-pika-9.5.5/aio_pika/exchange.py 0000664 0000000 0000000 00000015346 14761646711 0020501 0 ustar 00root root 0000000 0000000 from typing import Optional, Union
import aiormq
from pamqp.common import Arguments
from .abc import (
AbstractChannel, AbstractExchange, AbstractMessage, ExchangeParamType,
ExchangeType, TimeoutType, get_exchange_name,
)
from .log import get_logger
log = get_logger(__name__)
class Exchange(AbstractExchange):
""" Exchange abstraction """
channel: AbstractChannel
def __init__(
self,
channel: AbstractChannel,
name: str,
type: Union[ExchangeType, str] = ExchangeType.DIRECT,
*,
auto_delete: bool = False,
durable: bool = False,
internal: bool = False,
passive: bool = False,
arguments: Arguments = None,
):
self._type = type.value if isinstance(type, ExchangeType) else type
self.channel = channel
self.name = name
self.auto_delete = auto_delete
self.durable = durable
self.internal = internal
self.passive = passive
self.arguments = arguments or {}
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__}({self}):"
f" auto_delete={self.auto_delete},"
f" durable={self.durable},"
f" arguments={self.arguments!r})>"
)
async def declare(
self, timeout: TimeoutType = None,
) -> aiormq.spec.Exchange.DeclareOk:
channel = await self.channel.get_underlay_channel()
return await channel.exchange_declare(
self.name,
exchange_type=self._type,
durable=self.durable,
auto_delete=self.auto_delete,
internal=self.internal,
passive=self.passive,
arguments=self.arguments,
timeout=timeout,
)
async def bind(
self,
exchange: ExchangeParamType,
routing_key: str = "",
*,
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> aiormq.spec.Exchange.BindOk:
""" A binding can also be a relationship between two exchanges.
This can be simply read as: this exchange is interested in messages
from another exchange.
Bindings can take an extra routing_key parameter. To avoid the confusion
with a basic_publish parameter we're going to call it a binding key.
.. code-block:: python
client = await connect()
routing_key = 'simple_routing_key'
src_exchange_name = "source_exchange"
dest_exchange_name = "destination_exchange"
channel = await client.channel()
src_exchange = await channel.declare_exchange(
src_exchange_name, auto_delete=True
)
dest_exchange = await channel.declare_exchange(
dest_exchange_name, auto_delete=True
)
queue = await channel.declare_queue(auto_delete=True)
await queue.bind(dest_exchange, routing_key)
await dest_exchange.bind(src_exchange, routing_key)
:param exchange: :class:`aio_pika.exchange.Exchange` instance
:param routing_key: routing key
:param arguments: additional arguments
:param timeout: execution timeout
:return: :class:`None`
"""
log.debug(
"Binding exchange %r to exchange %r, routing_key=%r, arguments=%r",
self,
exchange,
routing_key,
arguments,
)
channel = await self.channel.get_underlay_channel()
return await channel.exchange_bind(
arguments=arguments,
destination=self.name,
routing_key=routing_key,
source=get_exchange_name(exchange),
timeout=timeout,
)
async def unbind(
self,
exchange: ExchangeParamType,
routing_key: str = "",
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> aiormq.spec.Exchange.UnbindOk:
""" Remove exchange-to-exchange binding for this
:class:`Exchange` instance
:param exchange: :class:`aio_pika.exchange.Exchange` instance
:param routing_key: routing key
:param arguments: additional arguments
:param timeout: execution timeout
:return: :class:`None`
"""
log.debug(
"Unbinding exchange %r from exchange %r, "
"routing_key=%r, arguments=%r",
self,
exchange,
routing_key,
arguments,
)
channel = await self.channel.get_underlay_channel()
return await channel.exchange_unbind(
arguments=arguments,
destination=self.name,
routing_key=routing_key,
source=get_exchange_name(exchange),
timeout=timeout,
)
async def publish(
self,
message: AbstractMessage,
routing_key: str,
*,
mandatory: bool = True,
immediate: bool = False,
timeout: TimeoutType = None,
) -> Optional[aiormq.abc.ConfirmationFrameType]:
""" Publish the message to the queue. `aio-pika` uses
`publisher confirms`_ extension for message delivery.
.. _publisher confirms: https://www.rabbitmq.com/confirms.html
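
        A minimal usage sketch (names here are illustrative):

        .. code-block:: python

            await exchange.publish(
                Message(b"hello", delivery_mode=DeliveryMode.PERSISTENT),
                routing_key="task_queue",
            )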
"""
log.debug(
"Publishing message with routing key %r via exchange %r: %r",
routing_key,
self,
message,
)
if self.internal:
# Caught on the client side to prevent channel closure
raise ValueError(
f"Can not publish to internal exchange: '{self.name}'!",
)
if self.channel.is_closed:
raise aiormq.exceptions.ChannelInvalidStateError(
"%r closed" % self.channel,
)
channel = await self.channel.get_underlay_channel()
return await channel.basic_publish(
exchange=self.name,
routing_key=routing_key,
body=message.body,
properties=message.properties,
mandatory=mandatory,
immediate=immediate,
timeout=timeout,
)
async def delete(
self, if_unused: bool = False, timeout: TimeoutType = None,
) -> aiormq.spec.Exchange.DeleteOk:
""" Delete the queue
:param timeout: operation timeout
:param if_unused: perform deletion when queue has no bindings.
"""
log.info("Deleting %r", self)
channel = await self.channel.get_underlay_channel()
result = await channel.exchange_delete(
self.name, if_unused=if_unused, timeout=timeout,
)
del self.channel
return result
__all__ = ("Exchange", "ExchangeType", "ExchangeParamType")
python-aio-pika-9.5.5/aio_pika/log.py 0000664 0000000 0000000 00000000366 14761646711 0017474 0 ustar 00root root 0000000 0000000 import logging
logger: logging.Logger = logging.getLogger("aio_pika")
def get_logger(name: str) -> logging.Logger:
package, module = name.split(".", 1)
if package == logger.name:
name = module
return logger.getChild(name)
python-aio-pika-9.5.5/aio_pika/message.py 0000664 0000000 0000000 00000045074 14761646711 0020344 0 ustar 00root root 0000000 0000000 import warnings
from datetime import datetime, timedelta, timezone
from functools import singledispatch
from pprint import pformat
from types import TracebackType
from typing import Any, Callable, Iterator, Optional, Type, TypeVar, Union
import aiormq
from aiormq.abc import DeliveredMessage
from pamqp.common import FieldValue
from .abc import (
MILLISECONDS, AbstractChannel, AbstractIncomingMessage, AbstractMessage,
AbstractProcessContext, DateType, DeliveryMode, HeadersType, MessageInfo,
NoneType,
)
from .exceptions import ChannelInvalidStateError, MessageProcessError
from .log import get_logger
log = get_logger(__name__)
def to_milliseconds(seconds: Union[float, int]) -> int:
return int(seconds * MILLISECONDS)
@singledispatch
def encode_expiration(value: Any) -> Optional[str]:
raise ValueError("Invalid timestamp type: %r" % type(value), value)
@encode_expiration.register(datetime)
def encode_expiration_datetime(value: datetime) -> str:
now = datetime.now(tz=value.tzinfo)
return str(to_milliseconds((value - now).total_seconds()))
@encode_expiration.register(int)
@encode_expiration.register(float)
def encode_expiration_number(value: Union[int, float]) -> str:
return str(to_milliseconds(value))
@encode_expiration.register(timedelta)
def encode_expiration_timedelta(value: timedelta) -> str:
return str(int(value.total_seconds() * MILLISECONDS))
@encode_expiration.register(NoneType)
def encode_expiration_none(_: Any) -> None:
return None
@singledispatch
def decode_expiration(t: Any) -> Optional[float]:
raise ValueError("Invalid expiration type: %r" % type(t), t)
@decode_expiration.register(str)
def decode_expiration_str(t: str) -> float:
return float(t) / MILLISECONDS
@decode_expiration.register(NoneType)
def decode_expiration_none(_: Any) -> None:
return None
@singledispatch
def encode_timestamp(value: Any) -> Optional[datetime]:
raise ValueError("Invalid timestamp type: %r" % type(value), value)
@encode_timestamp.register(datetime)
def encode_timestamp_datetime(value: datetime) -> datetime:
return value
@encode_timestamp.register(float)
@encode_timestamp.register(int)
def encode_timestamp_number(value: Union[int, float]) -> datetime:
return datetime.fromtimestamp(value, tz=timezone.utc)
@encode_timestamp.register(timedelta)
def encode_timestamp_timedelta(value: timedelta) -> datetime:
return datetime.now(tz=timezone.utc) + value
@encode_timestamp.register(NoneType)
def encode_timestamp_none(_: Any) -> None:
return None
@singledispatch
def decode_timestamp(value: Any) -> Optional[datetime]:
raise ValueError("Invalid timestamp type: %r" % type(value), value)
@decode_timestamp.register(datetime)
def decode_timestamp_datetime(value: datetime) -> datetime:
return value
@decode_timestamp.register(NoneType)
def decode_timestamp_none(_: Any) -> None:
return None
V = TypeVar("V")
D = TypeVar("D")
T = TypeVar("T")
def optional(
value: V,
func: Union[Callable[[V], T], Type[T]],
default: Optional[D] = None,
) -> Union[T, D]:
return func(value) if value else default # type: ignore
class Message(AbstractMessage):
""" AMQP message abstraction """
__slots__ = (
"app_id",
"body",
"body_size",
"content_encoding",
"content_type",
"correlation_id",
"delivery_mode",
"expiration",
"_headers",
"headers",
"message_id",
"priority",
"reply_to",
"timestamp",
"type",
"user_id",
"__lock",
)
def __init__(
self,
body: bytes,
*,
headers: Optional[HeadersType] = None,
content_type: Optional[str] = None,
content_encoding: Optional[str] = None,
delivery_mode: Union[DeliveryMode, int, None] = None,
priority: Optional[int] = None,
correlation_id: Optional[str] = None,
reply_to: Optional[str] = None,
expiration: Optional[DateType] = None,
message_id: Optional[str] = None,
timestamp: Optional[DateType] = None,
type: Optional[str] = None,
user_id: Optional[str] = None,
app_id: Optional[str] = None,
):
""" Creates a new instance of Message
:param body: message body
:param headers: message headers
:param content_type: content type
:param content_encoding: content encoding
:param delivery_mode: delivery mode
:param priority: priority
:param correlation_id: correlation id
:param reply_to: reply to
:param expiration: expiration in seconds (or datetime or timedelta)
:param message_id: message id
:param timestamp: timestamp
:param type: type
:param user_id: user id
:param app_id: app id
"""
self.__lock = False
self.body = body if isinstance(body, bytes) else bytes(body)
self.body_size = len(self.body) if self.body else 0
self.headers: HeadersType = headers or {}
self.content_type = content_type
self.content_encoding = content_encoding
self.delivery_mode: DeliveryMode = DeliveryMode(
optional(
delivery_mode, int, DeliveryMode.NOT_PERSISTENT,
),
)
self.priority = optional(priority, int, 0)
self.correlation_id = optional(correlation_id, str)
self.reply_to = optional(reply_to, str)
self.expiration = expiration
self.message_id = optional(message_id, str)
self.timestamp = encode_timestamp(timestamp)
self.type = optional(type, str)
self.user_id = optional(user_id, str)
self.app_id = optional(app_id, str)
@property
def headers_raw(self) -> HeadersType:
warnings.warn(
f"{self.__class__.__name__}.headers_raw deprecated, please use "
f"{self.__class__.__name__}.headers instead.",
DeprecationWarning,
)
return self.headers
@staticmethod
def _as_bytes(value: Any) -> bytes:
if isinstance(value, bytes):
return value
elif isinstance(value, str):
return value.encode()
elif value is None:
return b""
else:
return str(value).encode()
def info(self) -> MessageInfo:
return MessageInfo(
app_id=self.app_id,
body_size=self.body_size,
cluster_id=None,
consumer_tag=None,
content_encoding=self.content_encoding,
content_type=self.content_type,
correlation_id=self.correlation_id,
delivery_mode=self.delivery_mode,
delivery_tag=None,
exchange=None,
expiration=self.expiration,
headers=self.headers,
message_id=self.message_id,
priority=self.priority,
redelivered=None,
reply_to=self.reply_to,
routing_key=None,
timestamp=decode_timestamp(self.timestamp),
type=str(self.type),
user_id=self.user_id,
)
@property
def locked(self) -> bool:
""" is message locked
:return: :class:`bool`
"""
return bool(self.__lock)
@property
def properties(self) -> aiormq.spec.Basic.Properties:
""" Build :class:`aiormq.spec.Basic.Properties` object """
return aiormq.spec.Basic.Properties(
app_id=self.app_id,
content_encoding=self.content_encoding,
content_type=self.content_type,
correlation_id=self.correlation_id,
delivery_mode=self.delivery_mode,
expiration=encode_expiration(self.expiration),
headers=self.headers,
message_id=self.message_id,
message_type=self.type,
priority=self.priority,
reply_to=self.reply_to,
timestamp=self.timestamp,
user_id=self.user_id,
)
def __repr__(self) -> str:
return "{name}:{repr}".format(
name=self.__class__.__name__, repr=pformat(self.info()),
)
def __setattr__(self, key: str, value: FieldValue) -> None:
if not key.startswith("_") and self.locked:
raise ValueError("Message is locked")
return super().__setattr__(key, value)
def __iter__(self) -> Iterator[int]:
return iter(self.body)
def lock(self) -> None:
""" Set lock flag to `True`"""
self.__lock = True
def __copy__(self) -> "Message":
return Message(
body=self.body,
headers=self.headers,
content_encoding=self.content_encoding,
content_type=self.content_type,
delivery_mode=self.delivery_mode,
priority=self.priority,
correlation_id=self.correlation_id,
reply_to=self.reply_to,
expiration=self.expiration,
message_id=self.message_id,
timestamp=self.timestamp,
type=self.type,
user_id=self.user_id,
app_id=self.app_id,
)
class IncomingMessage(Message, AbstractIncomingMessage):
""" Incoming message is seems like Message but has additional methods for
message acknowledgement.
Depending on the acknowledgement mode used, RabbitMQ can consider a
message to be successfully delivered either immediately after it is sent
out (written to a TCP socket) or when an explicit ("manual") client
acknowledgement is received. Manually sent acknowledgements can be
positive or negative and use one of the following protocol methods:
* basic.ack is used for positive acknowledgements
* basic.nack is used for negative acknowledgements (note: this is a RabbitMQ
extension to AMQP 0-9-1)
* basic.reject is used for negative acknowledgements but has one limitation
compared to basic.nack
Positive acknowledgements simply instruct RabbitMQ to record a message as
delivered. Negative acknowledgements with basic.reject have the same effect.
The difference is primarily in the semantics: positive acknowledgements
assume a message was successfully processed while their negative
counterpart suggests that a delivery wasn't processed but still should
be deleted.
"""
__slots__ = (
"_loop",
"__channel",
"cluster_id",
"consumer_tag",
"delivery_tag",
"exchange",
"routing_key",
"redelivered",
"__no_ack",
"__processed",
"message_count",
)
def __init__(self, message: DeliveredMessage, no_ack: bool = False):
""" Create an instance of :class:`IncomingMessage` """
self.__channel = message.channel
self.__no_ack = no_ack
self.__processed = False
super().__init__(
body=message.body,
content_type=message.header.properties.content_type,
content_encoding=message.header.properties.content_encoding,
headers=message.header.properties.headers,
delivery_mode=message.header.properties.delivery_mode,
priority=message.header.properties.priority,
correlation_id=message.header.properties.correlation_id,
reply_to=message.header.properties.reply_to,
expiration=decode_expiration(message.header.properties.expiration),
message_id=message.header.properties.message_id,
timestamp=decode_timestamp(message.header.properties.timestamp),
type=message.header.properties.message_type,
user_id=message.header.properties.user_id,
app_id=message.header.properties.app_id,
)
self.cluster_id = message.header.properties.cluster_id
self.consumer_tag = message.consumer_tag
self.delivery_tag = message.delivery_tag
self.exchange = message.exchange
self.message_count = message.message_count
self.redelivered = message.redelivered
self.routing_key = message.routing_key
if no_ack or not self.delivery_tag:
self.lock()
self.__processed = True
@property
def channel(self) -> aiormq.abc.AbstractChannel:
if self.__channel.is_closed:
raise ChannelInvalidStateError
return self.__channel
def process(
self,
requeue: bool = False,
reject_on_redelivered: bool = False,
ignore_processed: bool = False,
) -> AbstractProcessContext:
""" Context manager for processing the message
>>> async def on_message_received(message: IncomingMessage):
... async with message.process():
        ...        # If an exception is raised here,
        ...        # the message will be rejected
... print(message.body)
Example with ignore_processed=True
>>> async def on_message_received(message: IncomingMessage):
... async with message.process(ignore_processed=True):
... # Now (with ignore_processed=True) you may reject
... # (or ack) message manually too
... if True: # some reasonable condition here
... await message.reject()
... print(message.body)
        :param requeue: Requeue the message when an exception is raised.
        :param reject_on_redelivered:
            When True, the message will be rejected only if it
            was redelivered.
        :param ignore_processed: Do nothing if the message has already
            been processed.
"""
return ProcessContext(
self,
requeue=requeue,
reject_on_redelivered=reject_on_redelivered,
ignore_processed=ignore_processed,
)
async def ack(self, multiple: bool = False) -> None:
""" Send basic.ack is used for positive acknowledgements
.. note::
            This method looks like a blocking call, but it only sends bytes
            to the socket and does not wait for any response from
            the broker.
:param multiple: If set to True, the message's delivery tag is
treated as "up to and including", so that multiple
messages can be acknowledged with a single method.
If set to False, the ack refers to a single message.
:return: None
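        Example (an illustrative sketch; assumes a consumer receiving
        :class:`IncomingMessage` instances)::
            async def handler(message: IncomingMessage):
                ...  # handle message.body here
                await message.ack()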
"""
if self.__no_ack:
raise TypeError('Can\'t ack message with "no_ack" flag')
if self.__processed:
raise MessageProcessError("Message already processed", self)
if self.delivery_tag is not None:
await self.channel.basic_ack(
delivery_tag=self.delivery_tag, multiple=multiple,
)
self.__processed = True
if not self.locked:
self.lock()
async def reject(self, requeue: bool = False) -> None:
""" When `requeue=True` the message will be returned to queue.
Otherwise, message will be dropped.
.. note::
            This method looks like a blocking call, but it only sends bytes
            to the socket and does not wait for any response from
            the broker.
:param requeue: bool
"""
if self.__no_ack:
raise TypeError('This message has "no_ack" flag.')
if self.__processed:
raise MessageProcessError("Message already processed", self)
if self.delivery_tag is not None:
await self.channel.basic_reject(
delivery_tag=self.delivery_tag,
requeue=requeue,
)
self.__processed = True
if not self.locked:
self.lock()
async def nack(
self, multiple: bool = False, requeue: bool = True,
) -> None:
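        """ Send basic.nack, the negative acknowledgement (note: this is
        a RabbitMQ extension to AMQP 0-9-1).
        .. note::
            This method looks like a blocking call, but it only sends bytes
            to the socket and does not wait for any response from
            the broker.
        :param multiple: If set to True, the message's delivery tag is
                         treated as "up to and including".
        :param requeue: When True, the message will be returned to the queue.
        :return: None
        """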
if not self.channel.connection.basic_nack:
raise RuntimeError("Method not supported on server")
if self.__no_ack:
raise TypeError('Can\'t nack message with "no_ack" flag')
if self.__processed:
raise MessageProcessError("Message already processed", self)
if self.delivery_tag is not None:
await self.channel.basic_nack(
delivery_tag=self.delivery_tag,
multiple=multiple,
requeue=requeue,
)
self.__processed = True
if not self.locked:
self.lock()
def info(self) -> MessageInfo:
""" Method returns dict representation of the message """
info = super().info()
info["cluster_id"] = self.cluster_id
info["consumer_tag"] = self.consumer_tag
info["delivery_tag"] = self.delivery_tag
info["exchange"] = self.exchange
info["redelivered"] = self.redelivered
info["routing_key"] = self.routing_key
return info
@property
def processed(self) -> bool:
return self.__processed
class ReturnedMessage(IncomingMessage):
pass
ReturnCallback = Callable[[AbstractChannel, ReturnedMessage], Any]
class ProcessContext(AbstractProcessContext):
def __init__(
self,
message: IncomingMessage,
*,
requeue: bool,
reject_on_redelivered: bool,
ignore_processed: bool,
):
self.message = message
self.requeue = requeue
self.reject_on_redelivered = reject_on_redelivered
self.ignore_processed = ignore_processed
async def __aenter__(self) -> IncomingMessage:
return self.message
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if not exc_type:
if not self.ignore_processed or not self.message.processed:
await self.message.ack()
return
if not self.ignore_processed or not self.message.processed:
if self.reject_on_redelivered and self.message.redelivered:
if not self.message.channel.is_closed:
log.info(
"Message %r was redelivered and will be rejected",
self.message,
)
await self.message.reject(requeue=False)
return
log.warning(
"Message %r was redelivered and reject is not sent "
"since channel is closed",
self.message,
)
else:
if not self.message.channel.is_closed:
await self.message.reject(requeue=self.requeue)
return
log.warning("Reject is not sent since channel is closed")
__all__ = "Message", "IncomingMessage", "ReturnedMessage",
python-aio-pika-9.5.5/aio_pika/patterns/ 0000775 0000000 0000000 00000000000 14761646711 0020174 5 ustar 00root root 0000000 0000000 python-aio-pika-9.5.5/aio_pika/patterns/__init__.py 0000664 0000000 0000000 00000000351 14761646711 0022304 0 ustar 00root root 0000000 0000000 from .master import JsonMaster, Master, NackMessage, RejectMessage, Worker
from .rpc import RPC, JsonRPC
__all__ = (
"Master",
"NackMessage",
"RejectMessage",
"RPC",
"Worker",
"JsonMaster",
"JsonRPC",
)
python-aio-pika-9.5.5/aio_pika/patterns/base.py 0000664 0000000 0000000 00000002647 14761646711 0021471 0 ustar 00root root 0000000 0000000 import pickle
from typing import Any, Awaitable, Callable, TypeVar
T = TypeVar("T")
CallbackType = Callable[..., Awaitable[T]]
class Method:
__slots__ = (
"name",
"func",
)
def __init__(self, name: str, func: Callable[..., Any]):
self.name = name
self.func = func
def __getattr__(self, item: str) -> "Method":
return Method(".".join((self.name, item)), func=self.func)
def __call__(self, **kwargs: Any) -> Any:
return self.func(self.name, kwargs=kwargs)
class Proxy:
__slots__ = ("func",)
def __init__(self, func: Callable[..., Any]):
self.func = func
def __getattr__(self, item: str) -> Method:
return Method(item, self.func)
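# Usage sketch (illustrative only, not part of the public API): ``Proxy``
# and ``Method`` build dotted method names and forward keyword arguments
# to a single callback:
#   async def callback(method_name, kwargs):
#       print(method_name, kwargs)
#   proxy = Proxy(callback)
#   await proxy.user.create(name="Alice")
#   # -> callback("user.create", kwargs={"name": "Alice"})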
class Base:
__slots__ = ()
SERIALIZER = pickle
CONTENT_TYPE = "application/python-pickle"
def serialize(self, data: Any) -> bytes:
""" Serialize data to the bytes.
Uses `pickle` by default.
        You should override this method when you want to change the serializer.
:param data: Data which will be serialized
"""
return self.SERIALIZER.dumps(data)
def deserialize(self, data: bytes) -> Any:
""" Deserialize data from bytes.
Uses `pickle` by default.
        You should override this method when you want to change the serializer.
:param data: Data which will be deserialized
"""
return self.SERIALIZER.loads(data)
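# Override sketch (illustrative): a subclass swaps the serializer by
# overriding the class attributes and, if needed, the methods, just as
# JsonMaster and JsonRPC do elsewhere in this package:
#   import json
#   class JsonBase(Base):
#       SERIALIZER = json
#       CONTENT_TYPE = "application/json"
#       def serialize(self, data):
#           return self.SERIALIZER.dumps(data, ensure_ascii=False).encode()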
python-aio-pika-9.5.5/aio_pika/patterns/master.py 0000664 0000000 0000000 00000014167 14761646711 0022052 0 ustar 00root root 0000000 0000000 import asyncio
import gzip
import json
import logging
from functools import partial
from types import MappingProxyType
from typing import Any, Awaitable, Mapping, Optional
import aiormq
from aio_pika.abc import (
AbstractChannel, AbstractExchange, AbstractIncomingMessage, AbstractQueue,
ConsumerTag, DeliveryMode,
)
from aio_pika.message import Message
from ..tools import create_task, ensure_awaitable
from .base import Base, CallbackType, Proxy, T
log = logging.getLogger(__name__)
class MessageProcessingError(Exception):
pass
class NackMessage(MessageProcessingError):
def __init__(self, requeue: bool = False):
self.requeue = requeue
class RejectMessage(MessageProcessingError):
def __init__(self, requeue: bool = False):
self.requeue = requeue
class Worker:
__slots__ = (
"queue",
"consumer_tag",
"loop",
)
def __init__(
self, queue: AbstractQueue, consumer_tag: ConsumerTag,
loop: asyncio.AbstractEventLoop,
):
self.queue = queue
self.consumer_tag = consumer_tag
self.loop = loop
def close(self) -> Awaitable[None]:
""" Cancel subscription to the channel
:return: :class:`asyncio.Task`
"""
async def closer() -> None:
await self.queue.cancel(self.consumer_tag)
return create_task(closer)
class Master(Base):
__slots__ = (
"channel",
"loop",
"proxy",
"_requeue",
"_reject_on_redelivered",
)
DELIVERY_MODE = DeliveryMode.PERSISTENT
__doc__ = """
Implements Master/Worker pattern.
Usage example:
`worker.py` ::
master = Master(channel)
worker = await master.create_worker('test_worker', lambda x: print(x))
`master.py` ::
master = Master(channel)
await master.proxy.test_worker('foo')
"""
def __init__(
self,
channel: AbstractChannel,
requeue: bool = True,
reject_on_redelivered: bool = False,
):
""" Creates a new :class:`Master` instance.
:param channel: Initialized instance of :class:`aio_pika.Channel`
"""
self.channel: AbstractChannel = channel
self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()
self.proxy = Proxy(self.create_task)
self.channel.return_callbacks.add(self.on_message_returned)
self._requeue = requeue
self._reject_on_redelivered = reject_on_redelivered
@property
def exchange(self) -> AbstractExchange:
return self.channel.default_exchange
@staticmethod
def on_message_returned(
channel: Optional[AbstractChannel],
message: AbstractIncomingMessage,
) -> None:
log.warning(
"Message returned. Probably destination queue does not exists: %r",
message,
)
def serialize(self, data: Any) -> bytes:
""" Serialize data to the bytes.
Uses `pickle` by default.
        You should override this method when you want to change the serializer.
:param data: Data which will be serialized
:returns: bytes
"""
return super().serialize(data)
def deserialize(self, data: bytes) -> Any:
""" Deserialize data from bytes.
Uses `pickle` by default.
        You should override this method when you want to change the serializer.
:param data: Data which will be deserialized
:returns: :class:`Any`
"""
return super().deserialize(data)
@classmethod
async def execute(
cls, func: CallbackType, kwargs: Any,
) -> T:
kwargs = kwargs or {}
if not isinstance(kwargs, dict):
logging.error("Bad kwargs %r received for the %r", kwargs, func)
raise RejectMessage(requeue=False)
return await func(**kwargs)
async def on_message(
self, func: CallbackType,
message: AbstractIncomingMessage,
) -> None:
async with message.process(
requeue=self._requeue,
reject_on_redelivered=self._reject_on_redelivered,
ignore_processed=True,
):
try:
await self.execute(func, self.deserialize(message.body))
except RejectMessage as e:
await message.reject(requeue=e.requeue)
except NackMessage as e:
await message.nack(requeue=e.requeue)
async def create_queue(
self, queue_name: str, **kwargs: Any,
) -> AbstractQueue:
return await self.channel.declare_queue(queue_name, **kwargs)
async def create_worker(
self, queue_name: str,
func: CallbackType,
**kwargs: Any,
) -> Worker:
""" Creates a new :class:`Worker` instance. """
queue = await self.create_queue(queue_name, **kwargs)
consumer_tag = await queue.consume(
partial(self.on_message, ensure_awaitable(func)),
)
return Worker(queue, consumer_tag, self.loop)
async def create_task(
self, channel_name: str,
kwargs: Mapping[str, Any] = MappingProxyType({}),
**message_kwargs: Any,
) -> Optional[aiormq.abc.ConfirmationFrameType]:
""" Creates a new task for the worker """
message = Message(
body=self.serialize(kwargs),
content_type=self.CONTENT_TYPE,
delivery_mode=self.DELIVERY_MODE,
**message_kwargs,
)
return await self.exchange.publish(
message, channel_name, mandatory=True,
)
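# Usage sketch (assumes an initialized Master instance and a hypothetical
# ``send_mail`` coroutine):
#   await master.create_worker("mail.send", send_mail, auto_delete=True)
#   await master.create_task("mail.send", kwargs=dict(to="user@example.com"))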
class JsonMaster(Master):
SERIALIZER = json
CONTENT_TYPE = "application/json"
def serialize(self, data: Any) -> bytes:
return self.SERIALIZER.dumps(data, ensure_ascii=False).encode()
class CompressedJsonMaster(Master):
SERIALIZER = json
CONTENT_TYPE = "application/json;compression=gzip"
COMPRESS_LEVEL = 6
def serialize(self, data: Any) -> bytes:
return gzip.compress(
self.SERIALIZER.dumps(data, ensure_ascii=False).encode(),
compresslevel=self.COMPRESS_LEVEL,
)
def deserialize(self, data: bytes) -> Any:
return self.SERIALIZER.loads(gzip.decompress(data))
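# Round-trip sketch: serialize() and deserialize() are inverses; for a
# hypothetical ``master`` instance:
#   payload = master.serialize({"x": 1})  # gzip-compressed JSON bytes
#   assert master.deserialize(payload) == {"x": 1}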
python-aio-pika-9.5.5/aio_pika/patterns/rpc.py 0000664 0000000 0000000 00000037070 14761646711 0021341 0 ustar 00root root 0000000 0000000 import asyncio
import json
import logging
import time
import uuid
from enum import Enum
from functools import partial
from typing import Any, Callable, Dict, Optional, Tuple
from aiormq.abc import ExceptionType
from aio_pika.abc import (
AbstractChannel, AbstractExchange, AbstractIncomingMessage, AbstractQueue,
ConsumerTag, DeliveryMode,
)
from aio_pika.exceptions import MessageProcessError
from aio_pika.exchange import ExchangeType
from aio_pika.message import IncomingMessage, Message
from ..tools import ensure_awaitable
from .base import Base, CallbackType, Proxy, T
log = logging.getLogger(__name__)
class RPCException(RuntimeError):
pass
class RPCMessageType(str, Enum):
ERROR = "error"
RESULT = "result"
CALL = "call"
# This is needed only for migration from 6.x to 7.x
# TODO: Remove this in 8.x release
RPCMessageTypes = RPCMessageType # noqa
class RPC(Base):
__slots__ = (
"channel",
"loop",
"proxy",
"futures",
"result_queue",
"result_consumer_tag",
"routes",
"queues",
"consumer_tags",
"dlx_exchange",
"rpc_exchange",
"host_exceptions",
)
DLX_NAME = "rpc.dlx"
DELIVERY_MODE = DeliveryMode.NOT_PERSISTENT
__doc__ = """
Remote Procedure Call helper.
Create an instance ::
rpc = await RPC.create(channel, host_exceptions=False)
Registering python function ::
# RPC instance passes only keyword arguments
def multiply(*, x, y):
return x * y
await rpc.register("multiply", multiply)
Call function through proxy ::
assert await rpc.proxy.multiply(x=2, y=3) == 6
Call function explicit ::
assert await rpc.call('multiply', dict(x=2, y=3)) == 6
Show exceptions on remote side ::
rpc = await RPC.create(channel, host_exceptions=True)
"""
result_queue: AbstractQueue
result_consumer_tag: ConsumerTag
dlx_exchange: AbstractExchange
rpc_exchange: Optional[AbstractExchange]
def __init__(
self, channel: AbstractChannel,
host_exceptions: bool = False,
) -> None:
self.channel = channel
self.loop = asyncio.get_event_loop()
self.proxy = Proxy(self.call)
self.futures: Dict[str, asyncio.Future] = {}
self.routes: Dict[str, Callable[..., Any]] = {}
self.queues: Dict[Callable[..., Any], AbstractQueue] = {}
self.consumer_tags: Dict[Callable[..., Any], ConsumerTag] = {}
self.host_exceptions = host_exceptions
def __remove_future(
self, correlation_id: str,
) -> Callable[[asyncio.Future], None]:
def do_remove(future: asyncio.Future) -> None:
log.debug("Remove done future %r", future)
self.futures.pop(correlation_id, None)
return do_remove
def create_future(self) -> Tuple[asyncio.Future, str]:
future = self.loop.create_future()
log.debug("Create future for RPC call")
correlation_id = str(uuid.uuid4())
self.futures[correlation_id] = future
future.add_done_callback(self.__remove_future(correlation_id))
return future, correlation_id
def _format_routing_key(self, method_name: str) -> str:
return (
f"{self.rpc_exchange.name}::{method_name}"
if self.rpc_exchange
else method_name
)
async def close(self) -> None:
if not hasattr(self, "result_queue"):
log.warning("RPC already closed")
return
log.debug("Cancelling listening %r", self.result_queue)
await self.result_queue.cancel(self.result_consumer_tag)
del self.result_consumer_tag
log.debug("Unbinding %r", self.result_queue)
await self.result_queue.unbind(
self.dlx_exchange, "",
arguments={"From": self.result_queue.name, "x-match": "any"},
)
log.debug("Cancelling undone futures %r", self.futures)
for future in self.futures.values():
if future.done():
continue
future.set_exception(asyncio.CancelledError)
log.debug("Deleting %r", self.result_queue)
await self.result_queue.delete()
del self.result_queue
del self.dlx_exchange
if self.rpc_exchange:
del self.rpc_exchange
async def initialize(
self, auto_delete: bool = True,
durable: bool = False, exchange: str = "", **kwargs: Any,
) -> None:
if hasattr(self, "result_queue"):
return
self.rpc_exchange = await self.channel.declare_exchange(
exchange,
type=ExchangeType.DIRECT,
auto_delete=True,
durable=durable,
) if exchange else None
self.result_queue = await self.channel.declare_queue(
None, auto_delete=auto_delete, durable=durable, **kwargs,
)
self.dlx_exchange = await self.channel.declare_exchange(
self.DLX_NAME, type=ExchangeType.HEADERS, auto_delete=True,
)
await self.result_queue.bind(
self.dlx_exchange,
"",
arguments={"From": self.result_queue.name, "x-match": "any"},
)
self.result_consumer_tag = await self.result_queue.consume(
self.on_result_message, exclusive=True, no_ack=True,
)
self.channel.close_callbacks.add(self.on_close)
self.channel.return_callbacks.add(self.on_message_returned)
def on_close(
self,
channel: Optional[AbstractChannel],
exc: Optional[ExceptionType] = None,
) -> None:
log.debug("Closing RPC futures because %r", exc)
for future in self.futures.values():
if future.done():
continue
future.set_exception(exc or Exception)
@classmethod
async def create(cls, channel: AbstractChannel, **kwargs: Any) -> "RPC":
""" Creates a new instance of :class:`aio_pika.patterns.RPC`.
You should use this method instead of :func:`__init__`,
        because :func:`create` returns a coroutine and performs
        asynchronous initialization
:param channel: initialized instance of :class:`aio_pika.Channel`
:returns: :class:`RPC`
"""
rpc = cls(channel)
await rpc.initialize(**kwargs)
return rpc
def on_message_returned(
self,
channel: Optional[AbstractChannel],
message: AbstractIncomingMessage,
) -> None:
if message.correlation_id is None:
log.warning(
"Message without correlation_id was returned: %r", message,
)
return
future = self.futures.pop(message.correlation_id, None)
if not future or future.done():
log.warning("Unknown message was returned: %r", message)
return
future.set_exception(
MessageProcessError("Message has been returned", message),
)
async def on_result_message(self, message: AbstractIncomingMessage) -> None:
if message.correlation_id is None:
log.warning(
"Message without correlation_id was received: %r", message,
)
return
future = self.futures.pop(message.correlation_id, None)
if future is None:
log.warning("Unknown message: %r", message)
return
try:
payload = await self.deserialize_message(message)
except Exception as e:
log.error("Failed to deserialize response on message: %r", message)
future.set_exception(e)
return
if message.type == RPCMessageType.RESULT.value:
future.set_result(payload)
elif message.type == RPCMessageType.ERROR.value:
if not isinstance(payload, Exception):
payload = RPCException("Wrapped non-exception object", payload)
future.set_exception(payload)
elif message.type == RPCMessageType.CALL.value:
future.set_exception(
asyncio.TimeoutError("Message timed-out", message),
)
else:
future.set_exception(
RuntimeError("Unknown message type %r" % message.type),
)
async def on_call_message(
self, method_name: str, message: IncomingMessage,
) -> None:
routing_key = self._format_routing_key(method_name)
if routing_key not in self.routes:
log.warning("Method %r not registered in %r", method_name, self)
return
try:
payload = await self.deserialize_message(message)
func = self.routes[routing_key]
result: Any = await self.execute(func, payload)
message_type = RPCMessageType.RESULT
except Exception as e:
result = self.serialize_exception(e)
message_type = RPCMessageType.ERROR
if self.host_exceptions is True:
log.exception(e)
if not message.reply_to:
log.info(
'RPC message without "reply_to" header %r call result '
"will be lost",
message,
)
await message.ack()
return
try:
result_message = await self.serialize_message(
payload=result,
message_type=message_type,
correlation_id=message.correlation_id,
delivery_mode=message.delivery_mode,
)
except asyncio.CancelledError:
raise
except Exception as e:
result_message = await self.serialize_message(
payload=e,
message_type=RPCMessageType.ERROR,
correlation_id=message.correlation_id,
delivery_mode=message.delivery_mode,
)
try:
await self.channel.default_exchange.publish(
result_message, message.reply_to, mandatory=False,
)
except Exception:
log.exception("Failed to send reply %r", result_message)
await message.reject(requeue=False)
return
if message_type == RPCMessageType.ERROR.value:
await message.ack()
return
await message.ack()
def serialize_exception(self, exception: Exception) -> Any:
""" Make python exception serializable """
return exception
async def execute(self, func: CallbackType, payload: Dict[str, Any]) -> T:
""" Executes rpc call. Might be overlapped. """
return await func(**payload)
async def deserialize_message(
self, message: AbstractIncomingMessage,
) -> Any:
return self.deserialize(message.body)
async def serialize_message(
self, payload: Any, message_type: RPCMessageType,
correlation_id: Optional[str], delivery_mode: DeliveryMode,
**kwargs: Any,
) -> Message:
return Message(
self.serialize(payload),
content_type=self.CONTENT_TYPE,
correlation_id=correlation_id,
delivery_mode=delivery_mode,
timestamp=time.time(),
type=message_type.value,
**kwargs,
)
async def call(
self,
method_name: str,
kwargs: Optional[Dict[str, Any]] = None,
*,
expiration: Optional[int] = None,
priority: int = 5,
delivery_mode: DeliveryMode = DELIVERY_MODE,
) -> Any:
""" Call remote method and awaiting result.
:param method_name: Name of method
        :param kwargs: Method kwargs
:param expiration:
            If not `None`, messages staying in the queue longer than this
            value will be returned and :class:`asyncio.TimeoutError`
            will be raised.
:param priority: Message priority
:param delivery_mode: Call message delivery mode
:raises asyncio.TimeoutError: when message expired
:raises CancelledError: when called :func:`RPC.cancel`
:raises RuntimeError: internal error
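        Example (an illustrative sketch; assumes an initialized RPC
        instance with a registered ``multiply`` method)::
            result = await rpc.call("multiply", dict(x=2, y=3), expiration=10)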
"""
future, correlation_id = self.create_future()
message = await self.serialize_message(
payload=kwargs or {},
message_type=RPCMessageType.CALL,
correlation_id=correlation_id,
delivery_mode=delivery_mode,
reply_to=self.result_queue.name,
headers={"From": self.result_queue.name},
priority=priority,
)
if expiration is not None:
message.expiration = expiration
routing_key = self._format_routing_key(method_name)
log.debug("Publishing calls for %s(%r)", routing_key, kwargs)
exchange = self.rpc_exchange or self.channel.default_exchange
await exchange.publish(
message, routing_key=routing_key,
mandatory=True,
)
log.debug("Waiting RPC result for %s(%r)", routing_key, kwargs)
return await future
async def register(
self, method_name: str, func: CallbackType, **kwargs: Any,
) -> Any:
""" Method creates a queue with name which equal of
`method_name` argument. Then subscribes this queue.
:param method_name: Method name
:param func:
target function. Function **MUST** accept only keyword arguments.
:param kwargs: arguments which will be passed to `queue_declare`
:raises RuntimeError:
Function already registered in this :class:`RPC` instance
or method_name already used.
"""
arguments = kwargs.pop("arguments", {})
arguments.update({"x-dead-letter-exchange": self.DLX_NAME})
func = ensure_awaitable(func)
kwargs["arguments"] = arguments
routing_key = self._format_routing_key(method_name)
queue = await self.channel.declare_queue(routing_key, **kwargs)
if self.rpc_exchange:
await queue.bind(
self.rpc_exchange,
routing_key,
)
if func in self.consumer_tags:
raise RuntimeError("Function already registered")
if routing_key in self.routes:
raise RuntimeError(
"Method name already used for %r" % self.routes[routing_key],
)
self.consumer_tags[func] = await queue.consume(
partial(self.on_call_message, method_name),
)
self.routes[routing_key] = func
self.queues[func] = queue
async def unregister(self, func: CallbackType) -> None:
""" Cancels subscription to the method-queue.
:param func: Function
"""
if func not in self.consumer_tags:
return
consumer_tag = self.consumer_tags.pop(func)
queue = self.queues.pop(func)
await queue.cancel(consumer_tag)
self.routes.pop(queue.name)
class JsonRPCError(RuntimeError):
pass
class JsonRPC(RPC):
SERIALIZER = json
CONTENT_TYPE = "application/json"
def serialize(self, data: Any) -> bytes:
return self.SERIALIZER.dumps(
data, ensure_ascii=False, default=repr,
).encode()
def serialize_exception(self, exception: Exception) -> Any:
return {
"error": {
"type": exception.__class__.__name__,
"message": repr(exception),
"args": exception.args,
},
}
async def deserialize_message(
self, message: AbstractIncomingMessage,
) -> Any:
payload = await super().deserialize_message(message)
if message.type == RPCMessageType.ERROR:
payload = JsonRPCError("RPC exception", payload)
return payload
__all__ = (
"JsonRPC",
"RPC",
"RPCException",
"RPCMessageType",
)
python-aio-pika-9.5.5/aio_pika/pool.py 0000664 0000000 0000000 00000010121 14761646711 0017652 0 ustar 00root root 0000000 0000000 import abc
import asyncio
from types import TracebackType
from typing import (
Any, AsyncContextManager, Awaitable, Callable, Generic, Optional, Set,
Tuple, Type, TypeVar,
)
from aio_pika.log import get_logger
from aio_pika.tools import create_task
log = get_logger(__name__)
class PoolInstance(abc.ABC):
@abc.abstractmethod
def close(self) -> Awaitable[None]:
raise NotImplementedError
T = TypeVar("T")
ConstructorType = Callable[
...,
Awaitable[PoolInstance],
]
class PoolInvalidStateError(RuntimeError):
pass
class Pool(Generic[T]):
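    """ Asynchronous pool of reusable instances.
    Items are created lazily by ``constructor`` (up to ``max_size``) and
    returned to the pool when the acquiring context exits.
    Usage sketch (``get_connection`` is a hypothetical constructor
    coroutine)::
        pool = Pool(get_connection, max_size=2)
        async with pool.acquire() as connection:
            ...  # the connection is put back into the pool on exit
    """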
__slots__ = (
"loop",
"__max_size",
"__items",
"__constructor",
"__created",
"__lock",
"__constructor_args",
"__item_set",
"__closed",
)
def __init__(
self,
constructor: ConstructorType,
*args: Any,
max_size: Optional[int] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
):
self.loop = loop or asyncio.get_event_loop()
self.__closed = False
self.__constructor: Callable[..., Awaitable[Any]] = constructor
self.__constructor_args: Tuple[Any, ...] = args or ()
self.__created: int = 0
self.__item_set: Set[PoolInstance] = set()
self.__items: asyncio.Queue = asyncio.Queue()
self.__lock: asyncio.Lock = asyncio.Lock()
self.__max_size: Optional[int] = max_size
@property
def is_closed(self) -> bool:
return self.__closed
def acquire(self) -> "PoolItemContextManager[T]":
if self.__closed:
raise PoolInvalidStateError("acquire operation on closed pool")
return PoolItemContextManager[T](self)
@property
def _has_released(self) -> bool:
return self.__items.qsize() > 0
@property
def _is_overflow(self) -> bool:
if self.__max_size:
return self.__created >= self.__max_size or self._has_released
return self._has_released
async def _create_item(self) -> T:
if self.__closed:
raise PoolInvalidStateError("create item operation on closed pool")
async with self.__lock:
if self._is_overflow:
return await self.__items.get()
log.debug("Creating a new instance of %r", self.__constructor)
item = await self.__constructor(*self.__constructor_args)
self.__created += 1
self.__item_set.add(item)
return item
async def _get(self) -> T:
if self.__closed:
raise PoolInvalidStateError("get operation on closed pool")
if self._is_overflow:
return await self.__items.get()
return await self._create_item()
def put(self, item: T) -> None:
if self.__closed:
raise PoolInvalidStateError("put operation on closed pool")
self.__items.put_nowait(item)
async def close(self) -> None:
async with self.__lock:
self.__closed = True
tasks = []
for item in self.__item_set:
tasks.append(create_task(item.close))
if tasks:
await asyncio.gather(*tasks, return_exceptions=True)
async def __aenter__(self) -> "Pool":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if self.__closed:
return
await asyncio.ensure_future(self.close())
class PoolItemContextManager(Generic[T], AsyncContextManager):
__slots__ = "pool", "item"
def __init__(self, pool: Pool):
self.pool = pool
self.item: T
async def __aenter__(self) -> T:
# noinspection PyProtectedMember
self.item = await self.pool._get()
return self.item
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if self.item is not None:
self.pool.put(self.item)
python-aio-pika-9.5.5/aio_pika/py.typed 0000664 0000000 0000000 00000000001 14761646711 0020022 0 ustar 00root root 0000000 0000000
python-aio-pika-9.5.5/aio_pika/queue.py 0000664 0000000 0000000 00000046124 14761646711 0020041 0 ustar 00root root 0000000 0000000 import asyncio
from asyncio import Future
from functools import partial
from types import TracebackType
from typing import (
Any, Awaitable, Callable, Literal, Optional, Type, cast, overload,
)
import aiormq
from aiormq.abc import DeliveredMessage
from exceptiongroup import ExceptionGroup
from pamqp.common import Arguments
from .abc import (
AbstractChannel, AbstractIncomingMessage, AbstractQueue,
AbstractQueueIterator, ConsumerTag, TimeoutType, get_exchange_name,
)
from .exceptions import QueueEmpty
from .exchange import ExchangeParamType
from .log import get_logger
from .message import IncomingMessage
from .tools import CallbackCollection, create_task, ensure_awaitable
log = get_logger(__name__)
async def consumer(
callback: Callable[[AbstractIncomingMessage], Any],
msg: DeliveredMessage, *,
no_ack: bool,
) -> Any:
message = IncomingMessage(msg, no_ack=no_ack)
return await create_task(callback, message)
class Queue(AbstractQueue):
""" AMQP queue abstraction """
__slots__ = (
"__weakref__",
"__get_lock",
"close_callbacks",
"channel",
"name",
"durable",
"exclusive",
"auto_delete",
"arguments",
"passive",
"declaration_result",
)
def __init__(
self,
channel: AbstractChannel,
name: Optional[str],
durable: bool,
exclusive: bool,
auto_delete: bool,
arguments: Arguments,
passive: bool = False,
):
self.__get_lock = asyncio.Lock()
self.close_callbacks = CallbackCollection(self)
self.channel = channel
self.name = name or ""
self.durable = durable
self.exclusive = exclusive
self.auto_delete = auto_delete
self.arguments = arguments
self.passive = passive
def __str__(self) -> str:
return f"{self.name}"
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__}({self}): "
f"auto_delete={self.auto_delete}, "
f"durable={self.durable}, "
f"exclusive={self.exclusive}, "
f"arguments={self.arguments!r}"
)
async def declare(
self, timeout: TimeoutType = None,
) -> aiormq.spec.Queue.DeclareOk:
""" Declare queue.
:param timeout: execution timeout
:return: :class:`None`
"""
log.debug("Declaring queue: %r", self)
channel = await self.channel.get_underlay_channel()
self.declaration_result = await channel.queue_declare(
queue=self.name,
durable=self.durable,
exclusive=self.exclusive,
auto_delete=self.auto_delete,
arguments=self.arguments,
passive=self.passive,
timeout=timeout,
)
if self.declaration_result.queue is not None:
self.name = self.declaration_result.queue
else:
self.name = ""
return self.declaration_result
async def bind(
self,
exchange: ExchangeParamType,
routing_key: Optional[str] = None,
*,
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> aiormq.spec.Queue.BindOk:
""" A binding is a relationship between an exchange and a queue.
This can be simply read as: the queue is interested in messages
from this exchange.
Bindings can take an extra routing_key parameter. To avoid
the confusion with a basic_publish parameter we're going to
call it a binding key.
:param exchange: :class:`aio_pika.exchange.Exchange` instance
:param routing_key: routing key
:param arguments: additional arguments
:param timeout: execution timeout
:raises asyncio.TimeoutError:
when the binding timeout period has elapsed.
:return: :class:`None`
"""
if routing_key is None:
routing_key = self.name
log.debug(
"Binding queue %r: exchange=%r, routing_key=%r, arguments=%r",
self,
exchange,
routing_key,
arguments,
)
channel = await self.channel.get_underlay_channel()
return await channel.queue_bind(
self.name,
exchange=get_exchange_name(exchange),
routing_key=routing_key,
arguments=arguments,
timeout=timeout,
)
async def unbind(
self,
exchange: ExchangeParamType,
routing_key: Optional[str] = None,
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> aiormq.spec.Queue.UnbindOk:
""" Remove binding from exchange for this :class:`Queue` instance
:param exchange: :class:`aio_pika.exchange.Exchange` instance
:param routing_key: routing key
:param arguments: additional arguments
:param timeout: execution timeout
:raises asyncio.TimeoutError:
when the unbinding timeout period has elapsed.
:return: :class:`None`
"""
if routing_key is None:
routing_key = self.name
log.debug(
"Unbinding queue %r: exchange=%r, routing_key=%r, arguments=%r",
self,
exchange,
routing_key,
arguments,
)
channel = await self.channel.get_underlay_channel()
return await channel.queue_unbind(
queue=self.name,
exchange=get_exchange_name(exchange),
routing_key=routing_key,
arguments=arguments,
timeout=timeout,
)
async def consume(
self,
callback: Callable[[AbstractIncomingMessage], Awaitable[Any]],
no_ack: bool = False,
exclusive: bool = False,
arguments: Arguments = None,
consumer_tag: Optional[ConsumerTag] = None,
timeout: TimeoutType = None,
) -> ConsumerTag:
""" Start to consuming the :class:`Queue`.
        :param timeout: :class:`asyncio.TimeoutError` will be raised when the
            Future is not finished after this time.
:param callback: Consuming callback. Should be a coroutine function.
:param no_ack:
if :class:`True` you don't need to call
:func:`aio_pika.message.IncomingMessage.ack`
:param exclusive:
Makes this queue exclusive. Exclusive queues may only
be accessed by the current connection, and are deleted
when that connection closes. Passive declaration of an
            exclusive queue by other connections is not allowed.
:param arguments: additional arguments
:param consumer_tag: optional consumer tag
:raises asyncio.TimeoutError:
when the consuming timeout period has elapsed.
        :return: consumer tag :class:`str`
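        Example (an illustrative sketch; assumes a declared ``queue``)::
            async def on_message(message: AbstractIncomingMessage) -> None:
                async with message.process():
                    print(message.body)
            consumer_tag = await queue.consume(on_message)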
"""
log.debug("Start to consuming queue: %r", self)
callback = ensure_awaitable(callback)
channel = await self.channel.get_underlay_channel()
consume_result = await channel.basic_consume(
queue=self.name,
consumer_callback=partial(
consumer,
callback,
no_ack=no_ack,
),
exclusive=exclusive,
no_ack=no_ack,
arguments=arguments,
consumer_tag=consumer_tag,
timeout=timeout,
)
        # The consumer_tag property is Optional[str]; in practice this check
        # should never trigger, but it protects against the case
        # where `None` comes from pamqp.
if consume_result.consumer_tag is None:
raise RuntimeError("Consumer tag is None")
return consume_result.consumer_tag
async def cancel(
self, consumer_tag: ConsumerTag,
timeout: TimeoutType = None,
nowait: bool = False,
) -> aiormq.spec.Basic.CancelOk:
""" This method cancels a consumer. This does not affect already
delivered messages, but it does mean the server will not send any more
messages for that consumer. The client may receive an arbitrary number
of messages in between sending the cancel method and receiving the
cancel-ok reply. It may also be sent from the server to the client in
the event of the consumer being unexpectedly cancelled (i.e. cancelled
for any reason other than the server receiving the corresponding
basic.cancel from the client). This allows clients to be notified of
the loss of consumers due to events such as queue deletion.
:param consumer_tag:
consumer tag returned by :func:`~aio_pika.Queue.consume`
:param timeout: execution timeout
:param bool nowait: Do not expect a Basic.CancelOk response
:return: Basic.CancelOk when operation completed successfully
"""
channel = await self.channel.get_underlay_channel()
return await channel.basic_cancel(
consumer_tag=consumer_tag, nowait=nowait, timeout=timeout,
)
@overload
async def get(
self, *, no_ack: bool = False,
fail: Literal[True] = ..., timeout: TimeoutType = ...,
) -> IncomingMessage:
...
@overload
async def get(
self, *, no_ack: bool = False,
fail: Literal[False] = ..., timeout: TimeoutType = ...,
) -> Optional[IncomingMessage]:
...
async def get(
self, *, no_ack: bool = False,
fail: bool = True, timeout: TimeoutType = 5,
) -> Optional[IncomingMessage]:
""" Get message from the queue.
:param no_ack: if :class:`True` you don't need to call
:func:`aio_pika.message.IncomingMessage.ack`
:param timeout: execution timeout
        :param fail: When :class:`True` (the default), raise
            :class:`aio_pika.exceptions.QueueEmpty` if the queue is empty;
            when :class:`False`, return :class:`None` instead.
:return: :class:`aio_pika.message.IncomingMessage`
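        Example (an illustrative sketch; assumes a declared ``queue``
        holding at least one message)::
            message = await queue.get(timeout=5)
            await message.ack()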
"""
channel = await self.channel.get_underlay_channel()
msg: DeliveredMessage = await channel.basic_get(
self.name, no_ack=no_ack, timeout=timeout,
)
if isinstance(msg.delivery, aiormq.spec.Basic.GetEmpty):
if fail:
raise QueueEmpty
return None
return IncomingMessage(msg, no_ack=no_ack)
async def purge(
self, no_wait: bool = False, timeout: TimeoutType = None,
) -> aiormq.spec.Queue.PurgeOk:
""" Purge all messages from the queue.
:param no_wait: no wait response
:param timeout: execution timeout
:return: :class:`None`
"""
log.info("Purging queue: %r", self)
channel = await self.channel.get_underlay_channel()
return await channel.queue_purge(
self.name, nowait=no_wait, timeout=timeout,
)
async def delete(
self, *, if_unused: bool = True,
if_empty: bool = True, timeout: TimeoutType = None,
) -> aiormq.spec.Queue.DeleteOk:
""" Delete the queue.
:param if_unused: Perform delete only when unused
:param if_empty: Perform delete only when empty
:param timeout: execution timeout
:return: :class:`None`
"""
log.info("Deleting %r", self)
channel = await self.channel.get_underlay_channel()
return await channel.queue_delete(
self.name,
if_unused=if_unused,
if_empty=if_empty,
timeout=timeout,
)
def __aiter__(self) -> "AbstractQueueIterator":
return self.iterator()
def iterator(self, **kwargs: Any) -> "AbstractQueueIterator":
""" Returns an iterator for async for expression.
Full example:
.. code-block:: python
import aio_pika
async def main():
connection = await aio_pika.connect()
async with connection:
channel = await connection.channel()
queue = await channel.declare_queue('test')
async with queue.iterator() as q:
async for message in q:
print(message.body)
            When your program runs with run_forever the iterator will be
            closed in the background. In this case the context manager for
            the iterator might be skipped and the queue might be used in
            the "async for" expression directly.
.. code-block:: python
import aio_pika
async def main():
connection = await aio_pika.connect()
async with connection:
channel = await connection.channel()
queue = await channel.declare_queue('test')
async for message in queue:
print(message.body)
:return: QueueIterator
"""
return QueueIterator(self, **kwargs)
class QueueIterator(AbstractQueueIterator):
DEFAULT_CLOSE_TIMEOUT = 5
@property
def consumer_tag(self) -> Optional[ConsumerTag]:
return getattr(self, "_consumer_tag", None)
async def close(self) -> None:
await self._on_close(self._amqp_queue, None)
if not self._closed.done():
self._closed.set_result(True)
async def _set_closed(
self,
_channel: Optional[AbstractQueue],
exc: Optional[BaseException]
) -> None:
if not self._closed.done():
self._closed.set_result(True)
async def _on_close(
self,
_channel: Optional[AbstractQueue],
_exc: Optional[BaseException]
) -> None:
log.debug("Cancelling queue iterator %r", self)
if not hasattr(self, "_consumer_tag"):
log.debug("Queue iterator %r already cancelled", self)
return
if self._amqp_queue.channel.is_closed:
log.debug("Queue iterator %r channel closed", self)
return
log.debug("Basic.cancel for %r", self.consumer_tag)
consumer_tag = self._consumer_tag
del self._consumer_tag
self._amqp_queue.close_callbacks.discard(self._on_close)
await self._amqp_queue.cancel(consumer_tag)
log.debug("Queue iterator %r closed", self)
if self._queue.empty():
return
exceptions = []
        # Requeue (nack) all messages that have been received and are
        # still in the buffer/cache.
while not self._queue.empty():
msg = self._queue.get_nowait()
if self._amqp_queue.channel.is_closed:
log.warning(
"Message %r lost when queue iterator %r channel closed",
msg,
self,
)
elif self._consume_kwargs.get("no_ack", False):
log.warning(
"Message %r lost for consumer with no_ack %r",
msg,
self,
)
else:
try:
await msg.nack(requeue=True, multiple=False)
except Exception as e:
log.warning(
"Failed to nack message %r",
msg,
exc_info=e,
)
exceptions.append(e)
if exceptions:
raise ExceptionGroup(
"Unable to nack all messages",
exceptions,
)
def __str__(self) -> str:
return f"queue[{self._amqp_queue}](...)"
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__}: "
f"queue={self._amqp_queue.name!r} "
f"ctag={self.consumer_tag!r}>"
)
def __init__(self, queue: Queue, **kwargs: Any):
self._consumer_tag: ConsumerTag
self._amqp_queue: Queue = queue
self._queue = asyncio.Queue()
self._closed = asyncio.get_running_loop().create_future()
self._message_or_closed = asyncio.Event()
self._timeout_event = asyncio.Event()
self._consume_kwargs = kwargs
cast(
asyncio.Future, self._amqp_queue.channel.closed()
).add_done_callback(self._propagate_closed)
self._closed.add_done_callback(self._propagate_closed)
self._amqp_queue.close_callbacks.add(self._on_close, weak=True)
self._amqp_queue.close_callbacks.add(
self._set_closed,
weak=True
)
def _propagate_closed(self, _: Future) -> None:
self._message_or_closed.set()
async def on_message(self, message: AbstractIncomingMessage) -> None:
await self._queue.put(message)
self._message_or_closed.set()
async def consume(self) -> None:
self._consumer_tag = await self._amqp_queue.consume(
self.on_message, **self._consume_kwargs,
)
def __aiter__(self) -> "AbstractQueueIterator":
return self
async def __aenter__(self) -> "AbstractQueueIterator":
if not hasattr(self, "_consumer_tag"):
await self.consume()
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if hasattr(self, "__closing"):
try:
await self.__closing
finally:
del self.__closing
else:
await self.close()
async def __anext__(self) -> AbstractIncomingMessage:
if self._closed.done():
raise StopAsyncIteration
if not hasattr(self, "_consumer_tag"):
await self.consume()
timeout: Optional[float] = self._consume_kwargs.get("timeout")
if not self._message_or_closed.is_set():
coroutine: Awaitable[Any] = self._message_or_closed.wait()
if timeout is not None and timeout > 0:
coroutine = asyncio.wait_for(coroutine, timeout=timeout)
try:
await coroutine
except (asyncio.TimeoutError, asyncio.CancelledError):
if timeout is not None:
timeout = (
timeout
if timeout > 0
else self.DEFAULT_CLOSE_TIMEOUT
)
log.info(
"%r closing with timeout %d seconds",
self, timeout,
)
task = asyncio.create_task(self.close())
coroutine = task
if timeout is not None:
coroutine = asyncio.wait_for(
asyncio.shield(coroutine),
timeout=timeout,
)
try:
await coroutine
except asyncio.TimeoutError:
self.__closing = task
raise
if self._queue.empty():
raise StopAsyncIteration
msg = self._queue.get_nowait()
if (
self._queue.empty()
and not self._amqp_queue.channel.is_closed
and not self._closed.done()
):
self._message_or_closed.clear()
return msg
__all__ = ("Queue", "QueueIterator", "ConsumerTag")
python-aio-pika-9.5.5/aio_pika/robust_channel.py 0000664 0000000 0000000 00000021156 14761646711 0021721 0 ustar 00root root 0000000 0000000 import asyncio
import warnings
from collections import defaultdict
from itertools import chain
from typing import Any, DefaultDict, Dict, Optional, Set, Type, Union
from warnings import warn
import aiormq
from .abc import (
AbstractConnection, AbstractRobustChannel, AbstractRobustExchange,
AbstractRobustQueue, TimeoutType,
)
from .channel import Channel
from .exchange import Exchange, ExchangeType
from .log import get_logger
from .queue import Queue
from .robust_exchange import RobustExchange
from .robust_queue import RobustQueue
from .tools import CallbackCollection
log = get_logger(__name__)
class RobustChannel(Channel, AbstractRobustChannel): # type: ignore
""" Channel abstraction """
QUEUE_CLASS: Type[Queue] = RobustQueue
EXCHANGE_CLASS: Type[Exchange] = RobustExchange
RESTORE_RETRY_DELAY: int = 2
_exchanges: DefaultDict[str, Set[AbstractRobustExchange]]
_queues: DefaultDict[str, Set[RobustQueue]]
default_exchange: RobustExchange
def __init__(
self,
connection: AbstractConnection,
channel_number: Optional[int] = None,
publisher_confirms: bool = True,
on_return_raises: bool = False,
):
"""
:param connection: :class:`aio_pika.adapter.AsyncioConnection` instance
:param loop:
Event loop (:func:`asyncio.get_event_loop()` when :class:`None`)
:param future_store: :class:`aio_pika.common.FutureStore` instance
:param publisher_confirms:
False if you don't need delivery confirmations
(in pursuit of performance)
"""
super().__init__(
connection=connection,
channel_number=channel_number,
publisher_confirms=publisher_confirms,
on_return_raises=on_return_raises,
)
self._exchanges = defaultdict(set)
self._queues = defaultdict(set)
self._prefetch_count: int = 0
self._prefetch_size: int = 0
self._global_qos: bool = False
self.reopen_callbacks = CallbackCollection(self)
self.__restore_lock = asyncio.Lock()
self.__restored = asyncio.Event()
self.close_callbacks.remove(self._set_closed_callback)
async def ready(self) -> None:
await self._connection.ready()
await self.__restored.wait()
async def get_underlay_channel(self) -> aiormq.abc.AbstractChannel:
await self._connection.ready()
return await super().get_underlay_channel()
async def restore(self, channel: Any = None) -> None:
if channel is not None:
warnings.warn(
"Channel argument will be ignored because you "
"don't need to pass this anymore.",
DeprecationWarning,
)
async with self.__restore_lock:
if self.__restored.is_set():
return
await self.reopen()
self.__restored.set()
async def _on_close(
self,
closing: asyncio.Future
) -> Optional[BaseException]:
exc = await super()._on_close(closing)
if isinstance(exc, asyncio.CancelledError):
# This happens only if the channel is forced to close from the
# outside, for example, if the connection is closed.
# Of course, here you need to exit from this function
# as soon as possible and to avoid a recovery attempt.
self.__restored.clear()
if not self._closed.done():
self._closed.set_result(True)
return exc
in_restore_state = not self.__restored.is_set()
self.__restored.clear()
if self._closed.done() or in_restore_state:
return exc
await self.restore()
return exc
async def close(
self,
exc: Optional[aiormq.abc.ExceptionType] = None,
) -> None:
        # Avoid recovery when the channel is explicitly closed using this method
self.__restored.clear()
await super().close(exc)
async def reopen(self) -> None:
await super().reopen()
await self.reopen_callbacks()
async def _on_open(self) -> None:
if not hasattr(self, "default_exchange"):
await super()._on_open()
exchanges = tuple(chain(*self._exchanges.values()))
queues = tuple(chain(*self._queues.values()))
channel = await self.get_underlay_channel()
await channel.basic_qos(
prefetch_count=self._prefetch_count,
prefetch_size=self._prefetch_size,
)
for exchange in exchanges:
await exchange.restore()
for queue in queues:
await queue.restore()
if hasattr(self, "default_exchange"):
self.default_exchange.channel = self
self.__restored.set()
async def set_qos(
self,
prefetch_count: int = 0,
prefetch_size: int = 0,
global_: bool = False,
timeout: TimeoutType = None,
all_channels: Optional[bool] = None,
) -> aiormq.spec.Basic.QosOk:
if all_channels is not None:
warn('Use "global_" instead of "all_channels"', DeprecationWarning)
global_ = all_channels
await self.ready()
self._prefetch_count = prefetch_count
self._prefetch_size = prefetch_size
self._global_qos = global_
return await super().set_qos(
prefetch_count=prefetch_count,
prefetch_size=prefetch_size,
global_=global_,
timeout=timeout,
)
async def declare_exchange(
self,
name: str,
type: Union[ExchangeType, str] = ExchangeType.DIRECT,
durable: bool = False,
auto_delete: bool = False,
internal: bool = False,
passive: bool = False,
arguments: Optional[Dict[str, Any]] = None,
timeout: TimeoutType = None,
robust: bool = True,
) -> AbstractRobustExchange:
"""
:param robust: If True, the exchange will be re-declared during
reconnection.
Set to False for temporary exchanges that should not be restored.
"""
await self.ready()
exchange = (
await super().declare_exchange(
name=name,
type=type,
durable=durable,
auto_delete=auto_delete,
internal=internal,
passive=passive,
arguments=arguments,
timeout=timeout,
)
)
if not internal and robust:
# noinspection PyTypeChecker
self._exchanges[name].add(exchange) # type: ignore
return exchange # type: ignore
async def exchange_delete(
self,
exchange_name: str,
timeout: TimeoutType = None,
if_unused: bool = False,
nowait: bool = False,
) -> aiormq.spec.Exchange.DeleteOk:
await self.ready()
result = await super().exchange_delete(
exchange_name=exchange_name,
timeout=timeout,
if_unused=if_unused,
nowait=nowait,
)
self._exchanges.pop(exchange_name, None)
return result
async def declare_queue(
self,
name: Optional[str] = None,
*,
durable: bool = False,
exclusive: bool = False,
passive: bool = False,
auto_delete: bool = False,
arguments: Optional[Dict[str, Any]] = None,
timeout: TimeoutType = None,
robust: bool = True,
) -> AbstractRobustQueue:
"""
:param robust: If True, the queue will be re-declared during
reconnection.
Set to False for temporary queues that should not be restored.
"""
await self.ready()
queue: RobustQueue = await super().declare_queue( # type: ignore
name=name,
durable=durable,
exclusive=exclusive,
passive=passive,
auto_delete=auto_delete,
arguments=arguments,
timeout=timeout,
)
if robust:
self._queues[queue.name].add(queue)
return queue
async def queue_delete(
self,
queue_name: str,
timeout: TimeoutType = None,
if_unused: bool = False,
if_empty: bool = False,
nowait: bool = False,
) -> aiormq.spec.Queue.DeleteOk:
await self.ready()
result = await super().queue_delete(
queue_name=queue_name,
timeout=timeout,
if_unused=if_unused,
if_empty=if_empty,
nowait=nowait,
)
self._queues.pop(queue_name, None)
return result
__all__ = ("RobustChannel",)
python-aio-pika-9.5.5/aio_pika/robust_connection.py 0000664 0000000 0000000 00000024413 14761646711 0022447 0 ustar 00root root 0000000 0000000 import asyncio
from ssl import SSLContext
from typing import Any, Optional, Tuple, Type, Union
from weakref import WeakSet
import aiormq.abc
from aiormq.connection import parse_bool, parse_timeout
from pamqp.common import FieldTable
from yarl import URL
from .abc import (
AbstractRobustChannel, AbstractRobustConnection, ConnectionParameter,
SSLOptions, TimeoutType,
)
from .connection import Connection, make_url
from .exceptions import CONNECTION_EXCEPTIONS
from .log import get_logger
from .robust_channel import RobustChannel
from .tools import CallbackCollection
log = get_logger(__name__)
class RobustConnection(Connection, AbstractRobustConnection):
"""Robust connection"""
CHANNEL_REOPEN_PAUSE = 1
CHANNEL_CLASS: Type[RobustChannel] = RobustChannel
PARAMETERS: Tuple[ConnectionParameter, ...] = Connection.PARAMETERS + (
ConnectionParameter(
name="reconnect_interval",
parser=parse_timeout, default="5",
),
ConnectionParameter(
name="fail_fast",
parser=parse_bool, default="1",
),
)
def __init__(
self,
url: URL,
loop: Optional[asyncio.AbstractEventLoop] = None,
**kwargs: Any,
):
super().__init__(url=url, loop=loop, **kwargs)
self.reconnect_interval = self.kwargs.pop("reconnect_interval")
self.connection_attempt: int = 0
self.__fail_fast_future = self.loop.create_future()
self.fail_fast = self.kwargs.pop("fail_fast", True)
if not self.fail_fast:
self.__fail_fast_future.set_result(None)
self.__channels: WeakSet[AbstractRobustChannel] = WeakSet()
self.__connection_close_event = asyncio.Event()
self.__connect_timeout: Optional[TimeoutType] = None
self.__reconnection_task: Optional[asyncio.Task] = None
self._reconnect_lock = asyncio.Lock()
self.reconnect_callbacks = CallbackCollection(self)
self.__connection_close_event.set()
@property
def reconnecting(self) -> bool:
return self._reconnect_lock.locked()
def __repr__(self) -> str:
return (
f'<{self.__class__.__name__}: "{self}" '
f"{len(self.__channels)} channels>"
)
async def _on_connection_close(self, closing: asyncio.Future) -> None:
await super()._on_connection_close(closing)
if self._close_called or self.is_closed:
return
log.info(
"Connection to %s closed. Reconnecting after %r seconds.",
self, self.reconnect_interval,
)
self.__connection_close_event.set()
async def _on_connected(self) -> None:
await super()._on_connected()
transport = self.transport
if transport is None:
raise RuntimeError("No active transport for connection %r", self)
try:
# Make a copy of the channels to iterate on, to guard from
# concurrent updates to the set.
for channel in tuple(self.__channels):
try:
await channel.restore()
except Exception:
log.exception("Failed to reopen channel")
raise
except Exception as e:
await self.close_callbacks(e)
await asyncio.gather(
transport.connection.close(e),
return_exceptions=True,
)
raise
if self.connection_attempt:
await self.reconnect_callbacks()
self.connection_attempt += 1
self.__connection_close_event.clear()
async def __connection_factory(self) -> None:
log.debug("Starting connection factory for %r", self)
while not self.is_closed and not self._close_called:
log.debug("Waiting for connection close event for %r", self)
await self.__connection_close_event.wait()
if self.is_closed or self._close_called:
return
# noinspection PyBroadException
try:
self.transport = None
self.connected.clear()
log.debug("Connection attempt for %r", self)
await Connection.connect(self, self.__connect_timeout)
if not self.__fail_fast_future.done():
self.__fail_fast_future.set_result(None)
log.debug("Connection made on %r", self)
except CONNECTION_EXCEPTIONS as e:
if not self.__fail_fast_future.done():
self.__fail_fast_future.set_exception(e)
return
log.warning(
'Connection attempt to "%s" failed: %s. '
"Reconnecting after %r seconds.",
self, e, self.reconnect_interval,
)
except Exception:
log.exception(
"Reconnect attempt failed %s. "
"Retrying after %r seconds.",
self, self.reconnect_interval,
)
await asyncio.sleep(self.reconnect_interval)
async def connect(self, timeout: TimeoutType = None) -> None:
self.__connect_timeout = timeout
if self.is_closed:
raise RuntimeError(f"{self!r} connection closed")
if self.reconnecting:
raise RuntimeError(
(
"Connect method called but connection "
f"{self!r} is reconnecting right now."
),
self,
)
if not self.__reconnection_task:
self.__reconnection_task = self.loop.create_task(
self.__connection_factory(),
)
await self.__fail_fast_future
await self.connected.wait()
async def reconnect(self) -> None:
if self.transport:
await self.transport.connection.close()
await self.connect()
await self.reconnect_callbacks()
def channel(
self,
channel_number: Optional[int] = None,
publisher_confirms: bool = True,
on_return_raises: bool = False,
) -> AbstractRobustChannel:
channel: AbstractRobustChannel = super().channel(
channel_number=channel_number,
publisher_confirms=publisher_confirms,
on_return_raises=on_return_raises,
) # type: ignore
self.__channels.add(channel)
return channel
async def close(
self, exc: Optional[aiormq.abc.ExceptionType] = asyncio.CancelledError,
) -> None:
if self.__reconnection_task is not None:
self.__reconnection_task.cancel()
await asyncio.gather(
self.__reconnection_task, return_exceptions=True,
)
self.__reconnection_task = None
return await super().close(exc)
async def connect_robust(
url: Union[str, URL, None] = None,
*,
host: str = "localhost",
port: int = 5672,
login: str = "guest",
password: str = "guest",
virtualhost: str = "/",
ssl: bool = False,
loop: Optional[asyncio.AbstractEventLoop] = None,
ssl_options: Optional[SSLOptions] = None,
ssl_context: Optional[SSLContext] = None,
timeout: TimeoutType = None,
client_properties: Optional[FieldTable] = None,
connection_class: Type[AbstractRobustConnection] = RobustConnection,
**kwargs: Any,
) -> AbstractRobustConnection:
"""Make connection to the broker.
Example:
.. code-block:: python
import aio_pika
async def main():
            connection = await aio_pika.connect_robust(
"amqp://guest:guest@127.0.0.1/"
)
Connect to localhost with default credentials:
.. code-block:: python
import aio_pika
async def main():
            connection = await aio_pika.connect_robust()
.. note::
The available keys for ssl_options parameter are:
* cert_reqs
* certfile
* keyfile
* ssl_version
    For information on what the ssl_options can be set to, see the
    `official Python documentation`_.
Set connection name for RabbitMQ admin panel:
.. code-block:: python
# As URL parameter method
        read_connection = await connect_robust(
"amqp://guest:guest@localhost/?name=Read%20connection"
)
# keyword method
        write_connection = await connect_robust(
client_properties={
'connection_name': 'Write connection'
}
)
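
    Robust connections also re-establish channels, exchanges, queues,
    bindings and consumers after the link to the broker is restored.
    A minimal sketch of subscribing to reconnect events (assumes a local
    broker with default credentials; the callback name is illustrative):

    .. code-block:: python

        import aio_pika

        async def main():
            connection = await aio_pika.connect_robust()

            def on_reconnect(sender):
                print("connection restored:", sender)

            connection.reconnect_callbacks.add(on_reconnect)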
    .. note::
        ``client_properties`` argument requires ``aiormq>=2.9``

    The URL string may contain ssl parameters, e.g.
`amqps://user:pass@host//?ca_certs=ca.pem&certfile=crt.pem&keyfile=key.pem`
:param client_properties: add custom client capability.
    :param url:
        RFC3986_ formatted broker address. When :class:`None`,
        keyword arguments will be used instead.
:param host: hostname of the broker
:param port: broker port 5672 by default
:param login: username string. `'guest'` by default.
:param password: password string. `'guest'` by default.
:param virtualhost: virtualhost parameter. `'/'` by default
    :param ssl: use SSL for connection. Should be used with additional kwargs.
:param ssl_options: A dict of values for the SSL connection.
:param timeout: connection timeout in seconds
:param loop:
Event loop (:func:`asyncio.get_event_loop()` when :class:`None`)
:param ssl_context: ssl.SSLContext instance
:param connection_class: Factory of a new connection
    :param kwargs: additional parameters which will be passed to the connection.
    :return: :class:`aio_pika.robust_connection.RobustConnection`
.. _RFC3986: https://goo.gl/MzgYAs
.. _official Python documentation: https://goo.gl/pty9xA
"""
connection: AbstractRobustConnection = connection_class(
make_url(
url,
host=host,
port=port,
login=login,
password=password,
virtualhost=virtualhost,
ssl=ssl,
ssl_options=ssl_options,
client_properties=client_properties,
**kwargs,
),
loop=loop, ssl_context=ssl_context, **kwargs,
)
await connection.connect(timeout=timeout)
return connection
__all__ = (
"RobustConnection",
"connect_robust",
)
python-aio-pika-9.5.5/aio_pika/robust_exchange.py 0000664 0000000 0000000 00000005511 14761646711 0022070 0 ustar 00root root 0000000 0000000 import asyncio
import warnings
from typing import Any, Dict, Union
import aiormq
from pamqp.common import Arguments
from .abc import (
AbstractChannel, AbstractExchange, AbstractRobustExchange,
ExchangeParamType, TimeoutType,
)
from .exchange import Exchange, ExchangeType
from .log import get_logger
log = get_logger(__name__)
class RobustExchange(Exchange, AbstractRobustExchange):
""" Exchange abstraction """
_bindings: Dict[Union[AbstractExchange, str], Dict[str, Any]]
def __init__(
self,
channel: AbstractChannel,
name: str,
type: Union[ExchangeType, str] = ExchangeType.DIRECT,
*,
auto_delete: bool = False,
durable: bool = False,
internal: bool = False,
passive: bool = False,
arguments: Arguments = None,
):
super().__init__(
channel=channel,
name=name,
type=type,
auto_delete=auto_delete,
durable=durable,
internal=internal,
passive=passive,
arguments=arguments,
)
self._bindings = {}
self.__restore_lock = asyncio.Lock()
async def restore(self, channel: Any = None) -> None:
if channel is not None:
            warnings.warn(
                "The channel argument is ignored; "
                "you no longer need to pass it.",
                DeprecationWarning,
            )
        async with self.__restore_lock:
            # special case for the default exchange
            if self.name == "":
                return
            await self.declare()
            for exchange, kwargs in tuple(self._bindings.items()):
                await self.bind(exchange, **kwargs)
async def bind(
self,
exchange: ExchangeParamType,
routing_key: str = "",
*,
arguments: Arguments = None,
timeout: TimeoutType = None,
robust: bool = True,
) -> aiormq.spec.Exchange.BindOk:
result = await super().bind(
exchange,
routing_key=routing_key,
arguments=arguments,
timeout=timeout,
)
if robust:
self._bindings[exchange] = dict(
routing_key=routing_key,
arguments=arguments,
)
return result
async def unbind(
self,
exchange: ExchangeParamType,
routing_key: str = "",
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> aiormq.spec.Exchange.UnbindOk:
result = await super().unbind(
exchange, routing_key, arguments=arguments, timeout=timeout,
)
self._bindings.pop(exchange, None)
return result
__all__ = ("RobustExchange",)
python-aio-pika-9.5.5/aio_pika/robust_queue.py 0000664 0000000 0000000 00000011357 14761646711 0021437 0 ustar 00root root 0000000 0000000 import uuid
import warnings
from typing import Any, Awaitable, Callable, Dict, Optional, Tuple, Union
import aiormq
from aiormq import ChannelInvalidStateError
from pamqp.common import Arguments
from .abc import (
AbstractChannel, AbstractExchange, AbstractIncomingMessage,
AbstractQueueIterator, AbstractRobustQueue, ConsumerTag, TimeoutType,
)
from .exchange import ExchangeParamType
from .log import get_logger
from .queue import Queue, QueueIterator
log = get_logger(__name__)
class RobustQueue(Queue, AbstractRobustQueue):
__slots__ = ("_consumers", "_bindings")
_consumers: Dict[ConsumerTag, Dict[str, Any]]
_bindings: Dict[Tuple[Union[AbstractExchange, str], str], Dict[str, Any]]
def __init__(
self,
channel: AbstractChannel,
name: Optional[str],
durable: bool = False,
exclusive: bool = False,
auto_delete: bool = False,
arguments: Arguments = None,
passive: bool = False,
):
super().__init__(
channel=channel,
name=name or f"amq_{uuid.uuid4().hex}",
durable=durable,
exclusive=exclusive,
auto_delete=auto_delete,
arguments=arguments,
passive=passive,
)
self._consumers = {}
self._bindings = {}
async def restore(self, channel: Any = None) -> None:
if channel is not None:
            warnings.warn(
                "The channel argument is ignored; "
                "you no longer need to pass it.",
                DeprecationWarning,
            )
await self.declare()
bindings = tuple(self._bindings.items())
consumers = tuple(self._consumers.items())
for (exchange, routing_key), kwargs in bindings:
await self.bind(exchange, routing_key, **kwargs)
for consumer_tag, kwargs in consumers:
await self.consume(consumer_tag=consumer_tag, **kwargs)
async def bind(
self,
exchange: ExchangeParamType,
routing_key: Optional[str] = None,
*,
arguments: Arguments = None,
timeout: TimeoutType = None,
robust: bool = True,
) -> aiormq.spec.Queue.BindOk:
if routing_key is None:
routing_key = self.name
result = await super().bind(
exchange=exchange, routing_key=routing_key,
arguments=arguments, timeout=timeout,
)
if robust:
self._bindings[(exchange, routing_key)] = dict(
arguments=arguments,
)
return result
async def unbind(
self,
exchange: ExchangeParamType,
routing_key: Optional[str] = None,
arguments: Arguments = None,
timeout: TimeoutType = None,
) -> aiormq.spec.Queue.UnbindOk:
if routing_key is None:
routing_key = self.name
result = await super().unbind(
exchange, routing_key, arguments, timeout,
)
self._bindings.pop((exchange, routing_key), None)
return result
async def consume(
self,
callback: Callable[[AbstractIncomingMessage], Awaitable[Any]],
no_ack: bool = False,
exclusive: bool = False,
arguments: Arguments = None,
consumer_tag: Optional[ConsumerTag] = None,
timeout: TimeoutType = None,
robust: bool = True,
) -> ConsumerTag:
consumer_tag = await super().consume(
consumer_tag=consumer_tag,
timeout=timeout,
callback=callback,
no_ack=no_ack,
exclusive=exclusive,
arguments=arguments,
)
if robust:
self._consumers[consumer_tag] = dict(
callback=callback,
no_ack=no_ack,
exclusive=exclusive,
arguments=arguments,
)
return consumer_tag
async def cancel(
self,
consumer_tag: ConsumerTag,
timeout: TimeoutType = None,
nowait: bool = False,
) -> aiormq.spec.Basic.CancelOk:
result = await super().cancel(consumer_tag, timeout, nowait)
self._consumers.pop(consumer_tag, None)
return result
def iterator(self, **kwargs: Any) -> AbstractQueueIterator:
return RobustQueueIterator(self, **kwargs)
class RobustQueueIterator(QueueIterator):
def __init__(self, queue: Queue, **kwargs: Any):
super().__init__(queue, **kwargs)
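        # The robust queue will restore itself when the connection comes
        # back, so the iterator must not be marked closed permanently when
        # the underlying channel drops.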
self._amqp_queue.close_callbacks.discard(self._set_closed)
async def consume(self) -> None:
while True:
try:
return await super().consume()
except ChannelInvalidStateError:
await self._amqp_queue.channel.get_underlay_channel()
__all__ = ("RobustQueue",)
python-aio-pika-9.5.5/aio_pika/tools.py 0000664 0000000 0000000 00000024331 14761646711 0020051 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import asyncio
import inspect
import warnings
from functools import wraps
from itertools import chain
from threading import Lock
from typing import (
    AbstractSet, Any, Awaitable, Callable, Coroutine, Generator, Generic,
    Iterator, List, MutableSet, Optional, TypeVar, Union,
)
from weakref import ReferenceType, WeakSet, ref
from aio_pika.log import get_logger
try:
from typing import ParamSpec, Protocol
except ImportError:
from typing_extensions import ParamSpec, Protocol # type: ignore
log = get_logger(__name__)
T = TypeVar("T")
def iscoroutinepartial(fn: Callable[..., Any]) -> bool:
"""
Use Python 3.8's inspect.iscoroutinefunction() instead
"""
warnings.warn(
"Use inspect.iscoroutinefunction() instead.", DeprecationWarning
)
return asyncio.iscoroutinefunction(fn)
def _task_done(future: asyncio.Future) -> None:
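    # Re-raise unexpected task exceptions so the event loop's exception
    # handler reports them; cancellation is expected and silenced.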
try:
exc = future.exception()
if exc is not None:
raise exc
except asyncio.CancelledError:
pass
def create_task(
func: Callable[..., Union[Coroutine[Any, Any, T], Awaitable[T]]],
*args: Any,
loop: Optional[asyncio.AbstractEventLoop] = None,
**kwargs: Any,
) -> Awaitable[T]:
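    """
    Schedule ``func`` on the event loop and return an awaitable.

    Coroutine functions are wrapped in a real task; plain callables are
    run via ``loop.call_soon`` with the result delivered through a
    future, so both cases can be awaited uniformly.
    """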
loop = loop or asyncio.get_event_loop()
if inspect.iscoroutinefunction(func):
task = loop.create_task(func(*args, **kwargs))
task.add_done_callback(_task_done)
return task
def run(future: asyncio.Future) -> Optional[asyncio.Future]:
if future.done():
return None
try:
future.set_result(func(*args, **kwargs))
except Exception as e:
future.set_exception(e)
return future
future = loop.create_future()
future.add_done_callback(_task_done)
loop.call_soon(run, future)
return future
_Sender = TypeVar("_Sender", contravariant=True)
_Params = ParamSpec("_Params")
_Return = TypeVar("_Return", covariant=True)
class CallbackType(Protocol[_Sender, _Params, _Return]):
def __call__(
self,
__sender: Optional[_Sender],
/,
*args: _Params.args,
**kwargs: _Params.kwargs,
) -> Union[_Return, Awaitable[_Return]]:
...
class StubAwaitable:
__slots__ = ()
def __await__(self) -> Generator[Any, Any, None]:
yield
STUB_AWAITABLE = StubAwaitable()
class CallbackCollection(
MutableSet[
Union[
CallbackType[_Sender, _Params, Any],
"CallbackCollection[Any, _Params]",
],
],
Generic[_Sender, _Params],
):
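    """
    A mutable set of callbacks bound to a sender object.

    Calling the collection invokes every registered callback with the
    sender as the first argument; awaitable results are gathered into a
    single awaitable. A minimal sketch (names are illustrative):

    .. code-block:: python

        class Emitter:
            def __init__(self) -> None:
                self.callbacks: CallbackCollection = CallbackCollection(self)

        async def main() -> None:
            emitter = Emitter()
            emitter.callbacks.add(lambda sender, payload: print(payload))
            await emitter.callbacks("fired")
    """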
__slots__ = (
"__weakref__",
"__sender",
"__callbacks",
"__weak_callbacks",
"__lock",
)
def __init__(self, sender: Union[_Sender, ReferenceType[_Sender]]):
self.__sender: ReferenceType
if isinstance(sender, ReferenceType):
self.__sender = sender
else:
self.__sender = ref(sender)
self.__callbacks: CallbackSetType = set()
self.__weak_callbacks: MutableSet[
Union[
CallbackType[_Sender, _Params, Any],
CallbackCollection[Any, _Params],
],
] = WeakSet()
self.__lock: Lock = Lock()
def add(
self,
callback: Union[
CallbackType[_Sender, _Params, Any],
CallbackCollection[Any, _Params],
],
weak: bool = False
) -> None:
if self.is_frozen:
raise RuntimeError("Collection frozen")
if not callable(callback):
raise ValueError("Callback is not callable")
with self.__lock:
if weak or isinstance(callback, CallbackCollection):
self.__weak_callbacks.add(callback)
else:
self.__callbacks.add(callback) # type: ignore
def remove(
self,
callback: Union[
CallbackType[_Sender, _Params, Any],
CallbackCollection[Any, _Params],
],
) -> None:
if self.is_frozen:
raise RuntimeError("Collection frozen")
with self.__lock:
try:
self.__callbacks.remove(callback) # type: ignore
except KeyError:
self.__weak_callbacks.remove(callback)
def discard(
self,
callback: Union[
CallbackType[_Sender, _Params, Any],
CallbackCollection[Any, _Params],
],
) -> None:
if self.is_frozen:
raise RuntimeError("Collection frozen")
with self.__lock:
if callback in self.__callbacks:
self.__callbacks.remove(callback) # type: ignore
elif callback in self.__weak_callbacks:
self.__weak_callbacks.remove(callback)
def clear(self) -> None:
if self.is_frozen:
raise RuntimeError("Collection frozen")
with self.__lock:
self.__callbacks.clear() # type: ignore
self.__weak_callbacks.clear()
@property
def is_frozen(self) -> bool:
return isinstance(self.__callbacks, frozenset)
def freeze(self) -> None:
if self.is_frozen:
raise RuntimeError("Collection already frozen")
with self.__lock:
self.__callbacks = frozenset(self.__callbacks)
self.__weak_callbacks = WeakSet(self.__weak_callbacks)
def unfreeze(self) -> None:
if not self.is_frozen:
raise RuntimeError("Collection is not frozen")
with self.__lock:
self.__callbacks = set(self.__callbacks)
self.__weak_callbacks = WeakSet(self.__weak_callbacks)
def __contains__(self, x: object) -> bool:
return x in self.__callbacks or x in self.__weak_callbacks
def __len__(self) -> int:
return len(self.__callbacks) + len(self.__weak_callbacks)
def __iter__(self) -> Iterator[
Union[
CallbackType[_Sender, _Params, Any],
CallbackCollection[_Sender, _Params],
],
]:
return iter(chain(self.__callbacks, self.__weak_callbacks))
def __bool__(self) -> bool:
return bool(self.__callbacks) or bool(self.__weak_callbacks)
def __copy__(self) -> CallbackCollection[_Sender, _Params]:
instance = self.__class__(self.__sender)
with self.__lock:
for cb in self.__callbacks:
instance.add(cb, weak=False)
for cb in self.__weak_callbacks:
instance.add(cb, weak=True)
if self.is_frozen:
instance.freeze()
return instance
def __call__(
self,
*args: _Params.args,
**kwargs: _Params.kwargs,
) -> Awaitable[Any]:
futures: List[asyncio.Future] = []
with self.__lock:
sender = self.__sender()
for cb in self:
try:
if isinstance(cb, CallbackCollection):
result = cb(*args, **kwargs)
else:
result = cb(sender, *args, **kwargs)
if inspect.isawaitable(result):
futures.append(asyncio.ensure_future(result))
except Exception:
log.exception("Callback %r error", cb)
if not futures:
return STUB_AWAITABLE
return asyncio.gather(*futures, return_exceptions=True)
def __hash__(self) -> int:
return id(self)
class OneShotCallback:
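    """
    Wrap an awaitable callback so that it runs at most once.

    The first call schedules the callback as a task and sets ``finished``
    once it completes; every later call returns a no-op awaitable. A
    minimal sketch (names are illustrative):

    .. code-block:: python

        async def on_close(reason):
            print("closed:", reason)

        once = OneShotCallback(on_close)
        await once("shutdown")
        await once("ignored")  # already fired, does nothing
    """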
__slots__ = ("loop", "finished", "__lock", "callback", "__task")
def __init__(self, callback: Callable[..., Awaitable[T]]):
self.callback: Callable[..., Awaitable[T]] = callback
self.loop = asyncio.get_event_loop()
self.finished: asyncio.Event = asyncio.Event()
self.__lock: asyncio.Lock = asyncio.Lock()
self.__task: Optional[asyncio.Future] = None
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: cb={self.callback!r}>"
def wait(self) -> Awaitable[Any]:
try:
return self.finished.wait()
except asyncio.CancelledError:
if self.__task is not None:
self.__task.cancel()
raise
async def __task_inner(self, *args: Any, **kwargs: Any) -> None:
async with self.__lock:
if self.finished.is_set():
return
try:
await self.callback(*args, **kwargs)
finally:
self.loop.call_soon(self.finished.set)
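                # Drop the reference so the callback (and anything it
                # captured) can be garbage collected once it has fired.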
del self.callback
def __call__(self, *args: Any, **kwargs: Any) -> Awaitable[Any]:
if self.finished.is_set() or self.__task is not None:
return STUB_AWAITABLE
self.__task = self.loop.create_task(
self.__task_inner(*args, **kwargs),
)
return self.__task
def ensure_awaitable(
func: Callable[_Params, Union[T, Awaitable[T]]],
) -> Callable[_Params, Awaitable[T]]:
if inspect.iscoroutinefunction(func):
return func
if inspect.isfunction(func):
warnings.warn(
f"You probably registering the non-coroutine function {func!r}. "
"This is deprecated and will be removed in future releases. "
"Moreover, it can block the event loop",
DeprecationWarning,
)
@wraps(func)
async def wrapper(*args: _Params.args, **kwargs: _Params.kwargs) -> T:
nonlocal func
result = func(*args, **kwargs)
if not hasattr(result, "__await__"):
warnings.warn(
f"Function {func!r} returned a non awaitable result."
"This may be bad for performance or may blocks the "
"event loop, you should pay attention to this. This "
"warning is here in an attempt to maintain backwards "
"compatibility and will simply be removed in "
"future releases.",
DeprecationWarning,
)
return result
return await result
return wrapper
CallbackSetType = AbstractSet[
Union[
CallbackType[_Sender, _Params, None],
CallbackCollection[_Sender, _Params],
],
]
__all__ = (
"CallbackCollection",
"CallbackSetType",
"CallbackType",
"OneShotCallback",
"create_task",
"ensure_awaitable",
"iscoroutinepartial",
)
python-aio-pika-9.5.5/aio_pika/transaction.py 0000664 0000000 0000000 00000004021 14761646711 0021230 0 ustar 00root root 0000000 0000000 from types import TracebackType
from typing import Optional, Type
import aiormq
from pamqp import commands
from .abc import (
AbstractChannel, AbstractTransaction, TimeoutType, TransactionState,
)
class Transaction(AbstractTransaction):
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.state.value}>"
def __str__(self) -> str:
return self.state.value
def __init__(self, channel: AbstractChannel):
self.__channel = channel
self.state: TransactionState = TransactionState.CREATED
@property
def channel(self) -> AbstractChannel:
if self.__channel is None:
raise RuntimeError("Channel not opened")
if self.__channel.is_closed:
raise RuntimeError("Closed channel")
return self.__channel
async def select(
self, timeout: TimeoutType = None,
) -> aiormq.spec.Tx.SelectOk:
channel = await self.channel.get_underlay_channel()
result = await channel.tx_select(timeout=timeout)
self.state = TransactionState.STARTED
return result
async def rollback(
self, timeout: TimeoutType = None,
) -> commands.Tx.RollbackOk:
channel = await self.channel.get_underlay_channel()
result = await channel.tx_rollback(timeout=timeout)
self.state = TransactionState.ROLLED_BACK
return result
async def commit(
self, timeout: TimeoutType = None,
) -> commands.Tx.CommitOk:
channel = await self.channel.get_underlay_channel()
result = await channel.tx_commit(timeout=timeout)
self.state = TransactionState.COMMITED
return result
async def __aenter__(self) -> "Transaction":
await self.select()
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if exc_type:
await self.rollback()
else:
await self.commit()
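
if __name__ == "__main__":  # pragma: no cover
    # A minimal usage sketch, not part of the library API. Assumes a local
    # broker with default credentials; the routing key is illustrative.
    # As an async context manager, Transaction commits on a clean exit and
    # rolls back when the block raises. Note that transactions require a
    # channel with publisher confirms disabled.
    import asyncio

    import aio_pika

    async def _demo() -> None:
        connection = await aio_pika.connect("amqp://guest:guest@localhost/")
        async with connection:
            channel = await connection.channel(publisher_confirms=False)
            async with channel.transaction():
                await channel.default_exchange.publish(
                    aio_pika.Message(body=b"hello"),
                    routing_key="demo.queue",
                )

    asyncio.run(_demo())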
python-aio-pika-9.5.5/docs/ 0000775 0000000 0000000 00000000000 14761646711 0015510 5 ustar 00root root 0000000 0000000 python-aio-pika-9.5.5/docs/Makefile 0000664 0000000 0000000 00000001141 14761646711 0017145 0 ustar 00root root 0000000 0000000 # Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = aio-pika
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) python-aio-pika-9.5.5/docs/requirements.txt 0000664 0000000 0000000 00000000044 14761646711 0020772 0 ustar 00root root 0000000 0000000 # ONLY FOR ReadTheDocs
autodoc
furo
python-aio-pika-9.5.5/docs/source/ 0000775 0000000 0000000 00000000000 14761646711 0017010 5 ustar 00root root 0000000 0000000 python-aio-pika-9.5.5/docs/source/_static/ 0000775 0000000 0000000 00000000000 14761646711 0020436 5 ustar 00root root 0000000 0000000 