././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1667439287.3652701 aiozmq-1.0.0/0000755000076600000240000000000000000000000011770 5ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/.coveragerc0000644000076600000240000000004300000000000014106 0ustar00jellestaff[run] source = aiozmq branch = True././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1667439287.323866 aiozmq-1.0.0/.github/0000755000076600000240000000000000000000000013330 5ustar00jellestaff././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1667439287.3326812 aiozmq-1.0.0/.github/workflows/0000755000076600000240000000000000000000000015365 5ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667437957.0 aiozmq-1.0.0/.github/workflows/test.yml0000644000076600000240000000267100000000000017075 0ustar00jellestaffname: main on: push: branches: [master] tags: ['*'] pull_request: paths-ignore: - .gitignore - LICENSE jobs: build: runs-on: ubuntu-latest strategy: fail-fast: false matrix: python: ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11'] msgpack: ['', '1'] debug: ['', '1'] steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python }} - name: install zmq run: | sudo apt-get update -qq && sudo apt-get install -qq libzmq3-dev - name: install python dependencies run: | pip install --upgrade pip setuptools wheel && \ python setup.py install && \ test ${{ matrix.msgpack }} == 1 && pip install msgpack; \ pip install pyflakes pycodestyle docutils codecov black==22.8.0 - name: test run: | python -c "import zmq; print('ZMQ version:', zmq.zmq_version())" && \ pycodestyle --max-line-length=88 aiozmq examples tests benchmarks && \ pyflakes . 
&& \ black --check aiozmq/ benchmarks/ examples/ tests/ setup.py runtests.py && \ export USE_MSGPACK=${{ matrix.msgpack }} && \ export PYTHONASYNCIODEBUG=${{ matrix.debug }} && \ python setup.py check -rm && \ if python -c "import sys; sys.exit(sys.version_info < (3,5))"; then python setup.py check -s; fi && \ python runtests.py --coverage -v ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/.gitignore0000644000076600000240000000013000000000000013752 0ustar00jellestaff*~ coverage .coverage *.egg-info *.pyc *.pyo *.bak *.egg build htmlcov docs/_build dist ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/ACKS.txt0000644000076600000240000000037200000000000013254 0ustar00jellestaffAcknowledgements ---------------- The list of contributors to *aiozmq*, at least of persons with real names but not nicks only. Alain Péteut Alexey Popravka Andrey Khoronko Anton Kasyanov Artem Dudarev Chris Laws Nikita Ofitserov Oleg Baranovskiy ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667439249.0 aiozmq-1.0.0/CHANGES.txt0000644000076600000240000000533500000000000013607 0ustar00jellestaffCHANGES ------- 1.0.0 (2022-11-02) ^^^^^^^^^^^^^^^^^^ * Support Python 3.9, 3.10, and 3.11 (thanks in part to Esben Sonne) * Drop support for Python 3.5 * Remove support for using annotations as conversion functions 0.9.0 (2020-01-25) ^^^^^^^^^^^^^^^^^^ * Support Python 3.7 and 3.8 0.8.0 (2016-12-07) ^^^^^^^^^^^^^^^^^^ * Respect `events_backlog` parameter in zmq stream creation #86 0.7.1 (2015-09-20) ^^^^^^^^^^^^^^^^^^ * Fix monitoring events implementation * Make the library compatible with Python 3.5 0.7.0 (2015-07-31) ^^^^^^^^^^^^^^^^^^ * Implement monitoring ZMQ events #50 * Do deeper lookup for inhereted classes #54 * Relax endpont check #56 * Implement monitoring events for stream api #52 0.6.1 (2015-05-19) ^^^^^^^^^^^^^^^^^^ * Dynamically get list 
of pyzmq socket types 0.6.0 (2015-02-14) ^^^^^^^^^^^^^^^^^^ * Process asyncio specific exceptions as builtins. * Add repr(exception) to rpc server call logs if any * Add transport.get_write_buffer_limits() method * Add __repr__ to transport * Add zmq_type to tr.get_extra_info() * Add zmq streams 0.5.2 (2014-10-09) ^^^^^^^^^^^^^^^^^^ * Poll events after sending zmq message for eventless transport 0.5.1 (2014-09-27) ^^^^^^^^^^^^^^^^^^ * Fix loopless transport implementation. 0.5.0 (2014-08-23) ^^^^^^^^^^^^^^^^^^ * Support zmq devices in aiozmq.rpc.serve_rpc() * Add loopless 0MQ transport 0.4.1 (2014-07-03) ^^^^^^^^^^^^^^^^^^ * Add exclude_log_exceptions parameter to rpc servers. 0.4.0 (2014-05-28) ^^^^^^^^^^^^^^^^^^ * Implement pause_reading/resume_reading methods in ZmqTransport. 0.3.0 (2014-05-17) ^^^^^^^^^^^^^^^^^^ * Add limited support for Windows. * Fix unstable test execution, change ZmqEventLoop to use global shared zmq.Context by default. * Process cancellation on rpc servers and clients. 0.2.0 (2014-04-18) ^^^^^^^^^^^^^^^^^^ * msg in msg_received now is a list, not tuple * Allow to send empty msg by trsansport.write() * Add benchmarks * Derive ServiceClosedError from aiozmq.rpc.Error, not Exception * Implement logging from remote calls at server side (log_exceptions parameter). * Optimize byte counting in ZmqTransport. 0.1.3 (2014-04-10) ^^^^^^^^^^^^^^^^^^ * Function default values are not passed to an annotaion. Add check for libzmq version (should be >= 3.0) 0.1.2 (2014-04-01) ^^^^^^^^^^^^^^^^^^ * Function default values are not passed to an annotaion. 0.1.1 (2014-03-31) ^^^^^^^^^^^^^^^^^^ * Rename plural module names to single ones. 0.1.0 (2014-03-30) ^^^^^^^^^^^^^^^^^^ * Implement ZmqEventLoop with *create_zmq_connection* method which operates on zmq transport and protocol. * Implement ZmqEventLoopPolicy. * Introduce ZmqTransport and ZmqProtocol. * Implement zmq.rpc with RPC, PUSHPULL and PUBSUB protocols. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/LICENSE0000644000076600000240000000245100000000000012777 0ustar00jellestaffCopyright (c) 2013, 2014, 2015, Nikolay Kim and Andrew Svetlov All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/MANIFEST.in0000644000076600000240000000013000000000000013520 0ustar00jellestaffinclude LICENSE include CHANGES.txt include README.rst graft aiozmq global-exclude *.pyc././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/Makefile0000644000076600000240000000157100000000000013434 0ustar00jellestaff# Some simple testing tasks (sorry, UNIX only). PYTHON=python3 PYFLAKES=pyflakes FILTER= doc: cd docs && make html echo "open file://`pwd`/docs/_build/html/index.html" pep: pycodestyle aiozmq examples tests flake: $(PYFLAKES) . test: pep flake $(PYTHON) runtests.py $(FILTER) vtest: pep flake $(PYTHON) runtests.py -v $(FILTER) testloop: pep flake $(PYTHON) runtests.py --forever $(FILTER) cov cover coverage: pep flake $(PYTHON) runtests.py --coverage $(FILTER) clean: find . -name __pycache__ |xargs rm -rf find . -type f -name '*.py[co]' -delete find . -type f -name '*~' -delete find . -type f -name '.*~' -delete find . -type f -name '@*' -delete find . -type f -name '#*#' -delete find . -type f -name '*.orig' -delete find . -type f -name '*.rej' -delete rm -f .coverage rm -rf coverage rm -rf docs/_build .PHONY: all pep test vtest testloop cov clean ././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1667439287.365426 aiozmq-1.0.0/PKG-INFO0000644000076600000240000001567700000000000013105 0ustar00jellestaffMetadata-Version: 2.1 Name: aiozmq Version: 1.0.0 Summary: ZeroMQ integration with asyncio. 
Home-page: http://aiozmq.readthedocs.org Download-URL: https://pypi.python.org/pypi/aiozmq Author: Nikolay Kim Author-email: fafhrd91@gmail.com Maintainer: Jelle Zijlstra Maintainer-email: jelle.zijlstra@gmail.com License: BSD Platform: POSIX Platform: Windows Platform: MacOS X Classifier: License :: OSI Approved :: BSD License Classifier: Intended Audience :: Developers Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Operating System :: POSIX Classifier: Operating System :: MacOS :: MacOS X Classifier: Operating System :: Microsoft :: Windows Classifier: Environment :: Web Environment Classifier: Development Status :: 4 - Beta Classifier: Framework :: AsyncIO Provides-Extra: rpc License-File: LICENSE asyncio integration with ZeroMQ =============================== asyncio (PEP 3156) support for ZeroMQ. .. image:: https://travis-ci.com/aio-libs/aiozmq.svg?branch=master :target: https://travis-ci.com/aio-libs/aiozmq The difference between ``aiozmq`` and vanilla ``pyzmq`` (``zmq.asyncio``) is: ``zmq.asyncio`` works only by replacing the *base event loop* with a custom one. This approach works but has two disadvantages: 1. ``zmq.asyncio.ZMQEventLoop`` cannot be combined with other loop implementations (most notable is the ultra fast ``uvloop``). 2. It uses the internal ZMQ Poller which has fast ZMQ Sockets support but isn't intended to work fast with many (thousands) regular TCP sockets. In practice it means that ``zmq.asyncio`` is not recommended to be used with web servers like ``aiohttp``. See also https://github.com/zeromq/pyzmq/issues/894 Documentation ------------- See http://aiozmq.readthedocs.org Simple high-level client-server RPC example: .. 
code-block:: python import asyncio import aiozmq.rpc class ServerHandler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method def remote_func(self, a:int, b:int) -> int: return a + b async def go(): server = await aiozmq.rpc.serve_rpc( ServerHandler(), bind='tcp://127.0.0.1:5555') client = await aiozmq.rpc.connect_rpc( connect='tcp://127.0.0.1:5555') ret = await client.call.remote_func(1, 2) assert 3 == ret server.close() client.close() asyncio.run(go()) Low-level request-reply example: .. code-block:: python import asyncio import aiozmq import zmq async def go(): router = await aiozmq.create_zmq_stream( zmq.ROUTER, bind='tcp://127.0.0.1:*') addr = list(router.transport.bindings())[0] dealer = await aiozmq.create_zmq_stream( zmq.DEALER, connect=addr) for i in range(10): msg = (b'data', b'ask', str(i).encode('utf-8')) dealer.write(msg) data = await router.read() router.write(data) answer = await dealer.read() print(answer) dealer.close() router.close() asyncio.run(go()) Comparison to pyzmq ------------------- ``zmq.asyncio`` provides an *asyncio compatible loop* implementation. But it's based on ``zmq.Poller`` which doesn't work well with massive non-zmq socket usage. E.g. if you build a web server for handling at least thousands of parallel web requests (1000-5000) ``pyzmq``'s internal poller will be slow. ``aiozmq`` works with epoll natively, it doesn't need a custom loop implementation and cooperates pretty well with `uvloop` for example. For details see https://github.com/zeromq/pyzmq/issues/894 Requirements ------------ * Python_ 3.6+ * pyzmq_ 13.1+ * optional submodule ``aiozmq.rpc`` requires msgpack_ 0.5+ License ------- aiozmq is offered under the BSD license. .. _python: https://www.python.org/ .. _pyzmq: https://pypi.python.org/pypi/pyzmq .. _asyncio: https://pypi.python.org/pypi/asyncio .. 
_msgpack: https://pypi.python.org/pypi/msgpack CHANGES ------- 1.0.0 (2022-11-02) ^^^^^^^^^^^^^^^^^^ * Support Python 3.9, 3.10, and 3.11 (thanks in part to Esben Sonne) * Drop support for Python 3.5 * Remove support for using annotations as conversion functions 0.9.0 (2020-01-25) ^^^^^^^^^^^^^^^^^^ * Support Python 3.7 and 3.8 0.8.0 (2016-12-07) ^^^^^^^^^^^^^^^^^^ * Respect `events_backlog` parameter in zmq stream creation #86 0.7.1 (2015-09-20) ^^^^^^^^^^^^^^^^^^ * Fix monitoring events implementation * Make the library compatible with Python 3.5 0.7.0 (2015-07-31) ^^^^^^^^^^^^^^^^^^ * Implement monitoring ZMQ events #50 * Do deeper lookup for inhereted classes #54 * Relax endpont check #56 * Implement monitoring events for stream api #52 0.6.1 (2015-05-19) ^^^^^^^^^^^^^^^^^^ * Dynamically get list of pyzmq socket types 0.6.0 (2015-02-14) ^^^^^^^^^^^^^^^^^^ * Process asyncio specific exceptions as builtins. * Add repr(exception) to rpc server call logs if any * Add transport.get_write_buffer_limits() method * Add __repr__ to transport * Add zmq_type to tr.get_extra_info() * Add zmq streams 0.5.2 (2014-10-09) ^^^^^^^^^^^^^^^^^^ * Poll events after sending zmq message for eventless transport 0.5.1 (2014-09-27) ^^^^^^^^^^^^^^^^^^ * Fix loopless transport implementation. 0.5.0 (2014-08-23) ^^^^^^^^^^^^^^^^^^ * Support zmq devices in aiozmq.rpc.serve_rpc() * Add loopless 0MQ transport 0.4.1 (2014-07-03) ^^^^^^^^^^^^^^^^^^ * Add exclude_log_exceptions parameter to rpc servers. 0.4.0 (2014-05-28) ^^^^^^^^^^^^^^^^^^ * Implement pause_reading/resume_reading methods in ZmqTransport. 0.3.0 (2014-05-17) ^^^^^^^^^^^^^^^^^^ * Add limited support for Windows. * Fix unstable test execution, change ZmqEventLoop to use global shared zmq.Context by default. * Process cancellation on rpc servers and clients. 
0.2.0 (2014-04-18) ^^^^^^^^^^^^^^^^^^ * msg in msg_received now is a list, not tuple * Allow to send empty msg by trsansport.write() * Add benchmarks * Derive ServiceClosedError from aiozmq.rpc.Error, not Exception * Implement logging from remote calls at server side (log_exceptions parameter). * Optimize byte counting in ZmqTransport. 0.1.3 (2014-04-10) ^^^^^^^^^^^^^^^^^^ * Function default values are not passed to an annotaion. Add check for libzmq version (should be >= 3.0) 0.1.2 (2014-04-01) ^^^^^^^^^^^^^^^^^^ * Function default values are not passed to an annotaion. 0.1.1 (2014-03-31) ^^^^^^^^^^^^^^^^^^ * Rename plural module names to single ones. 0.1.0 (2014-03-30) ^^^^^^^^^^^^^^^^^^ * Implement ZmqEventLoop with *create_zmq_connection* method which operates on zmq transport and protocol. * Implement ZmqEventLoopPolicy. * Introduce ZmqTransport and ZmqProtocol. * Implement zmq.rpc with RPC, PUSHPULL and PUBSUB protocols. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/README.rst0000644000076600000240000000620300000000000013460 0ustar00jellestaffasyncio integration with ZeroMQ =============================== asyncio (PEP 3156) support for ZeroMQ. .. image:: https://travis-ci.com/aio-libs/aiozmq.svg?branch=master :target: https://travis-ci.com/aio-libs/aiozmq The difference between ``aiozmq`` and vanilla ``pyzmq`` (``zmq.asyncio``) is: ``zmq.asyncio`` works only by replacing the *base event loop* with a custom one. This approach works but has two disadvantages: 1. ``zmq.asyncio.ZMQEventLoop`` cannot be combined with other loop implementations (most notable is the ultra fast ``uvloop``). 2. It uses the internal ZMQ Poller which has fast ZMQ Sockets support but isn't intended to work fast with many (thousands) regular TCP sockets. In practice it means that ``zmq.asyncio`` is not recommended to be used with web servers like ``aiohttp``. 
See also https://github.com/zeromq/pyzmq/issues/894 Documentation ------------- See http://aiozmq.readthedocs.org Simple high-level client-server RPC example: .. code-block:: python import asyncio import aiozmq.rpc class ServerHandler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method def remote_func(self, a:int, b:int) -> int: return a + b async def go(): server = await aiozmq.rpc.serve_rpc( ServerHandler(), bind='tcp://127.0.0.1:5555') client = await aiozmq.rpc.connect_rpc( connect='tcp://127.0.0.1:5555') ret = await client.call.remote_func(1, 2) assert 3 == ret server.close() client.close() asyncio.run(go()) Low-level request-reply example: .. code-block:: python import asyncio import aiozmq import zmq async def go(): router = await aiozmq.create_zmq_stream( zmq.ROUTER, bind='tcp://127.0.0.1:*') addr = list(router.transport.bindings())[0] dealer = await aiozmq.create_zmq_stream( zmq.DEALER, connect=addr) for i in range(10): msg = (b'data', b'ask', str(i).encode('utf-8')) dealer.write(msg) data = await router.read() router.write(data) answer = await dealer.read() print(answer) dealer.close() router.close() asyncio.run(go()) Comparison to pyzmq ------------------- ``zmq.asyncio`` provides an *asyncio compatible loop* implementation. But it's based on ``zmq.Poller`` which doesn't work well with massive non-zmq socket usage. E.g. if you build a web server for handling at least thousands of parallel web requests (1000-5000) ``pyzmq``'s internal poller will be slow. ``aiozmq`` works with epoll natively, it doesn't need a custom loop implementation and cooperates pretty well with `uvloop` for example. For details see https://github.com/zeromq/pyzmq/issues/894 Requirements ------------ * Python_ 3.6+ * pyzmq_ 13.1+ * optional submodule ``aiozmq.rpc`` requires msgpack_ 0.5+ License ------- aiozmq is offered under the BSD license. .. _python: https://www.python.org/ .. _pyzmq: https://pypi.python.org/pypi/pyzmq .. _asyncio: https://pypi.python.org/pypi/asyncio .. 
_msgpack: https://pypi.python.org/pypi/msgpack ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1667439287.3360038 aiozmq-1.0.0/aiozmq/0000755000076600000240000000000000000000000013270 5ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667439249.0 aiozmq-1.0.0/aiozmq/__init__.py0000644000076600000240000000345000000000000015403 0ustar00jellestaffimport re import sys from collections import namedtuple import zmq from .core import ZmqEventLoop, ZmqEventLoopPolicy, create_zmq_connection from .interface import ZmqTransport, ZmqProtocol from .selector import ZmqSelector from .stream import ZmqStream, ZmqStreamProtocol, ZmqStreamClosed, create_zmq_stream __all__ = ( "ZmqSelector", "ZmqEventLoop", "ZmqEventLoopPolicy", "ZmqTransport", "ZmqProtocol", "ZmqStream", "ZmqStreamProtocol", "create_zmq_stream", "ZmqStreamClosed", "create_zmq_connection", "version_info", "version", ) __version__ = "1.0.0" version = __version__ + " , Python " + sys.version VersionInfo = namedtuple("VersionInfo", "major minor micro releaselevel serial") def _parse_version(ver): RE = ( r"^(?P\d+)\.(?P\d+)\." 
r"(?P\d+)((?P[a-z]+)(?P\d+)?)?$" ) match = re.match(RE, ver) try: major = int(match.group("major")) minor = int(match.group("minor")) micro = int(match.group("micro")) levels = {"c": "candidate", "a": "alpha", "b": "beta", None: "final"} releaselevel = levels[match.group("releaselevel")] serial = int(match.group("serial")) if match.group("serial") else 0 return VersionInfo(major, minor, micro, releaselevel, serial) except Exception: raise ImportError("Invalid package version {}".format(ver)) version_info = _parse_version(__version__) if zmq.zmq_version_info()[0] < 3: # pragma no cover raise ImportError("aiozmq doesn't support libzmq < 3.0") # make pyflakes happy ( ZmqSelector, ZmqEventLoop, ZmqEventLoopPolicy, ZmqTransport, ZmqProtocol, ZmqStream, ZmqStreamProtocol, ZmqStreamClosed, create_zmq_stream, create_zmq_connection, ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667228777.0 aiozmq-1.0.0/aiozmq/_test_util.py0000644000076600000240000002447000000000000016024 0ustar00jellestaff"""Private test support utulities""" import contextlib import functools import logging import platform import socket import sys import time import unittest class Error(Exception): """Base class for regression test exceptions.""" class TestFailed(Error): """Test failed.""" def _requires_unix_version(sysname, min_version): # pragma: no cover """Decorator raising SkipTest if the OS is `sysname` and the version is less than `min_version`. For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if the FreeBSD version is less than 7.2. 
""" def decorator(func): @functools.wraps(func) def wrapper(*args, **kw): if platform.system() == sysname: version_txt = platform.release().split("-", 1)[0] try: version = tuple(map(int, version_txt.split("."))) except ValueError: pass else: if version < min_version: min_version_txt = ".".join(map(str, min_version)) raise unittest.SkipTest( "%s version %s or higher required, not %s" % (sysname, min_version_txt, version_txt) ) return func(*args, **kw) wrapper.min_version = min_version return wrapper return decorator def requires_freebsd_version(*min_version): # pragma: no cover """Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version is less than `min_version`. For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD version is less than 7.2. """ return _requires_unix_version("FreeBSD", min_version) def requires_linux_version(*min_version): # pragma: no cover """Decorator raising SkipTest if the OS is Linux and the Linux version is less than `min_version`. For example, @requires_linux_version(2, 6, 32) raises SkipTest if the Linux version is less than 2.6.32. """ return _requires_unix_version("Linux", min_version) def requires_mac_ver(*min_version): # pragma: no cover """Decorator raising SkipTest if the OS is Mac OS X and the OS X version if less than min_version. For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version is lesser than 10.5. 
""" def decorator(func): @functools.wraps(func) def wrapper(*args, **kw): if sys.platform == "darwin": version_txt = platform.mac_ver()[0] try: version = tuple(map(int, version_txt.split("."))) except ValueError: pass else: if version < min_version: min_version_txt = ".".join(map(str, min_version)) raise unittest.SkipTest( "Mac OS X %s or higher required, not %s" % (min_version_txt, version_txt) ) return func(*args, **kw) wrapper.min_version = min_version return wrapper return decorator # Don't use "localhost", since resolving it uses the DNS under recent # Windows versions (see issue #18792). HOST = "127.0.0.1" HOSTv6 = "::1" def _is_ipv6_enabled(): # pragma: no cover """Check whether IPv6 is enabled on this host.""" if socket.has_ipv6: sock = None try: sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) sock.bind((HOSTv6, 0)) return True except OSError: pass finally: if sock: sock.close() return False IPV6_ENABLED = _is_ipv6_enabled() def find_unused_port( family=socket.AF_INET, socktype=socket.SOCK_STREAM ): # pragma: no cover """Returns an unused port that should be suitable for binding. This is achieved by creating a temporary socket with the same family and type as the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to the specified host address (defaults to 0.0.0.0) with the port set to 0, eliciting an unused ephemeral port from the OS. The temporary socket is then closed and deleted, and the ephemeral port is returned. Either this method or bind_port() should be used for any tests where a server socket needs to be bound to a particular port for the duration of the test. Which one to use depends on whether the calling code is creating a python socket, or if an unused port needs to be provided in a constructor or passed to an external program (i.e. the -accept argument to openssl's s_server mode). Always prefer bind_port() over find_unused_port() where possible. Hard coded ports should *NEVER* be used. 
As soon as a server socket is bound to a hard coded port, the ability to run multiple instances of the test simultaneously on the same host is compromised, which makes the test a ticking time bomb in a buildbot environment. On Unix buildbots, this may simply manifest as a failed test, which can be recovered from without intervention in most cases, but on Windows, the entire python process can completely and utterly wedge, requiring someone to log in to the buildbot and manually kill the affected process. (This is easy to reproduce on Windows, unfortunately, and can be traced to the SO_REUSEADDR socket option having different semantics on Windows versus Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind, listen and then accept connections on identical host/ports. An EADDRINUSE OSError will be raised at some point (depending on the platform and the order bind and listen were called on each socket). However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE will ever be raised when attempting to bind two identical host/ports. When accept() is called on each socket, the second caller's process will steal the port from the first caller, leaving them both in an awkwardly wedged state where they'll no longer respond to any signals or graceful kills, and must be forcibly killed via OpenProcess()/TerminateProcess(). The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option instead of SO_REUSEADDR, which effectively affords the same semantics as SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open Source world compared to Windows ones, this is a common mistake. A quick look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when openssl.exe is called with the 's_server' option, for example. See http://bugs.python.org/issue2550 for more info. 
The following site also has a very thorough description about the implications of both REUSEADDR and EXCLUSIVEADDRUSE on Windows: http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx) XXX: although this approach is a vast improvement on previous attempts to elicit unused ports, it rests heavily on the assumption that the ephemeral port returned to us by the OS won't immediately be dished back out to some other process when we close and delete our temporary socket but before our calling code has a chance to bind the returned port. We can deal with this issue if/when we come across it. """ tempsock = socket.socket(family, socktype) port = bind_port(tempsock) tempsock.close() del tempsock return port def bind_port(sock, host=HOST): # pragma: no cover """Bind the socket to a free port and return the port number. Relies on ephemeral ports in order to ensure we are using an unbound port. This is important as many tests may be running simultaneously, especially in a buildbot environment. This method raises an exception if the sock.family is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR or SO_REUSEPORT set on it. Tests should *never* set these socket options for TCP/IP sockets. The only case for setting these options is testing multicasting via multiple UDP sockets. Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e. on Windows), it will be set on the socket. This will prevent anyone else from bind()'ing to our host/port for the duration of the test. """ if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM: if hasattr(socket, "SO_REUSEADDR"): if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1: raise TestFailed( "tests should never set the SO_REUSEADDR " "socket option on TCP/IP sockets!" 
) if hasattr(socket, "SO_REUSEPORT"): try: opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) if opt == 1: raise TestFailed( "tests should never set the SO_REUSEPORT " "socket option on TCP/IP sockets!" ) except OSError: # Python's socket module was compiled using modern headers # thus defining SO_REUSEPORT but this process is running # under an older kernel that does not support SO_REUSEPORT. pass if hasattr(socket, "SO_EXCLUSIVEADDRUSE"): sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1) sock.bind((host, 0)) port = sock.getsockname()[1] return port def check_errno(errno, exc): assert isinstance(exc, OSError), exc assert exc.errno == errno, (exc, errno) class TestHandler(logging.Handler): def __init__(self, queue): super().__init__() self.queue = queue def emit(self, record): time.sleep(0) self.queue.put_nowait(record) @contextlib.contextmanager def log_hook(logname, queue): logger = logging.getLogger(logname) handler = TestHandler(queue) logger.addHandler(handler) level = logger.level logger.setLevel(logging.DEBUG) try: yield finally: logger.removeHandler(handler) logger.level = level class RpcMixin: def close_service(self, service): if service is None: return loop = service._loop service.close() loop.run_until_complete(service.wait_closed()) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1667439287.3385112 aiozmq-1.0.0/aiozmq/cli/0000755000076600000240000000000000000000000014037 5ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/aiozmq/cli/__init__.py0000644000076600000240000000000000000000000016136 0ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/aiozmq/cli/proxy.py0000644000076600000240000001073400000000000015577 0ustar00jellestaffimport zmq import sys import argparse from datetime import datetime def get_arguments(): ap = 
argparse.ArgumentParser(description="ZMQ Proxy tool") def common_arguments(ap): ap.add_argument( "--front-bind", metavar="ADDR", action="append", help="Binds frontend socket to specified address", ) ap.add_argument( "--front-connect", metavar="ADDR", action="append", help="Connects frontend socket to specified address", ) ap.add_argument( "--back-bind", metavar="ADDR", action="append", help="Binds backend socket to specified address", ) ap.add_argument( "--back-connect", metavar="ADDR", action="append", help="Connects backend socket to specified address", ) ap.add_argument( "--monitor-bind", metavar="ADDR", action="append", help="Creates and binds monitor socket" " to specified address", ) ap.add_argument( "--monitor-connect", metavar="ADDR", action="append", help="Creates and connects monitor socket" " to specified address", ) parsers = ap.add_subparsers(title="Commands", help="ZMQ Proxy tool commands") sub = parsers.add_parser( "queue", help="Creates Shared Queue proxy" " (frontend/backend sockets are ZMQ_ROUTER/ZMQ_DEALER)", ) sub.set_defaults(sock_types=(zmq.ROUTER, zmq.DEALER), action=serve_proxy) common_arguments(sub) sub = parsers.add_parser( "forwarder", help="Creates Forwarder proxy" " (frontend/backend sockets are ZMQ_XSUB/ZMQ_XPUB)", ) sub.set_defaults(sock_types=(zmq.XSUB, zmq.XPUB), action=serve_proxy) common_arguments(sub) sub = parsers.add_parser( "streamer", help="Creates Streamer proxy" " (frontend/backend sockets are ZMQ_PULL/ZMQ_PUSH)", ) sub.set_defaults(sock_types=(zmq.PULL, zmq.PUSH), action=serve_proxy) common_arguments(sub) sub = parsers.add_parser( "monitor", help="Connects/binds to monitor socket and dumps all traffic" ) sub.set_defaults(action=monitor) sub.add_argument("--connect", metavar="ADDR", help="Connect to monitor socket") sub.add_argument("--bind", metavar="ADDR", help="Bind monitor socket") return ap def main(): ap = get_arguments() options = ap.parse_args() options.action(options) def serve_proxy(options): if not 
(options.front_connect or options.front_bind): print("No frontend socket address specified!", file=sys.stderr) sys.exit(1) if not (options.back_connect or options.back_bind): print("No backend socket address specified!", file=sys.stderr) sys.exit(1) ctx = zmq.Context.instance() front_type, back_type = options.sock_types front = ctx.socket(front_type) back = ctx.socket(back_type) if options.monitor_bind or options.monitor_connect: monitor = ctx.socket(zmq.PUB) bind_connect(monitor, options.monitor_bind, options.monitor_connect) else: monitor = None bind_connect(front, options.front_bind, options.front_connect) bind_connect(back, options.back_bind, options.back_connect) try: if monitor: zmq.proxy(front, back, monitor) else: zmq.proxy(front, back) except Exception: return finally: front.close() back.close() def bind_connect(sock, bind=None, connect=None): if bind: for address in bind: sock.bind(address) if connect: for address in connect: sock.connect(address) def monitor(options): ctx = zmq.Context.instance() sock = ctx.socket(zmq.SUB) bind = [options.bind] if options.bind else [] connect = [options.connect] if options.connect else [] bind_connect(sock, bind, connect) sock.setsockopt(zmq.SUBSCRIBE, b"") try: while True: try: data = sock.recv() except KeyboardInterrupt: break except Exception as err: print("Error receiving message: {!r}".format(err)) else: print(datetime.now().isoformat(), "Message received: {!r}".format(data)) finally: sock.close() ctx.term() if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/aiozmq/core.py0000644000076600000240000007460600000000000014607 0ustar00jellestaffimport asyncio import asyncio.events import errno import struct import sys import threading import weakref import zmq from collections import deque, namedtuple from collections.abc import Iterable from .interface import ZmqTransport, ZmqProtocol from .log import logger from .selector import 
async def create_zmq_connection(
    protocol_factory, zmq_type, *, bind=None, connect=None, zmq_sock=None, loop=None
):
    """A coroutine which creates a ZeroMQ connection endpoint.

    The return value is a pair of (transport, protocol), where transport
    supports the ZmqTransport interface.

    protocol_factory should instantiate an object with the ZmqProtocol
    interface.

    zmq_type is the type of ZeroMQ socket (zmq.REQ, zmq.REP, zmq.PUB,
    zmq.SUB, zmq.PAIR, zmq.DEALER, zmq.ROUTER, zmq.PULL, zmq.PUSH, etc.)

    bind is a string or iterable of strings that specifies endpoints.
    Every endpoint creates an ending for accepting connections and binds
    it to the transport.  The other side should use the connect parameter
    to connect to this transport.
    See http://api.zeromq.org/master:zmq-bind for details.

    connect is a string or iterable of strings that specifies endpoints.
    Every endpoint connects the transport to the specified transport.
    The other side should use the bind parameter to wait for incoming
    connections.
    See http://api.zeromq.org/master:zmq-connect for details.

    An endpoint is a string consisting of two parts as follows:
    transport://address.  The transport part specifies the underlying
    transport protocol to use.  The meaning of the address part is
    specific to the underlying transport protocol selected.

    The following transports are defined:
        inproc - local in-process (inter-thread) communication transport,
                 see http://api.zeromq.org/master:zmq-inproc.
        ipc    - local inter-process communication transport, see
                 http://api.zeromq.org/master:zmq-ipc
        tcp    - unicast transport using TCP, see
                 http://api.zeromq.org/master:zmq_tcp
        pgm, epgm - reliable multicast transport using PGM, see
                 http://api.zeromq.org/master:zmq_pgm

    zmq_sock is a zmq.Socket instance to use a preexisting object with
    the created transport.
    """
    if loop is None:
        loop = asyncio.get_event_loop()

    # A ZmqEventLoop has its own, selector-integrated implementation;
    # delegate to it so its socket bookkeeping stays consistent.
    if isinstance(loop, ZmqEventLoop):
        ret = await loop.create_zmq_connection(
            protocol_factory, zmq_type, bind=bind, connect=connect, zmq_sock=zmq_sock
        )
        return ret

    try:
        if zmq_sock is None:
            zmq_sock = zmq.Context.instance().socket(zmq_type)
        elif zmq_sock.getsockopt(zmq.TYPE) != zmq_type:
            raise ValueError("Invalid zmq_sock type")
    except zmq.ZMQError as exc:
        # Normalize low-level zmq failures into OSError for callers.
        raise OSError(exc.errno, exc.strerror) from exc

    protocol = protocol_factory()
    waiter = asyncio.Future()
    transport = _ZmqLooplessTransportImpl(loop, zmq_type, zmq_sock, protocol, waiter)
    # Wait until the transport has announced connection_made.
    await waiter

    try:
        if bind is not None:
            if isinstance(bind, str):
                bind = [bind]
            else:
                if not isinstance(bind, Iterable):
                    raise ValueError("bind should be str or iterable")
            for endpoint in bind:
                await transport.bind(endpoint)
        if connect is not None:
            if isinstance(connect, str):
                connect = [connect]
            else:
                if not isinstance(connect, Iterable):
                    raise ValueError("connect should be " "str or iterable")
            for endpoint in connect:
                await transport.connect(endpoint)
        return transport, protocol
    except OSError:
        # don't care if zmq_sock.close can raise exception
        # that should never happen
        zmq_sock.close()
        raise
""" def __init__(self, *, zmq_context=None): super().__init__(selector=ZmqSelector()) if zmq_context is None: self._zmq_context = zmq.Context.instance() else: self._zmq_context = zmq_context self._zmq_sockets = weakref.WeakSet() def close(self): for zmq_sock in self._zmq_sockets: if not zmq_sock.closed: zmq_sock.close() super().close() async def create_zmq_connection( self, protocol_factory, zmq_type, *, bind=None, connect=None, zmq_sock=None ): """A coroutine which creates a ZeroMQ connection endpoint. See aiozmq.create_zmq_connection() coroutine for details. """ try: if zmq_sock is None: zmq_sock = self._zmq_context.socket(zmq_type) elif zmq_sock.getsockopt(zmq.TYPE) != zmq_type: raise ValueError("Invalid zmq_sock type") except zmq.ZMQError as exc: raise OSError(exc.errno, exc.strerror) from exc protocol = protocol_factory() waiter = asyncio.Future(loop=self) transport = _ZmqTransportImpl(self, zmq_type, zmq_sock, protocol, waiter) await waiter try: if bind is not None: if isinstance(bind, str): bind = [bind] else: if not isinstance(bind, Iterable): raise ValueError("bind should be str or iterable") for endpoint in bind: await transport.bind(endpoint) if connect is not None: if isinstance(connect, str): connect = [connect] else: if not isinstance(connect, Iterable): raise ValueError("connect should be str or iterable") for endpoint in connect: await transport.connect(endpoint) self._zmq_sockets.add(zmq_sock) return transport, protocol except OSError: # don't care if zmq_sock.close can raise exception # that should never happen zmq_sock.close() raise class _ZmqEventProtocol(ZmqProtocol): """This protocol is used internally by aiozmq to receive messages from a socket event monitor socket. This protocol decodes each event message into a namedtuple and then passes them through to the protocol running the socket that is being monitored via the ZmqProtocol.event_received method. 
This design simplifies the API visible to the developer at the cost of adding some internal complexity - a hidden protocol that transfers events from the monitor protocol to the monitored socket's protocol. """ def __init__(self, loop, main_protocol): self._protocol = main_protocol self.wait_ready = asyncio.Future() self.wait_closed = asyncio.Future() def connection_made(self, transport): self.transport = transport self.wait_ready.set_result(True) def connection_lost(self, exc): self.wait_closed.set_result(exc) def msg_received(self, data): if len(data) != 2 or len(data[0]) != 6: raise RuntimeError("Invalid event message format: {}".format(data)) event, value = struct.unpack("=hi", data[0]) endpoint = data[1].decode() self.event_received(SocketEvent(event, value, endpoint)) def event_received(self, evt): self._protocol.event_received(evt) class _BaseTransport(ZmqTransport): LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5 ZMQ_TYPES = { getattr(zmq, name): name for name in ( "PUB", "SUB", "REP", "REQ", "PUSH", "PULL", "DEALER", "ROUTER", "XPUB", "XSUB", "PAIR", "STREAM", ) if hasattr(zmq, name) } def __init__(self, loop, zmq_type, zmq_sock, protocol): super().__init__(None) self._protocol_paused = False self._set_write_buffer_limits() self._extra["zmq_socket"] = zmq_sock self._extra["zmq_type"] = zmq_type self._loop = loop self._zmq_sock = zmq_sock self._zmq_type = zmq_type self._protocol = protocol self._closing = False self._buffer = deque() self._buffer_size = 0 self._bindings = set() self._connections = set() self._subscriptions = set() self._paused = False self._conn_lost = 0 self._monitor = None def __repr__(self): info = [ "ZmqTransport", "sock={}".format(self._zmq_sock), "type={}".format(self.ZMQ_TYPES[self._zmq_type]), ] try: events = self._zmq_sock.getsockopt(zmq.EVENTS) if events & zmq.POLLIN: info.append("read=polling") else: info.append("read=idle") if events & zmq.POLLOUT: state = "polling" else: state = "idle" bufsize = self.get_write_buffer_size() 
info.append("write=<{}, bufsize={}>".format(state, bufsize)) except zmq.ZMQError: pass return "<{}>".format(" ".join(info)) def write(self, data): if not data: return for part in data: if not isinstance(part, (bytes, bytearray, memoryview)): raise TypeError( "data argument must be iterable of " "byte-ish (%r)" % data ) data_len = sum(len(part) for part in data) if self._conn_lost: if self._conn_lost >= self.LOG_THRESHOLD_FOR_CONNLOST_WRITES: logger.warning("write to closed ZMQ socket.") self._conn_lost += 1 return if not self._buffer: try: if self._do_send(data): return except Exception as exc: self._fatal_error(exc, "Fatal write error on zmq socket transport") return self._buffer.append((data_len, data)) self._buffer_size += data_len self._maybe_pause_protocol() def can_write_eof(self): return False def abort(self): self._force_close(None) def _fatal_error(self, exc, message="Fatal error on transport"): # Should be called from exception handler only. self._loop.call_exception_handler( { "message": message, "exception": exc, "transport": self, "protocol": self._protocol, } ) self._force_close(exc) def _call_connection_lost(self, exc): try: self._protocol.connection_lost(exc) finally: if not self._zmq_sock.closed: self._zmq_sock.close() self._zmq_sock = None self._protocol = None self._loop = None def _maybe_pause_protocol(self): size = self.get_write_buffer_size() if size <= self._high_water: return if not self._protocol_paused: self._protocol_paused = True try: self._protocol.pause_writing() except Exception as exc: self._loop.call_exception_handler( { "message": "protocol.pause_writing() failed", "exception": exc, "transport": self, "protocol": self._protocol, } ) def _maybe_resume_protocol(self): if self._protocol_paused and self.get_write_buffer_size() <= self._low_water: self._protocol_paused = False try: self._protocol.resume_writing() except Exception as exc: self._loop.call_exception_handler( { "message": "protocol.resume_writing() failed", "exception": 
exc, "transport": self, "protocol": self._protocol, } ) def _set_write_buffer_limits(self, high=None, low=None): if high is None: if low is None: high = 64 * 1024 else: high = 4 * low if low is None: low = high // 4 if not high >= low >= 0: raise ValueError("high (%r) must be >= low (%r) must be >= 0" % (high, low)) self._high_water = high self._low_water = low def get_write_buffer_limits(self): return (self._low_water, self._high_water) def set_write_buffer_limits(self, high=None, low=None): self._set_write_buffer_limits(high=high, low=low) self._maybe_pause_protocol() def pause_reading(self): if self._closing: raise RuntimeError("Cannot pause_reading() when closing") if self._paused: raise RuntimeError("Already paused") self._paused = True self._do_pause_reading() def resume_reading(self): if not self._paused: raise RuntimeError("Not paused") self._paused = False if self._closing: return self._do_resume_reading() def getsockopt(self, option): while True: try: ret = self._zmq_sock.getsockopt(option) if option == zmq.LAST_ENDPOINT: ret = ret.decode("utf-8").rstrip("\x00") return ret except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise OSError(exc.errno, exc.strerror) from exc def setsockopt(self, option, value): while True: try: self._zmq_sock.setsockopt(option, value) if option == zmq.SUBSCRIBE: self._subscriptions.add(value) elif option == zmq.UNSUBSCRIBE: self._subscriptions.discard(value) return except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise OSError(exc.errno, exc.strerror) from exc def get_write_buffer_size(self): return self._buffer_size def bind(self, endpoint): fut = asyncio.Future(loop=self._loop) try: if not isinstance(endpoint, str): raise TypeError("endpoint should be str, got {!r}".format(endpoint)) try: self._zmq_sock.bind(endpoint) real_endpoint = self.getsockopt(zmq.LAST_ENDPOINT) except zmq.ZMQError as exc: raise OSError(exc.errno, exc.strerror) from exc except Exception as exc: fut.set_exception(exc) 
else: self._bindings.add(real_endpoint) fut.set_result(real_endpoint) return fut def unbind(self, endpoint): fut = asyncio.Future(loop=self._loop) try: if not isinstance(endpoint, str): raise TypeError("endpoint should be str, got {!r}".format(endpoint)) try: self._zmq_sock.unbind(endpoint) except zmq.ZMQError as exc: raise OSError(exc.errno, exc.strerror) from exc else: self._bindings.discard(endpoint) except Exception as exc: fut.set_exception(exc) else: fut.set_result(None) return fut def bindings(self): return _EndpointsSet(self._bindings) def connect(self, endpoint): fut = asyncio.Future(loop=self._loop) try: if not isinstance(endpoint, str): raise TypeError("endpoint should be str, got {!r}".format(endpoint)) try: self._zmq_sock.connect(endpoint) except zmq.ZMQError as exc: raise OSError(exc.errno, exc.strerror) from exc except Exception as exc: fut.set_exception(exc) else: self._connections.add(endpoint) fut.set_result(endpoint) return fut def disconnect(self, endpoint): fut = asyncio.Future(loop=self._loop) try: if not isinstance(endpoint, str): raise TypeError("endpoint should be str, got {!r}".format(endpoint)) try: self._zmq_sock.disconnect(endpoint) except zmq.ZMQError as exc: raise OSError(exc.errno, exc.strerror) from exc except Exception as exc: fut.set_exception(exc) else: self._connections.discard(endpoint) fut.set_result(None) return fut def connections(self): return _EndpointsSet(self._connections) def subscribe(self, value): if self._zmq_type != zmq.SUB: raise NotImplementedError("Not supported ZMQ socket type") if not isinstance(value, bytes): raise TypeError("value argument should be bytes") if value in self._subscriptions: return self.setsockopt(zmq.SUBSCRIBE, value) def unsubscribe(self, value): if self._zmq_type != zmq.SUB: raise NotImplementedError("Not supported ZMQ socket type") if not isinstance(value, bytes): raise TypeError("value argument should be bytes") self.setsockopt(zmq.UNSUBSCRIBE, value) def subscriptions(self): if 
self._zmq_type != zmq.SUB: raise NotImplementedError("Not supported ZMQ socket type") return _EndpointsSet(self._subscriptions) async def enable_monitor(self, events=None): # The standard approach of binding and then connecting does not # work in this specific case. The event loop does not properly # detect messages on the inproc transport which means that event # messages get missed. # pyzmq's 'get_monitor_socket' method can't be used because this # performs the actions in the wrong order for use with an event # loop. # For more information on this issue see: # http://lists.zeromq.org/pipermail/zeromq-dev/2015-July/029181.html if zmq.zmq_version_info() < (4,) or zmq.pyzmq_version_info() < (14, 4): raise NotImplementedError( "Socket monitor requires libzmq >= 4 and pyzmq >= 14.4, " "have libzmq:{}, pyzmq:{}".format( zmq.zmq_version(), zmq.pyzmq_version() ) ) if self._monitor is None: addr = "inproc://monitor.s-{}".format(self._zmq_sock.FD) events = events or zmq.EVENT_ALL _, self._monitor = await create_zmq_connection( lambda: _ZmqEventProtocol(self._loop, self._protocol), zmq.PAIR, connect=addr, loop=self._loop, ) # bind must come after connect self._zmq_sock.monitor(addr, events) await self._monitor.wait_ready async def disable_monitor(self): self._disable_monitor() def _disable_monitor(self): if self._monitor: self._zmq_sock.disable_monitor() self._monitor.transport.close() self._monitor = None class _ZmqTransportImpl(_BaseTransport): def __init__(self, loop, zmq_type, zmq_sock, protocol, waiter=None): super().__init__(loop, zmq_type, zmq_sock, protocol) self._loop.add_reader(self._zmq_sock, self._read_ready) self._loop.call_soon(self._protocol.connection_made, self) if waiter is not None: self._loop.call_soon(waiter.set_result, None) def _read_ready(self): try: try: data = self._zmq_sock.recv_multipart(zmq.NOBLOCK) except zmq.ZMQError as exc: if exc.errno in (errno.EAGAIN, errno.EINTR): return else: raise OSError(exc.errno, exc.strerror) from exc except 
Exception as exc: self._fatal_error(exc, "Fatal read error on zmq socket transport") else: self._protocol.msg_received(data) def _do_send(self, data): try: self._zmq_sock.send_multipart(data, zmq.DONTWAIT) return True except zmq.ZMQError as exc: if exc.errno in (errno.EAGAIN, errno.EINTR): self._loop.add_writer(self._zmq_sock, self._write_ready) return False else: raise OSError(exc.errno, exc.strerror) from exc def _write_ready(self): assert self._buffer, "Data should not be empty" try: try: self._zmq_sock.send_multipart(self._buffer[0][1], zmq.DONTWAIT) except zmq.ZMQError as exc: if exc.errno in (errno.EAGAIN, errno.EINTR): return else: raise OSError(exc.errno, exc.strerror) from exc except Exception as exc: self._fatal_error(exc, "Fatal write error on zmq socket transport") else: sent_len, sent_data = self._buffer.popleft() self._buffer_size -= sent_len self._maybe_resume_protocol() if not self._buffer: self._loop.remove_writer(self._zmq_sock) if self._closing: self._call_connection_lost(None) def close(self): if self._closing: return self._closing = True if self._monitor: self._disable_monitor() if not self._paused: self._loop.remove_reader(self._zmq_sock) if not self._buffer: self._conn_lost += 1 self._loop.call_soon(self._call_connection_lost, None) def _force_close(self, exc): if self._conn_lost: return if self._monitor: self._disable_monitor() if self._buffer: self._buffer.clear() self._buffer_size = 0 self._loop.remove_writer(self._zmq_sock) if not self._closing: self._closing = True if not self._paused: if self._zmq_sock.closed: self._loop._remove_reader(self._zmq_sock) else: self._loop.remove_reader(self._zmq_sock) self._conn_lost += 1 self._loop.call_soon(self._call_connection_lost, exc) def _do_pause_reading(self): self._loop.remove_reader(self._zmq_sock) def _do_resume_reading(self): self._loop.add_reader(self._zmq_sock, self._read_ready) class _ZmqLooplessTransportImpl(_BaseTransport): def __init__(self, loop, zmq_type, zmq_sock, protocol, waiter): 
super().__init__(loop, zmq_type, zmq_sock, protocol) fd = zmq_sock.getsockopt(zmq.FD) self._fd = fd self._loop.add_reader(fd, self._read_ready) self._loop.call_soon(self._protocol.connection_made, self) self._loop.call_soon(waiter.set_result, None) self._soon_call = None def _read_ready(self): self._soon_call = None if self._zmq_sock is None: return events = self._zmq_sock.getsockopt(zmq.EVENTS) try_again = False if not self._paused and events & zmq.POLLIN: self._do_read() try_again = True if self._buffer and events & zmq.POLLOUT: self._do_write() if not try_again: try_again = bool(self._buffer) if try_again: postevents = self._zmq_sock.getsockopt(zmq.EVENTS) if postevents & zmq.POLLIN: schedule = True elif self._buffer and postevents & zmq.POLLOUT: schedule = True else: schedule = False if schedule: self._soon_call = self._loop.call_soon(self._read_ready) def _do_read(self): try: try: data = self._zmq_sock.recv_multipart(zmq.NOBLOCK) except zmq.ZMQError as exc: if exc.errno in (errno.EAGAIN, errno.EINTR): return else: raise OSError(exc.errno, exc.strerror) from exc except Exception as exc: self._fatal_error(exc, "Fatal read error on zmq socket transport") else: self._protocol.msg_received(data) def _do_write(self): if not self._buffer: return try: try: self._zmq_sock.send_multipart(self._buffer[0][1], zmq.DONTWAIT) except zmq.ZMQError as exc: if exc.errno in (errno.EAGAIN, errno.EINTR): if self._soon_call is None: self._soon_call = self._loop.call_soon(self._read_ready) return else: raise OSError(exc.errno, exc.strerror) from exc except Exception as exc: self._fatal_error(exc, "Fatal write error on zmq socket transport") else: sent_len, sent_data = self._buffer.popleft() self._buffer_size -= sent_len self._maybe_resume_protocol() if not self._buffer and self._closing: self._loop.remove_reader(self._fd) self._call_connection_lost(None) else: if self._soon_call is None: self._soon_call = self._loop.call_soon(self._read_ready) def _do_send(self, data): try: 
self._zmq_sock.send_multipart(data, zmq.DONTWAIT) if self._soon_call is None: self._soon_call = self._loop.call_soon(self._read_ready) return True except zmq.ZMQError as exc: if exc.errno not in (errno.EAGAIN, errno.EINTR): raise OSError(exc.errno, exc.strerror) from exc else: if self._soon_call is None: self._soon_call = self._loop.call_soon(self._read_ready) return False def close(self): if self._closing: return self._closing = True if self._monitor: self._disable_monitor() if not self._buffer: self._conn_lost += 1 if not self._paused: self._loop.remove_reader(self._fd) self._loop.call_soon(self._call_connection_lost, None) def _force_close(self, exc): if self._conn_lost: return if self._monitor: self._disable_monitor() if self._buffer: self._buffer.clear() self._buffer_size = 0 self._closing = True self._loop.remove_reader(self._fd) self._conn_lost += 1 self._loop.call_soon(self._call_connection_lost, exc) def _do_pause_reading(self): pass def _do_resume_reading(self): self._read_ready() def _call_connection_lost(self, exc): try: super()._call_connection_lost(exc) finally: self._soon_call = None class ZmqEventLoopPolicy(asyncio.AbstractEventLoopPolicy): """ZeroMQ policy implementation for accessing the event loop. In this policy, each thread has its own event loop. However, we only automatically create an event loop by default for the main thread; other threads by default have no event loop. """ class _Local(threading.local): _loop = None _set_called = False def __init__(self): self._local = self._Local() self._watcher = None def get_event_loop(self): """Get the event loop. If current thread is the main thread and there are no registered event loop for current thread then the call creates new event loop and registers it. Return an instance of ZmqEventLoop. Raise RuntimeError if there is no registered event loop for current thread. 
""" if ( self._local._loop is None and not self._local._set_called and isinstance(threading.current_thread(), threading._MainThread) ): self.set_event_loop(self.new_event_loop()) assert self._local._loop is not None, ( "There is no current event loop in thread %r." % threading.current_thread().name ) return self._local._loop def new_event_loop(self): """Create a new event loop. You must call set_event_loop() to make this the current event loop. """ return ZmqEventLoop() def set_event_loop(self, loop): """Set the event loop. As a side effect, if a child watcher was set before, then calling .set_event_loop() from the main thread will call .attach_loop(loop) on the child watcher. """ self._local._set_called = True assert loop is None or isinstance( loop, asyncio.AbstractEventLoop ), "loop should be None or AbstractEventLoop instance" self._local._loop = loop if self._watcher is not None and isinstance( threading.current_thread(), threading._MainThread ): self._watcher.attach_loop(loop) if sys.platform != "win32": def _init_watcher(self): with asyncio.events._lock: if self._watcher is None: # pragma: no branch self._watcher = SafeChildWatcher() if isinstance(threading.current_thread(), threading._MainThread): self._watcher.attach_loop(self._local._loop) def get_child_watcher(self): """Get the child watcher. If not yet set, a SafeChildWatcher object is automatically created. 
""" if self._watcher is None: self._init_watcher() return self._watcher def set_child_watcher(self, watcher): """Set the child watcher.""" assert watcher is None or isinstance( watcher, asyncio.AbstractChildWatcher ), "watcher should be None or AbstractChildWatcher instance" if self._watcher is not None: self._watcher.close() self._watcher = watcher ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/aiozmq/interface.py0000644000076600000240000001726100000000000015611 0ustar00jellestafffrom asyncio import BaseProtocol, BaseTransport __all__ = ["ZmqTransport", "ZmqProtocol"] class ZmqTransport(BaseTransport): """Interface for ZeroMQ transport.""" def write(self, data): """Write message to the transport. data is iterable to send as multipart message. This does not block; it buffers the data and arranges for it to be sent out asynchronously. """ raise NotImplementedError def abort(self): """Close the transport immediately. Buffered data will be lost. No more data will be received. The protocol's connection_lost() method will (eventually) be called with None as its argument. """ raise NotImplementedError def getsockopt(self, option): """Get ZeroMQ socket option. option is a constant like zmq.SUBSCRIBE, zmq.UNSUBSCRIBE, zmq.TYPE etc. For list of available options please see: http://api.zeromq.org/master:zmq-getsockopt """ raise NotImplementedError def setsockopt(self, option, value): """Set ZeroMQ socket option. option is a constant like zmq.SUBSCRIBE, zmq.UNSUBSCRIBE, zmq.TYPE etc. value is a new option value, it's type depend of option name. For list of available options please see: http://api.zeromq.org/master:zmq-setsockopt """ raise NotImplementedError def set_write_buffer_limits(self, high=None, low=None): """Set the high- and low-water limits for write flow control. These two values control when to call the protocol's pause_writing() and resume_writing() methods. 
If specified, the low-water limit must be less than or equal to the high-water limit. Neither value can be negative. The defaults are implementation-specific. If only the high-water limit is given, the low-water limit defaults to a implementation-specific value less than or equal to the high-water limit. Setting high to zero forces low to zero as well, and causes pause_writing() to be called whenever the buffer becomes non-empty. Setting low to zero causes resume_writing() to be called only once the buffer is empty. Use of zero for either limit is generally sub-optimal as it reduces opportunities for doing I/O and computation concurrently. """ raise NotImplementedError def get_write_buffer_limits(self): raise NotImplementedError def get_write_buffer_size(self): """Return the current size of the write buffer.""" raise NotImplementedError def pause_reading(self): """Pause the receiving end. No data will be passed to the protocol's msg_received() method until resume_reading() is called. """ raise NotImplementedError def resume_reading(self): """Resume the receiving end. Data received will once again be passed to the protocol's msg_received() method. """ raise NotImplementedError def bind(self, endpoint): """Bind transpot to endpoint. endpoint is a string in format transport://address as ZeroMQ requires. Return bound endpoint, unwinding wildcards if needed. """ raise NotImplementedError def unbind(self, endpoint): """Unbind transpot from endpoint.""" raise NotImplementedError def bindings(self): """Return immutable set of endpoints bound to transport. N.B. returned endpoints includes only ones that has been bound via transport.bind or event_loop.create_zmq_connection calls and does not includes bindings that has been done to zmq_sock before create_zmq_connection has been called. """ raise NotImplementedError def connect(self, endpoint): """Connect transport to endpoint. endpoint is a string in format transport://address as ZeroMQ requires. 
For TCP connections endpoint should specify IPv4 or IPv6 address, not DNS name. Use await get_event_loop().getaddrinfo(host, port) for translating DNS into address. Raise ValueError if endpoint is tcp DNS address. Return bound connection, unwinding wildcards if needed. """ raise NotImplementedError def disconnect(self, endpoint): """Disconnect transpot from endpoint.""" raise NotImplementedError def connections(self): """Return immutable set of endpoints connected to transport. N.B. returned endpoints includes only ones that has been connected via transport.connect or event_loop.create_zmq_connection calls and does not includes connections that has been done to zmq_sock before create_zmq_connection has been called. """ raise NotImplementedError def subscribe(self, value): """Establish a new message filter on SUB transport. Newly created SUB transports filters out all incoming messages, therefore you should to call this method to establish an initial message filter. Value should be bytes. An empty (b'') value subscribes to all incoming messages. A non-empty value subscribes to all messages beginning with the specified prefix. Multiple filters may be attached to a single SUB transport, in which case a message shall be accepted if it matches at least one filter. """ raise NotImplementedError def unsubscribe(self, value): """Remove an existing message filter on a SUB transport. Value should be bytes. The filter specified must match an existing filter previously established with the .subscribe(). If the transport has several instances of the same filter attached the .unsubscribe() removes only one instance, leaving the rest in place and functional. """ raise NotImplementedError def subscriptions(self): """Return immutable set of subscriptions (bytes) subscribed on transport. N.B. 
returned subscriptions includes only ones that has been subscribed via transport.subscribe call and does not includes subscribtions that has been done to zmq_sock before create_zmq_connection has been called. """ raise NotImplementedError async def enable_monitor(self, events=None): """Enables socket events to be reported for this socket. Socket events are passed to the protocol's ZmqProtocol's event_received method. This method is a coroutine. The socket event monitor capability requires libzmq >= 4 and pyzmq >= 14.4. events is a bitmask (e.g zmq.EVENT_CONNECTED) defining the events to monitor. Default is all events (i.e. zmq.EVENT_ALL). For list of available events please see: http://api.zeromq.org/4-0:zmq-socket-monitor Raise NotImplementedError if libzmq or pyzmq versions do not support socket monitoring. """ raise NotImplementedError def disable_monitor(self): """Stop the socket event monitor.""" raise NotImplementedError class ZmqProtocol(BaseProtocol): """Interface for ZeroMQ protocol.""" def msg_received(self, data): """Called when some ZeroMQ message is received. data is the multipart tuple of bytes with at least one item. """ def event_received(self, event): """Called when a ZeroMQ socket event is received. This method is only called when a socket monitor is enabled. :param event: A namedtuple containing 3 items `event`, `value`, and `endpoint`. 
""" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/aiozmq/log.py0000644000076600000240000000007100000000000014421 0ustar00jellestaffimport logging logger = logging.getLogger(__package__) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1667439287.3424332 aiozmq-1.0.0/aiozmq/rpc/0000755000076600000240000000000000000000000014054 5ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/aiozmq/rpc/__init__.py0000644000076600000240000000221200000000000016162 0ustar00jellestaff"""ZeroMQ RPC/Pipeline/PubSub services""" try: from msgpack import version as msgpack_version except ImportError: # pragma: no cover msgpack_version = (0,) from .base import ( method, AbstractHandler, AttrHandler, Error, GenericError, NotFoundError, ParametersError, ServiceClosedError, Service, ) from .rpc import ( connect_rpc, serve_rpc, ) from .pipeline import ( connect_pipeline, serve_pipeline, ) from .pubsub import ( connect_pubsub, serve_pubsub, ) from .log import logger _MSGPACK_VERSION = (0, 4, 0) _MSGPACK_VERSION_STR = ".".join(map(str, _MSGPACK_VERSION)) if msgpack_version < _MSGPACK_VERSION: # pragma: no cover raise ImportError( "aiozmq.rpc requires msgpack package" " (version >= {})".format(_MSGPACK_VERSION_STR) ) __all__ = [ "method", "connect_rpc", "serve_rpc", "connect_pipeline", "serve_pipeline", "connect_pubsub", "serve_pubsub", "logger", "Error", "GenericError", "NotFoundError", "ParametersError", "AbstractHandler", "ServiceClosedError", "AttrHandler", "Service", ] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667437957.0 aiozmq-1.0.0/aiozmq/rpc/base.py0000644000076600000240000001507300000000000015346 0ustar00jellestaffimport abc import asyncio import inspect import pprint import textwrap from types import MethodType from .log import logger from .packer import _Packer from aiozmq import 
class Error(Exception):
    """Base RPC exception.

    Root of the aiozmq.rpc exception hierarchy; catch this to handle
    any RPC-level failure.
    """


class GenericError(Error):
    """Error for all untranslated exceptions from rpc method calls.

    Carries enough information about the remote exception to rebuild
    a useful message on the client side:

    exc_type -- dotted name of the remote exception class
                (e.g. "builtins.ValueError")
    arguments -- the remote exception's ``args`` tuple
    exc_repr -- ``repr()`` of the remote exception
    """

    def __init__(self, exc_type, args, exc_repr):
        super().__init__(exc_type, args, exc_repr)
        self.exc_type = exc_type
        self.arguments = args
        self.exc_repr = exc_repr

    def __repr__(self):
        # BUG FIX: the previous implementation did ``"".format(...)``,
        # which discards every argument and always returns the empty
        # string.  Include the remote exception details instead.
        return "<GenericError {} {!r}: {}>".format(
            self.exc_type, self.arguments, self.exc_repr
        )


class NotFoundError(Error, LookupError):
    """Error raised by server if RPC namespace/method lookup failed."""


class ParametersError(Error, ValueError):
    """Error raised by server when RPC method's parameters could not be
    validated against the signature."""


class ServiceClosedError(Error):
    """RPC Service is closed."""
async def wait_closed(self):
    """Wait until the underlying transport has fully closed.

    Returns immediately when the protocol's transport is already gone;
    otherwise parks a future on the protocol's ``done_waiters`` list,
    which the protocol resolves from ``connection_lost()``.
    """
    if self._proto.transport is None:
        return
    # loop.create_future() is the asyncio-recommended way to make a
    # Future bound to a specific loop (instead of the dated
    # ``asyncio.Future(loop=...)`` form).
    waiter = self._loop.create_future()
    self._proto.done_waiters.append(waiter)
    await waiter
def check_args(self, func, args, kwargs):
    """Validate *args*/*kwargs* against *func*'s signature.

    Returns the bound ``(args, kwargs)`` pair on success and raises
    ParametersError when the arguments do not match the signature.
    """
    try:
        bound = inspect.signature(func).bind(*args, **kwargs)
    except TypeError as exc:
        raise ParametersError(repr(exc)) from exc
    return bound.args, bound.kwargs
def ext_type_pack_hook(self, obj, _sentinel=object()):
    """msgpack ``default`` hook: serialize *obj* as an ExtType.

    Uses a two-level lookup:
      * fast path -- ``_pack_cache`` maps an exact class to a cached
        ``(code, packer)`` pair (or ``None`` for a known-unpackable class);
      * slow path -- scan the translation table in ascending code order
        and match with ``isinstance`` (so subclasses are covered), then
        cache the result keyed by the exact class.

    ``_sentinel`` is a deliberate mutable-default idiom: a unique marker
    distinguishing "never looked up" from "looked up, no packer found".

    Raises TypeError when no translator is registered for *obj*'s type.
    """
    obj_class = obj.__class__
    hit = self._pack_cache.get(obj_class, _sentinel)
    if hit is None:
        # packer has been not found by previous long-lookup
        raise TypeError("Unknown type: {!r}".format(obj))
    elif hit is _sentinel:
        # Slow path: first translation-table entry (lowest code) whose
        # class matches wins; the unpacker is cached alongside so the
        # reverse direction benefits too.
        for code in sorted(self.translation_table):
            cls, packer, unpacker = self.translation_table[code]
            if isinstance(obj, cls):
                self._pack_cache[obj_class] = (code, packer)
                self._unpack_cache[code] = unpacker
                return ExtType(code, packer(obj))
        else:
            # Negative result is cached as None so the linear scan
            # happens at most once per concrete class.
            self._pack_cache[obj_class] = None
            raise TypeError("Unknown type: {!r}".format(obj))
    else:
        # Fast path: cached (code, packer) pair.
        code, packer = hit
        return ExtType(code, packer(obj))
coroutine that creates and connects/binds Pipeline client instance. Usually for this function you need to use *connect* parameter, but ZeroMQ does not forbid to use *bind*. translation_table -- an optional table for custom value translators. loop -- an optional parameter to point ZmqEventLoop instance. If loop is None then default event loop will be given by asyncio.get_event_loop() call. Returns PipelineClient instance. """ if loop is None: loop = asyncio.get_event_loop() transp, proto = await create_zmq_connection( lambda: _ClientProtocol(loop, translation_table=translation_table), zmq.PUSH, connect=connect, bind=bind, loop=loop, ) return PipelineClient(loop, proto) async def serve_pipeline( handler, *, connect=None, bind=None, loop=None, translation_table=None, log_exceptions=False, exclude_log_exceptions=(), timeout=None ): """A coroutine that creates and connects/binds Pipeline server instance. Usually for this function you need to use *bind* parameter, but ZeroMQ does not forbid to use *connect*. handler -- an object which processes incoming pipeline calls. Usually you like to pass AttrHandler instance. log_exceptions -- log exceptions from remote calls if True. translation_table -- an optional table for custom value translators. exclude_log_exceptions -- sequence of exception classes than should not be logged. timeout -- timeout for performing handling of async server calls. loop -- an optional parameter to point ZmqEventLoop instance. If loop is None then default event loop will be given by asyncio.get_event_loop() call. Returns Service instance. 
def msg_received(self, data):
    """Handle an incoming Pipeline message.

    data is the 3-part message [name, packed args, packed kwargs].
    Dispatches to the registered handler; the call result (or error)
    is routed to process_call_result() via a done callback.  Pipeline
    is one-way, so nothing is sent back to the client.
    """
    bname, bargs, bkwargs = data
    args = self.packer.unpackb(bargs)
    kwargs = self.packer.unpackb(bkwargs)
    try:
        name = bname.decode("utf-8")
        func = self.dispatch(name)
        args, kwargs = self.check_args(func, args, kwargs)
    except (NotFoundError, ParametersError) as exc:
        # Lookup/signature failure: wrap in an already-failed future so
        # the same result path below handles it.
        fut = asyncio.Future()
        fut.set_exception(exc)
    else:
        if asyncio.iscoroutinefunction(func):
            # Async handler: schedule it and track it for cancellation
            # on connection loss.
            fut = self.add_pending(func(*args, **kwargs))
        else:
            # Sync handler: run inline, capture result or exception.
            fut = asyncio.Future()
            try:
                fut.set_result(func(*args, **kwargs))
            except Exception as exc:
                fut.set_exception(exc)
    fut.add_done_callback(
        partial(self.process_call_result, name=name, args=args, kwargs=kwargs)
    )

def process_call_result(self, fut, *, name, args, kwargs):
    """Log the outcome of a completed Pipeline handler call.

    Pipeline has no reply channel, so results are only checked/logged:
    a non-None return value is suspicious (it would be silently
    dropped) and is logged as a warning.
    """
    self.discard_pending(fut)
    try:
        if fut.result() is not None:
            logger.warning("Pipeline handler %r returned not None", name)
    except (NotFoundError, ParametersError) as exc:
        logger.exception("Call to %r caused error: %r", name, exc)
    except asyncio.CancelledError:
        # Cancelled on connection loss; nothing to report.
        return
    except Exception:
        # Any other handler exception: defer to the configurable
        # exception-logging policy.
        self.try_log(fut, name, args, kwargs)
async def serve_pubsub(
    handler,
    *,
    subscribe=None,
    connect=None,
    bind=None,
    loop=None,
    translation_table=None,
    log_exceptions=False,
    exclude_log_exceptions=(),
    timeout=None
):
    """A coroutine that creates and connects/binds pubsub server instance.

    Usually for this function you need to use *bind* parameter, but
    ZeroMQ does not forbid to use *connect*.

    handler -- an object which processes incoming pipeline calls.
               Usually you like to pass AttrHandler instance.

    log_exceptions -- log exceptions from remote calls if True.

    subscribe -- subscription specification.  Subscribe server to
                 topics.  Allowed parameters are str, bytes, iterable
                 of str or bytes.

    translation_table -- an optional table for custom value translators.

    exclude_log_exceptions -- sequence of exception classes than should
                              not be logged.

    timeout -- timeout for performing handling of async server calls.

    loop -- an optional parameter to point ZmqEventLoop.  If loop is
            None then default event loop will be given by
            asyncio.get_event_loop() call.

    Returns PubSubService instance.

    Raises OSError on system error.
    Raises TypeError if arguments have inappropriate type.
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    transp, proto = await create_zmq_connection(
        lambda: _ServerProtocol(
            loop,
            handler,
            translation_table=translation_table,
            log_exceptions=log_exceptions,
            exclude_log_exceptions=exclude_log_exceptions,
            timeout=timeout,
        ),
        zmq.SUB,
        connect=connect,
        bind=bind,
        loop=loop,
    )
    serv = PubSubService(loop, proto)
    if subscribe is not None:
        if isinstance(subscribe, (str, bytes)):
            subscribe = [subscribe]
        elif not isinstance(subscribe, Iterable):
            # BUG FIX: the message previously said "bind", but the value
            # being validated here is the *subscribe* argument.
            raise TypeError("subscribe should be str, bytes or iterable")
        for topic in subscribe:
            serv.subscribe(topic)
    return serv
The usage is: await client.publish('my_topic').ns.func(1, 2) topic argument may be None otherwise must be isntance of str or bytes """ return _MethodCall(self._proto, topic) class PubSubService(Service): def subscribe(self, topic): """Subscribe to the topic. topic argument must be str or bytes. Raises TypeError in other cases """ if isinstance(topic, bytes): btopic = topic elif isinstance(topic, str): btopic = topic.encode("utf-8") else: raise TypeError("topic should be str or bytes, got {!r}".format(topic)) self.transport.subscribe(btopic) def unsubscribe(self, topic): """Unsubscribe from the topic. topic argument must be str or bytes. Raises TypeError in other cases """ if isinstance(topic, bytes): btopic = topic elif isinstance(topic, str): btopic = topic.encode("utf-8") else: raise TypeError("topic should be str or bytes, got {!r}".format(topic)) self.transport.unsubscribe(btopic) class _MethodCall: __slots__ = ("_proto", "_topic", "_names") def __init__(self, proto, topic, names=()): self._proto = proto self._topic = topic self._names = names def __getattr__(self, name): return self.__class__(self._proto, self._topic, self._names + (name,)) def __call__(self, *args, **kwargs): if not self._names: raise ValueError("PubSub method name is empty") return self._proto.call(self._topic, ".".join(self._names), args, kwargs) class _ServerProtocol(_BaseServerProtocol): def msg_received(self, data): btopic, bname, bargs, bkwargs = data args = self.packer.unpackb(bargs) kwargs = self.packer.unpackb(bkwargs) try: name = bname.decode("utf-8") func = self.dispatch(name) args, kwargs = self.check_args(func, args, kwargs) except (NotFoundError, ParametersError) as exc: fut = asyncio.Future() fut.set_exception(exc) else: if asyncio.iscoroutinefunction(func): fut = self.add_pending(func(*args, **kwargs)) else: fut = asyncio.Future() try: fut.set_result(func(*args, **kwargs)) except Exception as exc: fut.set_exception(exc) fut.add_done_callback( partial(self.process_call_result, 
def process_call_result(self, fut, *, name, args, kwargs):
    """Log the outcome of a completed PubSub handler call.

    PubSub has no reply channel, so results are only checked/logged:
    a non-None handler return value would be silently dropped, hence
    the warning.
    """
    self.discard_pending(fut)
    try:
        if fut.result() is not None:
            logger.warning("PubSub handler %r returned not None", name)
    except asyncio.CancelledError:
        # Cancelled on connection loss; nothing to report.
        return
    except (NotFoundError, ParametersError) as exc:
        logger.exception("Call to %r caused error: %r", name, exc)
    except Exception:
        # Other handler exceptions go through the configurable
        # exception-logging policy (log_exceptions / exclude list).
        self.try_log(fut, name, args, kwargs)
""" if loop is None: loop = asyncio.get_event_loop() transp, proto = await create_zmq_connection( lambda: _ClientProtocol( loop, error_table=error_table, translation_table=translation_table ), zmq.DEALER, connect=connect, bind=bind, loop=loop, ) return RPCClient(loop, proto, timeout=timeout) async def serve_rpc( handler, *, connect=None, bind=None, loop=None, translation_table=None, log_exceptions=False, exclude_log_exceptions=(), timeout=None ): """A coroutine that creates and connects/binds RPC server instance. Usually for this function you need to use *bind* parameter, but ZeroMQ does not forbid to use *connect*. handler -- an object which processes incoming RPC calls. Usually you like to pass AttrHandler instance. log_exceptions -- log exceptions from remote calls if True. exclude_log_exceptions -- sequence of exception classes than should not be logged. translation_table -- an optional table for custom value translators. timeout -- timeout for performing handling of async server calls. loop -- an optional parameter to point ZmqEventLoop instance. If loop is None then default event loop will be given by asyncio.get_event_loop call. Returns Service instance. 
""" if loop is None: loop = asyncio.get_event_loop() transp, proto = await create_zmq_connection( lambda: _ServerProtocol( loop, handler, translation_table=translation_table, log_exceptions=log_exceptions, exclude_log_exceptions=exclude_log_exceptions, timeout=timeout, ), zmq.ROUTER, connect=connect, bind=bind, loop=loop, ) return Service(loop, proto) _default_error_table = _fill_error_table() class _ClientProtocol(_BaseProtocol): """Client protocol implementation.""" REQ_PREFIX = struct.Struct("=HH") REQ_SUFFIX = struct.Struct("=Ld") RESP = struct.Struct("=HHLd?") def __init__(self, loop, *, error_table=None, translation_table=None): super().__init__(loop, translation_table=translation_table) self.calls = {} self.prefix = self.REQ_PREFIX.pack( os.getpid() % 0x10000, random.randrange(0x10000) ) self.counter = 0 if error_table is None: self.error_table = _default_error_table else: self.error_table = ChainMap(error_table, _default_error_table) def msg_received(self, data): try: header, banswer = data pid, rnd, req_id, timestamp, is_error = self.RESP.unpack(header) answer = self.packer.unpackb(banswer) except Exception: logger.critical("Cannot unpack %r", data, exc_info=sys.exc_info()) return call = self.calls.pop(req_id, None) if call is None: logger.critical( "Unknown answer id: %d (%d %d %f %d) -> %s", req_id, pid, rnd, timestamp, is_error, answer, ) elif call.cancelled(): logger.debug( "The future for request #%08x has been cancelled, " "skip the received result.", req_id, ) else: if is_error: call.set_exception(self._translate_error(*answer)) else: call.set_result(answer) def connection_lost(self, exc): super().connection_lost(exc) for call in self.calls.values(): if not call.cancelled(): call.cancel() def _translate_error(self, exc_type, exc_args, exc_repr): found = self.error_table.get(exc_type) if found is None: return GenericError(exc_type, exc_args, exc_repr) else: return found(*exc_args) def _new_id(self): self.counter += 1 if self.counter > 0xFFFFFFFF: 
class RPCClient(Service):
    """RPC client service returned by connect_rpc().

    Wraps the client protocol and exposes remote methods through the
    dynamic ``call`` attribute.  Also usable as a (synchronous) context
    manager.
    """

    def __init__(self, loop, proto, *, timeout):
        super().__init__(loop, proto)
        self._timeout = timeout

    @property
    def call(self):
        """Return object for dynamic RPC calls.

        The usage is:

        ret = await client.call.ns.func(1, 2)
        """
        return _MethodCall(self._proto, timeout=self._timeout)

    def with_timeout(self, timeout):
        """Return a new RPCClient sharing this protocol but using the
        given per-call timeout instead."""
        cls = type(self)
        return cls(self._loop, self._proto, timeout=timeout)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Nothing to release here; closing is explicit via close().
        return
ParametersError) as exc: fut = asyncio.Future() fut.add_done_callback( partial( self.process_call_result, req_id=req_id, pre=pre, name=name, args=args, kwargs=kwargs, ) ) fut.set_exception(exc) else: if asyncio.iscoroutinefunction(func): fut = self.add_pending(func(*args, **kwargs)) else: fut = asyncio.Future() try: fut.set_result(func(*args, **kwargs)) except Exception as exc: fut.set_exception(exc) fut.add_done_callback( partial( self.process_call_result, req_id=req_id, pre=pre, name=name, args=args, kwargs=kwargs, ) ) def process_call_result(self, fut, *, req_id, pre, name, args, kwargs): self.discard_pending(fut) self.try_log(fut, name, args, kwargs) if self.transport is None: return try: ret = fut.result() prefix = self.prefix + self.RESP_SUFFIX.pack(req_id, time.time(), False) self.transport.write(pre + [prefix, self.packer.packb(ret)]) except asyncio.CancelledError: return except Exception as exc: prefix = self.prefix + self.RESP_SUFFIX.pack(req_id, time.time(), True) exc_type = exc.__class__ exc_info = ( exc_type.__module__ + "." 
+ exc_type.__qualname__, exc.args, repr(exc), ) self.transport.write(pre + [prefix, self.packer.packb(exc_info)]) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667226339.0 aiozmq-1.0.0/aiozmq/rpc/util.py0000644000076600000240000000242200000000000015403 0ustar00jellestaffimport asyncio import builtins from .base import NotFoundError, ParametersError class _MethodCall: __slots__ = ("_proto", "_timeout", "_names") def __init__(self, proto, timeout=None, names=()): self._proto = proto self._timeout = timeout self._names = names def __getattr__(self, name): return self.__class__(self._proto, self._timeout, self._names + (name,)) def __call__(self, *args, **kwargs): if not self._names: raise ValueError("RPC method name is empty") fut = self._proto.call(".".join(self._names), args, kwargs) return asyncio.Task(asyncio.wait_for(fut, timeout=self._timeout)) def _fill_error_table(): # Fill error table with standard exceptions error_table = {} for name in dir(builtins): val = getattr(builtins, name) if isinstance(val, type) and issubclass(val, Exception): error_table["builtins." + name] = val for name in dir(asyncio): val = getattr(asyncio, name) if isinstance(val, type) and issubclass(val, Exception): error_table["asyncio." 
+ name] = val error_table["aiozmq.rpc.base.NotFoundError"] = NotFoundError error_table["aiozmq.rpc.base.ParametersError"] = ParametersError return error_table ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/aiozmq/selector.py0000644000076600000240000001415500000000000015470 0ustar00jellestaff"""ZMQ pooler for asyncio.""" import math from collections.abc import Mapping from errno import EINTR from zmq import ( ZMQError, POLLIN, POLLOUT, POLLERR, Socket as ZMQSocket, Poller as ZMQPoller, ) __all__ = ["ZmqSelector"] try: from asyncio.selectors import BaseSelector, SelectorKey, EVENT_READ, EVENT_WRITE except ImportError: # pragma: no cover from selectors import BaseSelector, SelectorKey, EVENT_READ, EVENT_WRITE def _fileobj_to_fd(fileobj): """Return a file descriptor from a file object. Parameters: fileobj -- file object or file descriptor Returns: corresponding file descriptor or zmq.Socket instance Raises: ValueError if the object is invalid """ if isinstance(fileobj, int): fd = fileobj elif isinstance(fileobj, ZMQSocket): return fileobj else: try: fd = int(fileobj.fileno()) except (AttributeError, TypeError, ValueError): raise ValueError("Invalid file object: " "{!r}".format(fileobj)) from None if fd < 0: raise ValueError("Invalid file descriptor: {}".format(fd)) return fd class _SelectorMapping(Mapping): """Mapping of file objects to selector keys.""" def __init__(self, selector): self._selector = selector def __len__(self): return len(self._selector._fd_to_key) def __getitem__(self, fileobj): try: fd = self._selector._fileobj_lookup(fileobj) return self._selector._fd_to_key[fd] except KeyError: raise KeyError("{!r} is not registered".format(fileobj)) from None def __iter__(self): return iter(self._selector._fd_to_key) class ZmqSelector(BaseSelector): """A selector that can be used with asyncio's selector base event loops.""" def __init__(self): # this maps file descriptors to keys self._fd_to_key = {} # 
def register(self, fileobj, events, data=None):
    """Register a file object (or zmq.Socket) for the given events.

    events is a bitmask of EVENT_READ/EVENT_WRITE; they are translated
    to the corresponding zmq POLLIN/POLLOUT flags for the underlying
    ZMQ poller.  Returns the created SelectorKey.

    Raises ValueError for an invalid events mask, KeyError if the
    object is already registered, and OSError if the ZMQ poller
    rejects the registration.
    """
    if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
        raise ValueError("Invalid events: {!r}".format(events))
    key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
    if key.fd in self._fd_to_key:
        raise KeyError("{!r} (FD {}) is already registered".format(fileobj, key.fd))
    # Map selector event bits onto zmq poll flags.
    z_events = 0
    if events & EVENT_READ:
        z_events |= POLLIN
    if events & EVENT_WRITE:
        z_events |= POLLOUT
    try:
        self._poller.register(key.fd, z_events)
    except ZMQError as exc:
        # Normalize zmq errors to OSError like the stdlib selectors do.
        raise OSError(exc.errno, exc.strerror) from exc
    self._fd_to_key[key.fd] = key
    return key
def select(self, timeout=None):
    """Poll registered objects and return the ready (key, events) pairs.

    timeout -- seconds to wait; None blocks indefinitely, <= 0 polls
    without blocking.  EINTR from the poller is treated as "nothing
    ready"; other ZMQ errors are normalized to OSError.
    """
    # The original code had a dead ``if timeout is None: timeout = None``
    # branch; only the non-None case needs conversion.
    if timeout is not None:
        if timeout <= 0:
            timeout = 0
        else:
            # poll() has a resolution of 1 millisecond, round away from
            # zero to wait *at least* timeout seconds.
            timeout = math.ceil(timeout * 1e3)
    ready = []
    try:
        z_events = self._poller.poll(timeout)
    except ZMQError as exc:
        if exc.errno == EINTR:
            # Interrupted by a signal: report nothing ready.
            return ready
        raise OSError(exc.errno, exc.strerror) from exc
    for fd, evt in z_events:
        events = 0
        if evt & POLLIN:
            events |= EVENT_READ
        if evt & POLLOUT:
            events |= EVENT_WRITE
        if evt & POLLERR:
            # Errors are reported as both readable and writable so the
            # event loop notices the socket either way.
            events = EVENT_READ | EVENT_WRITE
        key = self._key_from_fd(fd)
        if key:
            ready.append((key, events & key.events))
    return ready
Additional optional keyword arguments are loop (to set the event loop instance to use) and high_read, low_read, high_write, low_write -- high and low watermarks for reading and writing respectively. events_backlog -- backlog size for monitoring events, 100 by default. It specifies size of event queue. If count of unread events exceeds events_backlog the oldest events are discarded. """ if loop is None: loop = asyncio.get_event_loop() stream = ZmqStream( loop=loop, high=high_read, low=low_read, events_backlog=events_backlog ) tr, _ = await create_zmq_connection( lambda: stream._protocol, zmq_type, bind=bind, connect=connect, zmq_sock=zmq_sock, loop=loop, ) tr.set_write_buffer_limits(high_write, low_write) return stream class ZmqStreamProtocol(ZmqProtocol): """Helper class to adapt between ZmqProtocol and ZmqStream. This is a helper class to use ZmqStream instead of subclassing ZmqProtocol. """ def __init__(self, stream, loop): self._loop = loop self._stream = stream self._paused = False self._drain_waiter = None self._connection_lost = False def pause_writing(self): assert not self._paused self._paused = True def resume_writing(self): assert self._paused self._paused = False waiter = self._drain_waiter if waiter is not None: self._drain_waiter = None if not waiter.done(): waiter.set_result(None) def connection_made(self, transport): self._stream.set_transport(transport) def connection_lost(self, exc): self._connection_lost = True if exc is None: self._stream.feed_closing() else: self._stream.set_exception(exc) if not self._paused: return waiter = self._drain_waiter if waiter is None: return self._drain_waiter = None if waiter.done(): return if exc is None: waiter.set_result(None) else: waiter.set_exception(exc) async def _drain_helper(self): if self._connection_lost: raise ConnectionResetError("Connection lost") if not self._paused: return waiter = self._drain_waiter assert waiter is None or waiter.cancelled() waiter = asyncio.Future(loop=self._loop) 
self._drain_waiter = waiter await waiter def msg_received(self, msg): self._stream.feed_msg(msg) def event_received(self, event): self._stream.feed_event(event) class ZmqStream: """Wraps a ZmqTransport. Has write() method and read() coroutine for writing and reading ZMQ messages. It adds drain() coroutine which can be used for waiting for flow control. It also adds a transport property which references the ZmqTransport directly. """ def __init__(self, loop, *, high=None, low=None, events_backlog=100): self._transport = None self._protocol = ZmqStreamProtocol(self, loop=loop) self._loop = loop self._queue = collections.deque() self._event_queue = collections.deque(maxlen=events_backlog) self._closing = False # Whether we're done. self._waiter = None # A future. self._event_waiter = None # A future. self._exception = None self._paused = False self._set_read_buffer_limits(high, low) self._queue_len = 0 @property def transport(self): return self._transport def write(self, msg): self._transport.write(msg) def close(self): return self._transport.close() def get_extra_info(self, name, default=None): return self._transport.get_extra_info(name, default) async def drain(self): """Flush the write buffer. 
The intended use is to write w.write(data) await w.drain() """ if self._exception is not None: raise self._exception await self._protocol._drain_helper() def exception(self): return self._exception def set_exception(self, exc): """Private""" self._exception = exc waiter = self._waiter if waiter is not None: self._waiter = None if not waiter.cancelled(): waiter.set_exception(exc) waiter = self._event_waiter if waiter is not None: self._event_waiter = None if not waiter.cancelled(): waiter.set_exception(exc) def set_transport(self, transport): """Private""" assert self._transport is None, "Transport already set" self._transport = transport def _set_read_buffer_limits(self, high=None, low=None): if high is None: if low is None: high = 64 * 1024 else: high = 4 * low if low is None: low = high // 4 if not high >= low >= 0: raise ValueError("high (%r) must be >= low (%r) must be >= 0" % (high, low)) self._high_water = high self._low_water = low def set_read_buffer_limits(self, high=None, low=None): self._set_read_buffer_limits(high, low) self._maybe_resume_transport() def _maybe_resume_transport(self): if self._paused and self._queue_len <= self._low_water: self._paused = False self._transport.resume_reading() def feed_closing(self): """Private""" self._closing = True self._transport = None waiter = self._waiter if waiter is not None: self._waiter = None if not waiter.cancelled(): waiter.set_exception(ZmqStreamClosed()) waiter = self._event_waiter if waiter is not None: self._event_waiter = None if not waiter.cancelled(): waiter.set_exception(ZmqStreamClosed()) def at_closing(self): """Return True if the buffer is empty and 'feed_closing' was called.""" return self._closing and not self._queue def feed_msg(self, msg): """Private""" assert not self._closing, "feed_msg after feed_closing" msg_len = sum(len(i) for i in msg) self._queue.append((msg_len, msg)) self._queue_len += msg_len waiter = self._waiter if waiter is not None: self._waiter = None if not 
waiter.cancelled(): waiter.set_result(None) if ( self._transport is not None and not self._paused and self._queue_len > self._high_water ): self._transport.pause_reading() self._paused = True def feed_event(self, event): """Private""" assert not self._closing, "feed_event after feed_closing" self._event_queue.append(event) event_waiter = self._event_waiter if event_waiter is not None: self._event_waiter = None if not event_waiter.cancelled(): event_waiter.set_result(None) async def read(self): if self._exception is not None: raise self._exception if self._closing: raise ZmqStreamClosed() if not self._queue_len: if self._waiter is not None: raise RuntimeError( "read called while another coroutine is " "already waiting for incoming data" ) self._waiter = asyncio.Future(loop=self._loop) try: await self._waiter finally: self._waiter = None msg_len, msg = self._queue.popleft() self._queue_len -= msg_len self._maybe_resume_transport() return msg async def read_event(self): if self._closing: raise ZmqStreamClosed() if not self._event_queue: if self._event_waiter is not None: raise RuntimeError( "read_event called while another coroutine" " is already waiting for incoming data" ) self._event_waiter = asyncio.Future(loop=self._loop) try: await self._event_waiter finally: self._event_waiter = None event = self._event_queue.popleft() return event ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/aiozmq/util.py0000644000076600000240000000075000000000000014621 0ustar00jellestafffrom collections.abc import Set class _EndpointsSet(Set): __slots__ = ("_collection",) def __init__(self, collection): self._collection = collection def __len__(self): return len(self._collection) def __contains__(self, endpoint): return endpoint in self._collection def __iter__(self): return iter(self._collection) def __repr__(self): return "{" + ", ".join(sorted(self._collection)) + "}" __str__ = __repr__ 
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1667439287.3378198 aiozmq-1.0.0/aiozmq.egg-info/0000755000076600000240000000000000000000000014762 5ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667439287.0 aiozmq-1.0.0/aiozmq.egg-info/PKG-INFO0000644000076600000240000001567700000000000016077 0ustar00jellestaffMetadata-Version: 2.1 Name: aiozmq Version: 1.0.0 Summary: ZeroMQ integration with asyncio. Home-page: http://aiozmq.readthedocs.org Download-URL: https://pypi.python.org/pypi/aiozmq Author: Nikolay Kim Author-email: fafhrd91@gmail.com Maintainer: Jelle Zijlstra Maintainer-email: jelle.zijlstra@gmail.com License: BSD Platform: POSIX Platform: Windows Platform: MacOS X Classifier: License :: OSI Approved :: BSD License Classifier: Intended Audience :: Developers Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Operating System :: POSIX Classifier: Operating System :: MacOS :: MacOS X Classifier: Operating System :: Microsoft :: Windows Classifier: Environment :: Web Environment Classifier: Development Status :: 4 - Beta Classifier: Framework :: AsyncIO Provides-Extra: rpc License-File: LICENSE asyncio integration with ZeroMQ =============================== asyncio (PEP 3156) support for ZeroMQ. .. image:: https://travis-ci.com/aio-libs/aiozmq.svg?branch=master :target: https://travis-ci.com/aio-libs/aiozmq The difference between ``aiozmq`` and vanilla ``pyzmq`` (``zmq.asyncio``) is: ``zmq.asyncio`` works only by replacing the *base event loop* with a custom one. This approach works but has two disadvantages: 1. 
``zmq.asyncio.ZMQEventLoop`` cannot be combined with other loop implementations (most notable is the ultra fast ``uvloop``). 2. It uses the internal ZMQ Poller which has fast ZMQ Sockets support but isn't intended to work fast with many (thousands) regular TCP sockets. In practice it means that ``zmq.asyncio`` is not recommended to be used with web servers like ``aiohttp``. See also https://github.com/zeromq/pyzmq/issues/894 Documentation ------------- See http://aiozmq.readthedocs.org Simple high-level client-server RPC example: .. code-block:: python import asyncio import aiozmq.rpc class ServerHandler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method def remote_func(self, a:int, b:int) -> int: return a + b async def go(): server = await aiozmq.rpc.serve_rpc( ServerHandler(), bind='tcp://127.0.0.1:5555') client = await aiozmq.rpc.connect_rpc( connect='tcp://127.0.0.1:5555') ret = await client.call.remote_func(1, 2) assert 3 == ret server.close() client.close() asyncio.run(go()) Low-level request-reply example: .. code-block:: python import asyncio import aiozmq import zmq async def go(): router = await aiozmq.create_zmq_stream( zmq.ROUTER, bind='tcp://127.0.0.1:*') addr = list(router.transport.bindings())[0] dealer = await aiozmq.create_zmq_stream( zmq.DEALER, connect=addr) for i in range(10): msg = (b'data', b'ask', str(i).encode('utf-8')) dealer.write(msg) data = await router.read() router.write(data) answer = await dealer.read() print(answer) dealer.close() router.close() asyncio.run(go()) Comparison to pyzmq ------------------- ``zmq.asyncio`` provides an *asyncio compatible loop* implementation. But it's based on ``zmq.Poller`` which doesn't work well with massive non-zmq socket usage. E.g. if you build a web server for handling at least thousands of parallel web requests (1000-5000) ``pyzmq``'s internal poller will be slow. ``aiozmq`` works with epoll natively, it doesn't need a custom loop implementation and cooperates pretty well with `uvloop` for example. 
For details see https://github.com/zeromq/pyzmq/issues/894 Requirements ------------ * Python_ 3.6+ * pyzmq_ 13.1+ * optional submodule ``aiozmq.rpc`` requires msgpack_ 0.5+ License ------- aiozmq is offered under the BSD license. .. _python: https://www.python.org/ .. _pyzmq: https://pypi.python.org/pypi/pyzmq .. _asyncio: https://pypi.python.org/pypi/asyncio .. _msgpack: https://pypi.python.org/pypi/msgpack CHANGES ------- 1.0.0 (2022-11-02) ^^^^^^^^^^^^^^^^^^ * Support Python 3.9, 3.10, and 3.11 (thanks in part to Esben Sonne) * Drop support for Python 3.5 * Remove support for using annotations as conversion functions 0.9.0 (2020-01-25) ^^^^^^^^^^^^^^^^^^ * Support Python 3.7 and 3.8 0.8.0 (2016-12-07) ^^^^^^^^^^^^^^^^^^ * Respect `events_backlog` parameter in zmq stream creation #86 0.7.1 (2015-09-20) ^^^^^^^^^^^^^^^^^^ * Fix monitoring events implementation * Make the library compatible with Python 3.5 0.7.0 (2015-07-31) ^^^^^^^^^^^^^^^^^^ * Implement monitoring ZMQ events #50 * Do deeper lookup for inhereted classes #54 * Relax endpont check #56 * Implement monitoring events for stream api #52 0.6.1 (2015-05-19) ^^^^^^^^^^^^^^^^^^ * Dynamically get list of pyzmq socket types 0.6.0 (2015-02-14) ^^^^^^^^^^^^^^^^^^ * Process asyncio specific exceptions as builtins. * Add repr(exception) to rpc server call logs if any * Add transport.get_write_buffer_limits() method * Add __repr__ to transport * Add zmq_type to tr.get_extra_info() * Add zmq streams 0.5.2 (2014-10-09) ^^^^^^^^^^^^^^^^^^ * Poll events after sending zmq message for eventless transport 0.5.1 (2014-09-27) ^^^^^^^^^^^^^^^^^^ * Fix loopless transport implementation. 0.5.0 (2014-08-23) ^^^^^^^^^^^^^^^^^^ * Support zmq devices in aiozmq.rpc.serve_rpc() * Add loopless 0MQ transport 0.4.1 (2014-07-03) ^^^^^^^^^^^^^^^^^^ * Add exclude_log_exceptions parameter to rpc servers. 0.4.0 (2014-05-28) ^^^^^^^^^^^^^^^^^^ * Implement pause_reading/resume_reading methods in ZmqTransport. 
0.3.0 (2014-05-17) ^^^^^^^^^^^^^^^^^^ * Add limited support for Windows. * Fix unstable test execution, change ZmqEventLoop to use global shared zmq.Context by default. * Process cancellation on rpc servers and clients. 0.2.0 (2014-04-18) ^^^^^^^^^^^^^^^^^^ * msg in msg_received now is a list, not tuple * Allow to send empty msg by trsansport.write() * Add benchmarks * Derive ServiceClosedError from aiozmq.rpc.Error, not Exception * Implement logging from remote calls at server side (log_exceptions parameter). * Optimize byte counting in ZmqTransport. 0.1.3 (2014-04-10) ^^^^^^^^^^^^^^^^^^ * Function default values are not passed to an annotaion. Add check for libzmq version (should be >= 3.0) 0.1.2 (2014-04-01) ^^^^^^^^^^^^^^^^^^ * Function default values are not passed to an annotaion. 0.1.1 (2014-03-31) ^^^^^^^^^^^^^^^^^^ * Rename plural module names to single ones. 0.1.0 (2014-03-30) ^^^^^^^^^^^^^^^^^^ * Implement ZmqEventLoop with *create_zmq_connection* method which operates on zmq transport and protocol. * Implement ZmqEventLoopPolicy. * Introduce ZmqTransport and ZmqProtocol. * Implement zmq.rpc with RPC, PUSHPULL and PUBSUB protocols. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667439287.0 aiozmq-1.0.0/aiozmq.egg-info/SOURCES.txt0000644000076600000240000000343000000000000016646 0ustar00jellestaff.coveragerc .gitignore ACKS.txt CHANGES.txt LICENSE MANIFEST.in Makefile README.rst requirements-bench.txt requirements.txt runtests.py setup.cfg setup.py .github/workflows/test.yml aiozmq/__init__.py aiozmq/_test_util.py aiozmq/core.py aiozmq/interface.py aiozmq/log.py aiozmq/selector.py aiozmq/stream.py aiozmq/util.py aiozmq.egg-info/PKG-INFO aiozmq.egg-info/SOURCES.txt aiozmq.egg-info/dependency_links.txt aiozmq.egg-info/entry_points.txt aiozmq.egg-info/requires.txt aiozmq.egg-info/top_level.txt aiozmq/cli/__init__.py aiozmq/cli/proxy.py aiozmq/rpc/__init__.py aiozmq/rpc/base.py aiozmq/rpc/log.py aiozmq/rpc/packer.py aiozmq/rpc/pipeline.py aiozmq/rpc/pubsub.py aiozmq/rpc/rpc.py aiozmq/rpc/util.py benchmarks/simple.py docs/Makefile docs/conf.py docs/core.rst docs/examples.rst docs/glossary.rst docs/index.rst docs/make.bat docs/rpc.rst docs/spelling_wordlist.txt docs/stream.rst docs/_static/PLACEHOLDER examples/core_dealer_router.py examples/rpc_custom_translator.py examples/rpc_dict_handler.py examples/rpc_dynamic.py examples/rpc_exception_translator.py examples/rpc_incorrect_calls.py examples/rpc_pipeline.py examples/rpc_pubsub.py examples/rpc_simple.py examples/rpc_with_subhandlers.py examples/socket_event_monitor.py examples/stream_dealer_router.py examples/stream_monitor.py examples/sync_async.py tests/echo.py tests/echo2.py tests/echo3.py tests/interface_test.py tests/keycert3.pem tests/monitor_test.py tests/policy_test.py tests/pycacert.pem tests/rpc_namespace_test.py tests/rpc_packer_test.py tests/rpc_pipeline_test.py tests/rpc_pubsub_test.py tests/rpc_test.py tests/rpc_translators_test.py tests/sample.crt tests/sample.key tests/selectors_test.py tests/ssl_cert.pem tests/ssl_key.pem tests/transport_test.py tests/version_test.py tests/zmq_events_test.py 
tests/zmq_stream_test.py././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667439287.0 aiozmq-1.0.0/aiozmq.egg-info/dependency_links.txt0000644000076600000240000000000100000000000021030 0ustar00jellestaff ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667439287.0 aiozmq-1.0.0/aiozmq.egg-info/entry_points.txt0000644000076600000240000000006700000000000020263 0ustar00jellestaff[console_scripts] aiozmq-proxy = aiozmq.cli.proxy:main ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667439287.0 aiozmq-1.0.0/aiozmq.egg-info/requires.txt0000644000076600000240000000005300000000000017360 0ustar00jellestaffpyzmq!=17.1.2,>=13.1 [rpc] msgpack>=0.5.0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667439287.0 aiozmq-1.0.0/aiozmq.egg-info/top_level.txt0000644000076600000240000000000700000000000017511 0ustar00jellestaffaiozmq ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1667439287.3427658 aiozmq-1.0.0/benchmarks/0000755000076600000240000000000000000000000014105 5ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667226339.0 aiozmq-1.0.0/benchmarks/simple.py0000644000076600000240000002572300000000000015761 0ustar00jellestaffimport aiozmq import aiozmq.rpc import argparse import asyncio import gc import multiprocessing import random import sys import threading import time import zmq import uvloop from scipy.stats import norm, tmean, tvar, tstd from numpy import array, arange import matplotlib.pyplot as plt from matplotlib import cm def test_raw_zmq(count): """single thread raw zmq""" print(".", end="", flush=True) ctx = zmq.Context() router = ctx.socket(zmq.ROUTER) router.bind("tcp://127.0.0.1:*") address = router.getsockopt(zmq.LAST_ENDPOINT).rstrip(b"\0") dealer = ctx.socket(zmq.DEALER) dealer.connect(address) msg = b"func", b"\0" * 200 gc.collect() t1 = 
time.monotonic() for i in range(count): dealer.send_multipart(msg) addr, m1, m2 = router.recv_multipart() router.send_multipart((addr, m1, m2)) dealer.recv_multipart() t2 = time.monotonic() gc.collect() router.close() dealer.close() ctx.destroy() return t2 - t1 def test_zmq_with_poller(count): """single thread zmq with poller""" print(".", end="", flush=True) ctx = zmq.Context() router = ctx.socket(zmq.ROUTER) router.bind("tcp://127.0.0.1:*") address = router.getsockopt(zmq.LAST_ENDPOINT).rstrip(b"\0") dealer = ctx.socket(zmq.DEALER) dealer.connect(address) msg = b"func", b"\0" * 200 poller = zmq.Poller() poller.register(router) poller.register(dealer) def wait(socket, event=zmq.POLLIN): while True: ret = poller.poll() for sock, ev in ret: if ev & event and sock == socket: return gc.collect() t1 = time.monotonic() for i in range(count): dealer.send_multipart(msg, zmq.DONTWAIT) wait(router) addr, m1, m2 = router.recv_multipart(zmq.NOBLOCK) router.send_multipart((addr, m1, m2), zmq.DONTWAIT) wait(dealer) dealer.recv_multipart(zmq.NOBLOCK) t2 = time.monotonic() gc.collect() router.close() dealer.close() ctx.destroy() return t2 - t1 def test_zmq_with_thread(count): """zmq with threads""" print(".", end="", flush=True) ctx = zmq.Context() dealer = ctx.socket(zmq.DEALER) dealer.bind("tcp://127.0.0.1:*") address = dealer.getsockopt(zmq.LAST_ENDPOINT).rstrip(b"\0") msg = b"func", b"\0" * 200 def router_thread(): router = ctx.socket(zmq.ROUTER) router.connect(address) for i in range(count): addr, m1, m2 = router.recv_multipart() router.send_multipart((addr, m1, m2)) router.close() th = threading.Thread(target=router_thread) th.start() gc.collect() t1 = time.monotonic() for i in range(count): dealer.send_multipart(msg) dealer.recv_multipart() t2 = time.monotonic() gc.collect() th.join() dealer.close() ctx.destroy() return t2 - t1 class ZmqRouterProtocol(aiozmq.ZmqProtocol): transport = None def __init__(self, on_close): self.on_close = on_close def connection_made(self, 
transport): self.transport = transport def msg_received(self, msg): self.transport.write(msg) def connection_lost(self, exc): self.on_close.set_result(exc) class ZmqDealerProtocol(aiozmq.ZmqProtocol): transport = None def __init__(self, count, on_close): self.count = count self.on_close = on_close def connection_made(self, transport): self.transport = transport def msg_received(self, msg): self.count -= 1 if self.count: self.transport.write(msg) else: self.transport.close() def connection_lost(self, exc): self.on_close.set_result(exc) def test_core_aiozmq_uvloop(count): """core aiozmq with uvloop""" loop = uvloop.new_event_loop() return _test_core_aiozmq(count, loop) def test_core_aiozmq_loopless(count): """core aiozmq loopless""" loop = asyncio.new_event_loop() return _test_core_aiozmq(count, loop) def _test_core_aiozmq(count, loop): print(".", end="", flush=True) async def go(): router_closed = asyncio.Future() dealer_closed = asyncio.Future() router, _ = await aiozmq.create_zmq_connection( lambda: ZmqRouterProtocol(router_closed), zmq.ROUTER, bind="tcp://127.0.0.1:*", loop=loop, ) addr = next(iter(router.bindings())) dealer, _ = await aiozmq.create_zmq_connection( lambda: ZmqDealerProtocol(count, dealer_closed), zmq.DEALER, connect=addr, loop=loop, ) msg = b"func", b"\0" * 200 gc.collect() t1 = time.monotonic() dealer.write(msg) await dealer_closed t2 = time.monotonic() gc.collect() router.close() await router_closed return t2 - t1 ret = loop.run_until_complete(go()) loop.close() return ret def test_core_aiozmq_legacy(count): """core aiozmq legacy""" print(".", end="", flush=True) loop = aiozmq.ZmqEventLoop() async def go(): router_closed = asyncio.Future() dealer_closed = asyncio.Future() router, _ = await aiozmq.create_zmq_connection( lambda: ZmqRouterProtocol(router_closed), zmq.ROUTER, bind="tcp://127.0.0.1:*", loop=loop, ) addr = next(iter(router.bindings())) dealer, _ = await aiozmq.create_zmq_connection( lambda: ZmqDealerProtocol(count, dealer_closed), 
zmq.DEALER, connect=addr, loop=loop, ) msg = b"func", b"\0" * 200 gc.collect() t1 = time.monotonic() dealer.write(msg) await dealer_closed t2 = time.monotonic() gc.collect() router.close() await router_closed return t2 - t1 ret = loop.run_until_complete(go()) loop.close() return ret class Handler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method def func(self, data): return data def test_aiozmq_rpc(count): """aiozmq.rpc""" print(".", end="", flush=True) loop = asyncio.new_event_loop() async def go(): server = await aiozmq.rpc.serve_rpc( Handler(), bind="tcp://127.0.0.1:*", loop=loop ) addr = next(iter(server.transport.bindings())) client = await aiozmq.rpc.connect_rpc(connect=addr, loop=loop) data = b"\0" * 200 gc.collect() t1 = time.monotonic() for i in range(count): await client.call.func(data) t2 = time.monotonic() gc.collect() server.close() await server.wait_closed() client.close() await client.wait_closed() return t2 - t1 ret = loop.run_until_complete(go()) loop.close() return ret avail_tests = { f.__name__: f for f in [ test_raw_zmq, test_zmq_with_poller, test_aiozmq_rpc, test_core_aiozmq_legacy, test_core_aiozmq_uvloop, test_core_aiozmq_loopless, test_zmq_with_thread, ] } ARGS = argparse.ArgumentParser(description="Run benchmark.") ARGS.add_argument( "-n", "--count", action="store", nargs="?", type=int, default=1000, help="iterations count", ) ARGS.add_argument( "-t", "--tries", action="store", nargs="?", type=int, default=30, help="count of tries", ) ARGS.add_argument( "-p", "--plot-file-name", action="store", type=str, default=None, dest="plot_file_name", help="file name for plot", ) ARGS.add_argument("-v", "--verbose", action="count", help="verbosity level") ARGS.add_argument( "--without-multiprocessing", action="store_false", default=True, dest="use_multiprocessing", help="don't use multiprocessing", ) ARGS.add_argument( dest="tests", type=str, nargs="*", help="tests, {} by default".format(list(sorted(avail_tests))), ) def run_tests(tries, count, 
use_multiprocessing, funcs): results = {func.__doc__: [] for func in funcs} queue = [] print("Run tests for {}*{} iterations: {}".format(tries, count, sorted(results))) test_plan = [func for func in funcs for i in range(tries)] random.shuffle(test_plan) if use_multiprocessing: with multiprocessing.Pool() as pool: for test in test_plan: res = pool.apply_async(test, (count,)) queue.append((test.__doc__, res)) pool.close() pool.join() for name, res in queue: results[name].append(res.get()) else: for test in test_plan: results[test.__doc__].append(test(count)) print() return results def print_and_plot_results(count, results, verbose, plot_file_name): print("RPS calculated as 95% confidence interval") rps_mean_ar = [] rps_err_ar = [] test_name_ar = [] for test_name in sorted(results): data = results[test_name] rps = count / array(data) rps_mean = tmean(rps) rps_var = tvar(rps) low, high = norm.interval(0.95, loc=rps_mean, scale=rps_var**0.5) times = array(data) * 1000000 / count times_mean = tmean(times) times_stdev = tstd(times) print("Results for", test_name) print( "RPS: {:d}: [{:d}, {:d}],\tmean: {:.3f} μs," "\tstandard deviation {:.3f} μs".format( int(rps_mean), int(low), int(high), times_mean, times_stdev ) ) test_name_ar.append(test_name) rps_mean_ar.append(rps_mean) rps_err_ar.append(high - rps_mean) if verbose: print(" from", times) print() if plot_file_name is not None: fig = plt.figure() ax = fig.add_subplot(111) L = len(rps_mean_ar) color = [cm.autumn(float(c) / (L - 1)) for c in arange(L)] bars = ax.bar(arange(L), rps_mean_ar, color=color, yerr=rps_err_ar, ecolor="k") # order of legend is reversed for visual appeal ax.legend( reversed(bars), reversed(test_name_ar), loc="upper left", framealpha=0.5 ) ax.get_xaxis().set_visible(False) plt.ylabel("Requets per Second", fontsize=16) plt.savefig(plot_file_name, dpi=300) print("Plot is saved to {}".format(plot_file_name)) if verbose: plt.show() def main(argv): args = ARGS.parse_args() count = args.count tries = 
args.tries verbose = args.verbose plot_file_name = args.plot_file_name use_multiprocessing = args.use_multiprocessing tests = args.tests if tests: tests = [avail_tests[t] for t in tests] else: tests = avail_tests.values() res = run_tests(tries, count, use_multiprocessing, tests) print() print_and_plot_results(count, res, verbose, plot_file_name) if __name__ == "__main__": asyncio.set_event_loop(None) sys.exit(main(sys.argv)) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1667439287.3487396 aiozmq-1.0.0/docs/0000755000076600000240000000000000000000000012720 5ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/docs/Makefile0000644000076600000240000001315000000000000014360 0ustar00jellestaff# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." 
htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/aiozmq.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/aiozmq.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/aiozmq" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/aiozmq" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." 
info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." spelling: $(SPHINXBUILD) -b spelling -d $(BUILDDIR)/doctrees . $(BUILDDIR)/spelling @echo @echo "Spelling checker messages written to $(BUILDDIR)/spelling/output.txt." ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1667439287.3497372 aiozmq-1.0.0/docs/_static/0000755000076600000240000000000000000000000014346 5ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/docs/_static/PLACEHOLDER0000644000076600000240000000000000000000000016001 0ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667437957.0 aiozmq-1.0.0/docs/conf.py0000644000076600000240000001534000000000000014222 0ustar00jellestaff#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # aiozmq documentation build configuration file, created by # sphinx-quickstart on Mon Mar 17 15:12:47 2014. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. 
# # All configuration values have a default; values that are commented out # serve to show the default. import re, os, os.path def get_release(): regexp = re.compile(r'^__version__\W*=\W*"([\d.abrc]+)"') here = os.path.dirname(__file__) root = os.path.dirname(here) init_py = os.path.join(root, 'aiozmq', '__init__.py') with open(init_py) as f: for line in f: match = regexp.match(line) if match is not None: return match.group(1) else: raise RuntimeError('Cannot find version in aiozmq/__init__.py') def get_version(release): parts = release.split('.') return '.'.join(parts[:2]) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.viewcode', 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx'] #'sphinxcontrib.spelling'] intersphinx_mapping = {'python': ('http://docs.python.org/3', None), 'pyzmq': ('http://zeromq.github.io/pyzmq', None)} # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'aiozmq' copyright = '2014, 2015, Nikolay Kim and Andrew Svetlov' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. 
#version = '.'.join(str(i) for i in aiozmq.version_info[:2]) # The full version, including alpha/beta/rc tags. #release = aiozmq.__version__ release = get_release() version = get_version(release) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] highlight_language = 'python3' # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'default' on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if on_rtd: html_theme = 'default' else: try: import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] except ImportError: html_theme = 'pyramid' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
#html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'aiozmqdoc' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/docs/core.rst0000644000076600000240000005232000000000000014404 0ustar00jellestaff.. _aiozmq-core: Core API ======== .. module:: aiozmq :synopsis: Low level API for ZeroMQ support .. currentmodule:: aiozmq create_zmq_connection --------------------- .. function:: create_zmq_connection(protocol_factory, zmq_type, *, \ bind=None, connect=None, zmq_sock=None, loop=None) Create a ZeroMQ connection. This method is a :ref:`coroutine `. If you don't use *bind* or *connect* params you can do it later by :meth:`ZmqTransport.bind` and :meth:`ZmqTransport.connect` calls. :param callable protocol_factory: a factory that instantiates :class:`~ZmqProtocol` object. :param int zmq_type: a type of :term:`ZeroMQ` socket (*zmq.REQ*, *zmq.REP*, *zmq.PUB*, *zmq.SUB*, zmq.PAIR*, *zmq.DEALER*, *zmq.ROUTER*, *zmq.PULL*, *zmq.PUSH*, etc.) :param bind: endpoints specification. Every :term:`endpoint` generates call to :meth:`ZmqTransport.bind` for accepting connections from specified endpoint. Other side should use *connect* parameter to connect to this transport. :type bind: str or iterable of strings :param connect: endpoints specification. Every :term:`endpoint` generates call to :meth:`ZmqTransport.connect` for connecting transport to specified endpoint. Other side should use bind parameter to wait for incoming connections. :type connect: str or iterable of strings :param zmq.Socket zmq_sock: a preexisting zmq socket that will be passed to returned transport. 
:param asyncio.AbstractEventLoop loop: optional event loop instance, ``None`` for default event loop. :return: a pair of ``(transport, protocol)`` where transport supports :class:`~ZmqTransport` interface. :rtype: :class:`tuple` .. versionadded:: 0.5 ZmqTransport ------------ .. class:: ZmqTransport Transport for :term:`ZeroMQ` connections. Implements :class:`asyncio.BaseTransport` interface. End user should never create :class:`~ZmqTransport` objects directly, he gets it by ``await aiozmq.create_zmq_connection()`` call. .. method:: get_extra_info(key, default=None) Return optional transport information if name is present otherwise return *default*. :class:`ZmqTransport` supports the only valid *key*: ``"zmq_socket"``. The value is :class:`zmq.Socket` instance. :param str name: name of info record. :param default: default value .. method:: close() Close the transport. Buffered data will be flushed asynchronously. No more data will be received. After all buffered data is flushed, the protocol's :meth:`~ZmqProtocol.connection_lost` method will (eventually) called with *None* as its argument. .. method:: write(data) Write message to the transport. :param data: iterable to send as multipart message. This does not block; it buffers the data and arranges for it to be sent out asynchronously. .. method:: abort() Close the transport immediately. Buffered data will be lost. No more data will be received. The protocol's :meth:`~ZmqProtocol.connection_lost` method will (eventually) be called with *None* as it's argument. .. method:: getsockopt(option) Get :term:`ZeroMQ` socket option. :param int option: a constant like *zmq.SUBSCRIBE*, *zmq.UNSUBSCRIBE*, *zmq.TYPE* etc. For list of available options please see: http://api.zeromq.org/master:zmq-getsockopt :return: option value :raise OSError: if call to ZeroMQ was unsuccessful. .. method:: setsockopt(option, value) Set :term:`ZeroMQ` socket option. 
:param int option: a constant like *zmq.SUBSCRIBE*, *zmq.UNSUBSCRIBE*, *zmq.TYPE* etc. :param value: a new option value, it's type depend of option name. For list of available options please see: http://api.zeromq.org/master:zmq-setsockopt .. method:: get_write_buffer_limits() Get the *high*- and *low*-water limits for write flow control. Return a tuple ``(low, high)`` where *low* and *high* are positive number of bytes. Use :meth:`set_write_buffer_limits` to set the limits. .. versionadded:: 0.6 .. method:: set_write_buffer_limits(high=None, low=None) Set the high- and low-water limits for write flow control. :param high: high-water limit :type high: int or None :param low: low-water limit :type low: int or None These two values control when to call the protocol's :meth:`~ZmqProtocol.pause_writing` and :meth:`~ZmqProtocol.resume_writing()` methods. If specified, the low-water limit must be less than or equal to the high-water limit. Neither value can be negative. The defaults are implementation-specific. If only the high-water limit is given, the low-water limit defaults to a implementation-specific value less than or equal to the high-water limit. Setting high to zero forces low to zero as well, and causes :meth:`~ZmqProtocol.pause_writing` to be called whenever the buffer becomes non-empty. Setting low to zero causes :meth:`~ZmqProtocol.resume_writing` to be called only once the buffer is empty. Use of zero for either limit is generally sub-optimal as it reduces opportunities for doing I/O and computation concurrently. Use :meth:`get_write_buffer_limits` to get the limits. .. method:: get_write_buffer_size() Return the current size of the write buffer. .. method:: pause_reading() Pause the receiving end. No data will be passed to the protocol's :meth:`ZmqProtocol.msg_received` method until :meth:`ZmqTransport.resume_reading` is called. .. seealso:: :meth:`ZmqTransport.resume_reading` method. .. method:: resume_reading() Resume the receiving end. 
Data received will once again be passed to the protocol's :meth:`ZmqProtocol.msg_received` method. .. seealso:: :meth:`ZmqTransport.pause_reading` method. .. method:: bind(endpoint) Bind transpot to :term:`endpoint`. See http://api.zeromq.org/master:zmq-bind for details. This method is a :ref:`coroutine `. :param endpoint: a string in format ``transport://address`` as :term:`ZeroMQ` requires. :return: bound endpoint, unwinding wildcards if needed. :rtype: :class:`str` :raise OSError: on error from ZeroMQ layer :raise TypeError: if *endpoint* is not a :class:`str` .. method:: unbind(endpoint) Unbind transpot from :term:`endpoint`. This method is a :ref:`coroutine `. :param endpoint: a string in format ``transport://address`` as :term:`ZeroMQ` requires. :return: *None* :raise OSError: on error from ZeroMQ layer :raise TypeError: if *endpoint* is not a :class:`str` .. method:: bindings() Return immutable set of :term:`endpoints ` bound to transport. .. note:: Returned endpoints include only ones that has been bound via :meth:`ZmqTransport.bind` or :func:`create_zmq_connection` calls and do not include bindings that have been done on *zmq_sock* before :func:`create_zmq_connection` call. .. method:: connect(endpoint) Connect transpot to :term:`endpoint`. See http://api.zeromq.org/master:zmq-connect for details. This method is a :ref:`coroutine `. :param str endpoint: a string in format ``transport://address`` as :term:`ZeroMQ` requires. For tcp connections the *endpoint* should specify *IPv4* or *IPv6* address, not *DNS* name. Use ``await get_event_loop().getaddrinfo(host, port)`` for translating *DNS* into *IP address*. :return: endpoint :rtype: :class:`str` :raise ValueError: if the endpoint is a tcp DNS address. :raise OSError: on error from ZeroMQ layer :raise TypeError: if *endpoint* is not a :class:`str` .. method:: disconnect(endpoint) Disconnect transpot from :term:`endpoint`. This method is a :ref:`coroutine `. 
:param endpoint: a string in format ``transport://address`` as :term:`ZeroMQ` requires. :return: *None* :raise OSError: on error from ZeroMQ layer :raise TypeError: if *endpoint* is not a :class:`str` .. method:: connections() Return immutable set of :term:`endpoints ` connected to transport. .. note:: Returned endpoints include only ones that has been connected via :meth:`ZmqTransport.connect` or :func:`create_zmq_connection` calls and do not include connections that have been done to *zmq_sock* before :func:`create_zmq_connection` call. .. method:: subscribe(value) Establish a new message filter on *SUB* transport. Newly created *SUB* transports filters out all incoming messages, therefore you should call this method to establish an initial message filter. An empty (``b''``) *value* subscribes to all incoming messages. A non-empty value subscribes to all messages beginning with the specified prefix. Multiple filters may be attached to a single *SUB* transport, in which case a message shall be accepted if it matches at least one filter. :param bytes value: a filter value to add to *SUB* filters. :raise NotImplementedError: the transport is not *SUB*. :raise TypeError: when *value* is not bytes. .. _aiozmq-transport-subscribe-warning: .. warning:: Unlike to :term:`ZeroMQ` socket level the call first check for *value* in :meth:`ZmqTransport.subscriptions` and does nothing if the transport already has been subscribed to the *value*. .. method:: unsubscribe(value) Remove an existing message filter on a *SUB* transport. The filter specified must match an existing filter previously established with the :meth:`ZmqTransport.subscribe`. If the transport has several instances of the same filter attached the ``.unsubscribe()`` removes only one instance, leaving the rest in place and functional (if you use :meth:`ZmqTransport.subscribe` to adding new filters that never happens, see :ref:`difference between aiozmq and ZeroMQ raw sockets ` for details). 
:param bytes value: a filter value to add to *SUB* filters. :raise NotImplementedError: the transport is not *SUB*. :raise TypeError: when *value* is not bytes. .. method:: subscriptions() Return immutable set of subscriptions (set of bytes) subscribed on transport. .. note:: Returned subscriptions include only ones that has been subscribed via :meth:`ZmqTransport.subscribe` call and do not include subscribtions that have been done to zmq_sock before :func:`create_zmq_connection` call. :raise NotImplementedError: the transport is not *SUB*. .. method:: enable_monitor(events=None) Enables socket events to be reported for this socket. Socket events are passed to the protocol's :meth:`ZmqProtocol.event_received` method. The socket event monitor capability requires ``libzmq >= 4`` and ``pyzmq >= 14.4``. This method is a coroutine. :param events: a bitmask of socket events to watch for. If no value is specified then all events will monitored (i.e. ``zmq.EVENT_ALL``). For list of available events please see: http://api.zeromq.org/4-0:zmq-socket-monitor :raise NotImplementedError: if *libzmq* or *pyzmq* versions do not support socket monitoring. .. versionadded:: 0.7 .. method:: disable_monitor() Stop the socket event monitor. This method is a coroutine. .. versionadded:: 0.7 ZmqProtocol -------------------- .. class:: ZmqProtocol Protocol for :term:`ZeroMQ` connections. Derives from :class:`asyncio.BaseProtocol`. .. method:: connection_made(transport) Called when a connection is made. :param ZmqTransport transport: representing the pipe connection. To receive data, wait for :meth:`~ZmqProtocol.msg_received` calls. When the connection is closed, :meth:`~ZmqProtocol.connection_lost` is called. .. method:: connection_lost(exc) Called when the connection is lost or closed. :param exc: an exception object or *None* (the latter meaning the connection was aborted or closed). :type exc: instance of :class:`Exception` or derived class .. 
method:: pause_writing() Called when the transport's buffer goes over the high-water mark. Pause and resume calls are paired -- :meth:`~ZmqProtocol.pause_writing` is called once when the buffer goes strictly over the high-water mark (even if subsequent writes increases the buffer size even more), and eventually :meth:`~ZmqProtocol.resume_writing` is called once when the buffer size reaches the low-water mark. Note that if the buffer size equals the high-water mark, :meth:`~ZmqProtocol.pause_writing` is not called -- it must go strictly over. Conversely, :meth:`~ZmqProtocol.resume_writing` is called when the buffer size is equal or lower than the low-water mark. These end conditions are important to ensure that things go as expected when either mark is zero. .. note:: This is the only Protocol callback that is not called through :meth:`asyncio.AbstractEventLoop.call_soon` -- if it were, it would have no effect when it's most needed (when the app keeps writing without awaiting until :meth:`~ZmqProtocol.pause_writing` is called). .. method:: resume_writing() Called when the transport's buffer drains below the low-water mark. See :meth:`~ZmqProtocol.pause_writing` for details. .. method:: msg_received(data) Called when some ZeroMQ message is received. :param list data: the multipart list of bytes with at least one item. .. method:: event_received(event) Called when a ZeroMQ socket event is received. This method is only called when a socket monitor is enabled. :param event: a SocketEvent namedtuple containing 3 items: `event`, `value`, and `endpoint`. :type event: :class:`namedtuple` .. versionadded:: 0.7 Exception policy ---------------- Every call to :class:`zmq.Socket` method can raise :class:`zmq.ZMQError` exception. But all methods of :class:`ZmqEventLoop` and :class:`ZmqTransport` translate ZMQError into :class:`OSError` (or descendat) with errno and strerror borrowed from underlying ZMQError values. 
The reason for translation is that Python 3.3 implements :pep:`3151` **--- Reworking the OS and IO Exception Hierarchy** which gets rid of exceptions zoo and uses :class:`OSError` and descendants for all exceptions generated by system function calls. :mod:`aiozmq` implements the same pattern. Internally it looks like:: try: return self._zmq_sock.getsockopt(option) except zmq.ZMQError as exc: raise OSError(exc.errno, exc.strerror) Also public methods of :mod:`aiozmq` will never raise :exc:`InterruptedError` (aka *EINTR*), they process interruption internally. Getting aiozmq version ---------------------- .. data:: version a text version of the library:: '0.1.0 , Python 3.3.2+ (default, Feb 28 2014, 00:52:16) \n[GCC 4.8.1]' .. data:: version_info a named tuple with version information, useful for comparison:: VersionInfo(major=0, minor=1, micro=0, releaselevel='alpha', serial=0) The Python itself uses the same schema (:const:`sys.version_info`). .. _install-aiozmq-policy: Installing ZeroMQ event loop ---------------------------- .. deprecated:: 0.5 :mod:`aiozmq` works with any *asyncio* event loop, it doesn't require dedicated event loop policy. To use :term:`ZeroMQ` layer you **may** install proper event loop first. The recommended way is to setup *global event loop policy*:: import asyncio import aiozmq asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy()) That installs :class:`ZmqEventLoopPolicy` globally. After installing you can get event loop instance from main thread by :func:`asyncio.get_event_loop` call:: loop = asyncio.get_event_loop() If you need to execute event loop in your own (not main) thread you have to set it up first:: import threading def thread_func(): loop = asyncio.new_event_loop() asyncio.set_event_loop() loop.run_forever() thread = threading.Thread(target=thread_func) thread.start() ZmqEventLoopPolicy --------------------------- .. 
deprecated:: 0.5 :mod:`aiozmq` works with any *asyncio* event loop, it doesn't require dedicated event loop policy. ZeroMQ policy implementation for accessing the event loop. In this policy, each thread has its own event loop. However, we only automatically create an event loop by default for the main thread; other threads by default have no event loop. :class:`ZmqEventLoopPolicy` implements an :class:`asyncio.AbstractEventLoopPolicy` interface. .. class:: ZmqEventLoopPolicy() Create policy for ZeroMQ event loops. .. note:: policy should be **installed**, see :ref:`install-aiozmq-policy`. .. method:: get_event_loop() Get the event loop. If current thread is the main thread and there are no registered event loop for current thread then the call creates new event loop and registers it. :return: Return an instance of :class:`ZmqEventLoop`. :raise RuntimeError: if there is no registered event loop for current thread. .. method:: new_event_loop() Create a new event loop. You must call :meth:`ZmqEventLoopPolicy.set_event_loop` to make this the current event loop. .. method:: set_event_loop(loop) Set the event loop. As a side effect, if a child watcher was set before, then calling ``.set_event_loop()`` from the main thread will call :meth:`asyncio.AbstractChildWatcher.attach_loop` on the child watcher. :param loop: an :class:`asyncio.AbstractEventLoop` instance or *None* :raise TypeError: if loop is not instance of :class:`asyncio.AbstractEventLoop` .. method:: get_child_watcher() Get the child watcher If not yet set, a :class:`asyncio.SafeChildWatcher` object is automatically created. :return: Return an instance of :class:`asyncio.AbstractChildWatcher`. .. method:: set_child_watcher(watcher) Set the child watcher. :param watcher: an :class:`asyncio.AbstractChildWatcher` instance or *None* :raise TypeError: if watcher is not instance of :class:`asyncio.AbstractChildWatcher` ZmqEventLoop --------------------- .. 
deprecated:: 0.5 :mod:`aiozmq` works with any *asyncio* event loop, it doesn't require dedicated event loop object. Event loop with :term:`ZeroMQ` support. Follows :class:`asyncio.AbstractEventLoop` specification and has :meth:`~ZmqEventLoop.create_zmq_connection` method for :term:`ZeroMQ` sockets layer. .. class:: ZmqEventLoop(*, zmq_context=None) :param zmq.Context zmq_context: explicit context to use for ZeroMQ socket creation inside :meth:`ZmqEventLoop.create_zmq_connection` calls. :mod:`aiozmq` shares global context returned by :meth:`zmq.Context.instance` call if *zmq_context* parameter is ``None``. .. method:: create_zmq_connection(protocol_factory, zmq_type, *, \ bind=None, connect=None, zmq_sock=None) Create a ZeroMQ connection. If you don't use *bind* or *connect* params you can do it later by :meth:`ZmqTransport.bind` and :meth:`ZmqTransport.connect` calls. :param callable protocol_factory: a factory that instantiates :class:`~ZmqProtocol` object. :param int zmq_type: a type of :term:`ZeroMQ` socket (*zmq.REQ*, *zmq.REP*, *zmq.PUB*, *zmq.SUB*, zmq.PAIR*, *zmq.DEALER*, *zmq.ROUTER*, *zmq.PULL*, *zmq.PUSH*, etc.) :param bind: endpoints specification. Every :term:`endpoint` generates call to :meth:`ZmqTransport.bind` for accepting connections from specified endpoint. Other side should use *connect* parameter to connect to this transport. :type bind: str or iterable of strings :param connect: endpoints specification. Every :term:`endpoint` generates call to :meth:`ZmqTransport.connect` for connecting transport to specified endpoint. Other side should use bind parameter to wait for incoming connections. :type connect: str or iterable of strings :param zmq.Socket zmq_sock: a preexisting zmq socket that will be passed to returned transport. :return: a pair of ``(transport, protocol)`` where transport supports :class:`~ZmqTransport` interface. 
:rtype: :class:`tuple` ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/docs/examples.rst0000644000076600000240000000463400000000000015277 0ustar00jellestaffExamples of aiozmq usage ======================== There is a list of examples from `aiozmq/examples `_ Every example is a correct tiny python program. .. _aiozmq-examples-core-dealer-router: Simple DEALER-ROUTER pair implemented on Core level --------------------------------------------------- .. literalinclude:: ../examples/core_dealer_router.py .. _aiozmq-examples-stream-dealer-router: DEALER-ROUTER pair implemented with streams ------------------------------------------- .. literalinclude:: ../examples/stream_dealer_router.py .. _aiozmq-examples-rpc-rpc: Remote Procedure Call --------------------- .. literalinclude:: ../examples/rpc_simple.py .. _aiozmq-examples-rpc-pipeline: Pipeline aka Notifier --------------------- .. literalinclude:: ../examples/rpc_pipeline.py .. _aiozmq-examples-rpc-pubsub: Publish-Subscribe ----------------- .. literalinclude:: ../examples/rpc_pubsub.py .. _aiozmq-examples-rpc-exception-trasnslator: Translation RPC exceptions back to client ----------------------------------------- .. literalinclude:: ../examples/rpc_exception_translator.py .. _aiozmq-examples-rpc-custom-value-trasnslator: Translation instances of custom classes via RPC ------------------------------------------------ .. literalinclude:: ../examples/rpc_custom_translator.py .. _aiozmq-examples-rpc-incorrect-calls: Validation of RPC methods -------------------------- .. literalinclude:: ../examples/rpc_incorrect_calls.py .. _aiozmq-examples-rpc-subhandlers: RPC lookup in nested namespaces ------------------------------- .. literalinclude:: ../examples/rpc_with_subhandlers.py .. _aiozmq-examples-rpc-dict-handler: Use dict as RPC lookup table ---------------------------- .. literalinclude:: ../examples/rpc_dict_handler.py .. 
_aiozmq-examples-rpc-dynamic-handler: Use dynamic RPC lookup ---------------------- .. literalinclude:: ../examples/rpc_dynamic.py .. _aiozmq-examples-socket-event-monitor: Socket event monitor -------------------- .. literalinclude:: ../examples/socket_event_monitor.py .. _aiozmq-examples-stream-socket-event-monitor: Stream socket event monitor --------------------------- .. literalinclude:: ../examples/stream_monitor.py .. _aiozmq-examples-sync-async: Synchronous and asynchronous code works together ------------------------------------------------- .. literalinclude:: ../examples/sync_async.py ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667437957.0 aiozmq-1.0.0/docs/glossary.rst0000644000076600000240000000420200000000000015313 0ustar00jellestaff.. _glossary: ******** Glossary ******** .. if you add new entries, keep the alphabetical sorting! .. glossary:: asyncio Reference implementation of :pep:`3156` See https://pypi.python.org/pypi/asyncio/ callable Any object that can be called. Use :func:`callable` to check that. endpoint A string consisting of two parts as follows: *transport://address.* The transport part specifies the underlying transport protocol to use. The meaning of the address part is specific to the underlying transport protocol selected. The following transports are defined: inproc local in-process (inter-thread) communication transport, see http://api.zeromq.org/master:zmq-inproc. ipc local inter-process communication transport, see http://api.zeromq.org/master:zmq-ipc tcp unicast transport using TCP, see http://api.zeromq.org/master:zmq_tcp pgm, epgm reliable multicast transport using PGM, see http://api.zeromq.org/master:zmq_pgm enduser Software engeneer who wants to *just use* human-like communications via that library. We offer that simple API for RPC, Push/Pull and Pub/Sub services. msgpack Fast and compact binary serialization format. See http://msgpack.org/ for the description of the standard. 
https://pypi.python.org/pypi/msgpack/ is the Python implementation. pyzmq PyZMQ is the Python bindings for :term:`ZeroMQ`. See https://github.com/zeromq/pyzmq trafaret Trafaret is a validation library with support for data structure convertors. See https://github.com/Deepwalker/trafaret ZeroMQ ØMQ (also spelled ZeroMQ, 0MQ or ZMQ) is a high-performance asynchronous messaging library aimed at use in scalable distributed or concurrent applications. It provides a message queue, but unlike message-oriented middleware, a ØMQ system can run without a dedicated message broker. The library is designed to have a familiar socket-style API. See http://zeromq.org/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667437957.0 aiozmq-1.0.0/docs/index.rst0000644000076600000240000001016300000000000014562 0ustar00jellestaff.. aiozmq documentation master file, created by sphinx-quickstart on Mon Mar 17 15:12:47 2014. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. aiozmq ==================================== ZeroMQ integration with asyncio (:pep:`3156`). .. _GitHub: https://github.com/aio-libs/aiozmq Features -------- - Implements :func:`~aiozmq.create_zmq_connection` coroutine for making 0MQ connections. - Provides :class:`~aiozmq.ZmqTransport` and :class:`~aiozmq.ZmqProtocol` - Provides RPC :ref:`aiozmq-rpc-rpc`, :ref:`aiozmq-rpc-pushpull` and :ref:`aiozmq-rpc-pubsub` patterns for *remote calls*. .. note:: The library works on Linux, MacOS X and Windows. But Windows is a second-class citizen in :term:`ZeroMQ` world, sorry. Thus *aiozmq* has *limited* support for Windows also. Limitations are: * You obviously cannot use *ipc://name* schema for :term:`endpoint` * aiozmq`s loop :class:`aiozmq.ZmqEventLoop` is built on top of ``select`` system call, so it's not fast comparing to :class:`asyncio.ProactorEventLoop` and it doesn't support :ref:`subprocesses `. 
Library Installation -------------------- The :ref:`core ` requires only :term:`pyzmq` and can be installed (with pyzmq as dependency) by executing:: pip3 install aiozmq Also probably you want to use :mod:`aiozmq.rpc`. .. _aiozmq-install-msgpack: RPC module is **optional** and requires :term:`msgpack`. You can install *msgpack* by executing:: pip3 install msgpack .. note:: *aiozmq* can be executed by *Python 3* only. The most Linux distributions uses *pip3* for installing *Python 3 libraries*. But your system may be using *Python 3* by default than try just *pip* instead of *pip3*. The same may be true for *virtualenv*, *travis continuous integration system* etc. Source code ----------- The project is hosted on GitHub_ Please feel free to file an issue on `bug tracker `_ if you have found a bug or have some suggestion for library improvement. The library uses Github Actions for Continious Integration. Dependencies ------------ - Python 3.6+ - :term:`ZeroMQ` 3.2+ - :term:`pyzmq` 13.1+ (did not test with earlier versions) - aiozmq.rpc requires :term:`msgpack` Authors and License ------------------- The ``aiozmq`` package is initially written by Nikolay Kim, later maintained by Andrew Svetlov, and now by Jelle Zijlstra. It's BSD licensed and freely available. Feel free to improve this package and send a pull request to GitHub_. 
Getting Started --------------- Low-level request-reply example:: import asyncio import aiozmq import zmq async def go(): router = await aiozmq.create_zmq_stream( zmq.ROUTER, bind='tcp://127.0.0.1:*') addr = list(router.transport.bindings())[0] dealer = await aiozmq.create_zmq_stream( zmq.DEALER, connect=addr) for i in range(10): msg = (b'data', b'ask', str(i).encode('utf-8')) dealer.write(msg) data = await router.read() router.write(data) answer = await dealer.read() print(answer) dealer.close() router.close() asyncio.run(go()) Example of RPC usage:: import asyncio import aiozmq.rpc class ServerHandler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method def remote_func(self, a:int, b:int) -> int: return a + b async def go(): server = await aiozmq.rpc.serve_rpc( ServerHandler(), bind='tcp://127.0.0.1:5555') client = await aiozmq.rpc.connect_rpc( connect='tcp://127.0.0.1:5555') ret = await client.call.remote_func(1, 2) assert 3 == ret server.close() client.close() asyncio.run(go()) .. note:: To execute the last example you need to :ref:`install msgpack` first. Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` .. toctree:: stream rpc core examples glossary ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/docs/make.bat0000644000076600000240000001175000000000000014331 0ustar00jellestaff@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. 
singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. 
goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\aiozmq.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\aiozmq.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. 
echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) :end ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667437957.0 aiozmq-1.0.0/docs/rpc.rst0000644000076600000240000006637100000000000014253 0ustar00jellestaff.. _aiozmq-rpc: ====================== Remote Procedure Calls ====================== .. module:: aiozmq.rpc :synopsis: RPC for ZeroMQ transports .. currentmodule:: aiozmq.rpc Intro ===== While :ref:`core API ` provides a core support for :term:`ZeroMQ` transports, the :term:`End User ` may need some high-level API. Thus we have the *aiozmq.rpc* module for Remote Procedure Calls. The main goal of the module is to provide *easy-to-use interface* for calling some method from the remote process (which can be running on the other host). :term:`ZeroMQ` itself gives some handy sockets but says nothing about RPC. On the other hand, this module provides *human* API, but it is not compatible with *other implementations*. If you need to support a custom protocol over :term:`ZeroMQ` layer, please feel free to build your own implementation on top of the :ref:`core primitives `. The :mod:`aiozmq.rpc` supports three pairs of communications: * :ref:`aiozmq-rpc-rpc` * :ref:`aiozmq-rpc-pushpull` * :ref:`aiozmq-rpc-pubsub` .. warning:: The :mod:`aiozmq.rpc` module is **optional** and requires :term:`msgpack`. You can install *msgpack* by executing:: pip3 install msgpack .. _aiozmq-rpc-rpc: Request-Reply ============= This is a **Remote Procedure Call** pattern itself. Client calls a remote function on server and waits for the returned value. If the remote function raises an exception, that exception instance is also raised on the client side. 
Let's assume we have *N* clients bound to *M* servers. Any client can connect to several servers and any server can listen to multiple *endpoints*. When client sends a message, the message will be delivered to any server that is ready (doesn't processes another message). When the server sends a reply with the result of the remote call back, the result is routed to the client that has sent the request originally. This pair uses *DEALER*/*ROUTER* :term:`ZeroMQ` sockets. The basic usage is:: import asyncio from aiozmq import rpc class Handler(rpc.AttrHandler): @rpc.method def remote(self, arg1, arg2): return arg1 + arg2 async def go(): server = await rpc.serve_rpc(Handler(), bind='tcp://127.0.0.1:5555') client = await rpc.connect_rpc(connect='tcp://127.0.0.1:5555') ret = await client.call.remote(1, 2) assert ret == 3 event_loop.run_until_complete(go()) .. function:: connect_rpc(*, connect=None, bind=None, loop=None, \ error_table=None, timeout=None, \ translation_table=None) A :ref:`coroutine` that creates and connects/binds *RPC* client. Usually for this function you need to use *connect* parameter, but :term:`ZeroMQ` does not forbid to use *bind*. Parameters *bind*, *connect* and *loop* work like that of :func:`aiozmq.create_zmq_connection`. :param dict error_table: an optional table for custom exception translators. .. seealso:: :ref:`aiozmq-rpc-exception-translation` :param float timeout: an optional timeout for RPC calls. If *timeout* is not *None* and remote call takes longer than *timeout* seconds then :exc:`asyncio.TimeoutError` will be raised on client side. If the server will return an answer after timeout has been raised that answer **is ignored**. .. seealso:: :meth:`RPCClient.with_timeout` method. :param dict translation_table: an optional table for custom value translators. .. seealso:: :ref:`aiozmq-rpc-value-translators` :return: :class:`RPCClient` instance. .. 
function:: serve_rpc(handler, *, bind=None, connect=None, loop=None, \ log_exceptions=False, exclude_log_exceptions=(), \ translation_table=None, timeout=None) A :ref:`coroutine` that creates and connects/binds *RPC* server instance. Usually for this function you need to use *bind* parameter, but :term:`ZeroMQ` does not forbid to use *connect*. Parameters *bind*, *connect* and *loop* work like that of :func:`aiozmq.create_zmq_connection`. :param aiozmq.rpc.AbstractHander handler: an object which processes incoming RPC calls. Usually you like to pass :class:`AttrHandler` instance. :param bool log_exceptions: log exceptions from remote calls if ``True``. .. seealso:: :ref:`aiozmq-rpc-log-exceptions` :param sequence exclude_log_exceptions: sequence of exception types that should not to be logged if *log_exceptions* is ``True``. .. seealso:: :ref:`aiozmq-rpc-log-exceptions` :param dict translation_table: an optional table for custom value translators. .. seealso:: :ref:`aiozmq-rpc-value-translators` :param float timeout: timeout for performing handling of async server calls. If call handling takes longer than *timeout* then procedure will be cancelled with :exc:`asyncio.TimeoutError`. The value should be a bit longer than timeout for client side. :return: :class:`Service` instance. .. versionchanged:: 0.2 Added *log_exceptions* parameter. .. _aiozmq-rpc-pushpull: Push-Pull ========= This is a **Notify** aka **Pipeline** pattern. Client calls a remote function on the server and **doesn't** wait for the result. If a *remote function call* raises an exception, this exception is only **logged** at the server side. Client **cannot** get any information about *processing the remote call on server*. Thus this is **one-way** communication: **fire and forget**. Let's assume that we have *N* clients bound to *M* servers. Any client can connect to several servers and any server can listen to multiple *endpoints*. 
When client sends a message, the message will be delivered to any server that is *ready* (doesn't processes another message). That's all. This pair uses *PUSH*/*PULL* :term:`ZeroMQ` sockets. The basic usage is:: import asyncio from aiozmq import rpc class Handler(rpc.AttrHandler): @rpc.method def remote(self): do_something(arg) async def go(): server = await rpc.serve_pipeline(Handler(), bind='tcp://127.0.0.1:5555') client = await rpc.connect_pipeline(connect='tcp://127.0.0.1:5555') ret = await client.notify.remote(1) event_loop.run_until_complete(go()) .. function:: connect_pipeline(*, connect=None, bind=None, loop=None, \ error_table=None, translation_table=None) A :ref:`coroutine` that creates and connects/binds *pipeline* client. Parameters *bind*, *connect* and *loop* work like that of :func:`aiozmq.create_zmq_connection`. Usually for this function you need to use *connect* parameter, but :term:`ZeroMQ` does not forbid to use *bind*. :param dict translation_table: an optional table for custom value translators. .. seealso:: :ref:`aiozmq-rpc-value-translators` :return: :class:`PipelineClient` instance. .. function:: serve_pipeline(handler, *, connect=None, bind=None, loop=None, \ log_exceptions=False, exclude_log_exceptions=(), \ translation_table=None, timeout=None) A :ref:`coroutine` that creates and connects/binds *pipeline* server instance. Usually for this function you need to use *bind* parameter, but :term:`ZeroMQ` does not forbid to use *connect*. Parameters *bind*, *connect* and *loop* work like that of :func:`aiozmq.create_zmq_connection`. :param aiozmq.rpc.AbstractHander handler: an object which processes incoming *pipeline* calls. Usually you like to pass :class:`AttrHandler` instance. :param bool log_exceptions: log exceptions from remote calls if ``True``. .. seealso:: :ref:`aiozmq-rpc-log-exceptions` :param sequence exclude_log_exceptions: sequence of exception types that should not to be logged if *log_exceptions* is ``True``. .. 
seealso:: :ref:`aiozmq-rpc-log-exceptions` :param dict translation_table: an optional table for custom value translators. .. seealso:: :ref:`aiozmq-rpc-value-translators` :param float timeout: timeout for performing handling of async server calls. If call handling takes longer than *timeout* then procedure will be cancelled with :exc:`asyncio.TimeoutError`. The value should be a bit longer than timeout for client side. :return: :class:`Service` instance. .. versionchanged:: 0.2 Added *log_exceptions* parameter. .. _aiozmq-rpc-pubsub: Publish-Subscribe ================= This is **PubSub** pattern. It's very close to :ref:`aiozmq-rpc-pubsub` but has some difference: * server *subscribes* to *topics* in order to receive messages only from that *topics*. * client sends a message to concrete *topic*. Let's assume we have *N* clients bound to *M* servers. Any client can connect to several servers and any server can listen to multiple *endpoints*. When client sends a message to *topic*, the message will be delivered to servers that only has been subscribed to this *topic*. This pair uses *PUB*/*SUB* :term:`ZeroMQ` sockets. The basic usage is:: import asyncio from aiozmq import rpc class Handler(rpc.AttrHandler): @rpc.method def remote(self): do_something(arg) async def go(): server = await rpc.serve_pubsub(Handler(), subscribe='topic', bind='tcp://127.0.0.1:5555') client = await rpc.connect_pubsub(connect='tcp://127.0.0.1:5555') ret = await client.publish('topic').remote(1) event_loop.run_until_complete(go()) .. function:: connect_pubsub(*, connect=None, bind=None, loop=None, \ error_table=None, translation_table=None) A :ref:`coroutine` that creates and connects/binds *pubsub* client. Usually for this function you need to use *connect* parameter, but :term:`ZeroMQ` does not forbid to use *bind*. Parameters *bind*, *connect* and *loop* work like that of :func:`aiozmq.create_zmq_connection`. :param dict translation_table: an optional table for custom value translators. .. 
seealso:: :ref:`aiozmq-rpc-value-translators` :return: :class:`PubSubClient` instance. .. function:: serve_pubsub(handler, *, connect=None, bind=None, subscribe=None,\ loop=None, log_exceptions=False, \ exclude_log_exceptions=(), translation_table=None,\ timeout=None) A :ref:`coroutine` that creates and connects/binds *pubsub* server instance. Usually for this function you need to use *bind* parameter, but :term:`ZeroMQ` does not forbid to use *connect*. Parameters *bind*, *connect* and *loop* work like that of :func:`aiozmq.create_zmq_connection`. :param aiozmq.rpc.AbstractHander handler: an object which processes incoming *pipeline* calls. Usually you like to pass :class:`AttrHandler` instance. :param bool log_exceptions: log exceptions from remote calls if ``True``. .. seealso:: :ref:`aiozmq-rpc-log-exceptions` :param sequence exclude_log_exceptions: sequence of exception types that should not to be logged if *log_exceptions* is ``True``. .. seealso:: :ref:`aiozmq-rpc-log-exceptions` :param subscribe: subscription specification. Subscribe server to *topics*. Allowed parameters are :class:`str`, :class:`bytes`, *iterable* of *str* or *bytes*. :param dict translation_table: an optional table for custom value translators. .. seealso:: :ref:`aiozmq-rpc-value-translators` :param float timeout: timeout for performing handling of async server calls. If call handling takes longer than *timeout* then procedure will be cancelled with :exc:`asyncio.TimeoutError`. The value should be a bit longer than timeout for client side. :return: :class:`PubSubService` instance. :raise OSError: on system error. :raise TypeError: if arguments have inappropriate type. .. versionchanged:: 0.2 Added *log_exceptions* parameter. .. 
_aiozmq-rpc-exception-translation: Exception translation on client side ==================================== If a remote server method raises an exception, that exception is passed back to the client and raised on the client side, as follows:: try: await client.call.func_raises_value_error() except ValueError as exc: log.exception(exc) The rules for exception translation are: * if remote method raises an exception --- server answers with *full exception class name* (like ``package.subpackage.MyError``) and *exception constructor arguments* (:attr:`~BaseException.args`). * *translator table* is a *mapping* of ``{excpetion_name: exc_class}`` where keys are *full names* of exception class (str) and values are exception classes. * if translation is found then client code gives exception ``raise exc_class(args)``. * user defined translators are searched first. * all :ref:`builtin exceptions ` are translated by default. * :exc:`NotFoundError` and :exc:`ParameterError` are translated by default also. * if there is no registered traslation then ``GenericError(excpetion_name, args)`` is raised. For example if custom RPC server handler can raise ``mod1.Error1`` and ``pack.mod2.Error2`` then *error_table* should be:: from mod1 import Error1 from pack.mod2 import Error2 error_table = {'mod1.Error1': Error1, 'pack.mod2.Error2': Error2} client = loop.run_until_complete( rpc.connect_rpc(connect='tcp://127.0.0.1:5555', error_table=error_table)) You have to have the way to import exception classes from server-side. Or you can build your own translators without server-side code, use only string for *full exception class name* and tuple of *args* --- that's up to you. .. seealso:: *error_table* argument in :func:`connect_rpc` function. .. _aiozmq-rpc-signature-validation: Signature validation ==================== Previous versions of the library performed automatic conversion of call parameters using the function's annotations. 
This feature interfered with PEP 484-style type annotations and it was removed in version 1.0. .. _aiozmq-rpc-value-translators: Value translators ================= aiozmq.rpc uses :term:`msgpack` for transferring python objects from client to server and back. You can think about :term:`msgpack` as: this is a-like JSON but fast and compact. Every object that can be passed to :func:`json.dump`, can be passed to :func:`msgpack.dump` also. The same for unpacking. The only difference is: *aiozmq.rpc* converts all :class:`lists ` to :class:`tuples `. The reasons is are: * you never need to modify given list as it is your *incoming* value. If you still want to use :class:`list` data type you can do it easy by ``list(val)`` call. * tuples are a bit faster for unpacking. * tuple can be a *key* in :class:`dict`, so you can pack something like ``{(1,2): 'a'}`` and unpack it on other side without any error. Lists cannot be *keys* in dicts, they are unhashable. This point is the main reason for choosing tuples. Unfortunatelly msgpack gives no way to mix tuples and lists in the same pack. But sometimes you want to call remote side with *non-plain-json* arguments. :class:`datetime.datetime` is a good example. :mod:`aiozmq.rpc` supports all family of dates, times and timezones from :mod:`datetime` *from-the-box* (:ref:`predefined translators `). If you need to transfer a custom object via RPC you should register **translator** at both server and client side. Say, you need to pass the instances of your custom class ``Point`` via RPC. 
There is an example:: import asyncio import aiozmq, aiozmq.rpc import msgpack class Point: def __init__(self, x, y): self.x = x self.y = y def __eq__(self, other): if isinstance(other, Point): return (self.x, self.y) == (other.x, other.y) return NotImplemented translation_table = { 0: (Point, lambda value: msgpack.packb((value.x, value.y)), lambda binary: Point(*msgpack.unpackb(binary))), } class ServerHandler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method def remote(self, val): return val async def go(): server = await aiozmq.rpc.serve_rpc( ServerHandler(), bind='tcp://127.0.0.1:5555', translation_table=translation_table) client = await aiozmq.rpc.connect_rpc( connect='tcp://127.0.0.1:5555', translation_table=translation_table) ret = await client.call.remote(Point(1, 2)) assert ret == Point(1, 2) You should create a *translation table* and pass it to both :func:`connect_rpc` and :func:`serve_rpc`. That's all, server and client now have all information about passing your ``Point`` via the wire. * Translation table is the dict. * Keys should be an integers in range [0, 127]. We recommend to use keys starting from 0 for custom translators, high numbers are reserved for library itself (it uses the same schema for passing *datetime* objects etc). * Values are tuples of ``(translated_class, packer, unpacker)``. * *translated_class* is a class which you want to pass to peer. * *packer* is a :term:`callable` which receives your class instance and returns :class:`bytes` of *instance data*. * *unpacker* is a :term:`callable` which receives :class:`bytes` of *instance data* and returns your *class instance*. * When the library tries to pack your class instance it searches the *translation table* in ascending order. * If your object is an :func:`instance ` of *translated_class* then *packer* is called and resulting :class:`bytes` will be sent to peer. * On unpacking *unpacker* is called with the :class:`bytes` received by peer. The result should to be your class instance. .. 
warning:: Please be careful with *translation table* order. Say, if you have :class:`object` at position 0 then every lookup will stop at this. Even *datetime* objects will be redirected to *packer* and *unpacker* for registered *object* type. .. warning:: While the easiest way to write *packer* and *unpacker* is to use :mod:`pickle` we **don't encourage that**. The reason is simple: *pickle* packs an object itself and all instances which are referenced by that object. So you can easy pass via network a half of your program without any warning. .. _aiozmq-rpc-predifined-translators: Table of predefined translators: +---------+-------------------------------+ | Ordinal | Class | +=========+===============================+ | 123 | :class:`datetime.tzinfo` | +---------+-------------------------------+ | 124 | :class:`datetime.timedelta` | +---------+-------------------------------+ | 125 | :class:`datetime.time` | +---------+-------------------------------+ | 126 | :class:`datetime.datetime` | +---------+-------------------------------+ | 127 | :class:`datetime.date` | +---------+-------------------------------+ .. note:: `pytz `_ timezones processed by predefined traslator for *tzinfo* (ordinal number 123) because they are inherited from :class:`datetime.tzinfo`. So you don't need to register a custom translator for ``pytz.datetime`` . That's happens because :mod:`aiozmq.rpc` uses :mod:`pickle` for translation :mod:`datetime` classes. Pickling in this particular case is **safe** because all datetime classes are terminals and doesn't have a links to foreign class instances. .. _aiozmq-rpc-log-exceptions: Logging exceptions from remote calls at server side =================================================== By default :mod:`aiozmq.rpc` does no logging if remote call raises an exception. That behavoir can be changed by passing ``log_exceptions=True`` to rpc servers: :func:`serve_rpc`, :func:`serve_pipeline` and :func:`serve_pubsub`. 
If, say, you make PubSub server as:: server = await rpc.serve_pubsub(handler, subscribe='topic', bind='tcp://127.0.0.1:5555', log_exceptions=True) then exceptions raised from *handler* remote calls will be logged by standard :attr:`aiozmq.rpc.logger`. But sometimes you don't want to log exceptions of some types. Say, you use your own exceptions as part of public API to report about expected failures. In this case you probably want to pass that exceptions over the log, but record all other unexpected errors. For that case you can use *exclude_log_exceptions* parameter:: server = await rpc.serve_rpc(handler, bind='tcp://127.0.0.1:7777', log_exceptions=True, exclude_log_exceptions=(MyError, OtherError)) Exceptions ========== .. exception:: Error Base class for :mod:`aiozmq.rpc` exceptions. Derived from :exc:`Exception`. .. exception:: GenericError Subclass of :exc:`Error`, raised when a remote call produces exception that cannot be translated. .. attribute:: exc_type A string contains *full name* of unknown exception(``"package.module.MyError"``). .. attribute:: arguments A tuple of arguments passed to *unknown exception* constructor .. seealso:: :attr:`BaseException.args` - parameters for exception constructor. .. seealso:: :ref:`aiozmq-rpc-exception-translation` .. exception:: NotFoundError Subclass of both :exc:`Error` and :exc:`LookupError`, raised when a remote call name is not found at RPC server. .. exception:: ParameterError Subclass of both :exc:`Error` and :exc:`ValueError`, raised by remote call when parameter substitution failed. .. exception:: ServiceClosedError Subclass of :exc:`Error`, raised :class:`Service` has been closed. .. seealso:: :attr:`Service.transport` property. Classes ======= .. decorator:: method Marks a decorated function as RPC endpoint handler. Methods are objects that returned by :meth:`AbstractHandler.__getitem__` lookup at RPC method search stage. .. class:: AbstractHandler The base class for all RPC handlers. 
Every handler should be *AbstractHandler* by direct inheritance or indirect subclassing (method *__getitem__* should be defined). Therefore :class:`AttrHandler` and :class:`dict` are both good citizens. Returned value eighter should implement :class:`AbstractHandler` interface itself for looking up forward or must be callable decorated by :func:`method`. .. method:: __getitem__(self, key) Returns subhandler or terminal function decorated by :func:`method`. :raise KeyError: if key is not found. .. seealso:: :func:`start_server` coroutine. .. class:: AttrHandler Subclass of :class:`AbstractHandler`. Does lookup for *subhandlers* and *rpc methods* by :func:`getattr`. Here is an example of a trivial *handler*:: class ServerHandler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method def remote_func(self, a:int, b:int) -> int: return a + b .. class:: Service RPC service base class. Instances of *Service* (or descendants) are returned by coroutines that create clients or servers (:func:`connect_rpc`, :func:`serve_rpc` and others). Implements :class:`asyncio.AbstractServer`. .. attribute:: transport The readonly property that returns service's :class:`transport `. You can use the transport to dynamically bind/unbind, connect/disconnect etc. :raise aiozmq.rpc.ServiceClosedError: if the service has been closed. .. method:: close() Stop serving. This leaves existing connections open. .. method:: wait_closed() :ref:`Coroutine ` to wait until service is closed. .. class:: RPCClient Class that returned by :func:`connect_rpc` call. Inherited from :class:`Service`. For RPC calls use :attr:`~RPCClient.rpc` property. .. attribute:: call The readonly property that returns ephemeral object used to making RPC call. A construction like:: ret = await client.call.ns.method(1, 2, 3) makes a remote call with arguments(1, 2, 3) and returns the answer from this call. 
You can also pass *named parameters*:: ret = await client.call.ns.method(1, b=2, c=3) If the call raises an exception that exception propagates to client side. Say, if remote raises :class:`ValueError` client catches ``ValueError`` instance with *args* sent by remote:: try: await client.call.raise_value_error() except ValueError as exc: process_error(exc) .. method:: with_timeout(timeout) Override default timeout for client. Can be used in two forms:: await client.with_timeout(1.5).call.func() and:: with client.with_timeout(1.5) as new_client: await new_client.call.func1() await new_client.call.func2() :param float timeout: a timeout for RPC calls. If *timeout* is not *None* and remote call takes longer than *timeout* seconds then :exc:`asyncio.TimeoutError` will be raised on client side. If the server will return an answer after timeout has been raised that answer **is ignored**. .. seealso:: :func:`connect_rpc` coroutine. .. seealso:: :ref:`aiozmq-rpc-exception-translation` .. class:: PipelineClient Class that returned by :func:`connect_pipeline` call. Inherited from :class:`Service`. .. attribute:: notify The readonly property that returns ephemeral object used to making notification call. Construction like:: ret = await client.notify.ns.method(1, 2, 3) makes a remote call with arguments(1, 2, 3) and returns *None*. You cannot get any answer from the server. .. class:: PubSubClient Class that is returned by :func:`connect_pubsub` call. Inherited from :class:`Service`. For *pubsub* calls use :meth:`~RPCClient.publish` method. .. method:: publish(topic) The call that returns an ephemeral object used to make a *publisher call*. A construction like:: ret = await client.publish('topic').ns.method(1, b=2) makes a remote call with arguments ``(1, b=2)`` and topic name ``b'topic'`` and returns *None*. You cannot get any answer from the server. Logger ====== .. data:: logger An instance of :class:`logging.Logger` with *name* ``aiozmq.rpc``. 
The library sends log messages (:ref:`aiozmq-rpc-log-exceptions` for example) to this logger. You can configure your own :ref:`handlers ` to filter, save or what-you-wish the log events from the library. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/docs/spelling_wordlist.txt0000644000076600000240000000000000000000000017213 0ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/docs/stream.rst0000644000076600000240000001516700000000000014757 0ustar00jellestaff.. _aiozmq-stream: Streams API =========================== .. currentmodule:: aiozmq .. versionadded:: 0.6 *aiozmq* provides a high level stream oriented API on top of the low-level API (:class:`ZmqTransport` and :class:`ZmqProtocol`) which can provide a more convinient API. Here's an example:: import asyncio import aiozmq import zmq async def go(): router = await aiozmq.create_zmq_stream( zmq.ROUTER, bind='tcp://127.0.0.1:*') addr = list(router.transport.bindings())[0] dealer = await aiozmq.create_zmq_stream( zmq.DEALER, connect=addr) for i in range(10): msg = (b'data', b'ask', str(i).encode('utf-8')) dealer.write(msg) data = await router.read() router.write(data) answer = await dealer.read() print(answer) dealer.close() router.close() asyncio.run(go()) The code creates two streams for request and response part of :term:`ZeroMQ` connection and sends message through the wire with waiting for response. Socket events can also be monitored when using streams. .. literalinclude:: ../examples/stream_monitor.py create_zmq_stream ----------------- .. function:: create_zmq_stream(zmq_type, *, bind=None, connect=None, \ loop=None, zmq_sock=None, \ high_read=None, low_read=None, \ high_write=None, low_write=None, \ events_backlog=100) A wrapper for :func:`create_zmq_connection` returning a ZeroMQ stream (:class:`ZmqStream` instance). 
The arguments are all the usual arguments to :func:`create_zmq_connection` plus high and low watermarks for reading and writing messages. This function is a :ref:`coroutine `. :param int zmq_type: a type of :term:`ZeroMQ` socket (*zmq.REQ*, *zmq.REP*, *zmq.PUB*, *zmq.SUB*, zmq.PAIR*, *zmq.DEALER*, *zmq.ROUTER*, *zmq.PULL*, *zmq.PUSH*, etc.) :param bind: endpoints specification. Every :term:`endpoint` generates call to :meth:`ZmqTransport.bind` for accepting connections from specified endpoint. Other side should use *connect* parameter to connect to this transport. :type bind: str or iterable of strings :param connect: endpoints specification. Every :term:`endpoint` generates call to :meth:`ZmqTransport.connect` for connecting transport to specified endpoint. Other side should use bind parameter to wait for incoming connections. :type connect: str or iterable of strings :param zmq.Socket zmq_sock: a preexisting zmq socket that will be passed to returned transport. :param asyncio.AbstractEventLoop loop: optional event loop instance, ``None`` for default event loop. :param int high_read: high-watermark for reading from :term:`ZeroMQ` socket. ``None`` by default (no limits). :param int low_read: low-watermark for reading from :term:`ZeroMQ` socket. ``None`` by default (no limits). :param int high_write: high-watermark for writing into :term:`ZeroMQ` socket. ``None`` by default (no limits). :param int low_write: low-watermark for writing into :term:`ZeroMQ` socket. ``None`` by default (no limits). :param int events_backlog: backlog size for monitoring events, ``100`` by default. It specifies size of event queue. If count of unread events exceeds *events_backlog* the oldest events are discarded. Use ``None`` for unlimited backlog size. :return: ZeroMQ stream object, :class:`ZmqStream` instance. .. versionadded:: 0.7 events_backlog parameter ZmqStream --------- .. class:: ZmqStream A class for sending and receiving :term:`ZeroMQ` messages. .. 
attribute:: transport :class:`ZmqTransport` instance, used for the stream. .. method:: at_closing() Return ``True`` if the buffer is empty and :meth:`feed_closing` was called. .. method:: close() Close the stream and underlying :term:`ZeroMQ` socket. .. method:: drain() Wait until the write buffer of the underlying transport is flushed. The intended use is to write:: w.write(data) await w.drain() When the transport buffer is full (the protocol is paused), block until the buffer is (partially) drained and the protocol is resumed. When there is nothing to wait for, the await continues immediately. This method is a :ref:`coroutine `. .. method:: exception() Get the stream exception. .. method:: get_extra_info(name, default=None) Return optional transport information: see :meth:`asyncio.BaseTransport.get_extra_info`. .. method:: read() Read one :term:`ZeroMQ` message from the wire and return it. Raise :exc:`ZmqStreamClosed` if the stream was closed. .. method:: read_event() Read one :term:`ZeroMQ` monitoring event and return it. Raise :exc:`ZmqStreamClosed` if the stream was closed. Monitoring mode should be enabled by :meth:`ZmqTransport.enable_monitor` call first:: await stream.transport.enable_monitor() .. versionadded:: 0.7 .. method:: write(msg) Writes message *msg* into :term:`ZeroMQ` socket. :param msg: a sequence (:class:`tuple` or :class:`list`), containing multipart message daata. *Internal API* .. method:: set_exception(exc) Set the exception to *exc*. The exception may be retrieved by :meth:`exception` call or raised by next :meth:`read`, *the private method*. .. method:: set_transport(transport) Set the transport to *transport*, *the private method*. .. method:: set_read_buffer_limits(high=None, low=None) Set read buffer limits, *the private method*. .. method:: feed_closing() Feed the socket closing signal, *the private method*. .. method:: feed_msg(msg) Feed *msg* message to the stream's internal buffer. 
Any operations waiting for the data will be resumed. *The private method*. .. method:: feed_event(event) Feed a socket *event* message to the stream's internal buffer. *The private method*. Exceptions ---------- .. exception:: ZmqStreamClosed Raised by read operations on closed stream. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1667439287.3555114 aiozmq-1.0.0/examples/0000755000076600000240000000000000000000000013606 5ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/examples/core_dealer_router.py0000644000076600000240000000305300000000000020025 0ustar00jellestaffimport asyncio import aiozmq import zmq class ZmqDealerProtocol(aiozmq.ZmqProtocol): transport = None def __init__(self, queue, on_close): self.queue = queue self.on_close = on_close def connection_made(self, transport): self.transport = transport def msg_received(self, msg): self.queue.put_nowait(msg) def connection_lost(self, exc): self.on_close.set_result(exc) class ZmqRouterProtocol(aiozmq.ZmqProtocol): transport = None def __init__(self, on_close): self.on_close = on_close def connection_made(self, transport): self.transport = transport def msg_received(self, msg): self.transport.write(msg) def connection_lost(self, exc): self.on_close.set_result(exc) async def go(): router_closed = asyncio.Future() dealer_closed = asyncio.Future() router, _ = await aiozmq.create_zmq_connection( lambda: ZmqRouterProtocol(router_closed), zmq.ROUTER, bind="tcp://127.0.0.1:*" ) addr = list(router.bindings())[0] queue = asyncio.Queue() dealer, _ = await aiozmq.create_zmq_connection( lambda: ZmqDealerProtocol(queue, dealer_closed), zmq.DEALER, connect=addr ) for i in range(10): msg = (b"data", b"ask", str(i).encode("utf-8")) dealer.write(msg) answer = await queue.get() print(answer) dealer.close() await dealer_closed router.close() await router_closed def main(): asyncio.run(go()) print("DONE") if __name__ == 
"__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/examples/rpc_custom_translator.py0000644000076600000240000000221000000000000020602 0ustar00jellestaffimport asyncio import aiozmq.rpc import msgpack class Point: def __init__(self, x, y): self.x = x self.y = y def __eq__(self, other): if isinstance(other, Point): return (self.x, self.y) == (other.x, other.y) return NotImplemented translation_table = { 0: ( Point, lambda value: msgpack.packb((value.x, value.y)), lambda binary: Point(*msgpack.unpackb(binary)), ), } class ServerHandler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method def remote(self, val): return val async def go(): server = await aiozmq.rpc.serve_rpc( ServerHandler(), bind="tcp://*:*", translation_table=translation_table ) server_addr = list(server.transport.bindings())[0] client = await aiozmq.rpc.connect_rpc( connect=server_addr, translation_table=translation_table ) ret = await client.call.remote(Point(1, 2)) assert ret == Point(1, 2) server.close() await server.wait_closed() client.close() await client.wait_closed() def main(): asyncio.run(go()) print("DONE") if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/examples/rpc_dict_handler.py0000644000076600000240000000130000000000000017436 0ustar00jellestaffimport asyncio import aiozmq.rpc @aiozmq.rpc.method def a(): return "a" @aiozmq.rpc.method def b(): return "b" handlers_dict = {"a": a, "subnamespace": {"b": b}} async def go(): server = await aiozmq.rpc.serve_rpc(handlers_dict, bind="tcp://*:*") server_addr = list(server.transport.bindings())[0] client = await aiozmq.rpc.connect_rpc(connect=server_addr) ret = await client.call.a() assert "a" == ret ret = await client.call.subnamespace.b() assert "b" == ret server.close() await server.wait_closed() client.close() await client.wait_closed() def main(): asyncio.run(go()) print("DONE") if 
__name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/examples/rpc_dynamic.py0000644000076600000240000000203200000000000016445 0ustar00jellestaffimport asyncio import aiozmq.rpc class DynamicHandler(aiozmq.rpc.AttrHandler): def __init__(self, namespace=()): self.namespace = namespace def __getitem__(self, key): try: return getattr(self, key) except AttributeError: return DynamicHandler(self.namespace + (key,)) @aiozmq.rpc.method def func(self): return (self.namespace, "val") async def go(): server = await aiozmq.rpc.serve_rpc(DynamicHandler(), bind="tcp://*:*") server_addr = list(server.transport.bindings())[0] client = await aiozmq.rpc.connect_rpc(connect=server_addr) ret = await client.call.func() assert ((), "val") == ret, ret ret = await client.call.a.func() assert (("a",), "val") == ret, ret ret = await client.call.a.b.func() assert (("a", "b"), "val") == ret, ret server.close() await server.wait_closed() client.close() await client.wait_closed() def main(): asyncio.run(go()) print("DONE") if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/examples/rpc_exception_translator.py0000644000076600000240000000153200000000000021274 0ustar00jellestaffimport asyncio import aiozmq.rpc class CustomError(Exception): def __init__(self, val): self.val = val super().__init__(val) exc_name = CustomError.__module__ + "." 
+ CustomError.__name__ error_table = {exc_name: CustomError} class ServerHandler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method def remote(self, val): raise CustomError(val) async def go(): server = await aiozmq.rpc.serve_rpc(ServerHandler(), bind="tcp://*:*") server_addr = list(server.transport.bindings())[0] client = await aiozmq.rpc.connect_rpc(connect=server_addr, error_table=error_table) try: await client.call.remote("value") except CustomError as exc: exc.val == "value" server.close() client.close() def main(): asyncio.run(go()) print("DONE") if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/examples/rpc_incorrect_calls.py0000644000076600000240000000233100000000000020171 0ustar00jellestaffimport asyncio import aiozmq.rpc class ServerHandler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method def remote_func(self, a: int, b: int) -> int: return a + b async def go(): server = await aiozmq.rpc.serve_rpc(ServerHandler(), bind="tcp://*:*") server_addr = list(server.transport.bindings())[0] client = await aiozmq.rpc.connect_rpc(connect=server_addr) try: await client.call.unknown_function() except aiozmq.rpc.NotFoundError as exc: print("client.rpc.unknown_function(): {}".format(exc)) try: await client.call.remote_func(bad_arg=1) except aiozmq.rpc.ParametersError as exc: print("client.rpc.remote_func(bad_arg=1): {}".format(exc)) try: await client.call.remote_func(1) except aiozmq.rpc.ParametersError as exc: print("client.rpc.remote_func(1): {}".format(exc)) try: await client.call.remote_func("a", "b") except aiozmq.rpc.ParametersError as exc: print("client.rpc.remote_func('a', 'b'): {}".format(exc)) server.close() await server.wait_closed() client.close() await client.wait_closed() def main(): asyncio.run(go()) print("DONE") if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 
aiozmq-1.0.0/examples/rpc_pipeline.py0000644000076600000240000000164100000000000016633 0ustar00jellestaffimport asyncio import aiozmq.rpc from itertools import count class Handler(aiozmq.rpc.AttrHandler): def __init__(self): self.connected = False @aiozmq.rpc.method def remote_func(self, step, a: int, b: int): self.connected = True print("HANDLER", step, a, b) async def go(): handler = Handler() listener = await aiozmq.rpc.serve_pipeline(handler, bind="tcp://*:*") listener_addr = list(listener.transport.bindings())[0] notifier = await aiozmq.rpc.connect_pipeline(connect=listener_addr) for step in count(0): await notifier.notify.remote_func(step, 1, 2) if handler.connected: break else: await asyncio.sleep(0.01) listener.close() await listener.wait_closed() notifier.close() await notifier.wait_closed() def main(): asyncio.run(go()) print("DONE") if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/examples/rpc_pubsub.py0000644000076600000240000000203000000000000016317 0ustar00jellestaffimport asyncio import aiozmq.rpc from itertools import count class Handler(aiozmq.rpc.AttrHandler): def __init__(self): self.connected = False @aiozmq.rpc.method def remote_func(self, step, a: int, b: int): self.connected = True print("HANDLER", step, a, b) async def go(): handler = Handler() subscriber = await aiozmq.rpc.serve_pubsub( handler, subscribe="topic", bind="tcp://127.0.0.1:*", log_exceptions=True ) subscriber_addr = list(subscriber.transport.bindings())[0] print("SERVE", subscriber_addr) publisher = await aiozmq.rpc.connect_pubsub(connect=subscriber_addr) for step in count(0): await publisher.publish("topic").remote_func(step, 1, 2) if handler.connected: break else: await asyncio.sleep(0.1) subscriber.close() await subscriber.wait_closed() publisher.close() await publisher.wait_closed() def main(): asyncio.run(go()) print("DONE") if __name__ == "__main__": main() 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/examples/rpc_simple.py0000644000076600000240000000121000000000000016307 0ustar00jellestaffimport asyncio import aiozmq.rpc class ServerHandler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method def remote_func(self, a: int, b: int) -> int: return a + b async def go(): server = await aiozmq.rpc.serve_rpc(ServerHandler(), bind="tcp://*:*") server_addr = list(server.transport.bindings())[0] client = await aiozmq.rpc.connect_rpc(connect=server_addr) ret = await client.call.remote_func(1, 2) assert 3 == ret server.close() await server.wait_closed() client.close() await client.wait_closed() def main(): asyncio.run(go()) print("DONE") if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/examples/rpc_with_subhandlers.py0000644000076600000240000000205000000000000020366 0ustar00jellestaffimport asyncio import aiozmq.rpc class Handler(aiozmq.rpc.AttrHandler): def __init__(self, ident): self.ident = ident self.subhandler = SubHandler(self.ident, "subident") @aiozmq.rpc.method def a(self): return (self.ident, "a") class SubHandler(aiozmq.rpc.AttrHandler): def __init__(self, ident, subident): self.ident = ident self.subident = subident @aiozmq.rpc.method def b(self): return (self.ident, self.subident, "b") async def go(): server = await aiozmq.rpc.serve_rpc(Handler("ident"), bind="tcp://*:*") server_addr = list(server.transport.bindings())[0] client = await aiozmq.rpc.connect_rpc(connect=server_addr) ret = await client.call.a() assert ("ident", "a") == ret ret = await client.call.subhandler.b() assert ("ident", "subident", "b") == ret server.close() await server.wait_closed() client.close() await client.wait_closed() def main(): asyncio.run(go()) print("DONE") if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667226339.0 
aiozmq-1.0.0/examples/socket_event_monitor.py0000644000076600000240000000647100000000000020430 0ustar00jellestaff""" This example demonstrates how to use the ZMQ socket monitor to receive socket events. The socket event monitor capability requires libzmq >= 4 and pyzmq >= 14.4. """ import asyncio import aiozmq import zmq ZMQ_EVENTS = { getattr(zmq, name): name.replace("EVENT_", "").lower().replace("_", " ") for name in [i for i in dir(zmq) if i.startswith("EVENT_")] } def event_description(event): """Return a human readable description of the event""" return ZMQ_EVENTS.get(event, "unknown") class Protocol(aiozmq.ZmqProtocol): def __init__(self): self.wait_ready = asyncio.Future() self.wait_done = asyncio.Future() self.wait_closed = asyncio.Future() self.count = 0 def connection_made(self, transport): self.transport = transport self.wait_ready.set_result(True) def connection_lost(self, exc): self.wait_closed.set_result(exc) def msg_received(self, data): # This protocol is used by both the Router and Dealer sockets in # this example. Router sockets prefix messages with the identity # of the sender and hence contain two frames in this simple test # protocol. if len(data) == 2: identity, msg = data assert msg == b"Hello" self.transport.write([identity, b"World"]) else: msg = data[0] assert msg == b"World" self.count += 1 if self.count >= 4: self.wait_done.set_result(True) def event_received(self, event): print( "event:{}, value:{}, endpoint:{}, description:{}".format( event.event, event.value, event.endpoint, event_description(event.event) ) ) async def go(): st, sp = await aiozmq.create_zmq_connection( Protocol, zmq.ROUTER, bind="tcp://127.0.0.1:*" ) await sp.wait_ready addr = list(st.bindings())[0] ct, cp = await aiozmq.create_zmq_connection(Protocol, zmq.DEALER, connect=addr) await cp.wait_ready # Enable the socket monitor on the client socket. Socket events # are passed to the 'event_received' method on the client protocol. 
await ct.enable_monitor() # Trigger some socket events while also sending a message to the # server. When the client protocol receives 4 response it will # fire the wait_done future. for i in range(4): await asyncio.sleep(0.1) await ct.disconnect(addr) await asyncio.sleep(0.1) await ct.connect(addr) await asyncio.sleep(0.1) ct.write([b"Hello"]) await cp.wait_done # The socket monitor can be explicitly disabled if necessary. # await ct.disable_monitor() # If a socket monitor is left enabled on a socket being closed, # the socket monitor will be closed automatically. ct.close() await cp.wait_closed st.close() await sp.wait_closed def main(): asyncio.run(go()) print("DONE") if __name__ == "__main__": # import logging # logging.basicConfig(level=logging.DEBUG) if zmq.zmq_version_info() < (4,) or zmq.pyzmq_version_info() < (14, 4): raise NotImplementedError( "Socket monitor requires libzmq >= 4 and pyzmq >= 14.4, " "have libzmq:{}, pyzmq:{}".format(zmq.zmq_version(), zmq.pyzmq_version()) ) main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/examples/stream_dealer_router.py0000644000076600000240000000115300000000000020367 0ustar00jellestaffimport asyncio import aiozmq import zmq async def go(): router = await aiozmq.create_zmq_stream(zmq.ROUTER, bind="tcp://127.0.0.1:*") addr = list(router.transport.bindings())[0] dealer = await aiozmq.create_zmq_stream(zmq.DEALER, connect=addr) for i in range(10): msg = (b"data", b"ask", str(i).encode("utf-8")) dealer.write(msg) data = await router.read() router.write(data) answer = await dealer.read() print(answer) dealer.close() router.close() def main(): asyncio.run(go()) print("DONE") if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/examples/stream_monitor.py0000644000076600000240000000162700000000000017230 0ustar00jellestaffimport asyncio import aiozmq import zmq async def 
monitor_stream(stream): try: while True: event = await stream.read_event() print(event) except aiozmq.ZmqStreamClosed: pass async def go(): router = await aiozmq.create_zmq_stream(zmq.ROUTER, bind="tcp://127.0.0.1:*") addr = list(router.transport.bindings())[0] dealer = await aiozmq.create_zmq_stream(zmq.DEALER) await dealer.transport.enable_monitor() asyncio.Task(monitor_stream(dealer)) await dealer.transport.connect(addr) for i in range(10): msg = (b"data", b"ask", str(i).encode("utf-8")) dealer.write(msg) data = await router.read() router.write(data) answer = await dealer.read() print(answer) router.close() dealer.close() def main(): asyncio.run(go()) print("DONE") if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/examples/sync_async.py0000644000076600000240000000645400000000000016342 0ustar00jellestaffimport asyncio import aiozmq import zmq import argparse import time def main(): ap = argparse.ArgumentParser() ap.add_argument( "--addr", default="tcp://127.0.0.1:7777", help="Address to use, default `%(default)s`", ) gr = ap.add_mutually_exclusive_group() gr.add_argument( "--sync", action="store_const", dest="mode", const=sync_main, default=None, help="Run synchronous example", ) gr.add_argument( "--async", action="store_const", dest="mode", const=async_main, help="Run asynchronous example", ) ap.add_argument( "--client", action="store_true", default=False, help="Run client part" ) ap.add_argument( "--server", action="store_true", default=False, help="Run server part" ) options = ap.parse_args() return options.mode(options) def read_data(): return input("Enter some phrase: ").encode("utf-8").split() def sync_main(options): print("Running sync at {!r}".format(options.addr)) ctx = zmq.Context() srv_sock = cl_sock = None if options.server: srv_sock = ctx.socket(zmq.ROUTER) srv_sock.bind(options.addr) if options.client: cl_sock = ctx.socket(zmq.DEALER) 
cl_sock.connect(options.addr) data = read_data() cl_sock.send_multipart(data) print("Sync client write: {!r}".format(data)) while True: if srv_sock: try: data = srv_sock.recv_multipart(zmq.NOBLOCK) print("Sync server read: {!r}".format(data)) srv_sock.send_multipart(data) print("Sync server write: {!r}".format(data)) except zmq.ZMQError: pass if cl_sock: try: data = cl_sock.recv_multipart(zmq.NOBLOCK) print("Sync client read: {!r}".format(data)) return except zmq.ZMQError: pass time.sleep(0.1) def async_main(options): print("Running async at {!r}".format(options.addr)) loop = asyncio.get_event_loop() stop = asyncio.Future() async def server(): router = await aiozmq.create_zmq_stream(zmq.ROUTER, bind=options.addr) while True: try: data = await router.read() except asyncio.CancelledError: break print("Async server read: {!r}".format(data)) router.write(data) print("Async server write: {!r}".format(data)) router.close() async def client(): dealer = await aiozmq.create_zmq_stream(zmq.DEALER, connect=options.addr) data = read_data() dealer.write(data) print("Async client write: {!r}".format(data)) echo = await dealer.read() print("Async client read: {!r}".format(echo)) stop.set_result(None) tasks = [] if options.server: tasks.append(asyncio.ensure_future(server())) if options.client: tasks.append(asyncio.ensure_future(client())) if tasks: try: loop.run_until_complete(stop) except KeyboardInterrupt: loop.call_soon(loop.stop) loop.run_forever() loop.close() if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/requirements-bench.txt0000644000076600000240000000011700000000000016330 0ustar00jellestaffmatplotlib==2.0.0 scipy>=0.13.3 numpy>=1.8.1 uvloop>=0.5.4 -r requirements.txt ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667437957.0 aiozmq-1.0.0/requirements.txt0000644000076600000240000000027100000000000015254 0ustar00jellestaffmsgpack>=0.5.0 
pycodestyle>=2.4.0 pyflakes>=0.8.1 pyzmq>=14.2.0 wheel>=0.23.0 ipython==8.6.0 ipdb==0.13.9 sphinx==5.3.0 sphinxcontrib-spelling==7.6.2 readthedocs-sphinx-ext==2.2.0 -e . ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/runtests.py0000644000076600000240000002342300000000000014235 0ustar00jellestaff"""Run aiozmq unittests. Usage: python3 runtests.py [flags] [pattern] ... Patterns are matched against the fully qualified name of the test, including package, module, class and method, e.g. 'tests.test_events.PolicyTests.testPolicy'. For full help, try --help. runtests.py --coverage is equivalent of: $(COVERAGE) run --branch runtests.py -v $(COVERAGE) html $(list of files) $(COVERAGE) report -m $(list of files) """ # Originally written by Beech Horn (for NDB). import argparse import gc import logging import os import re import shutil import sys import unittest import threading import traceback import textwrap import importlib.machinery try: import coverage except ImportError: coverage = None from unittest.signals import installHandler assert sys.version_info >= (3, 3), "Please use Python 3.3 or higher." 
ARGS = argparse.ArgumentParser(description="Run all unittests.") ARGS.add_argument( "-v", action="store", dest="verbose", nargs="?", const=1, type=int, default=0, help="verbose", ) ARGS.add_argument("-x", action="store_true", dest="exclude", help="exclude tests") ARGS.add_argument( "-f", "--failfast", action="store_true", default=False, dest="failfast", help="Stop on first fail or error", ) ARGS.add_argument( "-c", "--catch", action="store_true", default=False, dest="catchbreak", help="Catch control-C and display results", ) ARGS.add_argument( "--forever", action="store_true", dest="forever", default=False, help="run tests forever to catch sporadic errors", ) ARGS.add_argument( "--findleaks", action="store_true", dest="findleaks", help="detect tests that leak memory", ) ARGS.add_argument("-q", action="store_true", dest="quiet", help="quiet") ARGS.add_argument( "--tests", action="store", dest="testsdir", default="tests", help="tests directory" ) ARGS.add_argument( "--coverage", action="store_true", dest="coverage", help="enable html coverage report", ) ARGS.add_argument( "pattern", action="store", nargs="*", help="optional regex patterns to match test ids (default all tests)", ) COV_ARGS = argparse.ArgumentParser(description="Run all unittests.") COV_ARGS.add_argument( "--coverage", action="store", dest="coverage", nargs="?", const="", help="enable coverage report and provide python files directory", ) def load_modules(basedir, suffix=".py", *, verbose=False): def list_dir(prefix, dir): files = [] modpath = os.path.join(dir, "__init__.py") if os.path.isfile(modpath): mod = os.path.split(dir)[-1] files.append(("{}{}".format(prefix, mod), modpath)) prefix = "{}{}.".format(prefix, mod) for name in os.listdir(dir): path = os.path.join(dir, name) if os.path.isdir(path): files.extend(list_dir("{}{}.".format(prefix, name), path)) else: if ( name != "__init__.py" and name.endswith(suffix) and not name.startswith((".", "_")) ): files.append(("{}{}".format(prefix, name[:-3]), 
path)) return files mods = [] for modname, sourcefile in list_dir("", basedir): if modname == "runtests": continue try: loader = importlib.machinery.SourceFileLoader(modname, sourcefile) mods.append((loader.load_module(), sourcefile)) except SyntaxError: raise except Exception as err: print("Skipping '{}': {}".format(modname, err), file=sys.stderr) if verbose: try: traceback.print_exc() except Exception: pass return mods class TestsFinder: def __init__(self, testsdir, includes=(), excludes=(), *, verbose=False): self._testsdir = testsdir self._includes = includes self._excludes = excludes self._verbose = verbose self.find_available_tests() def find_available_tests(self): """ Find available test classes without instantiating them. """ self._test_factories = [] mods = [mod for mod, _ in load_modules(self._testsdir, verbose=self._verbose)] for mod in mods: for name in set(dir(mod)): obj = getattr(mod, name) if isinstance(obj, type) and issubclass(obj, unittest.TestCase): self._test_factories.append(getattr(mod, name)) def load_tests(self): """ Load test cases from the available test classes and apply optional include / exclude filters. 
""" loader = unittest.TestLoader() suite = unittest.TestSuite() for test_factory in self._test_factories: tests = loader.loadTestsFromTestCase(test_factory) if self._includes: tests = [ test for test in tests if any(re.search(pat, test.id()) for pat in self._includes) ] if self._excludes: tests = [ test for test in tests if not any(re.search(pat, test.id()) for pat in self._excludes) ] suite.addTests(tests) return suite class TestResult(unittest.TextTestResult): def __init__(self, stream, descriptions, verbosity): super().__init__(stream, descriptions, verbosity) self.leaks = [] def startTest(self, test): super().startTest(test) gc.collect() def addSuccess(self, test): super().addSuccess(test) gc.collect() if gc.garbage: if self.showAll: self.stream.writeln( " Warning: test created {} uncollectable " "object(s).".format(len(gc.garbage)) ) # move the uncollectable objects somewhere so we don't see # them again self.leaks.append((self.getDescription(test), gc.garbage[:])) del gc.garbage[:] class ThreadCntRunner(unittest.TextTestRunner): def run(self, test): cnt1 = threading.active_count() result = super().run(test) cnt2 = threading.active_count() if cnt1 != cnt2: self.stream.writeln("{} extra threads".format(cnt2 - cnt1)) return result class TestRunner(ThreadCntRunner): resultclass = TestResult def run(self, test): result = super().run(test) if result.leaks: self.stream.writeln("{} tests leaks:".format(len(result.leaks))) for name, leaks in result.leaks: self.stream.writeln(" " * 4 + name + ":") for leak in leaks: self.stream.writeln(" " * 8 + repr(leak)) return result def runtests(): args = ARGS.parse_args() if args.coverage and coverage is None: URL = "bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py" print( textwrap.dedent( """ coverage package is not installed. 
To install coverage3 for Python 3, you need: - Setuptools (https://pypi.python.org/pypi/setuptools) What worked for me: - download {0} * curl -O https://{0} - python3 ez_setup.py - python3 -m easy_install coverage """.format( URL ) ).strip() ) sys.exit(1) testsdir = os.path.abspath(args.testsdir) if not os.path.isdir(testsdir): print("Tests directory is not found: {}\n".format(testsdir)) ARGS.print_help() return excludes = includes = [] if args.exclude: excludes = args.pattern else: includes = args.pattern v = 0 if args.quiet else args.verbose + 1 failfast = args.failfast catchbreak = args.catchbreak findleaks = args.findleaks runner_factory = TestRunner if findleaks else ThreadCntRunner if args.coverage: cov = coverage.coverage( branch=True, source=["aiozmq"], ) cov.start() finder = TestsFinder(args.testsdir, includes, excludes, verbose=args.verbose) logger = logging.getLogger() if v == 0: logger.setLevel(logging.CRITICAL) elif v == 1: logger.setLevel(logging.ERROR) elif v == 2: logger.setLevel(logging.WARNING) elif v == 3: logger.setLevel(logging.INFO) elif v >= 4: logger.setLevel(logging.DEBUG) if catchbreak: installHandler() success = False try: if args.forever: while True: tests = finder.load_tests() result = runner_factory( verbosity=v, failfast=failfast, warnings="always" ).run(tests) if not result.wasSuccessful(): sys.exit(1) else: tests = finder.load_tests() result = runner_factory( verbosity=v, failfast=failfast, warnings="always" ).run(tests) success = result.wasSuccessful() sys.exit(not success) finally: if args.coverage: cov.stop() cov.save() if os.path.exists("htmlcov"): shutil.rmtree("htmlcov") cov.html_report(directory="htmlcov") print("\nCoverage report:") cov.report(show_missing=False) here = os.path.dirname(os.path.abspath(__file__)) print("\nFor html report:") print("open file://{}/htmlcov/index.html".format(here)) os._exit(not success) if __name__ == "__main__": runtests() ././@PaxHeader0000000000000000000000000000003400000000000010212 
xustar0028 mtime=1667439287.3658943 aiozmq-1.0.0/setup.cfg0000644000076600000240000000020700000000000013610 0ustar00jellestaff[easy_install] zip_ok = false [nosetests] nocapture = 1 cover-package = aiozmq cover-erase = 1 [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667228113.0 aiozmq-1.0.0/setup.py0000644000076600000240000000435600000000000013512 0ustar00jellestaffimport os import re import sys from setuptools import setup, find_packages install_requires = ["pyzmq>=13.1,!=17.1.2"] tests_require = install_requires + ["msgpack>=0.5.0"] extras_require = {"rpc": ["msgpack>=0.5.0"]} if sys.version_info < (3, 6): raise RuntimeError("aiozmq requires Python 3.6 or higher") def read(f): return open(os.path.join(os.path.dirname(__file__), f)).read().strip() def read_version(): regexp = re.compile(r'^__version__\W*=\W*"([\d.abrc]+)"') init_py = os.path.join(os.path.dirname(__file__), "aiozmq", "__init__.py") with open(init_py) as f: for line in f: match = regexp.match(line) if match is not None: return match.group(1) else: raise RuntimeError("Cannot find version in aiozmq/__init__.py") classifiers = [ "License :: OSI Approved :: BSD License", "Intended Audience :: Developers", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Operating System :: POSIX", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Environment :: Web Environment", "Development Status :: 4 - Beta", "Framework :: AsyncIO", ] setup( name="aiozmq", version=read_version(), description=("ZeroMQ integration with asyncio."), long_description="\n\n".join((read("README.rst"), read("CHANGES.txt"))), classifiers=classifiers, platforms=["POSIX", "Windows", "MacOS X"], 
author="Nikolay Kim", author_email="fafhrd91@gmail.com", maintainer="Jelle Zijlstra", maintainer_email="jelle.zijlstra@gmail.com", url="http://aiozmq.readthedocs.org", download_url="https://pypi.python.org/pypi/aiozmq", license="BSD", packages=find_packages(), install_requires=install_requires, tests_require=tests_require, extras_require=extras_require, entry_points={ "console_scripts": [ "aiozmq-proxy = aiozmq.cli.proxy:main", ], }, include_package_data=True, ) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1667439287.3647473 aiozmq-1.0.0/tests/0000755000076600000240000000000000000000000013132 5ustar00jellestaff././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/echo.py0000644000076600000240000000015600000000000014424 0ustar00jellestaffimport os if __name__ == "__main__": while True: buf = os.read(0, 1024) os.write(1, buf) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/echo2.py0000644000076600000240000000017700000000000014511 0ustar00jellestaffimport os if __name__ == "__main__": buf = os.read(0, 1024) os.write(1, b"OUT:" + buf) os.write(2, b"ERR:" + buf) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/echo3.py0000644000076600000240000000036000000000000014504 0ustar00jellestaffimport os if __name__ == "__main__": while True: buf = os.read(0, 1024) try: os.write(1, b"OUT:" + buf) except OSError as ex: os.write(2, b"ERR:" + ex.__class__.__name__.encode("ascii")) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/interface_test.py0000644000076600000240000000554300000000000016512 0ustar00jellestaffimport asyncio import unittest import aiozmq from aiozmq.core import SocketEvent class ZmqTransportTests(unittest.TestCase): def setUp(self): self.loop = asyncio.new_event_loop() 
asyncio.set_event_loop(self.loop) def tearDown(self): self.loop.close() asyncio.set_event_loop(None) def test_interface(self): tr = aiozmq.ZmqTransport() self.assertRaises(NotImplementedError, tr.write, [b"data"]) self.assertRaises(NotImplementedError, tr.abort) self.assertRaises(NotImplementedError, tr.getsockopt, 1) self.assertRaises(NotImplementedError, tr.setsockopt, 1, 2) self.assertRaises(NotImplementedError, tr.set_write_buffer_limits) self.assertRaises(NotImplementedError, tr.get_write_buffer_limits) self.assertRaises(NotImplementedError, tr.get_write_buffer_size) self.assertRaises(NotImplementedError, tr.pause_reading) self.assertRaises(NotImplementedError, tr.resume_reading) self.assertRaises(NotImplementedError, tr.bind, "endpoint") self.assertRaises(NotImplementedError, tr.unbind, "endpoint") self.assertRaises(NotImplementedError, tr.bindings) self.assertRaises(NotImplementedError, tr.connect, "endpoint") self.assertRaises(NotImplementedError, tr.disconnect, "endpoint") self.assertRaises(NotImplementedError, tr.connections) self.assertRaises(NotImplementedError, tr.subscribe, b"filter") self.assertRaises(NotImplementedError, tr.unsubscribe, b"filter") self.assertRaises(NotImplementedError, tr.subscriptions) with self.assertRaises(NotImplementedError): self.loop.run_until_complete(tr.enable_monitor()) with self.assertRaises(NotImplementedError): self.loop.run_until_complete(tr.disable_monitor()) class ZmqProtocolTests(unittest.TestCase): def test_interface(self): pr = aiozmq.ZmqProtocol() self.assertIsNone(pr.msg_received((b"data",))) self.assertIsNone( pr.event_received( SocketEvent(event=1, value=1, endpoint="tcp://127.0.0.1:12345") ) ) class ZmqEventProtocolTests(unittest.TestCase): def setUp(self): self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) def tearDown(self): self.loop.close() asyncio.set_event_loop(None) def test_interface(self): pr = aiozmq.ZmqProtocol() epr = aiozmq.core._ZmqEventProtocol(self.loop, pr) # event 
messages are two frames with self.assertRaises(RuntimeError): epr.msg_received([b""]) # event messages expect 6 bytes in the first frame with self.assertRaises(RuntimeError): epr.msg_received([b"12345", b""]) with self.assertRaises(RuntimeError): epr.msg_received([b"1234567", b""]) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/keycert3.pem0000644000076600000240000000772200000000000015376 0ustar00jellestaff-----BEGIN PRIVATE KEY----- MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAMLgD0kAKDb5cFyP jbwNfR5CtewdXC+kMXAWD8DLxiTTvhMW7qVnlwOm36mZlszHKvsRf05lT4pegiFM 9z2j1OlaN+ci/X7NU22TNN6crYSiN77FjYJP464j876ndSxyD+rzys386T+1r1aZ aggEdkj1TsSsv1zWIYKlPIjlvhuxAgMBAAECgYA0aH+T2Vf3WOPv8KdkcJg6gCRe yJKXOWgWRcicx/CUzOEsTxmFIDPLxqAWA3k7v0B+3vjGw5Y9lycV/5XqXNoQI14j y09iNsumds13u5AKkGdTJnZhQ7UKdoVHfuP44ZdOv/rJ5/VD6F4zWywpe90pcbK+ AWDVtusgGQBSieEl1QJBAOyVrUG5l2yoUBtd2zr/kiGm/DYyXlIthQO/A3/LngDW 5/ydGxVsT7lAVOgCsoT+0L4efTh90PjzW8LPQrPBWVMCQQDS3h/FtYYd5lfz+FNL 9CEe1F1w9l8P749uNUD0g317zv1tatIqVCsQWHfVHNdVvfQ+vSFw38OORO00Xqs9 1GJrAkBkoXXEkxCZoy4PteheO/8IWWLGGr6L7di6MzFl1lIqwT6D8L9oaV2vynFT DnKop0pa09Unhjyw57KMNmSE2SUJAkEArloTEzpgRmCq4IK2/NpCeGdHS5uqRlbh 1VIa/xGps7EWQl5Mn8swQDel/YP3WGHTjfx7pgSegQfkyaRtGpZ9OQJAa9Vumj8m JAAtI0Bnga8hgQx7BhTQY4CadDxyiRGOGYhwUzYVCqkb2sbVRH9HnwUaJT7cWBY3 RnJdHOMXWem7/w== -----END PRIVATE KEY----- Certificate: Data: Version: 1 (0x0) Serial Number: 12723342612721443281 (0xb09264b1f2da21d1) Signature Algorithm: sha1WithRSAEncryption Issuer: C=XY, O=Python Software Foundation CA, CN=our-ca-server Validity Not Before: Jan 4 19:47:07 2013 GMT Not After : Nov 13 19:47:07 2022 GMT Subject: C=XY, L=Castle Anthrax, O=Python Software Foundation, CN=localhost Subject Public Key Info: Public Key Algorithm: rsaEncryption Public-Key: (1024 bit) Modulus: 00:c2:e0:0f:49:00:28:36:f9:70:5c:8f:8d:bc:0d: 7d:1e:42:b5:ec:1d:5c:2f:a4:31:70:16:0f:c0:cb: c6:24:d3:be:13:16:ee:a5:67:97:03:a6:df:a9:99: 
96:cc:c7:2a:fb:11:7f:4e:65:4f:8a:5e:82:21:4c: f7:3d:a3:d4:e9:5a:37:e7:22:fd:7e:cd:53:6d:93: 34:de:9c:ad:84:a2:37:be:c5:8d:82:4f:e3:ae:23: f3:be:a7:75:2c:72:0f:ea:f3:ca:cd:fc:e9:3f:b5: af:56:99:6a:08:04:76:48:f5:4e:c4:ac:bf:5c:d6: 21:82:a5:3c:88:e5:be:1b:b1 Exponent: 65537 (0x10001) Signature Algorithm: sha1WithRSAEncryption 2f:42:5f:a3:09:2c:fa:51:88:c7:37:7f:ea:0e:63:f0:a2:9a: e5:5a:e2:c8:20:f0:3f:60:bc:c8:0f:b6:c6:76:ce:db:83:93: f5:a3:33:67:01:8e:04:cd:00:9a:73:fd:f3:35:86:fa:d7:13: e2:46:c6:9d:c0:29:53:d4:a9:90:b8:77:4b:e6:83:76:e4:92: d6:9c:50:cf:43:d0:c6:01:77:61:9a:de:9b:70:f7:72:cd:59: 00:31:69:d9:b4:ca:06:9c:6d:c3:c7:80:8c:68:e6:b5:a2:f8: ef:1d:bb:16:9f:77:77:ef:87:62:22:9b:4d:69:a4:3a:1a:f1: 21:5e:8c:32:ac:92:fd:15:6b:18:c2:7f:15:0d:98:30:ca:75: 8f:1a:71:df:da:1d:b2:ef:9a:e8:2d:2e:02:fd:4a:3c:aa:96: 0b:06:5d:35:b3:3d:24:87:4b:e0:b0:58:60:2f:45:ac:2e:48: 8a:b0:99:10:65:27:ff:cc:b1:d8:fd:bd:26:6b:b9:0c:05:2a: f4:45:63:35:51:07:ed:83:85:fe:6f:69:cb:bb:40:a8:ae:b6: 3b:56:4a:2d:a4:ed:6d:11:2c:4d:ed:17:24:fd:47:bc:d3:41: a2:d3:06:fe:0c:90:d8:d8:94:26:c4:ff:cc:a1:d8:42:77:eb: fc:a9:94:71 -----BEGIN CERTIFICATE----- MIICpDCCAYwCCQCwkmSx8toh0TANBgkqhkiG9w0BAQUFADBNMQswCQYDVQQGEwJY WTEmMCQGA1UECgwdUHl0aG9uIFNvZnR3YXJlIEZvdW5kYXRpb24gQ0ExFjAUBgNV BAMMDW91ci1jYS1zZXJ2ZXIwHhcNMTMwMTA0MTk0NzA3WhcNMjIxMTEzMTk0NzA3 WjBfMQswCQYDVQQGEwJYWTEXMBUGA1UEBxMOQ2FzdGxlIEFudGhyYXgxIzAhBgNV BAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMRIwEAYDVQQDEwlsb2NhbGhv c3QwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMLgD0kAKDb5cFyPjbwNfR5C tewdXC+kMXAWD8DLxiTTvhMW7qVnlwOm36mZlszHKvsRf05lT4pegiFM9z2j1Ola N+ci/X7NU22TNN6crYSiN77FjYJP464j876ndSxyD+rzys386T+1r1aZaggEdkj1 TsSsv1zWIYKlPIjlvhuxAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAC9CX6MJLPpR iMc3f+oOY/CimuVa4sgg8D9gvMgPtsZ2ztuDk/WjM2cBjgTNAJpz/fM1hvrXE+JG xp3AKVPUqZC4d0vmg3bkktacUM9D0MYBd2Ga3ptw93LNWQAxadm0ygacbcPHgIxo 5rWi+O8duxafd3fvh2Iim01ppDoa8SFejDKskv0VaxjCfxUNmDDKdY8acd/aHbLv mugtLgL9SjyqlgsGXTWzPSSHS+CwWGAvRawuSIqwmRBlJ//Msdj9vSZruQwFKvRF 
YzVRB+2Dhf5vacu7QKiutjtWSi2k7W0RLE3tFyT9R7zTQaLTBv4MkNjYlCbE/8yh 2EJ36/yplHE= -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/monitor_test.py0000644000076600000240000001173500000000000016241 0ustar00jellestaffimport asyncio import unittest import unittest.mock import aiozmq import zmq from aiozmq._test_util import find_unused_port ZMQ_EVENTS = [getattr(zmq, attr) for attr in dir(zmq) if attr.startswith("EVENT_")] class Protocol(aiozmq.ZmqProtocol): def __init__(self, loop): self.wait_ready = asyncio.Future() self.wait_done = asyncio.Future() self.wait_closed = asyncio.Future() self.events_received = asyncio.Queue() def connection_made(self, transport): self.transport = transport self.wait_ready.set_result(True) def connection_lost(self, exc): self.wait_closed.set_result(exc) def msg_received(self, data): # This protocol is used by both the Router and Dealer sockets. # Messages received by the router come prefixed with an 'identity' # and hence contain two frames in this simple test protocol. 
if len(data) == 2: identity, msg = data if msg == b"Hello": self.transport.write([identity, b"World"]) else: msg = data[0] if msg == b"World": self.wait_done.set_result(True) def event_received(self, event): self.events_received.put_nowait(event) class ZmqSocketMonitorTests(unittest.TestCase): def setUp(self): self.loop = asyncio.new_event_loop() asyncio.set_event_loop(None) def tearDown(self): self.loop.close() asyncio.set_event_loop(None) @unittest.skipIf( zmq.zmq_version_info() < (4,) or zmq.pyzmq_version_info() < (14, 4), "Socket monitor requires libzmq >= 4 and pyzmq >= 14.4", ) def test_socket_monitor(self): port = find_unused_port() async def go(): # Create server and bind st, sp = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.ROUTER, bind="tcp://127.0.0.1:{}".format(port), ) await sp.wait_ready addr = list(st.bindings())[0] # Create client but don't connect it yet. ct, cp = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER ) await cp.wait_ready # Establish an event monitor on the client socket await ct.enable_monitor() # Now that the socket event monitor is established, connect # the client to the server which will generate some events. await ct.connect(addr) await asyncio.sleep(0.1) await ct.disconnect(addr) await asyncio.sleep(0.1) await ct.connect(addr) # Send a message to the server. The server should respond and # this is used to compete the wait_done future. ct.write([b"Hello"]) await cp.wait_done await ct.disable_monitor() ct.close() await cp.wait_closed st.close() await sp.wait_closed # Confirm that the events received by the monitor were valid. 
self.assertGreater(cp.events_received.qsize(), 0) while not cp.events_received.empty(): event = await cp.events_received.get() self.assertIn(event.event, ZMQ_EVENTS) self.loop.run_until_complete(go()) def test_unsupported_dependencies(self): async def go(): ct, cp = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER ) await cp.wait_ready with unittest.mock.patch.object(zmq, "zmq_version_info", return_value=(3,)): with self.assertRaises(NotImplementedError): await ct.enable_monitor() with unittest.mock.patch.object( zmq, "pyzmq_version_info", return_value=(14, 3) ): with self.assertRaises(NotImplementedError): await ct.enable_monitor() ct.close() await cp.wait_closed self.loop.run_until_complete(go()) @unittest.skipIf( zmq.zmq_version_info() < (4,) or zmq.pyzmq_version_info() < (14, 4), "Socket monitor requires libzmq >= 4 and pyzmq >= 14.4", ) def test_double_enable_disable(self): async def go(): ct, cp = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER ) await cp.wait_ready await ct.enable_monitor() # Enabling the monitor after it is already enabled should not # cause an error await ct.enable_monitor() await ct.disable_monitor() # Disabling the monitor after it is already disabled should not # cause an error await ct.disable_monitor() ct.close() await cp.wait_closed self.loop.run_until_complete(go()) if __name__ == "__main__": unittest.main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/policy_test.py0000644000076600000240000001172000000000000016043 0ustar00jellestaffimport asyncio import sys import threading import unittest from unittest import mock import aiozmq class PolicyTests(unittest.TestCase): def setUp(self): self.policy = aiozmq.ZmqEventLoopPolicy() def tearDown(self): asyncio.set_event_loop_policy(None) def test_get_event_loop(self): self.assertIsNone(self.policy._local._loop) loop = self.policy.get_event_loop() 
self.assertIsInstance(loop, asyncio.AbstractEventLoop) self.assertIs(self.policy._local._loop, loop) self.assertIs(loop, self.policy.get_event_loop()) loop.close() # zmq.Context.instance().term() def test_get_event_loop_calls_set_event_loop(self): with mock.patch.object( self.policy, "set_event_loop", wraps=self.policy.set_event_loop ) as m_set_event_loop: loop = self.policy.get_event_loop() # policy._local._loop must be set through .set_event_loop() # (the unix DefaultEventLoopPolicy needs this call to attach # the child watcher correctly) m_set_event_loop.assert_called_with(loop) loop.close() def test_get_event_loop_after_set_none(self): self.policy.set_event_loop(None) self.assertRaises(AssertionError, self.policy.get_event_loop) @mock.patch("aiozmq.core.threading.current_thread") def test_get_event_loop_thread(self, m_current_thread): def f(): self.assertRaises(AssertionError, self.policy.get_event_loop) th = threading.Thread(target=f) th.start() th.join() def test_new_event_loop(self): loop = self.policy.new_event_loop() self.assertIsInstance(loop, asyncio.AbstractEventLoop) loop.close() def test_set_event_loop(self): old_loop = self.policy.get_event_loop() self.assertRaises(AssertionError, self.policy.set_event_loop, object()) loop = self.policy.new_event_loop() self.policy.set_event_loop(loop) self.assertIs(loop, self.policy.get_event_loop()) self.assertIsNot(old_loop, self.policy.get_event_loop()) loop.close() old_loop.close() @unittest.skipIf(sys.platform == "win32", "Windows doesn't support child watchers") def test_get_child_watcher(self): self.assertIsNone(self.policy._watcher) watcher = self.policy.get_child_watcher() self.assertIsInstance(watcher, asyncio.SafeChildWatcher) self.assertIs(self.policy._watcher, watcher) self.assertIs(watcher, self.policy.get_child_watcher()) self.assertIsNone(watcher._loop) @unittest.skipIf(sys.platform == "win32", "Windows doesn't support child watchers") def test_get_child_watcher_after_set(self): watcher = 
asyncio.FastChildWatcher() self.policy.set_child_watcher(watcher) self.assertIs(self.policy._watcher, watcher) self.assertIs(watcher, self.policy.get_child_watcher()) @unittest.skipIf(sys.platform == "win32", "Windows doesn't support child watchers") def test_get_child_watcher_with_mainloop_existing(self): loop = self.policy.get_event_loop() self.assertIsNone(self.policy._watcher) watcher = self.policy.get_child_watcher() self.assertIsInstance(watcher, asyncio.SafeChildWatcher) self.assertIs(watcher._loop, loop) loop.close() @unittest.skipIf(sys.platform == "win32", "Windows doesn't support child watchers") def test_get_child_watcher_thread(self): def f(): self.policy.set_event_loop(self.policy.new_event_loop()) self.assertIsInstance( self.policy.get_event_loop(), asyncio.AbstractEventLoop ) watcher = self.policy.get_child_watcher() self.assertIsInstance(watcher, asyncio.SafeChildWatcher) self.assertIsNone(watcher._loop) self.policy.get_event_loop().close() th = threading.Thread(target=f) th.start() th.join() @unittest.skipIf(sys.platform == "win32", "Windows doesn't support child watchers") def test_child_watcher_replace_mainloop_existing(self): loop = self.policy.get_event_loop() watcher = self.policy.get_child_watcher() self.assertIs(watcher._loop, loop) new_loop = self.policy.new_event_loop() self.policy.set_event_loop(new_loop) self.assertIs(watcher._loop, new_loop) self.policy.set_event_loop(None) self.assertIs(watcher._loop, None) loop.close() new_loop.close() @unittest.skipIf(sys.platform == "win32", "Windows doesn't support child watchers") def test_get_child_watcher_to_override_existing_one(self): watcher = asyncio.FastChildWatcher() # initializes default watcher as side-effect self.policy.get_child_watcher() self.policy.set_child_watcher(watcher) self.assertIs(self.policy._watcher, watcher) self.assertIs(watcher, self.policy.get_child_watcher()) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 
aiozmq-1.0.0/tests/pycacert.pem0000644000076600000240000001032300000000000015446 0ustar00jellestaffCertificate: Data: Version: 3 (0x2) Serial Number: 12723342612721443280 (0xb09264b1f2da21d0) Signature Algorithm: sha1WithRSAEncryption Issuer: C=XY, O=Python Software Foundation CA, CN=our-ca-server Validity Not Before: Jan 4 19:47:07 2013 GMT Not After : Jan 2 19:47:07 2023 GMT Subject: C=XY, O=Python Software Foundation CA, CN=our-ca-server Subject Public Key Info: Public Key Algorithm: rsaEncryption Public-Key: (2048 bit) Modulus: 00:e7:de:e9:e3:0c:9f:00:b6:a1:fd:2b:5b:96:d2: 6f:cc:e0:be:86:b9:20:5e:ec:03:7a:55:ab:ea:a4: e9:f9:49:85:d2:66:d5:ed:c7:7a:ea:56:8e:2d:8f: e7:42:e2:62:28:a9:9f:d6:1b:8e:eb:b5:b4:9c:9f: 14:ab:df:e6:94:8b:76:1d:3e:6d:24:61:ed:0c:bf: 00:8a:61:0c:df:5c:c8:36:73:16:00:cd:47:ba:6d: a4:a4:74:88:83:23:0a:19:fc:09:a7:3c:4a:4b:d3: e7:1d:2d:e4:ea:4c:54:21:f3:26:db:89:37:18:d4: 02:bb:40:32:5f:a4:ff:2d:1c:f7:d4:bb:ec:8e:cf: 5c:82:ac:e6:7c:08:6c:48:85:61:07:7f:25:e0:5c: e0:bc:34:5f:e0:b9:04:47:75:c8:47:0b:8d:bc:d6: c8:68:5f:33:83:62:d2:20:44:35:b1:ad:81:1a:8a: cd:bc:35:b0:5c:8b:47:d6:18:e9:9c:18:97:cc:01: 3c:29:cc:e8:1e:e4:e4:c1:b8:de:e7:c2:11:18:87: 5a:93:34:d8:a6:25:f7:14:71:eb:e4:21:a2:d2:0f: 2e:2e:d4:62:00:35:d3:d6:ef:5c:60:4b:4c:a9:14: e2:dd:15:58:46:37:33:26:b7:e7:2e:5d:ed:42:e4: c5:4d Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Subject Key Identifier: BC:DD:62:D9:76:DA:1B:D2:54:6B:CF:E0:66:9B:1E:1E:7B:56:0C:0B X509v3 Authority Key Identifier: keyid:BC:DD:62:D9:76:DA:1B:D2:54:6B:CF:E0:66:9B:1E:1E:7B:56:0C:0B X509v3 Basic Constraints: CA:TRUE Signature Algorithm: sha1WithRSAEncryption 7d:0a:f5:cb:8d:d3:5d:bd:99:8e:f8:2b:0f:ba:eb:c2:d9:a6: 27:4f:2e:7b:2f:0e:64:d8:1c:35:50:4e:ee:fc:90:b9:8d:6d: a8:c5:c6:06:b0:af:f3:2d:bf:3b:b8:42:07:dd:18:7d:6d:95: 54:57:85:18:60:47:2f:eb:78:1b:f9:e8:17:fd:5a:0d:87:17: 28:ac:4c:6a:e6:bc:29:f4:f4:55:70:29:42:de:85:ea:ab:6c: 23:06:64:30:75:02:8e:53:bc:5e:01:33:37:cc:1e:cd:b8:a4: 
fd:ca:e4:5f:65:3b:83:1c:86:f1:55:02:a0:3a:8f:db:91:b7: 40:14:b4:e7:8d:d2:ee:73:ba:e3:e5:34:2d:bc:94:6f:4e:24: 06:f7:5f:8b:0e:a7:8e:6b:de:5e:75:f4:32:9a:50:b1:44:33: 9a:d0:05:e2:78:82:ff:db:da:8a:63:eb:a9:dd:d1:bf:a0:61: ad:e3:9e:8a:24:5d:62:0e:e7:4c:91:7f:ef:df:34:36:3b:2f: 5d:f5:84:b2:2f:c4:6d:93:96:1a:6f:30:28:f1:da:12:9a:64: b4:40:33:1d:bd:de:2b:53:a8:ea:be:d6:bc:4e:96:f5:44:fb: 32:18:ae:d5:1f:f6:69:af:b6:4e:7b:1d:58:ec:3b:a9:53:a3: 5e:58:c8:9e -----BEGIN CERTIFICATE----- MIIDbTCCAlWgAwIBAgIJALCSZLHy2iHQMA0GCSqGSIb3DQEBBQUAME0xCzAJBgNV BAYTAlhZMSYwJAYDVQQKDB1QeXRob24gU29mdHdhcmUgRm91bmRhdGlvbiBDQTEW MBQGA1UEAwwNb3VyLWNhLXNlcnZlcjAeFw0xMzAxMDQxOTQ3MDdaFw0yMzAxMDIx OTQ3MDdaME0xCzAJBgNVBAYTAlhZMSYwJAYDVQQKDB1QeXRob24gU29mdHdhcmUg Rm91bmRhdGlvbiBDQTEWMBQGA1UEAwwNb3VyLWNhLXNlcnZlcjCCASIwDQYJKoZI hvcNAQEBBQADggEPADCCAQoCggEBAOfe6eMMnwC2of0rW5bSb8zgvoa5IF7sA3pV q+qk6flJhdJm1e3HeupWji2P50LiYiipn9Ybjuu1tJyfFKvf5pSLdh0+bSRh7Qy/ AIphDN9cyDZzFgDNR7ptpKR0iIMjChn8Cac8SkvT5x0t5OpMVCHzJtuJNxjUArtA Ml+k/y0c99S77I7PXIKs5nwIbEiFYQd/JeBc4Lw0X+C5BEd1yEcLjbzWyGhfM4Ni 0iBENbGtgRqKzbw1sFyLR9YY6ZwYl8wBPCnM6B7k5MG43ufCERiHWpM02KYl9xRx 6+QhotIPLi7UYgA109bvXGBLTKkU4t0VWEY3Mya35y5d7ULkxU0CAwEAAaNQME4w HQYDVR0OBBYEFLzdYtl22hvSVGvP4GabHh57VgwLMB8GA1UdIwQYMBaAFLzdYtl2 2hvSVGvP4GabHh57VgwLMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB AH0K9cuN0129mY74Kw+668LZpidPLnsvDmTYHDVQTu78kLmNbajFxgawr/Mtvzu4 QgfdGH1tlVRXhRhgRy/reBv56Bf9Wg2HFyisTGrmvCn09FVwKULeheqrbCMGZDB1 Ao5TvF4BMzfMHs24pP3K5F9lO4MchvFVAqA6j9uRt0AUtOeN0u5zuuPlNC28lG9O JAb3X4sOp45r3l519DKaULFEM5rQBeJ4gv/b2opj66nd0b+gYa3jnookXWIO50yR f+/fNDY7L131hLIvxG2TlhpvMCjx2hKaZLRAMx293itTqOq+1rxOlvVE+zIYrtUf 9mmvtk57HVjsO6lTo15YyJ4= -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/rpc_namespace_test.py0000644000076600000240000000574700000000000017360 0ustar00jellestaffimport unittest import asyncio import aiozmq import aiozmq.rpc from aiozmq._test_util import 
find_unused_port class MyHandler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method async def func(self, arg): return arg + 1 class RootHandler(aiozmq.rpc.AttrHandler): ns = MyHandler() class RpcNamespaceTestsMixin: def close(self, service): service.close() self.loop.run_until_complete(service.wait_closed()) def make_rpc_pair(self): port = find_unused_port() async def create(): server = await aiozmq.rpc.serve_rpc( RootHandler(), bind="tcp://127.0.0.1:{}".format(port) ) client = await aiozmq.rpc.connect_rpc( connect="tcp://127.0.0.1:{}".format(port) ) return client, server self.client, self.server = self.loop.run_until_complete(create()) return self.client, self.server def test_ns_func(self): client, server = self.make_rpc_pair() async def communicate(): ret = await client.call.ns.func(1) self.assertEqual(2, ret) self.loop.run_until_complete(communicate()) def test_not_found(self): client, server = self.make_rpc_pair() async def communicate(): with self.assertRaisesRegex(aiozmq.rpc.NotFoundError, "ns1.func"): await client.call.ns1.func(1) self.loop.run_until_complete(communicate()) def test_bad_handler(self): client, server = self.make_rpc_pair() async def communicate(): with self.assertRaisesRegex(aiozmq.rpc.NotFoundError, "ns.func.foo"): await client.call.ns.func.foo(1) self.loop.run_until_complete(communicate()) def test_missing_namespace_method(self): client, server = self.make_rpc_pair() async def communicate(): with self.assertRaisesRegex(aiozmq.rpc.NotFoundError, "ns"): await client.call.ns(1) self.loop.run_until_complete(communicate()) class LoopRpcNamespaceTests(unittest.TestCase, RpcNamespaceTestsMixin): def setUp(self): self.loop = aiozmq.ZmqEventLoop() asyncio.set_event_loop(None) self.client = self.server = None def tearDown(self): if self.client is not None: self.close(self.client) if self.server is not None: self.close(self.server) self.loop.close() asyncio.set_event_loop(None) # zmq.Context.instance().term() class 
LooplessRpcNamespaceTests(unittest.TestCase, RpcNamespaceTestsMixin): def setUp(self): self.loop = asyncio.new_event_loop() asyncio.set_event_loop(None) self.client = self.server = None def tearDown(self): if self.client is not None: self.close(self.client) if self.server is not None: self.close(self.server) self.loop.close() asyncio.set_event_loop(None) # zmq.Context.instance().term() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/rpc_packer_test.py0000644000076600000240000001413400000000000016657 0ustar00jellestaffimport unittest import datetime from aiozmq.rpc.packer import _Packer from unittest import mock from msgpack import ExtType, packb from pickle import dumps, loads, HIGHEST_PROTOCOL from functools import partial class Point: def __init__(self, x, y): self.x = x self.y = y def __eq__(self, other): if isinstance(other, Point): return (self.x, self.y) == (other.x, other.y) return NotImplemented class PackerTests(unittest.TestCase): def test_packb_simple(self): packer = _Packer() self.assertEqual(packb("test"), packer.packb("test")) self.assertEqual(packb([123]), packer.packb([123])) self.assertEqual(packb((123,)), packer.packb([123])) def test_unpackb_simple(self): packer = _Packer() self.assertEqual("test", packer.unpackb(packb("test"))) self.assertEqual((123,), packer.unpackb(packb([123]))) self.assertEqual((123,), packer.unpackb(packb((123,)))) @mock.patch("aiozmq.rpc.packer.ExtType") def test_ext_type__date(self, ExtTypeMock): packer = _Packer() CODE = 127 dt = datetime.date(2014, 3, 25) data = dumps(dt, protocol=HIGHEST_PROTOCOL) self.assertEqual(dt, packer.ext_type_unpack_hook(CODE, data)) packer.ext_type_pack_hook(dt) ExtTypeMock.assert_called_once_with(CODE, data) @mock.patch("aiozmq.rpc.packer.ExtType") def test_ext_type__datetime(self, ExtTypeMock): packer = _Packer() CODE = 126 dt = datetime.datetime(2014, 3, 25, 15, 18) data = dumps(dt, protocol=HIGHEST_PROTOCOL) 
self.assertEqual(dt, packer.ext_type_unpack_hook(CODE, data)) packer.ext_type_pack_hook(dt) ExtTypeMock.assert_called_once_with(CODE, data) @mock.patch("aiozmq.rpc.packer.ExtType") def test_ext_type__datetime_tzinfo(self, ExtTypeMock): packer = _Packer() CODE = 126 dt = datetime.datetime(2014, 3, 25, 16, 12, tzinfo=datetime.timezone.utc) data = dumps(dt, protocol=HIGHEST_PROTOCOL) self.assertEqual(dt, packer.ext_type_unpack_hook(CODE, data)) packer.ext_type_pack_hook(dt) ExtTypeMock.assert_called_once_with(CODE, data) @mock.patch("aiozmq.rpc.packer.ExtType") def test_ext_type__time(self, ExtTypeMock): packer = _Packer() CODE = 125 tm = datetime.time(15, 51, 0) data = dumps(tm, protocol=HIGHEST_PROTOCOL) self.assertEqual(tm, packer.ext_type_unpack_hook(CODE, data)) packer.ext_type_pack_hook(tm) ExtTypeMock.assert_called_once_with(CODE, data) @mock.patch("aiozmq.rpc.packer.ExtType") def test_ext_type__time_tzinfo(self, ExtTypeMock): packer = _Packer() CODE = 125 tm = datetime.time(15, 51, 0, tzinfo=datetime.timezone.utc) data = dumps(tm, protocol=HIGHEST_PROTOCOL) self.assertEqual(tm, packer.ext_type_unpack_hook(CODE, data)) packer.ext_type_pack_hook(tm) ExtTypeMock.assert_called_once_with(CODE, data) @mock.patch("aiozmq.rpc.packer.ExtType") def test_ext_type__timedelta(self, ExtTypeMock): packer = _Packer() CODE = 124 td = datetime.timedelta(days=1, hours=2, minutes=3, seconds=4) data = dumps(td, protocol=HIGHEST_PROTOCOL) self.assertEqual(td, packer.ext_type_unpack_hook(CODE, data)) packer.ext_type_pack_hook(td) ExtTypeMock.assert_called_once_with(CODE, data) @mock.patch("aiozmq.rpc.packer.ExtType") def test_ext_type__tzinfo(self, ExtTypeMock): packer = _Packer() CODE = 123 tz = datetime.timezone.utc data = dumps(tz, protocol=HIGHEST_PROTOCOL) self.assertEqual(tz, packer.ext_type_unpack_hook(CODE, data)) packer.ext_type_pack_hook(tz) ExtTypeMock.assert_called_once_with(CODE, data) def test_ext_type_errors(self): packer = _Packer() with 
self.assertRaisesRegex(TypeError, "Unknown type: "): packer.ext_type_pack_hook(packer) self.assertIn(_Packer, packer._pack_cache) self.assertIsNone(packer._pack_cache[_Packer]) # lets try again just for good coverage with self.assertRaisesRegex(TypeError, "Unknown type: "): packer.ext_type_pack_hook(packer) self.assertEqual(ExtType(1, b""), packer.ext_type_unpack_hook(1, b"")) # TODO: should be more specific errors with self.assertRaises(Exception): packer.ext_type_unpack_hook(127, b"bad data") def test_simple_translators(self): translation_table = { 0: (Point, partial(dumps, protocol=HIGHEST_PROTOCOL), loads), } packer = _Packer(translation_table=translation_table) pt = Point(1, 2) data = dumps(pt, protocol=HIGHEST_PROTOCOL) self.assertEqual(pt, packer.unpackb(packer.packb(pt))) self.assertEqual(ExtType(0, data), packer.ext_type_pack_hook(pt)) self.assertEqual(pt, packer.ext_type_unpack_hook(0, data)) def test_override_translators(self): translation_table = { 125: (Point, partial(dumps, protocol=HIGHEST_PROTOCOL), loads), } packer = _Packer(translation_table=translation_table) pt = Point(3, 4) data = dumps(pt, protocol=HIGHEST_PROTOCOL) dt = datetime.time(15, 2) self.assertEqual(ExtType(125, data), packer.ext_type_pack_hook(pt)) with self.assertRaisesRegex(TypeError, "Unknown type: "): packer.ext_type_pack_hook(dt) def test_preserve_resolution_order(self): class A: pass class B(A): pass dump_a = mock.Mock(return_value=b"a") load_a = mock.Mock(return_value=A()) dump_b = mock.Mock(return_value=b"b") load_b = mock.Mock(return_value=B()) translation_table = { 1: (A, dump_a, load_a), 2: (B, dump_b, load_b), } packer = _Packer(translation_table=translation_table) self.assertEqual(packer.packb(ExtType(1, b"a")), packer.packb(A())) self.assertEqual(packer.packb(ExtType(2, b"b")), packer.packb(B())) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 
aiozmq-1.0.0/tests/rpc_pipeline_test.py0000644000076600000240000001725200000000000017223 0ustar00jellestaffimport unittest import asyncio import aiozmq import aiozmq.rpc import logging from unittest import mock from aiozmq._test_util import log_hook, RpcMixin class MyHandler(aiozmq.rpc.AttrHandler): def __init__(self, queue, loop): self.queue = queue self.loop = loop @aiozmq.rpc.method async def coro(self, arg): await self.queue.put(arg) @aiozmq.rpc.method def func(self, arg): self.queue.put_nowait(arg) @aiozmq.rpc.method async def add(self, arg: int = 1): await self.queue.put(arg + 1) @aiozmq.rpc.method def func_error(self): raise ValueError @aiozmq.rpc.method def suspicious(self, arg: int): self.queue.put_nowait(arg) return 3 @aiozmq.rpc.method async def fut(self): f = asyncio.Future() await self.queue.put(f) await f class PipelineTestsMixin(RpcMixin): @classmethod def setUpClass(self): logger = logging.getLogger() self.log_level = logger.getEffectiveLevel() logger.setLevel(logging.DEBUG) @classmethod def tearDownClass(self): logger = logging.getLogger() logger.setLevel(self.log_level) def exception_handler(self, loop, context): self.err_queue.put_nowait(context) def make_pipeline_pair( self, log_exceptions=False, exclude_log_exceptions=(), use_loop=True ): async def create(): server = await aiozmq.rpc.serve_pipeline( MyHandler(self.queue, self.loop), bind="tcp://127.0.0.1:*", loop=self.loop if use_loop else None, log_exceptions=log_exceptions, exclude_log_exceptions=exclude_log_exceptions, ) connect = next(iter(server.transport.bindings())) client = await aiozmq.rpc.connect_pipeline( connect=connect if use_loop else None ) return client, server self.client, self.server = self.loop.run_until_complete(create()) return self.client, self.server def test_coro(self): client, server = self.make_pipeline_pair() async def communicate(): await client.notify.coro(1) ret = await self.queue.get() self.assertEqual(1, ret) await client.notify.coro(2) ret = await 
self.queue.get() self.assertEqual(2, ret) self.loop.run_until_complete(communicate()) def test_add(self): client, server = self.make_pipeline_pair() async def communicate(): await client.notify.add() ret = await self.queue.get() self.assertEqual(ret, 2) await client.notify.add(2) ret = await self.queue.get() self.assertEqual(ret, 3) self.loop.run_until_complete(communicate()) def test_bad_handler(self): client, server = self.make_pipeline_pair() async def communicate(): with log_hook("aiozmq.rpc", self.err_queue): await client.notify.bad_handler() ret = await self.err_queue.get() self.assertEqual(logging.ERROR, ret.levelno) self.assertEqual("Call to %r caused error: %r", ret.msg) self.assertEqual(("bad_handler", mock.ANY), ret.args) self.assertIsNotNone(ret.exc_info) self.loop.run_until_complete(communicate()) def test_func(self): client, server = self.make_pipeline_pair() async def communicate(): await client.notify.func(123) ret = await self.queue.get() self.assertEqual(ret, 123) self.loop.run_until_complete(communicate()) def test_func_error(self): client, server = self.make_pipeline_pair(log_exceptions=True) async def communicate(): with log_hook("aiozmq.rpc", self.err_queue): await client.notify.func_error() ret = await self.err_queue.get() self.assertEqual(logging.ERROR, ret.levelno) self.assertEqual( "An exception %r from method %r " "call occurred.\n" "args = %s\nkwargs = %s\n", ret.msg, ) self.assertEqual((mock.ANY, "func_error", "()", "{}"), ret.args) self.assertIsNotNone(ret.exc_info) self.loop.run_until_complete(communicate()) def test_default_event_loop(self): asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy()) self.addCleanup(asyncio.set_event_loop_policy, None) self.addCleanup(self.loop.close) self.loop = asyncio.get_event_loop() self.client, self.server = self.make_pipeline_pair(use_loop=False) self.assertIs(self.client._loop, self.loop) self.assertIs(self.server._loop, self.loop) def test_warning_if_remote_return_not_None(self): client, 
server = self.make_pipeline_pair() async def communicate(): with log_hook("aiozmq.rpc", self.err_queue): await client.notify.suspicious(1) ret = await self.queue.get() self.assertEqual(1, ret) ret = await self.err_queue.get() self.assertEqual(logging.WARNING, ret.levelno) self.assertEqual("Pipeline handler %r returned not None", ret.msg) self.assertEqual(("suspicious",), ret.args) self.assertIsNone(ret.exc_info) async def dummy(): pass self.loop.run_until_complete(communicate()) self.loop.run_until_complete(dummy()) def test_call_closed_pipeline(self): client, server = self.make_pipeline_pair() async def communicate(): client.close() await client.wait_closed() with self.assertRaises(aiozmq.rpc.ServiceClosedError): await client.notify.func() self.loop.run_until_complete(communicate()) def test_server_close(self): client, server = self.make_pipeline_pair() async def communicate(): client.notify.fut() fut = await self.queue.get() self.assertEqual(1, len(server._proto.pending_waiters)) task = next(iter(server._proto.pending_waiters)) self.assertIsInstance(task, asyncio.Task) server.close() await server.wait_closed() await asyncio.sleep(0.1) self.assertEqual(0, len(server._proto.pending_waiters)) fut.cancel() self.loop.run_until_complete(communicate()) class LoopPipelineTests(unittest.TestCase, PipelineTestsMixin): def setUp(self): self.loop = aiozmq.ZmqEventLoop() asyncio.set_event_loop(self.loop) self.client = self.server = None self.queue = asyncio.Queue() self.err_queue = asyncio.Queue() self.loop.set_exception_handler(self.exception_handler) def tearDown(self): self.close_service(self.client) self.close_service(self.server) self.loop.close() asyncio.set_event_loop(None) # zmq.Context.instance().term() class LooplessPipelineTests(unittest.TestCase, PipelineTestsMixin): def setUp(self): self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) self.client = self.server = None self.queue = asyncio.Queue() self.err_queue = asyncio.Queue() 
self.loop.set_exception_handler(self.exception_handler) def tearDown(self): self.close_service(self.client) self.close_service(self.server) self.loop.close() asyncio.set_event_loop(None) # zmq.Context.instance().term() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1667437957.0 aiozmq-1.0.0/tests/rpc_pubsub_test.py0000644000076600000240000003013300000000000016707 0ustar00jellestaffimport unittest import asyncio import aiozmq import aiozmq.rpc import logging from unittest import mock from aiozmq._test_util import find_unused_port, log_hook, RpcMixin from aiozmq.rpc.base import ParametersError class MyHandler(aiozmq.rpc.AttrHandler): def __init__(self, queue, loop): super().__init__() self.queue = queue self.loop = loop @aiozmq.rpc.method async def start(self): await self.queue.put("started") @aiozmq.rpc.method async def coro(self, arg): await self.queue.put(arg) @aiozmq.rpc.method def func(self, arg: int): if not isinstance(arg, int): raise ParametersError("arg must be int") self.queue.put_nowait(arg + 1) @aiozmq.rpc.method def func_raise_error(self): raise RuntimeError @aiozmq.rpc.method def suspicious(self, arg: int): self.queue.put_nowait(arg + 1) return 3 @aiozmq.rpc.method async def fut(self): f = asyncio.Future() await self.queue.put(f) await f class PubSubTestsMixin(RpcMixin): @classmethod def setUpClass(self): logger = logging.getLogger() self.log_level = logger.getEffectiveLevel() logger.setLevel(logging.DEBUG) @classmethod def tearDownClass(self): logger = logging.getLogger() logger.setLevel(self.log_level) def make_pubsub_pair( self, subscribe=None, log_exceptions=False, exclude_log_exceptions=(), use_loop=True, ): loop = self.loop if use_loop else asyncio.get_event_loop() async def create(): server = await aiozmq.rpc.serve_pubsub( MyHandler(self.queue, self.loop), subscribe=subscribe, bind="tcp://127.0.0.1:*", loop=self.loop if use_loop else None, log_exceptions=log_exceptions, 
exclude_log_exceptions=exclude_log_exceptions, ) connect = next(iter(server.transport.bindings())) client = await aiozmq.rpc.connect_pubsub( connect=connect, loop=self.loop if use_loop else None ) if subscribe is not None: if not isinstance(subscribe, (str, bytes)): pub = subscribe[0] else: pub = subscribe for i in range(3): try: await client.publish(pub).start() ret = await asyncio.wait_for(self.queue.get(), 0.1) self.assertEqual(ret, "started") break except asyncio.TimeoutError: self.assertLess(i, 3) else: self.fail("Cannot connect") return client, server self.client, self.server = loop.run_until_complete(create()) return self.client, self.server def test_coro(self): client, server = self.make_pubsub_pair("my-topic") async def communicate(): await client.publish("my-topic").coro(1) ret = await self.queue.get() self.assertEqual(ret, 1) await client.publish("other-topic").coro(1) self.assertTrue(self.queue.empty()) self.loop.run_until_complete(communicate()) def test_coro__multiple_topics(self): client, server = self.make_pubsub_pair(("topic1", "topic2")) async def communicate(): await client.publish("topic1").coro(1) ret = await self.queue.get() self.assertEqual(ret, 1) await client.publish("topic2").coro(1) ret = await self.queue.get() self.assertEqual(ret, 1) self.loop.run_until_complete(communicate()) def test_coro__subscribe_to_all(self): client, server = self.make_pubsub_pair("") async def communicate(): await client.publish("sometopic").coro(123) ret = await self.queue.get() self.assertEqual(ret, 123) await client.publish(None).coro("abc") ret = await self.queue.get() self.assertEqual(ret, "abc") await client.publish("").coro(1) ret = await self.queue.get() self.assertEqual(ret, 1) self.loop.run_until_complete(communicate()) def test_call_error(self): client, server = self.make_pubsub_pair() with self.assertRaisesRegex(ValueError, "PubSub method name is empty"): client.publish("topic")() def test_not_found(self): client, server = 
self.make_pubsub_pair("my-topic") async def communicate(): with log_hook("aiozmq.rpc", self.err_queue): await client.publish("my-topic").bad.method(1, 2) ret = await self.err_queue.get() self.assertEqual(logging.ERROR, ret.levelno) self.assertEqual("Call to %r caused error: %r", ret.msg) self.assertEqual(("bad.method", mock.ANY), ret.args) self.assertIsNotNone(ret.exc_info) self.loop.run_until_complete(communicate()) def test_func(self): client, server = self.make_pubsub_pair("my-topic") async def communicate(): await client.publish("my-topic").func(1) ret = await self.queue.get() self.assertEqual(ret, 2) self.loop.run_until_complete(communicate()) def test_func__arg_error(self): client, server = self.make_pubsub_pair("my-topic") async def communicate(): with log_hook("aiozmq.rpc", self.err_queue): await client.publish("my-topic").func("abc") ret = await self.err_queue.get() self.assertEqual(logging.ERROR, ret.levelno) self.assertEqual("Call to %r caused error: %r", ret.msg) self.assertEqual(("func", mock.ANY), ret.args) self.assertIsNotNone(ret.exc_info) self.assertTrue(self.queue.empty()) self.loop.run_until_complete(communicate()) def test_func_raises_error(self): client, server = self.make_pubsub_pair("my-topic", log_exceptions=True) async def communicate(): with log_hook("aiozmq.rpc", self.err_queue): await client.publish("my-topic").func_raise_error() ret = await self.err_queue.get() self.assertEqual(logging.ERROR, ret.levelno) self.assertEqual( "An exception %r from method %r call occurred.\n" "args = %s\nkwargs = %s\n", ret.msg, ) self.assertEqual((mock.ANY, "func_raise_error", "()", "{}"), ret.args) self.assertIsNotNone(ret.exc_info) self.loop.run_until_complete(communicate()) def test_subscribe_to_bytes(self): client, server = self.make_pubsub_pair(b"my-topic") async def communicate(): await client.publish(b"my-topic").func(1) ret = await self.queue.get() self.assertEqual(ret, 2) self.loop.run_until_complete(communicate()) def 
test_subscribe_to_invalid(self): async def go(): server = await aiozmq.rpc.serve_pubsub( MyHandler(self.queue, self.loop), bind="tcp://127.0.0.1:*", ) self.assertRaises(TypeError, server.subscribe, 123) self.loop.run_until_complete(go()) def test_unsubscribe(self): async def go(): server = await aiozmq.rpc.serve_pubsub( MyHandler(self.queue, self.loop), bind="tcp://127.0.0.1:*", ) self.assertRaises(TypeError, server.subscribe, 123) server.subscribe("topic") server.unsubscribe("topic") self.assertNotIn("topic", server.transport.subscriptions()) server.subscribe(b"btopic") server.unsubscribe(b"btopic") self.assertNotIn(b"btopic", server.transport.subscriptions()) self.assertRaises(TypeError, server.unsubscribe, 123) self.loop.run_until_complete(go()) def test_default_event_loop(self): asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy()) self.addCleanup(asyncio.set_event_loop_policy, None) self.addCleanup(self.loop.close) self.loop = loop = asyncio.get_event_loop() # should use the default loop, not closed one self.queue = asyncio.Queue() self.client, self.server = self.make_pubsub_pair( use_loop=False, subscribe="topic" ) async def communicate(): await self.client.publish("topic").func(1) ret = await self.queue.get() self.assertEqual(2, ret) loop.run_until_complete(communicate()) def test_serve_bad_subscription(self): port = find_unused_port() async def create(): with self.assertRaises(TypeError): await aiozmq.rpc.serve_pubsub( {}, bind="tcp://127.0.0.1:{}".format(port), subscribe=123, ) self.loop.run_until_complete(create()) def test_publish_to_invalid_topic(self): client, server = self.make_pubsub_pair("") async def communicate(): with self.assertRaises(TypeError): await client.publish(123).coro(123) self.loop.run_until_complete(communicate()) def test_warning_if_remote_return_not_None(self): client, server = self.make_pubsub_pair("topic") async def communicate(): with log_hook("aiozmq.rpc", self.err_queue): await client.publish("topic").suspicious(1) ret = 
await self.queue.get() self.assertEqual(2, ret) ret = await self.err_queue.get() self.assertEqual(logging.WARNING, ret.levelno) self.assertEqual("PubSub handler %r returned not None", ret.msg) self.assertEqual(("suspicious",), ret.args) self.assertIsNone(ret.exc_info) self.loop.run_until_complete(communicate()) def test_call_closed_pubsub(self): client, server = self.make_pubsub_pair() async def communicate(): client.close() await client.wait_closed() with self.assertRaises(aiozmq.rpc.ServiceClosedError): await client.publish("ab").func() self.loop.run_until_complete(communicate()) def test_server_close(self): client, server = self.make_pubsub_pair("my-topic") async def communicate(): client.publish("my-topic").fut() fut = await self.queue.get() self.assertEqual(1, len(server._proto.pending_waiters)) task = next(iter(server._proto.pending_waiters)) self.assertIsInstance(task, asyncio.Task) server.close() await server.wait_closed() await asyncio.sleep(0.1) self.assertEqual(0, len(server._proto.pending_waiters)) fut.cancel() self.loop.run_until_complete(communicate()) class LoopPubSubTests(unittest.TestCase, PubSubTestsMixin): def setUp(self): self.loop = aiozmq.ZmqEventLoop() asyncio.set_event_loop(self.loop) self.client = self.server = None self.queue = asyncio.Queue() self.err_queue = asyncio.Queue() def tearDown(self): self.close_service(self.client) self.close_service(self.server) self.loop.close() asyncio.set_event_loop(None) # zmq.Context.instance().term() class LooplessPubSubTests(unittest.TestCase, PubSubTestsMixin): def setUp(self): self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) self.client = self.server = None self.queue = asyncio.Queue() self.err_queue = asyncio.Queue() def tearDown(self): self.close_service(self.client) self.close_service(self.server) self.loop.close() asyncio.set_event_loop(None) # zmq.Context.instance().term() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 
aiozmq-1.0.0/tests/rpc_test.py0000644000076600000240000005653600000000000015346 0ustar00jellestaffimport unittest import asyncio import aiozmq import aiozmq.rpc import datetime import logging import time from unittest import mock import zmq import msgpack import struct from aiozmq import create_zmq_connection from aiozmq._test_util import find_unused_port, log_hook, RpcMixin from aiozmq.rpc.log import logger class MyException(Exception): pass class MyHandler(aiozmq.rpc.AttrHandler): def __init__(self, loop): self.loop = loop @aiozmq.rpc.method def func(self, arg): return arg + 1 @aiozmq.rpc.method async def coro(self, arg): return arg + 1 @aiozmq.rpc.method def exc(self, arg): raise RuntimeError("bad arg", arg) @aiozmq.rpc.method async def exc_coro(self, arg): raise RuntimeError("bad arg 2", arg) @aiozmq.rpc.method def add(self, a1, a2): return a1 + a2 @aiozmq.rpc.method def generic_exception(self): raise MyException("additional", "data") @aiozmq.rpc.method async def slow_call(self): await asyncio.sleep(0.2) @aiozmq.rpc.method @asyncio.coroutine def fut(self): return asyncio.Future() @aiozmq.rpc.method async def cancelled_fut(self): ret = asyncio.Future() ret.cancel() return ret @aiozmq.rpc.method def exc2(self, arg): raise ValueError("bad arg", arg) @aiozmq.rpc.method async def not_so_fast(self): await asyncio.sleep(0.001) return "ok" class Protocol(aiozmq.ZmqProtocol): def __init__(self, loop): self.transport = None self.connected = asyncio.Future() self.closed = asyncio.Future() self.state = "INITIAL" self.received = asyncio.Queue() def connection_made(self, transport): self.transport = transport assert self.state == "INITIAL", self.state self.state = "CONNECTED" self.connected.set_result(None) def connection_lost(self, exc): assert self.state == "CONNECTED", self.state self.state = "CLOSED" self.closed.set_result(None) self.transport = None def pause_writing(self): pass def resume_writing(self): pass def msg_received(self, data): assert isinstance(data, tuple), 
data assert self.state == "CONNECTED", self.state self.received.put_nowait(data) class RpcTestsMixin(RpcMixin): @classmethod def setUpClass(self): root_logger = logging.getLogger() self.log_level = logger.getEffectiveLevel() root_logger.setLevel(logging.DEBUG) @classmethod def tearDownClass(self): root_logger = logging.getLogger() root_logger.setLevel(self.log_level) def make_rpc_pair( self, *, error_table=None, timeout=None, log_exceptions=False, exclude_log_exceptions=(), use_loop=True ): async def create(): port = find_unused_port() server = await aiozmq.rpc.serve_rpc( MyHandler(self.loop), bind="tcp://127.0.0.1:{}".format(port), loop=self.loop if use_loop else None, log_exceptions=log_exceptions, exclude_log_exceptions=exclude_log_exceptions, ) client = await aiozmq.rpc.connect_rpc( connect="tcp://127.0.0.1:{}".format(port), loop=self.loop if use_loop else None, error_table=error_table, timeout=timeout, ) return client, server self.client, self.server = self.loop.run_until_complete(create()) return self.client, self.server def test_func(self): client, server = self.make_rpc_pair() async def communicate(): ret = await client.call.func(1) self.assertEqual(2, ret) client.close() await client.wait_closed() self.loop.run_until_complete(communicate()) def test_exc(self): client, server = self.make_rpc_pair() async def communicate(): with self.assertRaises(RuntimeError) as exc: await client.call.exc(1) self.assertEqual(("bad arg", 1), exc.exception.args) self.loop.run_until_complete(communicate()) def test_not_found(self): client, server = self.make_rpc_pair() async def communicate(): with self.assertRaises(aiozmq.rpc.NotFoundError) as exc: await client.call.unknown_method(1, 2, 3) self.assertEqual(("unknown_method",), exc.exception.args) self.loop.run_until_complete(communicate()) def test_coro(self): client, server = self.make_rpc_pair() async def communicate(): ret = await client.call.coro(2) self.assertEqual(3, ret) self.loop.run_until_complete(communicate()) def 
test_exc_coro(self): client, server = self.make_rpc_pair() async def communicate(): with self.assertRaises(RuntimeError) as exc: await client.call.exc_coro(1) self.assertEqual(("bad arg 2", 1), exc.exception.args) self.loop.run_until_complete(communicate()) def test_datetime_translators(self): client, server = self.make_rpc_pair() async def communicate(): ret = await client.call.add( datetime.date(2014, 3, 21), datetime.timedelta(days=2) ) self.assertEqual(datetime.date(2014, 3, 23), ret) self.loop.run_until_complete(communicate()) def test_not_found_empty_name(self): client, server = self.make_rpc_pair() async def communicate(): with self.assertRaises(ValueError) as exc: await client.call(1, 2, 3) self.assertEqual(("RPC method name is empty",), exc.exception.args) self.loop.run_until_complete(communicate()) def test_not_found_empty_name_on_server(self): client, server = self.make_rpc_pair() async def communicate(): with self.assertRaises(aiozmq.rpc.NotFoundError) as exc: await client._proto.call("", (), {}) self.assertEqual(("",), exc.exception.args) self.loop.run_until_complete(communicate()) def test_generic_exception(self): client, server = self.make_rpc_pair() async def communicate(): with self.assertRaises(aiozmq.rpc.GenericError) as exc: await client.call.generic_exception() self.assertEqual( ( "rpc_test.MyException", ("additional", "data"), "MyException('additional', 'data')", ), exc.exception.args, ) self.assertEqual( "", repr(exc.exception), ) self.loop.run_until_complete(communicate()) def test_exception_translator(self): client, server = self.make_rpc_pair( error_table={__name__ + ".MyException": MyException} ) async def communicate(): with self.assertRaises(MyException) as exc: await client.call.generic_exception() self.assertEqual(("additional", "data"), exc.exception.args) self.loop.run_until_complete(communicate()) def test_default_event_loop(self): asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy()) 
self.addCleanup(asyncio.set_event_loop_policy, None) self.addCleanup(self.loop.close) self.loop = loop = asyncio.get_event_loop() client, server = self.make_rpc_pair(use_loop=False) async def communicate(): ret = await client.call.func(1) self.assertEqual(2, ret) loop.run_until_complete(communicate()) def test_service_transport(self): client, server = self.make_rpc_pair() self.assertIsInstance(client.transport, aiozmq.ZmqTransport) self.assertIsInstance(server.transport, aiozmq.ZmqTransport) client.close() self.loop.run_until_complete(client.wait_closed()) with self.assertRaises(aiozmq.rpc.ServiceClosedError): client.transport server.close() self.loop.run_until_complete(server.wait_closed()) with self.assertRaises(aiozmq.rpc.ServiceClosedError): server.transport def test_client_timeout(self): client, server = self.make_rpc_pair() async def communicate(): with log_hook("aiozmq.rpc", self.err_queue): with self.assertRaises(asyncio.TimeoutError): with client.with_timeout(0.1) as timedout: await timedout.call.slow_call() t0 = time.monotonic() with self.assertRaises(asyncio.TimeoutError): await client.with_timeout(0.1).call.slow_call() t1 = time.monotonic() self.assertTrue(0.08 <= t1 - t0 <= 0.12, t1 - t0) # NB: dont check log records, they are not necessary present self.loop.run_until_complete(communicate()) def test_client_override_global_timeout(self): client, server = self.make_rpc_pair(timeout=10) async def communicate(): with self.assertRaises(asyncio.TimeoutError): with client.with_timeout(0.1) as timedout: await timedout.call.slow_call() t0 = time.monotonic() with self.assertRaises(asyncio.TimeoutError): await client.with_timeout(0.1).call.slow_call() t1 = time.monotonic() self.assertTrue(0.08 <= t1 - t0 <= 0.12, t1 - t0) server.close() client.close() await asyncio.gather(server.wait_closed(), client.wait_closed()) self.loop.run_until_complete(communicate()) def test_type_of_handler(self): async def go(): with self.assertRaises(TypeError): await 
aiozmq.rpc.serve_rpc("Bad Handler", bind="tcp://127.0.0.1:*") self.loop.run_until_complete(go()) def test_unknown_format_at_server(self): async def go(): port = find_unused_port() server = await aiozmq.rpc.serve_rpc( MyHandler(self.loop), bind="tcp://127.0.0.1:{}".format(port), ) tr, pr = await create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER, connect="tcp://127.0.0.1:{}".format(port), ) await asyncio.sleep(0.001) with log_hook("aiozmq.rpc", self.err_queue): tr.write([b"invalid", b"structure"]) ret = await self.err_queue.get() self.assertEqual(logging.CRITICAL, ret.levelno) self.assertEqual("Cannot unpack %r", ret.msg) self.assertEqual(([mock.ANY, b"invalid", b"structure"],), ret.args) self.assertIsNotNone(ret.exc_info) self.assertTrue(pr.received.empty()) server.close() self.loop.run_until_complete(go()) def test_malformed_args(self): async def go(): port = find_unused_port() server = await aiozmq.rpc.serve_rpc( MyHandler(self.loop), bind="tcp://127.0.0.1:{}".format(port), ) tr, pr = await create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER, connect="tcp://127.0.0.1:{}".format(port), ) with log_hook("aiozmq.rpc", self.err_queue): tr.write([struct.pack("=HHLd", 1, 2, 3, 4), b"bad args", b"bad_kwargs"]) ret = await self.err_queue.get() self.assertEqual(logging.CRITICAL, ret.levelno) self.assertEqual("Cannot unpack %r", ret.msg) self.assertEqual( ([mock.ANY, mock.ANY, b"bad args", b"bad_kwargs"],), ret.args ) self.assertIsNotNone(ret.exc_info) self.assertTrue(pr.received.empty()) server.close() self.loop.run_until_complete(go()) def test_malformed_kwargs(self): async def go(): port = find_unused_port() server = await aiozmq.rpc.serve_rpc( MyHandler(self.loop), bind="tcp://127.0.0.1:{}".format(port), ) tr, pr = await create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER, connect="tcp://127.0.0.1:{}".format(port), ) with log_hook("aiozmq.rpc", self.err_queue): tr.write( [ struct.pack("=HHLd", 1, 2, 3, 4), msgpack.packb((1, 2)), 
b"bad_kwargs", ] ) ret = await self.err_queue.get() self.assertEqual(logging.CRITICAL, ret.levelno) self.assertEqual("Cannot unpack %r", ret.msg) self.assertEqual( ([mock.ANY, mock.ANY, mock.ANY, b"bad_kwargs"],), ret.args ) self.assertIsNotNone(ret.exc_info) self.assertTrue(pr.received.empty()) server.close() self.loop.run_until_complete(go()) def test_unknown_format_at_client(self): async def go(): port = find_unused_port() tr, pr = await create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port), ) client = await aiozmq.rpc.connect_rpc( connect="tcp://127.0.0.1:{}".format(port) ) with log_hook("aiozmq.rpc", self.err_queue): tr.write([b"invalid", b"structure"]) ret = await self.err_queue.get() self.assertEqual(logging.CRITICAL, ret.levelno) self.assertEqual("Cannot unpack %r", ret.msg) self.assertEqual(([b"invalid", b"structure"],), ret.args) self.assertIsNotNone(ret.exc_info) client.close() self.loop.run_until_complete(go()) def test_malformed_answer_at_client(self): async def go(): port = find_unused_port() tr, pr = await create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port), ) client = await aiozmq.rpc.connect_rpc( connect="tcp://127.0.0.1:{}".format(port) ) with log_hook("aiozmq.rpc", self.err_queue): tr.write([struct.pack("=HHLd?", 1, 2, 3, 4, True), b"bad_answer"]) ret = await self.err_queue.get() self.assertEqual(logging.CRITICAL, ret.levelno) self.assertEqual("Cannot unpack %r", ret.msg) self.assertEqual(([mock.ANY, b"bad_answer"],), ret.args) self.assertIsNotNone(ret.exc_info) client.close() tr.close() self.loop.run_until_complete(go()) def test_unknown_req_id_at_client(self): async def go(): port = find_unused_port() tr, pr = await create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port), ) client = await aiozmq.rpc.connect_rpc( connect="tcp://127.0.0.1:{}".format(port) ) with log_hook("aiozmq.rpc", self.err_queue): 
tr.write( [struct.pack("=HHLd?", 1, 2, 34435, 4, True), msgpack.packb((1, 2))] ) ret = await self.err_queue.get() self.assertEqual(logging.CRITICAL, ret.levelno) self.assertEqual("Unknown answer id: %d (%d %d %f %d) -> %s", ret.msg) self.assertEqual((mock.ANY, 1, 2, 4.0, True, (1, 2)), ret.args) self.assertIsNone(ret.exc_info) client.close() self.loop.run_until_complete(go()) def test_overflow_client_counter(self): client, server = self.make_rpc_pair() async def communicate(): client._proto.counter = 0xFFFFFFFF ret = await client.call.func(1) self.assertEqual(2, ret) self.assertEqual(0, client._proto.counter) client.close() await client.wait_closed() server.close() self.loop.run_until_complete(communicate()) def test_log_exceptions(self): client, server = self.make_rpc_pair(log_exceptions=True) async def communicate(): with log_hook("aiozmq.rpc", self.err_queue): with self.assertRaises(RuntimeError) as exc: await client.call.exc(1) self.assertEqual(("bad arg", 1), exc.exception.args) ret = await self.err_queue.get() self.assertEqual(logging.ERROR, ret.levelno) self.assertEqual( "An exception %r from method %r " "call occurred.\n" "args = %s\nkwargs = %s\n", ret.msg, ) self.assertEqual((mock.ANY, "exc", "(1,)", "{}"), ret.args) self.assertIsNotNone(ret.exc_info) self.loop.run_until_complete(communicate()) def test_call_closed_rpc(self): client, server = self.make_rpc_pair() async def communicate(): client.close() await client.wait_closed() with self.assertRaises(aiozmq.rpc.ServiceClosedError): await client.call.func() self.loop.run_until_complete(communicate()) def test_call_closed_rpc_cancelled(self): client, server = self.make_rpc_pair() async def communicate(): server.close() waiter = client.call.func() server.close() await server.wait_closed() client.close() await client.wait_closed() with self.assertRaises(asyncio.CancelledError): await waiter self.loop.run_until_complete(communicate()) def test_server_close(self): client, server = self.make_rpc_pair() async 
def communicate(): waiter = client.call.fut() await asyncio.sleep(0.01) self.assertEqual(1, len(server._proto.pending_waiters)) task = next(iter(server._proto.pending_waiters)) self.assertIsInstance(task, asyncio.Task) server.close() await server.wait_closed() await asyncio.sleep(0.01) self.assertEqual(0, len(server._proto.pending_waiters)) del waiter self.loop.run_until_complete(communicate()) @mock.patch("aiozmq.rpc.base.logger") def test_exclude_log_exceptions(self, m_log): client, server = self.make_rpc_pair( log_exceptions=True, exclude_log_exceptions=(MyException,) ) async def communicate(): with self.assertRaises(RuntimeError): await client.call.exc(1) m_log.exception.assert_called_with( "An exception %r from method %r call occurred.\n" "args = %s\nkwargs = %s\n", mock.ANY, mock.ANY, mock.ANY, mock.ANY, ) m_log.reset_mock() with self.assertRaises(ValueError): await client.call.exc2() self.assertFalse(m_log.called) self.loop.run_until_complete(communicate()) def test_client_restore_after_timeout(self): client, server = self.make_rpc_pair() async def communicate(): with log_hook("aiozmq.rpc", self.err_queue): ret = await client.call.func(1) self.assertEqual(2, ret) with self.assertRaises(asyncio.TimeoutError): await client.with_timeout(0.1).call.slow_call() ret = await client.call.func(2) self.assertEqual(3, ret) with self.assertRaises(asyncio.TimeoutError): await client.with_timeout(0.1).call.slow_call() ret = await client.call.func(3) self.assertEqual(4, ret) self.loop.run_until_complete(communicate()) def test_client_restore_after_timeout2(self): client, server = self.make_rpc_pair() async def communicate(): with log_hook("aiozmq.rpc", self.err_queue): ret = await client.call.func(1) self.assertEqual(2, ret) with self.assertRaises(asyncio.TimeoutError): await client.with_timeout(0.1).call.slow_call() await asyncio.sleep(0.3) ret = await client.call.func(2) self.assertEqual(3, ret) with self.assertRaises(asyncio.TimeoutError): await 
client.with_timeout(0.1).call.slow_call() ret = await client.call.func(3) self.assertEqual(4, ret) self.loop.run_until_complete(communicate()) def xtest_wait_closed(self): client, server = self.make_rpc_pair() async def go(): f1 = client.call.not_so_fast() client.close() client.wait_closed() r = await f1 self.assertEqual("ok", r) self.loop.run_until_complete(go()) class LoopRpcTests(unittest.TestCase, RpcTestsMixin): def setUp(self): self.loop = aiozmq.ZmqEventLoop() asyncio.set_event_loop(self.loop) self.client = self.server = None self.err_queue = asyncio.Queue() def tearDown(self): self.close_service(self.client) self.close_service(self.server) self.loop.close() asyncio.set_event_loop(None) # zmq.Context.instance().term() class LoopLessRpcTests(unittest.TestCase, RpcTestsMixin): def setUp(self): self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) self.client = self.server = None self.err_queue = asyncio.Queue() def tearDown(self): self.close_service(self.client) self.close_service(self.server) self.loop.close() asyncio.set_event_loop(None) # zmq.Context.instance().term() class AbstractHandlerTests(unittest.TestCase): def test___getitem__(self): class MyHandler(aiozmq.rpc.AbstractHandler): def __getitem__(self, key): return super().__getitem__(key) with self.assertRaises(KeyError): MyHandler()[1] def test_subclass(self): self.assertTrue(issubclass(dict, aiozmq.rpc.AbstractHandler)) self.assertIsInstance({}, aiozmq.rpc.AbstractHandler) self.assertFalse(issubclass(object, aiozmq.rpc.AbstractHandler)) self.assertNotIsInstance(object(), aiozmq.rpc.AbstractHandler) self.assertNotIsInstance("string", aiozmq.rpc.AbstractHandler) self.assertNotIsInstance(b"bytes", aiozmq.rpc.AbstractHandler) class TestLogger(unittest.TestCase): def test_logger_name(self): self.assertEqual("aiozmq.rpc", logger.name) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 
aiozmq-1.0.0/tests/rpc_translators_test.py0000644000076600000240000000476100000000000017773 0ustar00jellestaffimport unittest import asyncio import aiozmq import aiozmq.rpc import msgpack from aiozmq._test_util import find_unused_port, RpcMixin class Point: def __init__(self, x, y): self.x = x self.y = y def __eq__(self, other): if isinstance(other, Point): return (self.x, self.y) == (other.x, other.y) return NotImplemented translation_table = { 0: ( Point, lambda value: msgpack.packb((value.x, value.y)), lambda binary: Point(*msgpack.unpackb(binary)), ), } class MyHandler(aiozmq.rpc.AttrHandler): @aiozmq.rpc.method async def func(self, point): assert isinstance(point, Point) return point class RpcTranslatorsMixin(RpcMixin): def make_rpc_pair(self, *, error_table=None): port = find_unused_port() async def create(): server = await aiozmq.rpc.serve_rpc( MyHandler(), bind="tcp://127.0.0.1:{}".format(port), translation_table=translation_table, ) client = await aiozmq.rpc.connect_rpc( connect="tcp://127.0.0.1:{}".format(port), error_table=error_table, translation_table=translation_table, ) return client, server self.client, self.server = self.loop.run_until_complete(create()) return self.client, self.server def test_simple(self): client, server = self.make_rpc_pair() pt = Point(1, 2) async def communicate(): ret = await client.call.func(pt) self.assertEqual(ret, pt) self.loop.run_until_complete(communicate()) class LoopRpcTranslatorsTests(unittest.TestCase, RpcTranslatorsMixin): def setUp(self): self.loop = aiozmq.ZmqEventLoop() asyncio.set_event_loop(None) self.client = self.server = None def tearDown(self): self.close_service(self.client) self.close_service(self.server) self.loop.close() asyncio.set_event_loop(None) # zmq.Context.instance().term() class LooplessRpcTranslatorsTests(unittest.TestCase, RpcTranslatorsMixin): def setUp(self): self.loop = asyncio.new_event_loop() asyncio.set_event_loop(None) self.client = self.server = None def tearDown(self): 
self.close_service(self.client) self.close_service(self.server) self.loop.close() asyncio.set_event_loop(None) # zmq.Context.instance().term() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/sample.crt0000644000076600000240000000146600000000000015134 0ustar00jellestaff-----BEGIN CERTIFICATE----- MIICMzCCAZwCCQDFl4ys0fU7iTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQGEwJV UzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuLUZyYW5jaXNjbzEi MCAGA1UECgwZUHl0aG9uIFNvZnR3YXJlIEZvbmRhdGlvbjAeFw0xMzAzMTgyMDA3 MjhaFw0yMzAzMTYyMDA3MjhaMF4xCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxp Zm9ybmlhMRYwFAYDVQQHDA1TYW4tRnJhbmNpc2NvMSIwIAYDVQQKDBlQeXRob24g U29mdHdhcmUgRm9uZGF0aW9uMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCn t3s+J7L0xP/YdAQOacpPi9phlrzKZhcXL3XMu2LCUg2fNJpx/47Vc5TZSaO11uO7 gdwVz3Z7Q2epAgwo59JLffLt5fia8+a/SlPweI/j4+wcIIIiqusnLfpqR8cIAavg Z06cLYCDvb9wMlheIvSJY12skc1nnphWS2YJ0Xm6uQIDAQABMA0GCSqGSIb3DQEB BQUAA4GBAE9PknG6pv72+5z/gsDGYy8sK5UNkbWSNr4i4e5lxVsF03+/M71H+3AB MxVX4+A+Vlk2fmU+BrdHIIUE0r1dDcO3josQ9hc9OJpp5VLSQFP8VeuJCmzYPp9I I8WbW93cnXnChTrYQVdgVoFdv7GE9YgU7NYkrGIM0nZl1/f/bHPB -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/sample.key0000644000076600000240000000156700000000000015136 0ustar00jellestaff-----BEGIN RSA PRIVATE KEY----- MIICXQIBAAKBgQCnt3s+J7L0xP/YdAQOacpPi9phlrzKZhcXL3XMu2LCUg2fNJpx /47Vc5TZSaO11uO7gdwVz3Z7Q2epAgwo59JLffLt5fia8+a/SlPweI/j4+wcIIIi qusnLfpqR8cIAavgZ06cLYCDvb9wMlheIvSJY12skc1nnphWS2YJ0Xm6uQIDAQAB AoGABfm8k19Yue3W68BecKEGS0VBV57GRTPT+MiBGvVGNIQ15gk6w3sGfMZsdD1y bsUkQgcDb2d/4i5poBTpl/+Cd41V+c20IC/sSl5X1IEreHMKSLhy/uyjyiyfXlP1 iXhToFCgLWwENWc8LzfUV8vuAV5WG6oL9bnudWzZxeqx8V0CQQDR7xwVj6LN70Eb DUhSKLkusmFw5Gk9NJ/7wZ4eHg4B8c9KNVvSlLCLhcsVTQXuqYeFpOqytI45SneP lr0vrvsDAkEAzITYiXu6ox5huDCG7imX2W9CAYuX638urLxBqBXMS7GqBzojD6RL 21Q8oPwJWJquERa3HDScq1deiQbM9uKIkwJBAIa1PLslGN216Xv3UPHPScyKD/aF 
ynXIv+OnANPoiyp6RH4ksQ/18zcEGiVH8EeNpvV9tlAHhb+DZibQHgNr74sCQQC0 zhToplu/bVKSlUQUNO0rqrI9z30FErDewKeCw5KSsIRSU1E/uM3fHr9iyq4wiL6u GNjUtKZ0y46lsT9uW6LFAkB5eqeEQnshAdr3X5GykWHJ8DDGBXPPn6Rce1NX4RSq V9khG2z1bFyfo+hMqpYnF2k32hVq3E54RS8YYnwBsVof -----END RSA PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/selectors_test.py0000644000076600000240000003414200000000000016552 0ustar00jellestaffimport errno import os import random import signal import socket from time import sleep import unittest from unittest import mock try: from time import monotonic as time except ImportError: from time import time as time try: import resource except ImportError: resource = None import zmq from aiozmq.selector import ZmqSelector, EVENT_READ, EVENT_WRITE, SelectorKey from aiozmq._test_util import requires_mac_ver if hasattr(socket, "socketpair"): socketpair = socket.socketpair else: def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): with socket.socket(family, type, proto) as server: server.bind(("127.0.0.1", 0)) server.listen(3) c = socket.socket(family, type, proto) try: c.connect(server.getsockname()) caddr = c.getsockname() while True: a, addr = server.accept() # check that we've got the correct client if addr == caddr: return c, a a.close() except OSError: c.close() raise def find_ready_matching(ready, flag): match = [] for key, events in ready: if events & flag: match.append(key.fileobj) return match class SelectorTests(unittest.TestCase): SELECTOR = ZmqSelector def make_socketpair(self): rd, wr = socketpair() self.addCleanup(rd.close) self.addCleanup(wr.close) return rd, wr def test_register(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, EVENT_READ, "data") self.assertIsInstance(key, SelectorKey) self.assertEqual(key.fileobj, rd) self.assertEqual(key.fd, rd.fileno()) self.assertEqual(key.events, EVENT_READ) 
self.assertEqual(key.data, "data") # register an unknown event self.assertRaises(ValueError, s.register, 0, 999999) # register an invalid FD self.assertRaises(ValueError, s.register, -10, EVENT_READ) # register twice self.assertRaises(KeyError, s.register, rd, EVENT_READ) # register the same FD, but with a different object self.assertRaises(KeyError, s.register, rd.fileno(), EVENT_READ) # register an invalid fd type self.assertRaises(ValueError, s.register, "abc", EVENT_READ) def test_register_with_zmq_error(self): s = self.SELECTOR() self.addCleanup(s.close) m = mock.Mock() m.side_effect = zmq.ZMQError(errno.EFAULT, "not a socket") s._poller.register = m with self.assertRaises(OSError) as ctx: s.register(1, EVENT_READ) self.assertEqual(errno.EFAULT, ctx.exception.errno) self.assertNotIn(1, s.get_map()) def test_unregister(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, EVENT_READ) s.unregister(rd) # unregister an unknown file obj self.assertRaises(KeyError, s.unregister, 999999) # unregister twice self.assertRaises(KeyError, s.unregister, rd) def test_unregister_with_zmq_error(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, EVENT_READ) m = mock.Mock() m.side_effect = zmq.ZMQError(errno.EFAULT, "not a socket") s._poller.unregister = m with self.assertRaises(OSError) as ctx: s.unregister(rd) self.assertEqual(errno.EFAULT, ctx.exception.errno) self.assertIn(rd, s.get_map()) def test_unregister_after_fd_close(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() r, w = rd.fileno(), wr.fileno() s.register(r, EVENT_READ) s.register(w, EVENT_WRITE) rd.close() wr.close() s.unregister(r) s.unregister(w) @unittest.skipUnless(os.name == "posix", "requires posix") def test_unregister_after_fd_close_and_reuse(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() r, w = rd.fileno(), wr.fileno() s.register(r, 
EVENT_READ) s.register(w, EVENT_WRITE) rd2, wr2 = self.make_socketpair() rd.close() wr.close() os.dup2(rd2.fileno(), r) os.dup2(wr2.fileno(), w) self.addCleanup(os.close, r) self.addCleanup(os.close, w) s.unregister(r) s.unregister(w) def test_unregister_after_socket_close(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, EVENT_READ) s.register(wr, EVENT_WRITE) rd.close() wr.close() s.unregister(rd) s.unregister(wr) def test_modify(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, EVENT_READ) # modify events key2 = s.modify(rd, EVENT_WRITE) self.assertNotEqual(key.events, key2.events) self.assertEqual(key2, s.get_key(rd)) s.unregister(rd) # modify data d1 = object() d2 = object() key = s.register(rd, EVENT_READ, d1) key2 = s.modify(rd, EVENT_READ, d2) self.assertEqual(key.events, key2.events) self.assertNotEqual(key.data, key2.data) self.assertEqual(key2, s.get_key(rd)) self.assertEqual(key2.data, d2) key3 = s.modify(rd, EVENT_READ, d2) self.assertIs(key3, key2) # modify unknown file obj self.assertRaises(KeyError, s.modify, 999999, EVENT_READ) # modify use a shortcut d3 = object() s.register = mock.Mock() s.unregister = mock.Mock() s.modify(rd, EVENT_READ, d3) self.assertFalse(s.register.called) self.assertFalse(s.unregister.called) def test_modify_with_zmq_error(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, EVENT_READ) m = mock.Mock() m.side_effect = zmq.ZMQError(errno.EFAULT, "not a socket") s._poller.modify = m with self.assertRaises(OSError) as ctx: s.modify(rd, EVENT_WRITE) self.assertEqual(errno.EFAULT, ctx.exception.errno) self.assertIn(rd, s.get_map()) def test_close(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, EVENT_READ) s.register(wr, EVENT_WRITE) s.close() self.assertRaises(KeyError, s.get_key, rd) self.assertRaises(KeyError, s.get_key, wr) def 
test_get_key(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, EVENT_READ, "data") self.assertEqual(key, s.get_key(rd)) # unknown file obj self.assertRaises(KeyError, s.get_key, 999999) def test_get_map(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() keys = s.get_map() self.assertFalse(keys) self.assertEqual(len(keys), 0) self.assertEqual(list(keys), []) key = s.register(rd, EVENT_READ, "data") self.assertIn(rd, keys) self.assertEqual(key, keys[rd]) self.assertEqual(len(keys), 1) self.assertEqual(list(keys), [rd.fileno()]) self.assertEqual(list(keys.values()), [key]) # unknown file obj with self.assertRaises(KeyError): keys[999999] # Read-only mapping with self.assertRaises(TypeError): del keys[rd] def test_select(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, EVENT_READ) wr_key = s.register(wr, EVENT_WRITE) result = s.select() for key, events in result: self.assertTrue(isinstance(key, SelectorKey)) self.assertTrue(events) self.assertFalse(events & ~(EVENT_READ | EVENT_WRITE)) self.assertEqual([(wr_key, EVENT_WRITE)], result) def test_select_with_zmq_error(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, EVENT_READ) m = mock.Mock() m.side_effect = zmq.ZMQError(errno.EFAULT, "not a socket") s._poller.poll = m with self.assertRaises(OSError) as ctx: s.select() self.assertEqual(errno.EFAULT, ctx.exception.errno) def test_select_without_key(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(wr, EVENT_WRITE) s._fd_to_key = {} result = s.select() self.assertFalse(result) def test_context_manager(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() with s as sel: sel.register(rd, EVENT_READ) sel.register(wr, EVENT_WRITE) self.assertRaises(KeyError, s.get_key, rd) self.assertRaises(KeyError, s.get_key, wr) def 
test_fileno(self): s = self.SELECTOR() self.addCleanup(s.close) if hasattr(s, "fileno"): fd = s.fileno() self.assertTrue(isinstance(fd, int)) self.assertGreaterEqual(fd, 0) def test_selector(self): s = self.SELECTOR() self.addCleanup(s.close) NUM_SOCKETS = 12 MSG = b" This is a test." MSG_LEN = len(MSG) readers = [] writers = [] r2w = {} w2r = {} for i in range(NUM_SOCKETS): rd, wr = self.make_socketpair() s.register(rd, EVENT_READ) s.register(wr, EVENT_WRITE) readers.append(rd) writers.append(wr) r2w[rd] = wr w2r[wr] = rd bufs = [] while writers: ready = s.select() ready_writers = find_ready_matching(ready, EVENT_WRITE) if not ready_writers: self.fail("no sockets ready for writing") wr = random.choice(ready_writers) wr.send(MSG) for i in range(10): ready = s.select() ready_readers = find_ready_matching(ready, EVENT_READ) if ready_readers: break # there might be a delay between the write to the write end and # the read end is reported ready sleep(0.1) else: self.fail("no sockets ready for reading") self.assertEqual([w2r[wr]], ready_readers) rd = ready_readers[0] buf = rd.recv(MSG_LEN) self.assertEqual(len(buf), MSG_LEN) bufs.append(buf) s.unregister(r2w[rd]) s.unregister(rd) writers.remove(r2w[rd]) self.assertEqual(bufs, [MSG] * NUM_SOCKETS) def test_timeout(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(wr, EVENT_WRITE) t = time() self.assertEqual(1, len(s.select(0))) self.assertEqual(1, len(s.select(-1))) self.assertLess(time() - t, 0.5) s.unregister(wr) s.register(rd, EVENT_READ) t = time() self.assertFalse(s.select(0)) self.assertFalse(s.select(-1)) self.assertLess(time() - t, 0.5) t0 = time() self.assertFalse(s.select(1)) t1 = time() dt = t1 - t0 self.assertTrue(0.8 <= dt <= 1.6, dt) @unittest.skipUnless( hasattr(signal, "alarm"), "signal.alarm() required for this test" ) def test_select_interrupt(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() orig_alrm_handler = 
signal.signal(signal.SIGALRM, lambda *args: None) self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler) self.addCleanup(signal.alarm, 0) signal.alarm(1) s.register(rd, EVENT_READ) t = time() self.assertFalse(s.select(2)) self.assertLess(time() - t, 3.5) # see issue #18963 for why it's skipped on older OS X versions @requires_mac_ver(10, 5) @unittest.skipUnless(resource, "Test needs resource module") def test_above_fd_setsize(self): # A scalable implementation should have no problem with more than # FD_SETSIZE file descriptors. Since we don't know the value, we just # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling. soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) try: resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard)) self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE, (soft, hard)) NUM_FDS = hard except (OSError, ValueError): NUM_FDS = soft # guard for already allocated FDs (stdin, stdout...) NUM_FDS -= 32 s = self.SELECTOR() self.addCleanup(s.close) for i in range(NUM_FDS // 2): try: rd, wr = self.make_socketpair() except OSError: # too many FDs, skip - note that we should only catch EMFILE # here, but apparently *BSD and Solaris can fail upon connect() # or bind() with EADDRNOTAVAIL, so let's be safe self.skipTest("FD limit reached") try: s.register(rd, EVENT_READ) s.register(wr, EVENT_WRITE) except OSError as e: if e.errno == errno.ENOSPC: # this can be raised by epoll if we go over # fs.epoll.max_user_watches sysctl self.skipTest("FD limit reached") raise self.assertEqual(NUM_FDS // 2, len(s.select())) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/ssl_cert.pem0000644000076600000240000000154300000000000015456 0ustar00jellestaff-----BEGIN CERTIFICATE----- MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u 
IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/ssl_key.pem0000644000076600000240000000162400000000000015311 0ustar00jellestaff-----BEGIN PRIVATE KEY----- MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0 ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5 oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F 0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/ SPIXQuT8RMPDVNQ= -----END PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/transport_test.py0000644000076600000240000006741500000000000016614 
0ustar00jellestaffimport unittest import asyncio import collections import zmq import aiozmq import errno import selectors import weakref from collections import deque from aiozmq.core import _ZmqTransportImpl, _ZmqLooplessTransportImpl from unittest import mock from aiozmq._test_util import check_errno async def dummy(): pass # make_test_protocol, TestSelector, and TestLoop were taken from # test.test_asyncio.utils in CPython. # https://github.com/python/cpython/blob/9602643120a509858d0bee4215d7f150e6125468/Lib/test/test_asyncio/utils.py def make_test_protocol(base): dct = {} for name in dir(base): if name.startswith("__") and name.endswith("__"): # skip magic names continue dct[name] = mock.Mock(return_value=None) return type("TestProtocol", (base,) + base.__bases__, dct)() class TestSelector(selectors.BaseSelector): def __init__(self): self.keys = {} def register(self, fileobj, events, data=None): key = selectors.SelectorKey(fileobj, 0, events, data) self.keys[fileobj] = key return key def unregister(self, fileobj): return self.keys.pop(fileobj) def select(self, timeout): return [] def get_map(self): return self.keys class TestLoop(asyncio.base_events.BaseEventLoop): def __init__(self): super().__init__() self._selector = TestSelector() self.readers = {} self.writers = {} self.reset_counters() self._transports = weakref.WeakValueDictionary() def _add_reader(self, fd, callback, *args): self.readers[fd] = asyncio.events.Handle(callback, args, self) def _remove_reader(self, fd): self.remove_reader_count[fd] += 1 if fd in self.readers: del self.readers[fd] return True else: return False def assert_reader(self, fd, callback, *args): if fd not in self.readers: raise AssertionError("fd {fd} is not registered".format(fd=fd)) handle = self.readers[fd] if handle._callback != callback: raise AssertionError( "unexpected callback: {handle._callback} != {callback}".format( handle=handle, callback=callback ) ) if handle._args != args: raise AssertionError( "unexpected callback 
args: {handle._args} != {args}".format( handle=handle, args=args ) ) def assert_no_reader(self, fd): if fd in self.readers: raise AssertionError("fd {fd} is registered".format(fd=fd)) def _add_writer(self, fd, callback, *args): self.writers[fd] = asyncio.events.Handle(callback, args, self) def _remove_writer(self, fd): self.remove_writer_count[fd] += 1 if fd in self.writers: del self.writers[fd] return True else: return False def assert_writer(self, fd, callback, *args): assert fd in self.writers, "fd {} is not registered".format(fd) handle = self.writers[fd] assert handle._callback == callback, "{!r} != {!r}".format( handle._callback, callback ) assert handle._args == args, "{!r} != {!r}".format(handle._args, args) def _ensure_fd_no_transport(self, fd): try: transport = self._transports[fd] except KeyError: pass else: raise RuntimeError( "File descriptor {!r} is used by transport {!r}".format(fd, transport) ) def add_reader(self, fd, callback, *args): """Add a reader callback.""" self._ensure_fd_no_transport(fd) return self._add_reader(fd, callback, *args) def remove_reader(self, fd): """Remove a reader callback.""" self._ensure_fd_no_transport(fd) return self._remove_reader(fd) def add_writer(self, fd, callback, *args): """Add a writer callback..""" self._ensure_fd_no_transport(fd) return self._add_writer(fd, callback, *args) def remove_writer(self, fd): """Remove a writer callback.""" self._ensure_fd_no_transport(fd) return self._remove_writer(fd) def reset_counters(self): self.remove_reader_count = collections.defaultdict(int) self.remove_writer_count = collections.defaultdict(int) def _process_events(self, event_list): return def _write_to_self(self): pass class TransportTests(unittest.TestCase): def setUp(self): self.loop = TestLoop() asyncio.set_event_loop(self.loop) self.sock = mock.Mock() self.sock.closed = False self.proto = make_test_protocol(aiozmq.ZmqProtocol) self.tr = _ZmqTransportImpl(self.loop, zmq.SUB, self.sock, self.proto) self.exc_handler = 
mock.Mock() self.loop.set_exception_handler(self.exc_handler) def tearDown(self): self.loop.close() asyncio.set_event_loop(None) def test_empty_write(self): self.tr.write([b""]) self.assertTrue(self.sock.send_multipart.called) self.assertFalse(self.proto.pause_writing.called) self.assertFalse(self.tr._buffer) self.assertEqual(0, self.tr._buffer_size) self.assertFalse(self.exc_handler.called) self.assertNotIn(self.sock, self.loop.writers) def test_write(self): self.tr.write((b"a", b"b")) self.sock.send_multipart.assert_called_with((b"a", b"b"), zmq.DONTWAIT) self.assertFalse(self.proto.pause_writing.called) self.assertFalse(self.tr._buffer) self.assertEqual(0, self.tr._buffer_size) self.assertFalse(self.exc_handler.called) self.assertNotIn(self.sock, self.loop.writers) def test_partial_write(self): self.sock.send_multipart.side_effect = zmq.ZMQError(errno.EAGAIN) self.tr.write((b"a", b"b")) self.sock.send_multipart.assert_called_with((b"a", b"b"), zmq.DONTWAIT) self.assertFalse(self.proto.pause_writing.called) self.assertEqual([(2, (b"a", b"b"))], list(self.tr._buffer)) self.assertEqual(2, self.tr._buffer_size) self.assertFalse(self.exc_handler.called) self.loop.assert_writer(self.sock, self.tr._write_ready) def test_partial_double_write(self): self.sock.send_multipart.side_effect = zmq.ZMQError(errno.EAGAIN) self.tr.write((b"a", b"b")) self.tr.write((b"c",)) self.sock.send_multipart.mock_calls = [mock.call((b"a", b"b"), zmq.DONTWAIT)] self.assertFalse(self.proto.pause_writing.called) self.assertEqual([(2, (b"a", b"b")), (1, (b"c",))], list(self.tr._buffer)) self.assertEqual(3, self.tr._buffer_size) self.assertFalse(self.exc_handler.called) self.loop.assert_writer(self.sock, self.tr._write_ready) def test__write_ready(self): self.tr._buffer.append((2, (b"a", b"b"))) self.tr._buffer.append((1, (b"c",))) self.tr._buffer_size = 3 self.loop.add_writer(self.sock, self.tr._write_ready) self.tr._write_ready() self.sock.send_multipart.mock_calls = [mock.call((b"a", b"b"), 
zmq.DONTWAIT)] self.assertFalse(self.proto.pause_writing.called) self.assertEqual([(1, (b"c",))], list(self.tr._buffer)) self.assertEqual(1, self.tr._buffer_size) self.assertFalse(self.exc_handler.called) self.loop.assert_writer(self.sock, self.tr._write_ready) def test__write_ready_sent_whole_buffer(self): self.tr._buffer.append((2, (b"a", b"b"))) self.tr._buffer_size = 2 self.loop.add_writer(self.sock, self.tr._write_ready) self.sock.send_multipart.mock_calls = [mock.call((b"a", b"b"), zmq.DONTWAIT)] self.tr._write_ready() self.assertFalse(self.proto.pause_writing.called) self.assertFalse(self.tr._buffer) self.assertEqual(0, self.tr._buffer_size) self.assertFalse(self.exc_handler.called) self.assertEqual(1, self.loop.remove_writer_count[self.sock]) def test__write_ready_raises_ZMQError(self): self.tr._buffer.append((2, (b"a", b"b"))) self.tr._buffer_size = 2 self.loop.add_writer(self.sock, self.tr._write_ready) self.sock.send_multipart.side_effect = zmq.ZMQError( errno.ENOTSUP, "not supported" ) self.tr._write_ready() self.assertFalse(self.proto.pause_writing.called) self.assertFalse(self.tr._buffer) self.assertEqual(0, self.tr._buffer_size) self.assertTrue(self.exc_handler.called) self.assertEqual(1, self.loop.remove_writer_count[self.sock]) self.assertTrue(self.tr._closing) self.assertEqual(1, self.loop.remove_reader_count[self.sock]) def test__write_ready_raises_EAGAIN(self): self.tr._buffer.append((2, (b"a", b"b"))) self.tr._buffer_size = 2 self.loop.add_writer(self.sock, self.tr._write_ready) self.sock.send_multipart.side_effect = zmq.ZMQError(errno.EAGAIN, "try again") self.tr._write_ready() self.assertFalse(self.proto.pause_writing.called) self.assertEqual( [ ( 2, ( b"a", b"b", ), ) ], list(self.tr._buffer), ) self.assertEqual(2, self.tr._buffer_size) self.assertFalse(self.exc_handler.called) self.loop.assert_writer(self.sock, self.tr._write_ready) self.assertFalse(self.tr._closing) self.loop.assert_reader(self.sock, self.tr._read_ready) def 
test_close_with_empty_buffer(self): self.tr.close() self.assertTrue(self.tr._closing) self.assertEqual(1, self.loop.remove_reader_count[self.sock]) self.assertFalse(self.tr._buffer) self.assertEqual(0, self.tr._buffer_size) self.assertIsNotNone(self.tr._protocol) self.assertIsNotNone(self.tr._zmq_sock) self.assertIsNotNone(self.tr._loop) self.assertFalse(self.sock.close.called) self.loop.run_until_complete(dummy()) self.proto.connection_lost.assert_called_with(None) self.assertIsNone(self.tr._protocol) self.assertIsNone(self.tr._zmq_sock) self.assertIsNone(self.tr._loop) self.sock.close.assert_called_with() def test_close_already_closed_socket(self): self.tr._zmq_sock.closed = True self.tr.close() self.assertTrue(self.tr._closing) self.assertEqual(1, self.loop.remove_reader_count[self.sock]) self.assertFalse(self.tr._buffer) self.assertEqual(0, self.tr._buffer_size) self.assertIsNotNone(self.tr._protocol) self.assertIsNotNone(self.tr._zmq_sock) self.assertIsNotNone(self.tr._loop) self.assertFalse(self.sock.close.called) self.loop.run_until_complete(dummy()) self.proto.connection_lost.assert_called_with(None) self.assertIsNone(self.tr._protocol) self.assertIsNone(self.tr._zmq_sock) self.assertIsNone(self.tr._loop) self.assertFalse(self.sock.close.called) def test_close_with_waiting_buffer(self): self.tr._buffer = deque([(b"data",)]) self.tr._buffer_size = 4 self.loop.add_writer(self.sock, self.tr._write_ready) self.tr.close() self.assertEqual(1, self.loop.remove_reader_count[self.sock]) self.assertEqual(0, self.loop.remove_writer_count[self.sock]) self.assertEqual([(b"data",)], list(self.tr._buffer)) self.assertEqual(4, self.tr._buffer_size) self.assertTrue(self.tr._closing) self.assertIsNotNone(self.tr._protocol) self.assertIsNotNone(self.tr._zmq_sock) self.assertIsNotNone(self.tr._loop) self.assertFalse(self.sock.close.called) self.loop.run_until_complete(dummy()) self.assertIsNotNone(self.tr._protocol) self.assertIsNotNone(self.tr._zmq_sock) 
self.assertIsNotNone(self.tr._loop) self.assertFalse(self.sock.close.called) self.assertFalse(self.proto.connection_lost.called) def test_double_closing(self): self.tr.close() self.tr.close() self.assertEqual(1, self.loop.remove_reader_count[self.sock]) def test_close_on_last__write_ready(self): self.tr._buffer = deque([(4, (b"data",))]) self.tr._buffer_size = 4 self.loop.add_writer(self.sock, self.tr._write_ready) self.tr.close() self.tr._write_ready() self.assertFalse(self.tr._buffer) self.assertEqual(0, self.tr._buffer_size) self.assertIsNone(self.tr._protocol) self.assertIsNone(self.tr._zmq_sock) self.assertIsNone(self.tr._loop) self.proto.connection_lost.assert_called_with(None) self.sock.close.assert_called_with() def test_close_paused(self): self.tr.pause_reading() self.assertEqual(1, self.loop.remove_reader_count[self.sock]) self.tr.close() self.assertTrue(self.tr._closing) self.assertEqual(1, self.loop.remove_reader_count[self.sock]) self.assertFalse(self.tr._buffer) self.assertEqual(0, self.tr._buffer_size) self.assertIsNotNone(self.tr._protocol) self.assertIsNotNone(self.tr._zmq_sock) self.assertIsNotNone(self.tr._loop) self.assertFalse(self.sock.close.called) self.loop.run_until_complete(dummy()) self.proto.connection_lost.assert_called_with(None) self.assertIsNone(self.tr._protocol) self.assertIsNone(self.tr._zmq_sock) self.assertIsNone(self.tr._loop) self.sock.close.assert_called_with() def test_write_eof(self): self.assertFalse(self.tr.can_write_eof()) def test_dns_address(self): async def go(): with self.assertRaises(ValueError): await self.tr.connect("tcp://example.com:8080") def test_write_none(self): self.tr.write(None) self.assertFalse(self.sock.send_multipart.called) def test_write_noniterable(self): self.assertRaises(TypeError, self.tr.write, 1) self.assertFalse(self.sock.send_multipart.called) def test_write_nonbytes(self): self.assertRaises(TypeError, self.tr.write, [1]) self.assertFalse(self.sock.send_multipart.called) def 
test_abort_with_empty_buffer(self): self.tr.abort() self.assertTrue(self.tr._closing) self.assertEqual(1, self.loop.remove_reader_count[self.sock]) self.assertFalse(self.tr._buffer) self.assertEqual(0, self.tr._buffer_size) self.assertIsNotNone(self.tr._protocol) self.assertIsNotNone(self.tr._zmq_sock) self.assertIsNotNone(self.tr._loop) self.assertFalse(self.sock.close.called) self.loop.run_until_complete(dummy()) self.proto.connection_lost.assert_called_with(None) self.assertIsNone(self.tr._protocol) self.assertIsNone(self.tr._zmq_sock) self.assertIsNone(self.tr._loop) self.sock.close.assert_called_with() def test_abort_with_waiting_buffer(self): self.tr._buffer = deque([(b"data",)]) self.tr._buffer_size = 4 self.loop.add_writer(self.sock, self.tr._write_ready) self.tr.abort() self.assertEqual(1, self.loop.remove_reader_count[self.sock]) self.assertEqual(1, self.loop.remove_writer_count[self.sock]) self.assertEqual([], list(self.tr._buffer)) self.assertEqual(0, self.tr._buffer_size) self.assertTrue(self.tr._closing) self.loop.run_until_complete(dummy()) self.assertIsNone(self.tr._protocol) self.assertIsNone(self.tr._zmq_sock) self.assertIsNone(self.tr._loop) self.assertTrue(self.proto.connection_lost.called) self.assertTrue(self.sock.close.called) def test_abort_with_close_on_waiting_buffer(self): self.tr._buffer = deque([(b"data",)]) self.tr._buffer_size = 4 self.loop.add_writer(self.sock, self.tr._write_ready) self.tr.close() self.tr.abort() self.assertEqual(1, self.loop.remove_reader_count[self.sock]) self.assertEqual(1, self.loop.remove_writer_count[self.sock]) self.assertEqual([], list(self.tr._buffer)) self.assertEqual(0, self.tr._buffer_size) self.assertTrue(self.tr._closing) self.loop.run_until_complete(dummy()) self.assertIsNone(self.tr._protocol) self.assertIsNone(self.tr._zmq_sock) self.assertIsNone(self.tr._loop) self.assertTrue(self.proto.connection_lost.called) self.assertTrue(self.sock.close.called) def test_abort_paused(self): 
self.tr.pause_reading() self.assertEqual(1, self.loop.remove_reader_count[self.sock]) self.tr.abort() self.assertTrue(self.tr._closing) self.assertEqual(1, self.loop.remove_reader_count[self.sock]) self.assertFalse(self.tr._buffer) self.assertEqual(0, self.tr._buffer_size) self.assertIsNotNone(self.tr._protocol) self.assertIsNotNone(self.tr._zmq_sock) self.assertIsNotNone(self.tr._loop) self.assertFalse(self.sock.close.called) self.loop.run_until_complete(dummy()) self.proto.connection_lost.assert_called_with(None) self.assertIsNone(self.tr._protocol) self.assertIsNone(self.tr._zmq_sock) self.assertIsNone(self.tr._loop) self.sock.close.assert_called_with() def test__read_ready_got_EAGAIN(self): self.sock.recv_multipart.side_effect = zmq.ZMQError(errno.EAGAIN) self.tr._fatal_error = mock.Mock() self.tr._read_ready() self.assertFalse(self.tr._fatal_error.called) self.assertFalse(self.proto.msg_received.called) def test__read_ready_got_fatal_error(self): self.sock.recv_multipart.side_effect = zmq.ZMQError(errno.EINVAL) self.tr._fatal_error = mock.Mock() self.tr._read_ready() self.assertFalse(self.proto.msg_received.called) exc = self.tr._fatal_error.call_args[0][0] self.assertIsInstance(exc, OSError) self.assertEqual(exc.errno, errno.EINVAL) def test_setsockopt_EINTR(self): self.sock.setsockopt.side_effect = [zmq.ZMQError(errno.EINTR), None] self.assertIsNone(self.tr.setsockopt("opt", "val")) self.assertEqual( [mock.call("opt", "val"), mock.call("opt", "val")], self.sock.setsockopt.call_args_list, ) def test_getsockopt_EINTR(self): self.sock.getsockopt.side_effect = [zmq.ZMQError(errno.EINTR), "val"] self.assertEqual("val", self.tr.getsockopt("opt")) self.assertEqual( [mock.call("opt"), mock.call("opt")], self.sock.getsockopt.call_args_list ) def test_write_EAGAIN(self): self.sock.send_multipart.side_effect = zmq.ZMQError(errno.EAGAIN) self.tr.write((b"a", b"b")) self.sock.send_multipart.assert_called_once_with((b"a", b"b"), zmq.DONTWAIT) 
self.assertFalse(self.proto.pause_writing.called) self.assertEqual([(2, (b"a", b"b"))], list(self.tr._buffer)) self.assertEqual(2, self.tr._buffer_size) self.assertFalse(self.exc_handler.called) self.loop.assert_writer(self.sock, self.tr._write_ready) def test_write_EINTR(self): self.sock.send_multipart.side_effect = zmq.ZMQError(errno.EINTR) self.tr.write((b"a", b"b")) self.sock.send_multipart.assert_called_once_with((b"a", b"b"), zmq.DONTWAIT) self.assertFalse(self.proto.pause_writing.called) self.assertEqual([(2, (b"a", b"b"))], list(self.tr._buffer)) self.assertEqual(2, self.tr._buffer_size) self.assertFalse(self.exc_handler.called) self.loop.assert_writer(self.sock, self.tr._write_ready) def test_write_common_error(self): self.sock.send_multipart.side_effect = zmq.ZMQError(errno.ENOTSUP) self.tr.write((b"a", b"b")) self.sock.send_multipart.assert_called_once_with((b"a", b"b"), zmq.DONTWAIT) self.assertFalse(self.proto.pause_writing.called) self.assertFalse(self.tr._buffer) self.assertEqual(0, self.tr._buffer_size) self.assertNotIn(self.sock, self.loop.writers) check_errno(errno.ENOTSUP, self.exc_handler.call_args[0][1]["exception"]) def test_subscribe_invalid_socket_type(self): self.tr._zmq_type = zmq.PUB self.assertRaises(NotImplementedError, self.tr.subscribe, b"a") self.assertRaises(NotImplementedError, self.tr.unsubscribe, b"a") self.assertRaises(NotImplementedError, self.tr.subscriptions) def test_double_subscribe(self): self.tr.subscribe(b"val") self.tr.subscribe(b"val") self.assertEqual({b"val"}, self.tr.subscriptions()) self.sock.setsockopt.assert_called_once_with(zmq.SUBSCRIBE, b"val") def test_subscribe_bad_value_type(self): self.assertRaises(TypeError, self.tr.subscribe, "a") self.assertFalse(self.tr.subscriptions()) self.assertRaises(TypeError, self.tr.unsubscribe, "a") self.assertFalse(self.sock.setsockopt.called) self.assertFalse(self.tr.subscriptions()) def test_unsubscribe(self): self.tr.subscribe(b"val") self.tr.unsubscribe(b"val") 
self.assertFalse(self.tr.subscriptions()) self.sock.setsockopt.assert_called_with(zmq.UNSUBSCRIBE, b"val") def test__set_write_buffer_limits1(self): self.tr.set_write_buffer_limits(low=10) self.assertEqual(10, self.tr._low_water) self.assertEqual(40, self.tr._high_water) def test__set_write_buffer_limits2(self): self.tr.set_write_buffer_limits(high=60) self.assertEqual(15, self.tr._low_water) self.assertEqual(60, self.tr._high_water) def test__set_write_buffer_limits3(self): with self.assertRaises(ValueError): self.tr.set_write_buffer_limits(high=1, low=2) def test_get_write_buffer_limits(self): self.tr.set_write_buffer_limits(low=128, high=256) self.assertEqual((128, 256), self.tr.get_write_buffer_limits()) def test__maybe_pause_protocol(self): self.tr.set_write_buffer_limits(high=10) self.assertFalse(self.tr._protocol_paused) self.sock.send_multipart.side_effect = zmq.ZMQError(errno.EAGAIN) self.tr.write([b"binary data"]) self.assertEqual(11, self.tr._buffer_size) self.assertTrue(self.tr._protocol_paused) self.proto.pause_writing.assert_called_with() def test__maybe_pause_protocol_err(self): self.tr.set_write_buffer_limits(high=10) ceh = self.loop.call_exception_handler = mock.Mock() self.assertFalse(self.tr._protocol_paused) self.sock.send_multipart.side_effect = zmq.ZMQError(errno.EAGAIN) self.proto.pause_writing.side_effect = exc = RuntimeError() self.tr.write([b"binary data"]) self.assertEqual(11, self.tr._buffer_size) self.assertTrue(self.tr._protocol_paused) ceh.assert_called_with( { "transport": self.tr, "exception": exc, "protocol": self.proto, "message": "protocol.pause_writing() failed", } ) def test__maybe_pause_protocol_already_paused(self): self.tr.set_write_buffer_limits(high=10) self.tr._protocol_paused = True self.sock.send_multipart.side_effect = zmq.ZMQError(errno.EAGAIN) self.tr.write([b"binary data"]) self.assertEqual(11, self.tr._buffer_size) self.assertTrue(self.tr._protocol_paused) self.assertFalse(self.proto.pause_writing.called) def 
test__maybe_resume_protocol(self): self.tr.set_write_buffer_limits() self.tr._protocol_paused = True self.tr._buffer_size = 11 self.tr._buffer.append((11, [b"binary data"])) self.tr._write_ready() self.assertEqual(0, self.tr._buffer_size) self.assertFalse(self.tr._buffer) self.assertFalse(self.tr._protocol_paused) self.proto.resume_writing.assert_called_with() def test__maybe_resume_protocol_err(self): self.tr.set_write_buffer_limits() self.tr._protocol_paused = True self.tr._buffer_size = 11 self.tr._buffer.append((11, [b"binary data"])) ceh = self.loop.call_exception_handler = mock.Mock() self.proto.resume_writing.side_effect = exc = RuntimeError() self.tr._write_ready() self.assertEqual(0, self.tr._buffer_size) self.assertFalse(self.tr._buffer) self.assertFalse(self.tr._protocol_paused) ceh.assert_called_with( { "transport": self.tr, "exception": exc, "protocol": self.proto, "message": "protocol.resume_writing() failed", } ) def test_pause_resume_reading(self): self.assertFalse(self.tr._paused) self.loop.assert_reader(self.sock, self.tr._read_ready) self.tr.pause_reading() self.assertTrue(self.tr._paused) self.assertNotIn(self.sock, self.loop.readers) self.tr.resume_reading() self.assertFalse(self.tr._paused) self.loop.assert_reader(self.sock, self.tr._read_ready) def test_pause_closing(self): self.tr.close() with self.assertRaises(RuntimeError): self.tr.pause_reading() def test_pause_paused(self): self.tr.pause_reading() with self.assertRaises(RuntimeError): self.tr.pause_reading() def test_resume_not_paused(self): with self.assertRaises(RuntimeError): self.tr.resume_reading() def test_resume_closed(self): self.assertIn(self.sock, self.loop.readers) self.tr.pause_reading() self.tr.close() self.tr.resume_reading() self.assertNotIn(self.sock, self.loop.readers) def test_conn_lost_on_force_close(self): self.tr._conn_lost = 1 self.tr._force_close(RuntimeError()) self.assertEqual(1, self.tr._conn_lost) class LooplessTransportTests(unittest.TestCase): def 
setUp(self): self.loop = TestLoop() asyncio.set_event_loop(self.loop) self.sock = mock.Mock() self.sock.closed = False self.waiter = asyncio.Future() self.proto = make_test_protocol(aiozmq.ZmqProtocol) self.tr = _ZmqLooplessTransportImpl( self.loop, zmq.SUB, self.sock, self.proto, self.waiter ) self.exc_handler = mock.Mock() self.loop.set_exception_handler(self.exc_handler) def tearDown(self): self.loop.close() asyncio.set_event_loop(None) def test_incomplete_read(self): self.sock.recv_multipart.side_effect = zmq.Again(errno.EAGAIN) self.tr._do_read() self.assertFalse(self.tr._closing) self.assertFalse(self.proto.msg_received.called) def test_bad_read(self): self.sock.recv_multipart.side_effect = zmq.ZMQError(errno.ENOTSOCK) self.tr._do_read() self.assertTrue(self.tr._closing) self.assertFalse(self.proto.msg_received.called) def test_pending_write_without_buffer(self): self.assertFalse(self.tr._buffer) self.tr._do_write() self.assertFalse(self.sock.send_multipart.called) def test_incomplete_pending_write(self): self.tr._buffer = [(4, [b"data"])] self.tr._buffer_size = 4 self.sock.send_multipart.side_effect = zmq.Again(errno.EAGAIN) self.tr._do_write() self.assertEqual(4, self.tr._buffer_size) self.assertEqual([(4, [b"data"])], self.tr._buffer) def test_bad_pending_write(self): self.tr._buffer = [(4, [b"data"])] self.tr._buffer_size = 4 self.sock.send_multipart.side_effect = zmq.ZMQError(errno.ENOTSOCK) self.tr._do_write() self.assertEqual(0, self.tr._buffer_size) self.assertEqual([], self.tr._buffer) self.assertTrue(self.tr._closing) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/version_test.py0000644000076600000240000000166000000000000016233 0ustar00jellestaffimport unittest from aiozmq import _parse_version class VersionTests(unittest.TestCase): def test_alpha(self): self.assertEqual((0, 1, 2, "alpha", 2), _parse_version("0.1.2a2")) self.assertEqual((1, 2, 3, "alpha", 0), _parse_version("1.2.3a")) 
# NOTE(review): this archive chunk carries on method-by-method inside
# VersionTests; the class statement is in the previous chunk.

def test_beta(self):
    for text, expected in (
        ("0.1.2b2", (0, 1, 2, "beta", 2)),
        ("0.1.2b", (0, 1, 2, "beta", 0)),
    ):
        self.assertEqual(expected, _parse_version(text))

def test_rc(self):
    # "c" is reported as a release candidate.
    for text, expected in (
        ("0.1.2c5", (0, 1, 2, "candidate", 5)),
        ("0.1.2c", (0, 1, 2, "candidate", 0)),
    ):
        self.assertEqual(expected, _parse_version(text))

def test_final(self):
    self.assertEqual((0, 1, 2, "final", 0), _parse_version("0.1.2"))

def test_invalid(self):
    # Too short, too long, and an unknown release letter all raise.
    for bad in ("0.1", "0.1.1.2", "0.1.1z2"):
        with self.assertRaises(ImportError):
            _parse_version(bad)

# ---- next archive member: aiozmq-1.0.0/tests/zmq_events_test.py ----

import asyncio
import errno
import os
import sys
import unittest
from unittest import mock

import aiozmq
import zmq

from aiozmq._test_util import check_errno, find_unused_port


class Protocol(aiozmq.ZmqProtocol):
    """Scripted protocol: tracks lifecycle state and queues received frames."""

    def __init__(self, loop):
        self.transport = None
        self.state = "INITIAL"
        self.paused = False
        self.connected = asyncio.Future()
        self.closed = asyncio.Future()
        self.received = asyncio.Queue()

    def connection_made(self, transport):
        self.transport = transport
        # Only a fresh protocol may be connected.
        assert self.state == "INITIAL", self.state
        self.state = "CONNECTED"
        self.connected.set_result(None)

    def connection_lost(self, exc):
        assert self.state == "CONNECTED", self.state
        self.state = "CLOSED"
        self.transport = None
        self.closed.set_result(None)

    def pause_writing(self):
        self.paused = True

    def resume_writing(self):
        self.paused = False

    def msg_received(self, data):
        # Frames arrive as a list, and only while connected.
        assert isinstance(data, list), data
        assert self.state == "CONNECTED", self.state
        self.received.put_nowait(data)

# NOTE(review): BaseZmqEventLoopTestsMixin begins here in the original
# archive, but its first method is cut mid-statement by the chunk boundary
# and is therefore not reproduced in this block.
pr1.state) await pr1.connected tr2, pr2 = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.ROUTER, connect="tcp://127.0.0.1:{}".format(port), ) self.assertEqual("CONNECTED", pr2.state) await pr2.connected return tr1, pr1, tr2, pr2 async def make_pub_sub(self): port = find_unused_port() tr1, pr1 = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.PUB, bind="tcp://127.0.0.1:{}".format(port), ) self.assertEqual("CONNECTED", pr1.state) await pr1.connected tr2, pr2 = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, connect="tcp://127.0.0.1:{}".format(port), ) self.assertEqual("CONNECTED", pr2.state) await pr2.connected return tr1, pr1, tr2, pr2 def test_req_rep(self): async def connect_req(): tr1, pr1 = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.REQ, bind="inproc://test", ) self.assertEqual("CONNECTED", pr1.state) await pr1.connected return tr1, pr1 tr1, pr1 = self.loop.run_until_complete(connect_req()) async def connect_rep(): tr2, pr2 = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.REP, connect="inproc://test", ) self.assertEqual("CONNECTED", pr2.state) await pr2.connected return tr2, pr2 tr2, pr2 = self.loop.run_until_complete(connect_rep()) # Without this, this test hangs for some reason. 
tr2._zmq_sock.getsockopt(zmq.EVENTS) async def communicate(): tr1.write([b"request"]) request = await pr2.received.get() self.assertEqual([b"request"], request) tr2.write([b"answer"]) answer = await pr1.received.get() self.assertEqual([b"answer"], answer) self.loop.run_until_complete(communicate()) async def closing(): tr1.close() tr2.close() await pr1.closed self.assertEqual("CLOSED", pr1.state) await pr2.closed self.assertEqual("CLOSED", pr2.state) self.loop.run_until_complete(closing()) def test_pub_sub(self): async def go(): tr1, pr1, tr2, pr2 = await self.make_pub_sub() tr2.setsockopt(zmq.SUBSCRIBE, b"node_id") for i in range(5): tr1.write([b"node_id", b"publish"]) try: request = await asyncio.wait_for(pr2.received.get(), 0.1) self.assertEqual([b"node_id", b"publish"], request) break except asyncio.TimeoutError: pass else: raise AssertionError("Cannot get message in subscriber") tr1.close() tr2.close() await pr1.closed self.assertEqual("CLOSED", pr1.state) await pr2.closed self.assertEqual("CLOSED", pr2.state) self.loop.run_until_complete(go()) def test_getsockopt(self): port = find_unused_port() async def coro(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port), ) await pr.connected self.assertEqual(zmq.DEALER, tr.getsockopt(zmq.TYPE)) return tr, pr self.loop.run_until_complete(coro()) def test_dealer_router(self): async def go(): tr1, pr1, tr2, pr2 = await self.make_dealer_router() tr1.write([b"request"]) request = await pr2.received.get() self.assertEqual([mock.ANY, b"request"], request) tr2.write([request[0], b"answer"]) answer = await pr1.received.get() self.assertEqual([b"answer"], answer) tr1.close() tr2.close() await pr1.closed self.assertEqual("CLOSED", pr1.state) await pr2.closed self.assertEqual("CLOSED", pr2.state) self.loop.run_until_complete(go()) def test_binds(self): port1 = find_unused_port() port2 = find_unused_port() addr1 = "tcp://127.0.0.1:{}".format(port1) addr2 = 
"tcp://127.0.0.1:{}".format(port2) async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.REQ, bind=[addr1, addr2], ) await pr.connected self.assertEqual({addr1, addr2}, tr.bindings()) addr3 = await tr.bind("tcp://127.0.0.1:*") self.assertEqual({addr1, addr2, addr3}, tr.bindings()) await tr.unbind(addr2) self.assertEqual({addr1, addr3}, tr.bindings()) self.assertIn(addr1, tr.bindings()) self.assertRegex( repr(tr.bindings()), r"{tcp://127.0.0.1:.\d+, tcp://127.0.0.1:\d+}" ) tr.close() self.loop.run_until_complete(connect()) def test_connects(self): port1 = find_unused_port() port2 = find_unused_port() port3 = find_unused_port() addr1 = "tcp://127.0.0.1:{}".format(port1) addr2 = "tcp://127.0.0.1:{}".format(port2) addr3 = "tcp://127.0.0.1:{}".format(port3) async def go(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.REQ, connect=[addr1, addr2], ) await pr.connected self.assertEqual({addr1, addr2}, tr.connections()) await tr.connect(addr3) self.assertEqual({addr1, addr3, addr2}, tr.connections()) await tr.disconnect(addr1) self.assertEqual({addr2, addr3}, tr.connections()) tr.close() self.loop.run_until_complete(go()) def test_zmq_socket(self): zmq_sock = zmq.Context.instance().socket(zmq.PUB) async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.PUB, zmq_sock=zmq_sock ) await pr.connected return tr, pr tr, pr = self.loop.run_until_complete(connect()) self.assertIs(zmq_sock, tr._zmq_sock) self.assertFalse(zmq_sock.closed) tr.close() def test_zmq_socket_invalid_type(self): zmq_sock = zmq.Context.instance().socket(zmq.PUB) async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, zmq_sock=zmq_sock ) await pr.connected return tr, pr with self.assertRaises(ValueError): self.loop.run_until_complete(connect()) self.assertFalse(zmq_sock.closed) def test_create_zmq_connection_ZMQError(self): zmq_sock = 
zmq.Context.instance().socket(zmq.PUB) zmq_sock.close() async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, zmq_sock=zmq_sock ) await pr.connected return tr, pr with self.assertRaises(OSError) as ctx: self.loop.run_until_complete(connect()) self.assertIn(ctx.exception.errno, (zmq.ENOTSUP, zmq.ENOTSOCK)) def test_create_zmq_connection_invalid_bind(self): async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, bind=2 ) with self.assertRaises(ValueError): self.loop.run_until_complete(connect()) def test_create_zmq_connection_invalid_connect(self): async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, connect=2 ) with self.assertRaises(ValueError): self.loop.run_until_complete(connect()) @unittest.skipIf(sys.platform == "win32", "Windows calls abort() on bad socket") def test_create_zmq_connection_closes_socket_on_bad_bind(self): async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, bind="badaddr" ) await pr.connected return tr, pr with self.assertRaises(OSError): self.loop.run_until_complete(connect()) @unittest.skipIf(sys.platform == "win32", "Windows calls abort() on bad socket") def test_create_zmq_connection_closes_socket_on_bad_connect(self): async def connect(): with self.assertRaises(OSError): await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, connect="badaddr", ) self.loop.run_until_complete(connect()) def test_create_zmq_connection_dns_in_connect(self): port = find_unused_port() async def connect(): addr = "tcp://localhost:{}".format(port) tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, connect=addr ) await pr.connected self.assertEqual({addr}, tr.connections()) tr.close() self.loop.run_until_complete(connect()) def test_getsockopt_badopt(self): port = find_unused_port() async def connect(): tr, pr = await 
aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, connect="tcp://127.0.0.1:{}".format(port), ) await pr.connected return tr, pr tr, pr = self.loop.run_until_complete(connect()) with self.assertRaises(OSError) as ctx: tr.getsockopt(1111) # invalid option self.assertEqual(zmq.EINVAL, ctx.exception.errno) def test_setsockopt_badopt(self): port = find_unused_port() async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, connect="tcp://127.0.0.1:{}".format(port), ) await pr.connected return tr, pr tr, pr = self.loop.run_until_complete(connect()) with self.assertRaises(OSError) as ctx: tr.setsockopt(1111, 1) # invalid option self.assertEqual(zmq.EINVAL, ctx.exception.errno) def test_unbind_from_nonbinded_addr(self): port = find_unused_port() addr = "tcp://127.0.0.1:{}".format(port) async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, bind=addr ) await pr.connected self.assertEqual({addr}, tr.bindings()) with self.assertRaises(OSError) as ctx: await tr.unbind("ipc:///some-addr") # non-bound addr # TODO: check travis build and remove skip when test passed. if ctx.exception.errno == zmq.EAGAIN and os.environ.get("TRAVIS"): raise unittest.SkipTest( "Travis has a bug, it returns " "EAGAIN for unknown endpoint" ) self.assertIn(ctx.exception.errno, (errno.ENOENT, zmq.EPROTONOSUPPORT)) self.assertEqual({addr}, tr.bindings()) self.loop.run_until_complete(connect()) def test_disconnect_from_nonbinded_addr(self): port = find_unused_port() addr = "tcp://127.0.0.1:{}".format(port) async def go(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, connect=addr ) await pr.connected self.assertEqual({addr}, tr.connections()) with self.assertRaises(OSError) as ctx: await tr.disconnect("ipc:///some-addr") # non-bound addr # TODO: check travis build and remove skip when test passed. 
if ctx.exception.errno == zmq.EAGAIN and os.environ.get("TRAVIS"): raise unittest.SkipTest( "Travis has a bug, it returns " "EAGAIN for unknown endpoint" ) self.assertIn(ctx.exception.errno, (errno.ENOENT, zmq.EPROTONOSUPPORT)) self.assertEqual({addr}, tr.connections()) self.loop.run_until_complete(go()) def test_subscriptions_of_invalid_socket(self): async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.PUSH, bind="tcp://127.0.0.1:*", ) await pr.connected return tr, pr tr, pr = self.loop.run_until_complete(connect()) self.assertRaises(NotImplementedError, tr.subscribe, b"a") self.assertRaises(NotImplementedError, tr.unsubscribe, b"a") self.assertRaises(NotImplementedError, tr.subscriptions) def test_double_subscribe(self): async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, bind="tcp://127.0.0.1:*", ) await pr.connected return tr, pr tr, pr = self.loop.run_until_complete(connect()) tr.subscribe(b"val") self.assertEqual({b"val"}, tr.subscriptions()) tr.subscribe(b"val") self.assertEqual({b"val"}, tr.subscriptions()) def test_double_unsubscribe(self): async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, bind="tcp://127.0.0.1:*", ) await pr.connected return tr, pr try: tr, pr = self.loop.run_until_complete(connect()) tr.subscribe(b"val") self.assertEqual({b"val"}, tr.subscriptions()) tr.unsubscribe(b"val") self.assertFalse(tr.subscriptions()) tr.unsubscribe(b"val") self.assertFalse(tr.subscriptions()) except OSError as exc: if exc.errno == errno.ENOTSOCK: # I'm sad but ZMQ sometimes throws that error raise unittest.SkipTest("Malformed answer") def test_unsubscribe_unknown_filter(self): async def connect(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.SUB, bind="tcp://127.0.0.1:*", ) await pr.connected return tr, pr tr, pr = self.loop.run_until_complete(connect()) tr.unsubscribe(b"val") 
# NOTE(review): this archive chunk opens inside test_unsubscribe_unknown_filter
# and is cut again inside test_close_closing; only the four methods fully
# visible here are reconstructed (method granularity -- the enclosing mixin's
# class statement is in an earlier chunk).

def test_endpoint_is_not_a_str(self):
    """bind/unbind/connect/disconnect must reject non-string endpoints."""

    async def go():
        tr, pr = await aiozmq.create_zmq_connection(
            lambda: Protocol(self.loop),
            zmq.PUSH,
            bind="tcp://127.0.0.1:*",
        )
        await pr.connected
        # Every endpoint-taking coroutine shares the same type check.
        for meth in (tr.bind, tr.unbind, tr.connect, tr.disconnect):
            with self.assertRaises(TypeError):
                await meth(123)

    self.loop.run_until_complete(go())

def test_transfer_big_data(self):
    """A 26-frame, 26 KB message survives 2000 round trips intact."""

    async def go():
        tr1, pr1, tr2, pr2 = await self.make_dealer_router()
        # Frames b"A"*1000 .. b"Z"*1000, exactly what chr(65..90) encodes.
        payload = [bytes([code]) * 1000 for code in range(65, 65 + 26)]
        for _ in range(2000):
            tr1.write(payload)
            received = await pr2.received.get()
            self.assertEqual([mock.ANY] + payload, received)
        tr1.close()
        tr2.close()

    self.loop.run_until_complete(go())

def test_transfer_big_data_send_after_closing(self):
    """Data buffered before close() is still flushed to the peer."""

    async def go():
        tr1, pr1, tr2, pr2 = await self.make_dealer_router()
        payload = [bytes([code]) * 1000 for code in range(65, 65 + 26)]
        self.assertFalse(pr1.paused)
        for _ in range(10000):
            tr1.write(payload)
        # Not everything fit into the socket: flow control kicked in.
        self.assertTrue(tr1._buffer)
        self.assertTrue(pr1.paused)
        tr1.close()
        for _ in range(10000):
            received = await pr2.received.get()
            self.assertEqual([mock.ANY] + payload, received)
        tr2.close()

    self.loop.run_until_complete(go())

def test_default_event_loop(self):
    """create_zmq_connection picks up the globally installed loop."""
    asyncio.set_event_loop(self.loop)
    port = find_unused_port()
    tr1, pr1 = self.loop.run_until_complete(
        aiozmq.create_zmq_connection(
            lambda: Protocol(self.loop),
            zmq.REQ,
            bind="tcp://127.0.0.1:{}".format(port),
        )
    )
    self.assertIs(self.loop, tr1._loop)
    tr1.close()
self.assertTrue(tr1._closing) def test_pause_reading(self): port = find_unused_port() tr1, pr1 = self.loop.run_until_complete( aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.REQ, bind="tcp://127.0.0.1:{}".format(port), ) ) self.assertFalse(tr1._paused) tr1.pause_reading() self.assertTrue(tr1._paused) tr1.resume_reading() self.assertFalse(tr1._paused) tr1.close() def test_pause_reading_closed(self): port = find_unused_port() tr1, pr1 = self.loop.run_until_complete( aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.REQ, bind="tcp://127.0.0.1:{}".format(port), ) ) tr1.close() with self.assertRaises(RuntimeError): tr1.pause_reading() def test_pause_reading_paused(self): port = find_unused_port() tr1, pr1 = self.loop.run_until_complete( aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.REQ, bind="tcp://127.0.0.1:{}".format(port), ) ) tr1.pause_reading() self.assertTrue(tr1._paused) with self.assertRaises(RuntimeError): tr1.pause_reading() tr1.close() def test_resume_reading_not_paused(self): port = find_unused_port() tr1, pr1 = self.loop.run_until_complete( aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.REQ, bind="tcp://127.0.0.1:{}".format(port), ) ) with self.assertRaises(RuntimeError): tr1.resume_reading() tr1.close() @mock.patch("aiozmq.core.logger") def test_warning_on_connection_lost(self, m_log): port = find_unused_port() tr1, pr1 = self.loop.run_until_complete( aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.REQ, bind="tcp://127.0.0.1:{}".format(port), ) ) self.assertEqual(0, tr1._conn_lost) tr1.LOG_THRESHOLD_FOR_CONNLOST_WRITES = 2 tr1.close() self.assertEqual(1, tr1._conn_lost) tr1.write([b"data"]) self.assertEqual(2, tr1._conn_lost) self.assertFalse(m_log.warning.called) tr1.write([b"data"]) self.assertEqual(3, tr1._conn_lost) m_log.warning.assert_called_with("write to closed ZMQ socket.") def test_close_on_error(self): port = find_unused_port() tr1, pr1 = self.loop.run_until_complete( 
aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.REQ, bind="tcp://127.0.0.1:{}".format(port), ) ) handler = mock.Mock() self.loop.set_exception_handler(handler) sock = tr1.get_extra_info("zmq_socket") sock.close() tr1.write([b"data"]) self.assertTrue(tr1._closing) handler.assert_called_with( self.loop, { "protocol": pr1, "exception": mock.ANY, "transport": tr1, "message": "Fatal write error on zmq socket transport", }, ) # expecting 'Socket operation on non-socket' if sys.platform == "darwin": errno = 38 else: errno = 88 check_errno(errno, handler.call_args[0][1]["exception"]) def test_double_force_close(self): port = find_unused_port() tr1, pr1 = self.loop.run_until_complete( aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.REQ, bind="tcp://127.0.0.1:{}".format(port), ) ) handler = mock.Mock() self.loop.set_exception_handler(handler) err = RuntimeError("error") tr1._fatal_error(err) tr1._fatal_error(err) self.loop.run_until_complete(pr1.closed) def test___repr__(self): port = find_unused_port() async def coro(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port), ) await pr.connected self.assertRegex( repr(tr), "]+> " "type=DEALER read=idle write=>", ) tr.close() self.loop.run_until_complete(coro()) def test_extra_zmq_type(self): port = find_unused_port() async def coro(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port), ) await pr.connected self.assertEqual(zmq.DEALER, tr.get_extra_info("zmq_type")) tr.close() self.loop.run_until_complete(coro()) @unittest.skipIf( zmq.zmq_version_info() < (4,) or zmq.pyzmq_version_info() < ( 14, 4, ), "Socket monitor requires libzmq >= 4 and pyzmq >= 14.4", ) def test_implicit_monitor_disable(self): async def go(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER ) await pr.connected await tr.enable_monitor() 
tr.close() await pr.closed self.assertIsNone(tr._monitor) self.loop.run_until_complete(go()) @unittest.skipIf( zmq.zmq_version_info() < (4,) or zmq.pyzmq_version_info() < ( 14, 4, ), "Socket monitor requires libzmq >= 4 and pyzmq >= 14.4", ) def test_force_close_monitor(self): async def go(): tr, pr = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER ) await pr.connected await tr.enable_monitor() tr.abort() await pr.closed self.assertIsNone(tr._monitor) self.loop.run_until_complete(go()) class ZmqEventLoopTests(BaseZmqEventLoopTestsMixin, unittest.TestCase): def setUp(self): self.loop = aiozmq.ZmqEventLoop() asyncio.set_event_loop(self.loop) def tearDown(self): self.loop.close() asyncio.set_event_loop(None) # zmq.Context.instance().term() class ZmqLooplessTests(BaseZmqEventLoopTestsMixin, unittest.TestCase): def setUp(self): self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) def tearDown(self): self.loop.close() asyncio.set_event_loop(None) # zmq.Context.instance().term() def test_unsubscribe_from_fd_on_error(self): port = find_unused_port() tr1, pr1 = self.loop.run_until_complete( aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.REQ, bind="tcp://127.0.0.1:{}".format(port), ) ) handler = mock.Mock() self.loop.set_exception_handler(handler) sock = tr1.get_extra_info("zmq_socket") sock.close() tr1.write([b"data"]) with self.assertRaises(KeyError): self.loop._selector.get_key(tr1._fd) class ZmqEventLoopExternalContextTests(unittest.TestCase): def setUp(self): self.ctx = zmq.Context() self.loop = aiozmq.ZmqEventLoop(zmq_context=self.ctx) asyncio.set_event_loop(self.loop) def tearDown(self): self.loop.close() asyncio.set_event_loop(None) self.ctx.term() def test_using_external_zmq_context(self): port = find_unused_port() async def go(): st, sp = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.ROUTER, bind="tcp://127.0.0.1:{}".format(port), ) await sp.connected addr = list(st.bindings())[0] 
ct, cp = await aiozmq.create_zmq_connection( lambda: Protocol(self.loop), zmq.DEALER, connect=addr ) await cp.connected ct.close() await cp.closed st.close() await sp.closed self.loop.run_until_complete(go()) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1661917169.0 aiozmq-1.0.0/tests/zmq_stream_test.py0000644000076600000240000003725100000000000016735 0ustar00jellestaffimport unittest import asyncio import aiozmq import zmq from unittest import mock from aiozmq.core import SocketEvent from aiozmq._test_util import check_errno, find_unused_port from aiozmq.rpc.base import ensure_future ZMQ_EVENTS = [getattr(zmq, attr) for attr in dir(zmq) if attr.startswith("EVENT_")] class ZmqStreamTests(unittest.TestCase): def setUp(self): self.loop = aiozmq.ZmqEventLoop() asyncio.set_event_loop(None) def tearDown(self): self.loop.close() def test_req_rep(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port) ) s2 = await aiozmq.create_zmq_stream( zmq.ROUTER, connect="tcp://127.0.0.1:{}".format(port) ) s1.write([b"request"]) req = await s2.read() self.assertEqual([mock.ANY, b"request"], req) s2.write([req[0], b"answer"]) answer = await s1.read() self.assertEqual([b"answer"], answer) self.loop.run_until_complete(go()) def test_closed(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port) ) s2 = await aiozmq.create_zmq_stream( zmq.ROUTER, connect="tcp://127.0.0.1:{}".format(port) ) self.assertFalse(s2.at_closing()) s2.close() s1.write([b"request"]) with self.assertRaises(aiozmq.ZmqStreamClosed): await s2.read() self.assertTrue(s2.at_closing()) self.loop.run_until_complete(go()) def test_transport(self): async def go(): s1 = await aiozmq.create_zmq_stream(zmq.DEALER, bind="tcp://127.0.0.1:*") self.assertIsInstance(s1.transport, aiozmq.ZmqTransport) s1.close() with 
self.assertRaises(aiozmq.ZmqStreamClosed): await s1.read() self.assertIsNone(s1.transport) self.loop.run_until_complete(go()) def test_get_extra_info(self): async def go(): s1 = await aiozmq.create_zmq_stream(zmq.DEALER, bind="tcp://127.0.0.1:*") self.assertIsInstance(s1.get_extra_info("zmq_socket"), zmq.Socket) self.loop.run_until_complete(go()) def test_exception(self): async def go(): s1 = await aiozmq.create_zmq_stream(zmq.DEALER, bind="tcp://127.0.0.1:*") self.assertIsNone(s1.exception()) self.loop.run_until_complete(go()) def test_default_loop(self): asyncio.set_event_loop(self.loop) async def go(): s1 = await aiozmq.create_zmq_stream(zmq.DEALER, bind="tcp://127.0.0.1:*") s1.close() self.loop.run_until_complete(go()) def test_set_read_buffer_limits1(self): async def go(): s1 = await aiozmq.create_zmq_stream(zmq.DEALER, bind="tcp://127.0.0.1:*") s1.set_read_buffer_limits(low=10) self.assertEqual(10, s1._low_water) self.assertEqual(40, s1._high_water) s1.close() self.loop.run_until_complete(go()) def test_set_read_buffer_limits2(self): async def go(): s1 = await aiozmq.create_zmq_stream(zmq.DEALER, bind="tcp://127.0.0.1:*") s1.set_read_buffer_limits(high=60) self.assertEqual(15, s1._low_water) self.assertEqual(60, s1._high_water) s1.close() self.loop.run_until_complete(go()) def test_set_read_buffer_limits3(self): async def go(): s1 = await aiozmq.create_zmq_stream(zmq.DEALER, bind="tcp://127.0.0.1:*") with self.assertRaises(ValueError): s1.set_read_buffer_limits(high=1, low=2) s1.close() self.loop.run_until_complete(go()) def test_pause_reading(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port) ) s2 = await aiozmq.create_zmq_stream( zmq.ROUTER, connect="tcp://127.0.0.1:{}".format(port) ) s2.set_read_buffer_limits(high=5) s1.write([b"request"]) await asyncio.sleep(0.01) self.assertTrue(s2._paused) msg = await s2.read() self.assertEqual([mock.ANY, b"request"], msg) 
self.assertFalse(s2._paused) self.loop.run_until_complete(go()) def test_set_exception(self): async def go(): s1 = await aiozmq.create_zmq_stream(zmq.DEALER, bind="tcp://127.0.0.1:*") exc = RuntimeError("some exc") s1.set_exception(exc) self.assertIs(exc, s1.exception()) with self.assertRaisesRegex(RuntimeError, "some exc"): await s1.read() self.loop.run_until_complete(go()) def test_set_exception_with_waiter(self): async def go(): s1 = await aiozmq.create_zmq_stream(zmq.DEALER, bind="tcp://127.0.0.1:*") async def f(): await s1.read() t1 = ensure_future(f()) # to run f() up to await await asyncio.sleep(0.001) self.assertIsNotNone(s1._waiter) exc = RuntimeError("some exc") s1.set_exception(exc) self.assertIs(exc, s1.exception()) with self.assertRaisesRegex(RuntimeError, "some exc"): await s1.read() t1.cancel() self.loop.run_until_complete(go()) def test_set_exception_with_cancelled_waiter(self): async def go(): s1 = await aiozmq.create_zmq_stream(zmq.DEALER, bind="tcp://127.0.0.1:*") async def f(): await s1.read() t1 = ensure_future(f()) # to run f() up to await await asyncio.sleep(0.001) self.assertIsNotNone(s1._waiter) t1.cancel() exc = RuntimeError("some exc") s1.set_exception(exc) self.assertIs(exc, s1.exception()) with self.assertRaisesRegex(RuntimeError, "some exc"): await s1.read() self.loop.run_until_complete(go()) def test_double_reading(self): async def go(): s1 = await aiozmq.create_zmq_stream(zmq.DEALER, bind="tcp://127.0.0.1:*") async def f(): await s1.read() t1 = ensure_future(f()) # to run f() up to await await asyncio.sleep(0.001) with self.assertRaises(RuntimeError): await s1.read() t1.cancel() self.loop.run_until_complete(go()) def test_close_on_reading(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port) ) async def f(): await s1.read() t1 = ensure_future(f()) # to run f() up to await await asyncio.sleep(0.001) s1.close() await asyncio.sleep(0.001) with 
self.assertRaises(aiozmq.ZmqStreamClosed): t1.result() self.loop.run_until_complete(go()) def test_close_on_cancelled_reading(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port) ) async def f(): await s1.read() t1 = ensure_future(f()) # to run f() up to await await asyncio.sleep(0.001) t1.cancel() s1.feed_closing() await asyncio.sleep(0.001) with self.assertRaises(asyncio.CancelledError): t1.result() self.loop.run_until_complete(go()) def test_feed_cancelled_msg(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port) ) async def f(): await s1.read() t1 = ensure_future(f()) # to run f() up to await await asyncio.sleep(0.001) t1.cancel() s1.feed_msg([b"data"]) await asyncio.sleep(0.001) with self.assertRaises(asyncio.CancelledError): t1.result() self.assertEqual(4, s1._queue_len) self.assertEqual((4, [b"data"]), s1._queue.popleft()) self.loop.run_until_complete(go()) def test_error_on_read(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.REP, bind="tcp://127.0.0.1:{}".format(port) ) handler = mock.Mock() self.loop.set_exception_handler(handler) s1.write([b"data"]) with self.assertRaises(OSError) as ctx: await s1.read() check_errno(zmq.EFSM, ctx.exception) with self.assertRaises(OSError) as ctx2: await s1.drain() check_errno(zmq.EFSM, ctx2.exception) self.loop.run_until_complete(go()) def test_drain(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.REP, bind="tcp://127.0.0.1:{}".format(port) ) await s1.drain() self.loop.run_until_complete(go()) def test_pause_resume_connection(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port) ) self.assertFalse(s1._paused) s1._protocol.pause_writing() self.assertTrue(s1._protocol._paused) 
s1._protocol.resume_writing() self.assertFalse(s1._protocol._paused) s1.close() self.loop.run_until_complete(go()) def test_resume_paused_with_drain(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port) ) self.assertFalse(s1._paused) s1._protocol.pause_writing() async def f(): await s1.drain() fut = ensure_future(f()) await asyncio.sleep(0.01) self.assertTrue(s1._protocol._paused) s1._protocol.resume_writing() self.assertFalse(s1._protocol._paused) await fut s1.close() self.loop.run_until_complete(go()) def test_close_paused_connection(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port) ) s1._protocol.pause_writing() s1.close() self.loop.run_until_complete(go()) def test_close_paused_with_drain(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port) ) self.assertFalse(s1._paused) s1._protocol.pause_writing() async def f(): await s1.drain() fut = ensure_future(f()) await asyncio.sleep(0.01) s1.close() await fut self.loop.run_until_complete(go()) def test_drain_after_closing(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port) ) s1.close() await asyncio.sleep(0) with self.assertRaises(ConnectionResetError): await s1.drain() self.loop.run_until_complete(go()) def test_exception_after_drain(self): port = find_unused_port() async def go(): s1 = await aiozmq.create_zmq_stream( zmq.DEALER, bind="tcp://127.0.0.1:{}".format(port) ) self.assertFalse(s1._paused) s1._protocol.pause_writing() async def f(): await s1.drain() fut = ensure_future(f()) await asyncio.sleep(0.01) exc = RuntimeError("exception") s1._protocol.connection_lost(exc) with self.assertRaises(RuntimeError) as cm: await fut self.assertIs(cm.exception, exc) 
self.loop.run_until_complete(go()) def test_double_read_of_closed_stream(self): port = find_unused_port() async def go(): s2 = await aiozmq.create_zmq_stream( zmq.ROUTER, connect="tcp://127.0.0.1:{}".format(port) ) self.assertFalse(s2.at_closing()) s2.close() with self.assertRaises(aiozmq.ZmqStreamClosed): await s2.read() self.assertTrue(s2.at_closing()) with self.assertRaises(aiozmq.ZmqStreamClosed): await s2.read() self.assertTrue(s2.at_closing()) self.loop.run_until_complete(go()) @unittest.skipIf( zmq.zmq_version_info() < (4,) or zmq.pyzmq_version_info() < ( 14, 4, ), "Socket monitor requires libzmq >= 4 and pyzmq >= 14.4", ) def test_monitor(self): port = find_unused_port() async def go(): addr = "tcp://127.0.0.1:{}".format(port) s1 = await aiozmq.create_zmq_stream(zmq.ROUTER, bind=addr) async def f(s, events): try: while True: event = await s.read_event() events.append(event) except aiozmq.ZmqStreamClosed: pass s2 = await aiozmq.create_zmq_stream(zmq.DEALER) events = [] t = ensure_future(f(s2, events)) await s2.transport.enable_monitor() await s2.transport.connect(addr) await s2.transport.disconnect(addr) await s2.transport.connect(addr) s2.write([b"request"]) req = await s1.read() self.assertEqual([mock.ANY, b"request"], req) s1.write([req[0], b"answer"]) answer = await s2.read() self.assertEqual([b"answer"], answer) s2.close() s1.close() await t # Confirm that the events received by the monitor were valid. 
# NOTE(review): this archive chunk opens midway through
# ZmqStreamTests.test_monitor (the event-validation loop and its
# run_until_complete call); that method begins in the previous chunk and is
# not reproduced here. The two complete methods and the module entry point
# follow.

def test_default_events_backlog(self):
    """Without events_backlog the monitor-event queue holds 100 entries."""

    async def go():
        stream = await aiozmq.create_zmq_stream(
            zmq.DEALER, bind="tcp://127.0.0.1:*"
        )
        self.assertEqual(100, stream._event_queue.maxlen)

    self.loop.run_until_complete(go())

def test_custom_events_backlog(self):
    """events_backlog=1 bounds the monitor-event queue to a single entry."""

    async def go():
        stream = await aiozmq.create_zmq_stream(
            zmq.DEALER, bind="tcp://127.0.0.1:*", events_backlog=1
        )
        self.assertEqual(1, stream._event_queue.maxlen)

    self.loop.run_until_complete(go())


if __name__ == "__main__":
    unittest.main()