pax_global_header00006660000000000000000000000064130150363370014513gustar00rootroot0000000000000052 comment=3cc5bfb1ff43fc968558d608934fac1034bf5941 pyzmq-16.0.2/000077500000000000000000000000001301503633700127615ustar00rootroot00000000000000pyzmq-16.0.2/.gitignore000066400000000000000000000004571301503633700147570ustar00rootroot00000000000000*.pyc zmq/backend/cython/*.c zmq/devices/*.c zmq/utils/*.json zmq/include/*.h __pycache__ build dist conf bundled *.egg-info *.so *.pyd *.dll *.dylib docs/source/api/generated docs/gh-pages setup.cfg MANIFEST .tox examples/security/public_keys examples/security/private_keys wheelhouse .coverage .cache pyzmq-16.0.2/.mailmap000066400000000000000000000013341301503633700144030ustar00rootroot00000000000000Brian E. Granger Brian Granger Chris Laws Chris Laws Daniel Lundin Daniel Lundin Min Ragan-Kelley Min RK Min Ragan-Kelley MinRK Michel Pelletier Michel Pelletier Nicholas Piël nicholas Felipe Cruz felipecruz Felipe Cruz Felipe cruz Yannick Hold Yannick Hold pyzmq-16.0.2/.travis.yml000066400000000000000000000026031301503633700150730ustar00rootroot00000000000000language: python cache: apt pip python: - 2.7 - 3.5 - pypy env: - ZMQ= - ZMQ=bundled before_install: - sudo add-apt-repository -y ppa:anton+/dnscrypt - sudo apt-get update - | if [[ $ZMQ != bundled ]]; then sudo apt-get install -y -qq libzmq3-dev libsodium-dev fi - | if [[ $TRAVIS_PYTHON_VERSION != pypy* ]]; then pip install -q cython --install-option="--no-cython-compile" fi - | if [[ ! -z "$ZMQ" && $ZMQ != bundled ]]; then wget https://github.com/zeromq/$ZMQ/archive/master.zip -O libzmq.zip unzip libzmq.zip pushd "$ZMQ-master" ./autogen.sh ./configure make -j sudo make install sudo ldconfig popd export ZMQ=/usr/local fi - pip install -r test-requirements.txt install: - python setup.py build_ext --inplace --zmq=$ZMQ matrix: include: - python: 3.6-dev env: ZMQ=bundled - python: 3.5 env: ZMQ=libzmq - python: 3.4 env: ZMQ=zeromq4-x - python: 3.4 env: ZMQ=zeromq4-1 - python: 3.4 env: ZMQ=zeromq3-x - python: 3.3 env: ZMQ= # FIXME: pypy3 is still on unsupported Python 3.2 # - python: pypy3 # env: ZMQ= - python: nightly env: ZMQ= - python: nightly env: ZMQ=bundled allow_failures: - env: ZMQ=libzmq - python: nightly script: travis_retry python setup.py test pyzmq-16.0.2/AUTHORS.md000066400000000000000000000113111301503633700144250ustar00rootroot00000000000000## Authors This project was started and continues to be led by Brian E. Granger (ellisonbg AT gmail DOT com). Min Ragan-Kelley (benjaminrk AT gmail DOT com) is the primary developer of pyzmq at this time. The following people have contributed to the project: - Alexander Else (alexander DOT else AT team DOT telstra DOT com) - Alexander Pyhalov (apyhalov AT gmail DOT com) - Alexandr Emelin (frvzmb AT gmail DOT com) - Amr Ali (amr AT ledgerx DOT com) - Andre Caron (andre DOT l DOT caron AT gmail DOT com) - Andrea Crotti (andrea DOT crotti DOT 0 AT gmail DOT com) - Andrew Gwozdziewycz (git AT apgwoz DOT com) - Baptiste Lepilleur (baptiste DOT lepilleur AT gmail DOT com) - Brandyn A. White (bwhite AT dappervision DOT com) - Brian E. Granger (ellisonbg AT gmail DOT com) - Brian Hoffman (hoffman_brian AT bah DOT com) - Carlos A. 
Rocha (carlos DOT rocha AT gmail DOT com) - Chris Laws (clawsicus AT gmail DOT com) - Christian Wyglendowski (christian AT bu DOT mp) - Christoph Gohlke (cgohlke AT uci DOT edu) - Curtis (curtis AT tinbrain DOT net) - Cyril Holweck (cyril DOT holweck AT free DOT fr) - Dan Colish (dcolish AT gmail DOT com) - Daniel Lundin (dln AT eintr DOT org) - Daniel Truemper (truemped AT googlemail DOT com) - Douglas Creager (douglas DOT creager AT redjack DOT com) - Eduardo Stalinho (eduardooc DOT 86 AT gmail DOT com) - Eren Güven (erenguven0 AT gmail DOT com) - Erick Tryzelaar (erick DOT tryzelaar AT gmail DOT com) - Erik Tollerud (erik DOT tollerud AT gmail DOT com) - FELD Boris (lothiraldan AT gmail DOT com) - Fantix King (fantix DOT king AT gmail DOT com) - Felipe Cruz (felipecruz AT loogica DOT net) - Fernando Perez (Fernando DOT Perez AT berkeley DOT edu) - Frank Wiles (frank AT revsys DOT com) - Félix-Antoine Fortin (felix DOT antoine DOT fortin AT gmail DOT com) - Gavrie Philipson (gavriep AT il DOT ibm DOT com) - Godefroid Chapelle (gotcha AT bubblenet DOT be) - Greg Banks (gbanks AT mybasis DOT com) - Greg Ward (greg AT gerg DOT ca) - Guido Goldstein (github AT a-nugget DOT de) - Ian Lee (IanLee1521 AT gmail DOT com) - Ionuț Arțăriși (ionut AT artarisi DOT eu) - Ivo Danihelka (ivo AT danihelka DOT net) - Iyed (iyed DOT bennour AT gmail DOT com) - Jim Garrison (jim AT garrison DOT cc) - John Gallagher (johnkgallagher AT gmail DOT com) - Julian Taylor (jtaylor DOT debian AT googlemail DOT com) - Justin Bronder (jsbronder AT gmail DOT com) - Justin Riley (justin DOT t DOT riley AT gmail DOT com) - Marc Abramowitz (marc AT marc-abramowitz DOT com) - Matthew Aburn (mattja6 AT gmail DOT com) - Michel Pelletier (pelletier DOT michel AT gmail DOT com) - Michel Zou (xantares09 AT hotmail DOT com) - Min Ragan-Kelley (benjaminrk AT gmail DOT com) - Nell Hardcastle (nell AT dev-nell DOT com) - Nicholas Pilkington (nicholas DOT pilkington AT gmail DOT com) - Nicholas Piël (nicholas AT nichol DOT as) - Nick Pellegrino (npellegrino AT mozilla DOT com) - Nicolas Delaby (nicolas DOT delaby AT ezeep DOT com) - Ondrej Certik (ondrej AT certik DOT cz) - Paul Colomiets (paul AT colomiets DOT name) - Pawel Jasinski (pawel DOT jasinski AT gmail DOT com) - Phus Lu (phus DOT lu AT gmail DOT com) - Robert Buchholz (rbu AT goodpoint DOT de) - Robert Jordens (jordens AT gmail DOT com) - Ryan Cox (ryan DOT a DOT cox AT gmail DOT com) - Ryan Kelly (ryan AT rfk DOT id DOT au) - Scott Maxwell (scott AT codecobblers DOT com) - Scott Sadler (github AT mashi DOT org) - Simon Knight (simon DOT knight AT gmail DOT com) - Stefan Friesel (sf AT cloudcontrol DOT de) - Stefan van der Walt (stefan AT sun DOT ac DOT za) - Stephen Diehl (stephen DOT m DOT diehl AT gmail DOT com) - Sylvain Corlay (scorlay AT bloomberg DOT net) - Thomas Kluyver (takowl AT gmail DOT com) - Thomas Spura (tomspur AT fedoraproject DOT org) - Tigger Bear (Tigger AT Tiggers-Mac-mini DOT local) - Torsten Landschoff (torsten DOT landschoff AT dynamore DOT de) - Vadim Markovtsev (v DOT markovtsev AT samsung DOT com) - Yannick Hold (yannickhold AT gmail DOT com) - Zbigniew Jędrzejewski-Szmek (zbyszek AT in DOT waw DOT pl) - hugo shi (hugoshi AT bleb2 DOT (none)) - jdgleeson (jdgleeson AT mac DOT com) - kyledj (kyle AT bucebuce DOT com) - spez (steve AT hipmunk DOT com) - stu (stuart DOT axon AT jpcreative DOT co DOT uk) - xantares (xantares AT fujitsu-l64 DOT (none)) as reported by: git log --all --format='- %aN (%aE)' | sort -u | sed 's/@/ AT /1' | sed -e 
's/\.\([^ ]\)/ DOT \1/g' with some adjustments. ### Not in git log - Brandon Craig-Rhodes (brandon AT rhodesmill DOT org) - Eugene Chernyshov (chernyshov DOT eugene AT gmail DOT com) - Craig Austin (craig DOT austin AT gmail DOT com) ### gevent\_zeromq, now zmq.green - Travis Cline (travis DOT cline AT gmail DOT com) - Ryan Kelly (ryan AT rfk DOT id DOT au) - Zachary Voase (z AT zacharyvoase DOT com) pyzmq-16.0.2/CONTRIBUTING.md000066400000000000000000000061531301503633700152170ustar00rootroot00000000000000# Opening an Issue For a good bug report: 1. [Search][] for existing Issues, both on GitHub and in general with Google/Stack Overflow before posting a duplicate question. 2. Update to pyzmq master, if possible, especially if you are already using git. It's possible that the bug you are about to report has already been fixed. Many things reported as pyzmq Issues are often just libzmq-related, and don't have anything to do with pyzmq itself. These are better directed to [zeromq-dev][]. When making a bug report, it is helpful to tell us as much as you can about your system (such as pyzmq version, libzmq version, Python version, OS Version, how you built/installed pyzmq and libzmq, etc.) The basics: ```python import sys import zmq print "libzmq-%s" % zmq.zmq_version() print "pyzmq-%s" % zmq.pyzmq_version() print "Python-%s" % sys.version ``` Which will give something like: libzmq-3.3.0 pyzmq-2.2dev Python-2.7.2 (default, Jun 20 2012, 16:23:33) [GCC 4.2.1 Compatible Apple Clang 4.0 (tags/Apple/clang-418.0.60)] [search]: https://github.com/zeromq/pyzmq/issues [zeromq-dev]: mailto:zeromq-dev@zeromq.org # Licensing and contributing to PyZMQ PyZMQ uses different licenses for different parts of the code. The 'core' of PyZMQ (located in zmq/core) is licensed under LGPLv3. This just means that if you make any changes to how that code works, you must release those changes under the LGPL. If you just *use* pyzmq, then you can use any license you want for your own code. We don't feel that the restrictions imposed by the LGPL make sense for the 'non-core' functionality in pyzmq (derivative code must *also* be LGPL or GPL), especially for examples and utility code, so we have relicensed all 'non-core' code under the more permissive BSD (specifically Modified BSD aka New BSD aka 3-clause BSD), where possible. This means that you can copy this code and build your own apps without needing to license your own code with the LGPL or GPL. ## Your contributions **Pull Requests are welcome!** When you contribute to PyZMQ, your contributions are made under the same license as the file you are working on. Any new, original code should be BSD licensed. We don't enforce strict style, but when in doubt [PEP8][] is a good guideline. The only thing we really don't like is mixing up 'cleanup' in real work. Examples are copyright their respective authors, and BSD unless otherwise specified by the author. You can LGPL (or GPL or MIT or Apache, etc.) your own new examples if you like, but we strongly encourage using the default BSD license. [PEP8]: http://www.python.org/dev/peps/pep-0008 ## Inherited licenses in pyzmq Some code outside the core is taken from other open-source projects, and inherits that project's license. 
* zmq/eventloop contains files inherited and adapted from [tornado][], and inherits the Apache license * zmq/ssh/forward.py is from [paramiko][], and inherits LGPL * zmq/devices/monitoredqueue.pxd is derived from the zmq_device function in libzmq, and inherits LGPL * perf examples are (c) iMatix, and LGPL [tornado]: http://www.tornadoweb.org [paramiko]: http://www.lag.net/paramikopyzmq-16.0.2/COPYING.BSD000066400000000000000000000030741301503633700144270ustar00rootroot00000000000000PyZMQ is licensed under the terms of the Modified BSD License (also known as New or Revised BSD), as follows: Copyright (c) 2009-2012, Brian Granger, Min Ragan-Kelley All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of PyZMQ nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pyzmq-16.0.2/COPYING.LESSER000066400000000000000000000204601301503633700150120ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". 
The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. -------------------------------------------------------------------------------- SPECIAL EXCEPTION GRANTED BY COPYRIGHT HOLDERS As a special exception, copyright holders give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you must extend this exception to your version of the library. 
Note: this exception relieves you of any obligations under sections 4 and 5 of this license, and section 6 of the GNU General Public License. pyzmq-16.0.2/MANIFEST.in000066400000000000000000000016751301503633700145300ustar00rootroot00000000000000include COPYING.BSD include COPYING.LESSER include CONTRIBUTING.md include MANIFEST.in include README.md include AUTHORS.md include setup.cfg.template include setup.cfg.android include setup.py include setupegg.py include zmqversion.py include tox.ini include .travis.yml graft docs graft tools prune docs/build prune docs/gh-pages include bundled/zeromq/COPYING graft bundled/zeromq/include graft bundled/zeromq/src graft bundled/zeromq/tweetnacl include bundled/zeromq/builds/msvc/platform.hpp exclude bundled/zeromq/src/Makefile* exclude bundled/zeromq/src/platform.hpp graft buildutils graft examples graft zmq graft perf exclude setup.cfg exclude zmq/libzmq* # exclude docs/_static # exclude docs/_templates global-exclude __pycache__/* global-exclude .deps/* global-exclude *.so global-exclude *.pyd global-exclude *.pyc global-exclude .git* global-exclude .DS_Store global-exclude .mailmap global-exclude Makefile.am global-exclude Makefile.in pyzmq-16.0.2/README.md000066400000000000000000000072531301503633700142470ustar00rootroot00000000000000# PyZMQ: Python bindings for ØMQ [![Build Status](https://travis-ci.org/zeromq/pyzmq.svg?branch=master)](https://travis-ci.org/zeromq/pyzmq) [![Windows Build status](https://ci.appveyor.com/api/projects/status/ugoid0r2fnq8sr56/branch/master?svg=true)](https://ci.appveyor.com/project/minrk/pyzmq/branch/master) This package contains Python bindings for [ØMQ](http://www.zeromq.org). ØMQ is a lightweight and fast messaging implementation. PyZMQ should work with any reasonable version of Python (≥ 3.4), as well as Python 2.7 and 3.3, as well as PyPy. The Cython backend used by CPython supports libzmq ≥ 2.1.4 (including 3.2.x and 4.x), but the CFFI backend used by PyPy only supports libzmq ≥ 3.2.2 (including 4.x). For a summary of changes to pyzmq, see our [changelog](https://pyzmq.readthedocs.org/en/latest/changelog.html). ### ØMQ 3.x, 4.x PyZMQ fully supports the 3.x and 4.x APIs of libzmq, developed at [zeromq/libzmq](https://github.com/zeromq/libzmq). No code to change, no flags to pass, just build pyzmq against the latest and it should work. PyZMQ does not support the old libzmq 2 API on PyPy. ## Documentation See PyZMQ's Sphinx-generated [documentation](https://zeromq.github.io/pyzmq) on GitHub for API details, and some notes on Python and Cython development. If you want to learn about using ØMQ in general, the excellent [ØMQ Guide](http://zguide.zeromq.org/py:all) is the place to start, which has a Python version of every example. We also have some information on our [wiki](https://github.com/zeromq/pyzmq/wiki). ## Downloading Unless you specifically want to develop PyZMQ, we recommend downloading the PyZMQ source code or wheels from [PyPI](https://pypi.io/projects/pyzmq), or install with conda. You can also get the latest source code from our GitHub repository, but building from the repository will require that you install recent Cython. ## Building and installation For more detail on building pyzmq, see [our Wiki](https://github.com/zeromq/pyzmq/wiki/Building-and-Installing-PyZMQ). We build wheels for OS X, Windows, and Linux, so you can get a binary on those platforms with: pip install pyzmq but compiling from source with `pip install pyzmq` should work in most environments. 
Especially on OS X, make sure you are using the latest pip (≥ 8), or it may not find the right wheels. If the wheel doesn't work for some reason, or you want to force pyzmq to be compiled (this is often preferable if you already have libzmq installed and configured the way you want it), you can force installation with: pip install --no-use-wheel pyzmq When compiling pyzmq (e.g. installing with pip on Linux), it is generally recommended that zeromq be installed separately, via homebrew, apt, yum, etc: # Debian-based sudo apt-get install libzmq3-dev # RHEL-based sudo yum install libzmq3-devel If this is not available, pyzmq will *try* to build libzmq as a Python Extension, though this is not guaranteed to work. Building pyzmq from the git repo (including release tags on GitHub) requires Cython. ## Old versions pyzmq 16 drops support Python 2.6 and 3.2. If you need to use one of those Python versions, you can pin your pyzmq version to before 16: pip install 'pyzmq<16' For libzmq 2.0.x, use 'pyzmq<2.1' pyzmq-2.1.11 was the last version of pyzmq to support Python 2.5, and pyzmq ≥ 2.2.0 requires Python ≥ 2.6. pyzmq-13.0.0 introduces PyPy support via CFFI, which only supports libzmq-3.2.2 and newer. PyZMQ releases ≤ 2.2.0 matched libzmq versioning, but this is no longer the case, starting with PyZMQ 13.0.0 (it was the thirteenth release, so why not?). PyZMQ ≥ 13.0 follows semantic versioning conventions accounting only for PyZMQ itself. pyzmq-16.0.2/appveyor.yml000066400000000000000000000047031301503633700153550ustar00rootroot00000000000000# mostly copied from environment: global: # SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the # /E:ON and /V:ON options are not enabled in the batch script intepreter # See: http://stackoverflow.com/a/13751649/163740 CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\tools\\run_with_env.cmd" matrix: - PYTHON: "C:\\Python35" PYTHON_VERSION: "3.5.x" PYTHON_ARCH: 32 - PYTHON: "C:\\Python35-x64" PYTHON_VERSION: "3.5.x" PYTHON_ARCH: 64 - PYTHON: "C:\\Python27" PYTHON_VERSION: "2.7.x" PYTHON_ARCH: 32 - PYTHON: "C:\\Python27-x64" PYTHON_VERSION: "2.7.x" PYTHON_ARCH: 64 - PYTHON: "C:\\Python34" PYTHON_VERSION: "3.4.x" PYTHON_ARCH: 32 - PYTHON: "C:\\Python34-x64" PYTHON_VERSION: "3.4.x" PYTHON_ARCH: 64 matrix: fast_finish: true install: # If there is a newer build queued for the same PR, cancel this one. # The AppVeyor 'rollout builds' option is supposed to serve the same # purpose but it is problematic because it tends to cancel builds pushed # directly to master instead of just PR builds (or the converse). # credits: JuliaLang developers. - ECHO "Filesystem root:" - ps: "ls \"C:/\"" - ECHO "Installed SDKs:" - ps: "ls \"C:/Program Files/Microsoft SDKs/Windows\"" # Prepend newly installed Python to the PATH of this build (this cannot be # done from inside the powershell script as it would require to restart # the parent CMD process). - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" # Check that we have the expected version and architecture for Python - "python --version" - "python -c \"import struct; print(struct.calcsize('P') * 8)\"" # Upgrade to the latest version of pip to avoid it displaying warnings # about it being out of date. 
- "%CMD_IN_ENV% pip install --disable-pip-version-check --user --upgrade pip setuptools wheel cython" build_script: # Build the compiled extension - "%CMD_IN_ENV% python setup.py bdist_wheel --zmq=bundled" - ps: "ls dist" - forfiles /p dist /m *.whl /c "cmd /c pip install @path" test_script: # Run the project tests - ps: cmd /c 'cd dist & python -c "import zmq; print(zmq.zmq_version())"' after_test: # If tests are successful, create binary packages for the project. - ps: "ls dist" artifacts: # Archive the generated packages in the ci.appveyor.com build report. - path: dist\* #on_success: # - TODO: upload the content of dist/*.whl to a public wheelhouse #pyzmq-16.0.2/buildutils/000077500000000000000000000000001301503633700151415ustar00rootroot00000000000000pyzmq-16.0.2/buildutils/__init__.py000066400000000000000000000002761301503633700172570ustar00rootroot00000000000000"""utilities for building pyzmq. Largely adapted from h5py """ from .msg import * from .config import * from .detect import * from .bundle import * from .misc import * from .patch import *pyzmq-16.0.2/buildutils/bundle.py000066400000000000000000000147351301503633700167760ustar00rootroot00000000000000"""utilities for fetching build dependencies.""" #----------------------------------------------------------------------------- # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. # # This bundling code is largely adapted from pyzmq-static's get.sh by # Brandon Craig-Rhodes, which is itself BSD licensed. #----------------------------------------------------------------------------- import os import shutil import stat import sys import tarfile import hashlib from subprocess import Popen, PIPE try: # py2 from urllib2 import urlopen except ImportError: # py3 from urllib.request import urlopen from .msg import fatal, debug, info, warn pjoin = os.path.join #----------------------------------------------------------------------------- # Constants #----------------------------------------------------------------------------- bundled_version = (4,1,6) vs = '%i.%i.%i' % bundled_version libzmq = "zeromq-%s.tar.gz" % vs libzmq_url = "https://github.com/zeromq/zeromq{major}-{minor}/releases/download/v{vs}/{libzmq}".format( major=bundled_version[0], minor=bundled_version[1], vs=vs, libzmq=libzmq, ) libzmq_checksum = "sha256:02ebf60a43011e770799336365bcbce2eb85569e9b5f52aa0d8cc04672438a0a" HERE = os.path.dirname(__file__) ROOT = os.path.dirname(HERE) #----------------------------------------------------------------------------- # Utilities #----------------------------------------------------------------------------- def untgz(archive): return archive.replace('.tar.gz', '') def localpath(*args): """construct an absolute path from a list relative to the root pyzmq directory""" plist = [ROOT] + list(args) return os.path.abspath(pjoin(*plist)) def checksum_file(scheme, path): """Return the checksum (hex digest) of a file""" h = getattr(hashlib, scheme)() with open(path, 'rb') as f: chunk = f.read(65535) while chunk: h.update(chunk) chunk = f.read(65535) return h.hexdigest() def fetch_archive(savedir, url, fname, checksum, force=False): """download an archive to a specific location""" dest = pjoin(savedir, fname) scheme, digest_ref = checksum.split(':') if os.path.exists(dest) and not force: info("already have %s" % dest) digest = checksum_file(scheme, fname) if digest == digest_ref: return dest else: warn("but checksum %s != %s, redownloading." 
% (digest, digest_ref)) os.remove(fname) info("fetching %s into %s" % (url, savedir)) if not os.path.exists(savedir): os.makedirs(savedir) req = urlopen(url) with open(dest, 'wb') as f: f.write(req.read()) digest = checksum_file(scheme, dest) if digest != digest_ref: fatal("%s %s mismatch:\nExpected: %s\nActual : %s" % ( dest, scheme, digest_ref, digest)) return dest #----------------------------------------------------------------------------- # libzmq #----------------------------------------------------------------------------- def fetch_libzmq(savedir): """download and extract libzmq""" dest = pjoin(savedir, 'zeromq') if os.path.exists(dest): info("already have %s" % dest) return path = fetch_archive(savedir, libzmq_url, fname=libzmq, checksum=libzmq_checksum) tf = tarfile.open(path) with_version = pjoin(savedir, tf.firstmember.path) tf.extractall(savedir) tf.close() # remove version suffix: shutil.move(with_version, dest) def stage_platform_hpp(zmqroot): """stage platform.hpp into libzmq sources Tries ./configure first (except on Windows), then falls back on included platform.hpp previously generated. """ platform_hpp = pjoin(zmqroot, 'src', 'platform.hpp') if os.path.exists(platform_hpp): info("already have platform.hpp") return if os.name == 'nt': # stage msvc platform header platform_dir = pjoin(zmqroot, 'builds', 'msvc') else: info("attempting ./configure to generate platform.hpp") p = Popen('./configure', cwd=zmqroot, shell=True, stdout=PIPE, stderr=PIPE, ) o,e = p.communicate() if p.returncode: warn("failed to configure libzmq:\n%s" % e) if sys.platform == 'darwin': platform_dir = pjoin(HERE, 'include_darwin') elif sys.platform.startswith('freebsd'): platform_dir = pjoin(HERE, 'include_freebsd') elif sys.platform.startswith('linux-armv'): platform_dir = pjoin(HERE, 'include_linux-armv') else: platform_dir = pjoin(HERE, 'include_linux') else: return info("staging platform.hpp from: %s" % platform_dir) shutil.copy(pjoin(platform_dir, 'platform.hpp'), platform_hpp) def copy_and_patch_libzmq(ZMQ, libzmq): """copy libzmq into source dir, and patch it if necessary. This command is necessary prior to running a bdist on Linux or OS X. """ if sys.platform.startswith('win'): return # copy libzmq into zmq for bdist local = localpath('zmq',libzmq) if not ZMQ and not os.path.exists(local): fatal("Please specify zmq prefix via `setup.py configure --zmq=/path/to/zmq` " "or copy libzmq into zmq/ manually prior to running bdist.") try: # resolve real file through symlinks lib = os.path.realpath(pjoin(ZMQ, 'lib', libzmq)) print ("copying %s -> %s"%(lib, local)) shutil.copy(lib, local) except Exception: if not os.path.exists(local): fatal("Could not copy libzmq into zmq/, which is necessary for bdist. 
" "Please specify zmq prefix via `setup.py configure --zmq=/path/to/zmq` " "or copy libzmq into zmq/ manually.") if sys.platform == 'darwin': # chmod u+w on the lib, # which can be user-read-only for some reason mode = os.stat(local).st_mode os.chmod(local, mode | stat.S_IWUSR) # patch install_name on darwin, instead of using rpath cmd = ['install_name_tool', '-id', '@loader_path/../%s'%libzmq, local] try: p = Popen(cmd, stdout=PIPE,stderr=PIPE) except OSError: fatal("install_name_tool not found, cannot patch libzmq for bundling.") out,err = p.communicate() if p.returncode: fatal("Could not patch bundled libzmq install_name: %s"%err, p.returncode) pyzmq-16.0.2/buildutils/check_sys_un.c000066400000000000000000000002531301503633700177620ustar00rootroot00000000000000#include #include "sys/un.h" int main(int argc, char **argv) { struct sockaddr_un *dummy; printf("%lu\n", sizeof(dummy->sun_path) - 1); return 0; } pyzmq-16.0.2/buildutils/config.py000066400000000000000000000106031301503633700167600ustar00rootroot00000000000000"""Config functions""" #----------------------------------------------------------------------------- # Copyright (C) PyZMQ Developers # # This file is part of pyzmq, copied and adapted from h5py. # h5py source used under the New BSD license # # h5py: # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import sys import os import json try: from configparser import ConfigParser except: from ConfigParser import ConfigParser pjoin = os.path.join from .msg import debug, fatal, warn #----------------------------------------------------------------------------- # Utility functions (adapted from h5py: http://h5py.googlecode.com) #----------------------------------------------------------------------------- def load_config(name, base='conf'): """Load config dict from JSON""" fname = pjoin(base, name + '.json') if not os.path.exists(fname): return {} try: with open(fname) as f: cfg = json.load(f) except Exception as e: warn("Couldn't load %s: %s" % (fname, e)) cfg = {} return cfg def save_config(name, data, base='conf'): """Save config dict to JSON""" if not os.path.exists(base): os.mkdir(base) fname = pjoin(base, name+'.json') with open(fname, 'w') as f: json.dump(data, f, indent=2) def v_str(v_tuple): """turn (2,0,1) into '2.0.1'.""" return ".".join(str(x) for x in v_tuple) def get_env_args(): """ Look for options in environment vars """ settings = {} zmq = os.environ.get("ZMQ_PREFIX", None) if zmq is not None: debug("Found environ var ZMQ_PREFIX=%s" % zmq) settings['zmq_prefix'] = zmq return settings def cfg2dict(cfg): """turn a ConfigParser into a nested dict because ConfigParser objects are dumb. 
""" d = {} for section in cfg.sections(): d[section] = dict(cfg.items(section)) return d def get_cfg_args(): """ Look for options in setup.cfg """ if not os.path.exists('setup.cfg'): return {} cfg = ConfigParser() cfg.read('setup.cfg') cfg = cfg2dict(cfg) g = cfg.setdefault('global', {}) # boolean keys: for key in ['libzmq_extension', 'bundle_libzmq_dylib', 'no_libzmq_extension', 'have_sys_un_h', 'skip_check_zmq', 'bundle_msvcp', ]: if key in g: g[key] = eval(g[key]) # globals go to top level cfg.update(cfg.pop('global')) return cfg def config_from_prefix(prefix): """Get config from zmq prefix""" settings = {} if prefix.lower() in ('default', 'auto', ''): settings['zmq_prefix'] = '' settings['libzmq_extension'] = False settings['no_libzmq_extension'] = False elif prefix.lower() in ('bundled', 'extension'): settings['zmq_prefix'] = '' settings['libzmq_extension'] = True settings['no_libzmq_extension'] = False else: settings['zmq_prefix'] = prefix settings['libzmq_extension'] = False settings['no_libzmq_extension'] = True settings['allow_legacy_libzmq'] = True # explicit zmq prefix allows legacy return settings def merge(into, d): """merge two containers into is updated, d has priority """ if isinstance(into, dict): for key in d.keys(): if key not in into: into[key] = d[key] else: into[key] = merge(into[key], d[key]) return into elif isinstance(into, list): return into + d else: return d def discover_settings(conf_base=None): """ Discover custom settings for ZMQ path""" settings = { 'zmq_prefix': '', 'libzmq_extension': False, 'no_libzmq_extension': False, 'skip_check_zmq': False, 'allow_legacy_libzmq': False, 'bundle_msvcp': None, 'build_ext': {}, 'bdist_egg': {}, } if sys.platform.startswith('win'): settings['have_sys_un_h'] = False if conf_base: # lowest priority merge(settings, load_config('config', conf_base)) merge(settings, get_cfg_args()) merge(settings, get_env_args()) return settings pyzmq-16.0.2/buildutils/constants.py000066400000000000000000000050221301503633700175260ustar00rootroot00000000000000""" script for generating files that involve repetitive updates for zmq constants. Run this after updating utils/constant_names Currently generates the following files from templates: - constant_enums.pxi - constants.pxi - zmq_constants.h """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import os import sys from . 
import info pjoin = os.path.join root = os.path.abspath(pjoin(os.path.dirname(__file__), os.path.pardir)) sys.path.insert(0, pjoin(root, 'zmq', 'utils')) from constant_names import all_names, no_prefix ifndef_t = """#ifndef {0} #define {0} (_PYZMQ_UNDEFINED) #endif """ def cython_enums(): """generate `enum: ZMQ_CONST` block for constant_enums.pxi""" lines = [] for name in all_names: if no_prefix(name): lines.append('enum: ZMQ_{0} "{0}"'.format(name)) else: lines.append('enum: ZMQ_{0}'.format(name)) return dict(ZMQ_ENUMS='\n '.join(lines)) def ifndefs(): """generate `#ifndef ZMQ_CONST` block for zmq_constants.h""" lines = ['#define _PYZMQ_UNDEFINED (-9999)'] for name in all_names: if not no_prefix(name): name = 'ZMQ_%s' % name lines.append(ifndef_t.format(name)) return dict(ZMQ_IFNDEFS='\n'.join(lines)) def constants_pyx(): """generate CONST = ZMQ_CONST and __all__ for constants.pxi""" all_lines = [] assign_lines = [] for name in all_names: if name == "NULL": # avoid conflict with NULL in Cython assign_lines.append("globals()['NULL'] = ZMQ_NULL") else: assign_lines.append('{0} = ZMQ_{0}'.format(name)) all_lines.append(' "{0}",'.format(name)) return dict(ASSIGNMENTS='\n'.join(assign_lines), ALL='\n'.join(all_lines)) def generate_file(fname, ns_func, dest_dir="."): """generate a constants file from its template""" with open(pjoin(root, 'buildutils', 'templates', '%s' % fname), 'r') as f: tpl = f.read() out = tpl.format(**ns_func()) dest = pjoin(dest_dir, fname) info("generating %s from template" % dest) with open(dest, 'w') as f: f.write(out) def render_constants(): """render generated constant files from templates""" generate_file("constant_enums.pxi", cython_enums, pjoin(root, 'zmq', 'backend', 'cython')) generate_file("constants.pxi", constants_pyx, pjoin(root, 'zmq', 'backend', 'cython')) generate_file("zmq_constants.h", ifndefs, pjoin(root, 'zmq', 'utils')) if __name__ == '__main__': render_constants() pyzmq-16.0.2/buildutils/detect.py000066400000000000000000000107651301503633700167740ustar00rootroot00000000000000"""Detect zmq version""" #----------------------------------------------------------------------------- # Copyright (C) PyZMQ Developers # # This file is part of pyzmq, copied and adapted from h5py. # h5py source used under the New BSD license # # h5py: # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. 
#----------------------------------------------------------------------------- import shutil import sys import os import logging import platform from distutils import ccompiler from subprocess import Popen, PIPE from .misc import get_compiler, get_output_error from .patch import patch_lib_paths pjoin = os.path.join #----------------------------------------------------------------------------- # Utility functions (adapted from h5py: http://h5py.googlecode.com) #----------------------------------------------------------------------------- def test_compilation(cfile, compiler=None, **compiler_attrs): """Test simple compilation with given settings""" cc = get_compiler(compiler, **compiler_attrs) efile, ext = os.path.splitext(cfile) cpreargs = lpreargs = None if sys.platform == 'darwin': # use appropriate arch for compiler if platform.architecture()[0]=='32bit': if platform.processor() == 'powerpc': cpu = 'ppc' else: cpu = 'i386' cpreargs = ['-arch', cpu] lpreargs = ['-arch', cpu, '-undefined', 'dynamic_lookup'] else: # allow for missing UB arch, since it will still work: lpreargs = ['-undefined', 'dynamic_lookup'] if sys.platform == 'sunos5': if platform.architecture()[0]=='32bit': lpreargs = ['-m32'] else: lpreargs = ['-m64'] extra = compiler_attrs.get('extra_compile_args', None) objs = cc.compile([cfile], extra_preargs=cpreargs, extra_postargs=extra) cc.link_executable(objs, efile, extra_preargs=lpreargs) return efile def compile_and_run(basedir, src, compiler=None, **compiler_attrs): if not os.path.exists(basedir): os.makedirs(basedir) cfile = pjoin(basedir, os.path.basename(src)) shutil.copy(src, cfile) try: cc = get_compiler(compiler, **compiler_attrs) efile = test_compilation(cfile, compiler=cc) patch_lib_paths(efile, cc.library_dirs) result = Popen(efile, stdout=PIPE, stderr=PIPE) so, se = result.communicate() # for py3k: so = so.decode() se = se.decode() finally: shutil.rmtree(basedir) return result.returncode, so, se def detect_zmq(basedir, compiler=None, **compiler_attrs): """Compile, link & execute a test program, in empty directory `basedir`. The C compiler will be updated with any keywords given via setattr. Parameters ---------- basedir : path The location where the test program will be compiled and run compiler : str The distutils compiler key (e.g. 'unix', 'msvc', or 'mingw32') **compiler_attrs : dict Any extra compiler attributes, which will be set via ``setattr(cc)``. Returns ------- A dict of properties for zmq compilation, with the following two keys: vers : tuple The ZMQ version as a tuple of ints, e.g. (2,2,0) settings : dict The compiler options used to compile the test function, e.g. `include_dirs`, `library_dirs`, `libs`, etc. 
""" cfile = pjoin(basedir, 'vers.c') shutil.copy(pjoin(os.path.dirname(__file__), 'vers.c'), cfile) # check if we need to link against Realtime Extensions library if sys.platform.startswith('linux'): cc = ccompiler.new_compiler(compiler=compiler) cc.output_dir = basedir if not cc.has_function('timer_create'): compiler_attrs['libraries'].append('rt') cc = get_compiler(compiler=compiler, **compiler_attrs) efile = test_compilation(cfile, compiler=cc) patch_lib_paths(efile, cc.library_dirs) rc, so, se = get_output_error([efile]) if rc: msg = "Error running version detection script:\n%s\n%s" % (so,se) logging.error(msg) raise IOError(msg) handlers = {'vers': lambda val: tuple(int(v) for v in val.split('.'))} props = {} for line in (x for x in so.split('\n') if x): key, val = line.split(':') props[key] = handlers[key](val) return props pyzmq-16.0.2/buildutils/dummy.c000066400000000000000000000001321301503633700164340ustar00rootroot00000000000000// empty file, just to test compilation int main(int argc, char **argv){ return 0; } pyzmq-16.0.2/buildutils/include_darwin/000077500000000000000000000000001301503633700201305ustar00rootroot00000000000000pyzmq-16.0.2/buildutils/include_darwin/platform.hpp000066400000000000000000000174301301503633700224720ustar00rootroot00000000000000/* src/platform.hpp. Generated from platform.hpp.in by configure. */ /* src/platform.hpp.in. Generated from configure.ac by autoheader. */ /* Define to 1 if you have the header file. */ #define HAVE_ALLOCA_H 1 /* Define to 1 if you have the header file. */ #define HAVE_ARPA_INET_H 1 /* Define to 1 if you have the `clock_gettime' function. */ /* #undef HAVE_CLOCK_GETTIME */ /* Define to 1 if you have the declaration of `LOCAL_PEERCRED', and to 0 if you don't. */ #define HAVE_DECL_LOCAL_PEERCRED 0 /* Define to 1 if you have the declaration of `SO_PEERCRED', and to 0 if you don't. */ #define HAVE_DECL_SO_PEERCRED 0 /* Define to 1 if you have the header file. */ #define HAVE_DLFCN_H 1 /* Define to 1 if you have the header file. */ #define HAVE_ERRNO_H 1 /* Define to 1 if you have the `fork' function. */ #define HAVE_FORK 1 /* Define to 1 if you have the `freeifaddrs' function. */ #define HAVE_FREEIFADDRS 1 /* Define to 1 if you have the `gethrtime' function. */ /* #undef HAVE_GETHRTIME */ /* Define to 1 if you have the `getifaddrs' function. */ #define HAVE_GETIFADDRS 1 /* Define to 1 if you have the `gettimeofday' function. */ #define HAVE_GETTIMEOFDAY 1 /* Define to 1 if you have the header file. */ #define HAVE_IFADDRS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the `gssapi_krb5' library (-lgssapi_krb5). */ /* #undef HAVE_LIBGSSAPI_KRB5 */ /* Define to 1 if you have the `iphlpapi' library (-liphlpapi). */ /* #undef HAVE_LIBIPHLPAPI */ /* Define to 1 if you have the `nsl' library (-lnsl). */ /* #undef HAVE_LIBNSL */ /* Define to 1 if you have the `pthread' library (-lpthread). */ #define HAVE_LIBPTHREAD 1 /* Define to 1 if you have the `rpcrt4' library (-lrpcrt4). */ /* #undef HAVE_LIBRPCRT4 */ /* Define to 1 if you have the `rt' library (-lrt). */ /* #undef HAVE_LIBRT */ /* Define to 1 if you have the `socket' library (-lsocket). */ /* #undef HAVE_LIBSOCKET */ /* The libsodium library is to be used. */ /* #undef HAVE_LIBSODIUM */ /* Define to 1 if you have the `ws2_32' library (-lws2_32). */ /* #undef HAVE_LIBWS2_32 */ /* Define to 1 if you have the header file. */ #define HAVE_LIMITS_H 1 /* Define to 1 if you have the header file. 
*/ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the `memset' function. */ #define HAVE_MEMSET 1 /* Define to 1 if you have the header file. */ #define HAVE_NETINET_IN_H 1 /* Define to 1 if you have the header file. */ #define HAVE_NETINET_TCP_H 1 /* Define to 1 if you have the `perror' function. */ #define HAVE_PERROR 1 /* Define to 1 if you have the `socket' function. */ #define HAVE_SOCKET 1 /* Define to 1 if stdbool.h conforms to C99. */ #define HAVE_STDBOOL_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDDEF_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRING_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_EVENTFD_H */ /* Define to 1 if you have the header file. */ #define HAVE_SYS_SOCKET_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TYPES_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_UIO_H 1 /* Define to 1 if you have the header file. */ #define HAVE_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_UNISTD_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_WINDOWS_H */ /* Define to 1 if the system has the type `_Bool'. */ /* #undef HAVE__BOOL */ /* Define to the sub-directory in which libtool stores uninstalled libraries. */ #define LT_OBJDIR ".libs/" /* Name of package */ #define PACKAGE "zeromq" /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "zeromq-dev@lists.zeromq.org" /* Define to the full name of this package. */ #define PACKAGE_NAME "zeromq" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "zeromq 4.1.1" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "zeromq" /* Define to the home page for this package. */ #define PACKAGE_URL "" /* Define to the version of this package. */ #define PACKAGE_VERSION "4.1.1" /* Define as the return type of signal handlers (`int' or `void'). */ #define RETSIGTYPE void /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* Define to 1 if you can safely include both and . */ #define TIME_WITH_SYS_TIME 1 /* Version number of package */ #define VERSION "4.1.1" /* Enable militant API assertions */ /* #undef ZMQ_ACT_MILITANT */ /* Force to use mutexes */ /* #undef ZMQ_FORCE_MUTEXES */ /* Have AIX OS */ /* #undef ZMQ_HAVE_AIX */ /* Have Android OS */ /* #undef ZMQ_HAVE_ANDROID */ /* Have Cygwin */ /* #undef ZMQ_HAVE_CYGWIN */ /* Have eventfd extension. */ /* #undef ZMQ_HAVE_EVENTFD */ /* Have FreeBSD OS */ /* #undef ZMQ_HAVE_FREEBSD */ /* Have HPUX OS */ /* #undef ZMQ_HAVE_HPUX */ /* Have ifaddrs.h header. 
*/ #define ZMQ_HAVE_IFADDRS 1 /* Have Linux OS */ /* #undef ZMQ_HAVE_LINUX */ /* Have LOCAL_PEERCRED socket option */ /* #undef ZMQ_HAVE_LOCAL_PEERCRED */ /* Have MinGW32 */ /* #undef ZMQ_HAVE_MINGW32 */ /* Have NetBSD OS */ /* #undef ZMQ_HAVE_NETBSD */ /* Have NORM protocol extension */ /* #undef ZMQ_HAVE_NORM */ /* Have OpenBSD OS */ /* #undef ZMQ_HAVE_OPENBSD */ /* Have OpenPGM extension */ /* #undef ZMQ_HAVE_OPENPGM */ /* Have DarwinOSX OS */ #define ZMQ_HAVE_OSX 1 /* Have QNX Neutrino OS */ /* #undef ZMQ_HAVE_QNXNTO */ /* Whether SOCK_CLOEXEC is defined and functioning. */ /* #undef ZMQ_HAVE_SOCK_CLOEXEC */ /* Have Solaris OS */ /* #undef ZMQ_HAVE_SOLARIS */ /* Whether SO_KEEPALIVE is supported. */ #define ZMQ_HAVE_SO_KEEPALIVE 1 /* Have SO_PEERCRED socket option */ /* #undef ZMQ_HAVE_SO_PEERCRED */ /* Whether TCP_KEEPALIVE is supported. */ #define ZMQ_HAVE_TCP_KEEPALIVE 1 /* Whether TCP_KEEPCNT is supported. */ /* #undef ZMQ_HAVE_TCP_KEEPCNT */ /* Whether TCP_KEEPIDLE is supported. */ /* #undef ZMQ_HAVE_TCP_KEEPIDLE */ /* Whether TCP_KEEPINTVL is supported. */ /* #undef ZMQ_HAVE_TCP_KEEPINTVL */ /* Have TIPC support */ /* #undef ZMQ_HAVE_TIPC */ /* Have uio.h header. */ #define ZMQ_HAVE_UIO 1 /* Have Windows OS */ /* #undef ZMQ_HAVE_WINDOWS */ /* Define for Solaris 2.5.1 so the uint32_t typedef from , , or is not used. If the typedef were allowed, the #define below would cause a syntax error. */ /* #undef _UINT32_T */ /* Define to empty if `const' does not conform to ANSI C. */ /* #undef const */ /* Define to `__inline__' or `__inline' if that's what the C compiler calls it, or to nothing if 'inline' is not supported under any name. */ #ifndef __cplusplus /* #undef inline */ #endif /* Define to `unsigned int' if does not define. */ /* #undef size_t */ /* Define to `int' if does not define. */ /* #undef ssize_t */ /* Define to the type of an unsigned integer type of width exactly 32 bits if such a type exists and the standard includes do not define it. */ /* #undef uint32_t */ /* Define to empty if the keyword `volatile' does not work. Warning: valid code using `volatile' can become incorrect without. Disable with care. */ /* #undef volatile */ pyzmq-16.0.2/buildutils/include_freebsd/000077500000000000000000000000001301503633700202565ustar00rootroot00000000000000pyzmq-16.0.2/buildutils/include_freebsd/platform.hpp000066400000000000000000000174171301503633700226250ustar00rootroot00000000000000/* src/platform.hpp. Generated from platform.hpp.in by configure. */ /* src/platform.hpp.in. Generated from configure.ac by autoheader. */ /* Define to 1 if you have the header file. */ /* #undef HAVE_ALLOCA_H */ /* Define to 1 if you have the header file. */ #define HAVE_ARPA_INET_H 1 /* Define to 1 if you have the `clock_gettime' function. */ #define HAVE_CLOCK_GETTIME 1 /* Define to 1 if you have the declaration of `LOCAL_PEERCRED', and to 0 if you don't. */ #define HAVE_DECL_LOCAL_PEERCRED 0 /* Define to 1 if you have the declaration of `SO_PEERCRED', and to 0 if you don't. */ #define HAVE_DECL_SO_PEERCRED 0 /* Define to 1 if you have the header file. */ #define HAVE_DLFCN_H 1 /* Define to 1 if you have the header file. */ #define HAVE_ERRNO_H 1 /* Define to 1 if you have the `fork' function. */ #define HAVE_FORK 1 /* Define to 1 if you have the `freeifaddrs' function. */ #define HAVE_FREEIFADDRS 1 /* Define to 1 if you have the `gethrtime' function. */ /* #undef HAVE_GETHRTIME */ /* Define to 1 if you have the `getifaddrs' function. 
*/ #define HAVE_GETIFADDRS 1 /* Define to 1 if you have the `gettimeofday' function. */ #define HAVE_GETTIMEOFDAY 1 /* Define to 1 if you have the header file. */ #define HAVE_IFADDRS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the `gssapi_krb5' library (-lgssapi_krb5). */ /* #undef HAVE_LIBGSSAPI_KRB5 */ /* Define to 1 if you have the `iphlpapi' library (-liphlpapi). */ /* #undef HAVE_LIBIPHLPAPI */ /* Define to 1 if you have the `nsl' library (-lnsl). */ /* #undef HAVE_LIBNSL */ /* Define to 1 if you have the `pthread' library (-lpthread). */ #define HAVE_LIBPTHREAD 1 /* Define to 1 if you have the `rpcrt4' library (-lrpcrt4). */ /* #undef HAVE_LIBRPCRT4 */ /* Define to 1 if you have the `rt' library (-lrt). */ #define HAVE_LIBRT 1 /* Define to 1 if you have the `socket' library (-lsocket). */ /* #undef HAVE_LIBSOCKET */ /* The libsodium library is to be used. */ /* #undef HAVE_LIBSODIUM */ /* Define to 1 if you have the `ws2_32' library (-lws2_32). */ /* #undef HAVE_LIBWS2_32 */ /* Define to 1 if you have the header file. */ #define HAVE_LIMITS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the `memset' function. */ #define HAVE_MEMSET 1 /* Define to 1 if you have the header file. */ #define HAVE_NETINET_IN_H 1 /* Define to 1 if you have the header file. */ #define HAVE_NETINET_TCP_H 1 /* Define to 1 if you have the `perror' function. */ #define HAVE_PERROR 1 /* Define to 1 if you have the `socket' function. */ #define HAVE_SOCKET 1 /* Define to 1 if stdbool.h conforms to C99. */ /* #undef HAVE_STDBOOL_H */ /* Define to 1 if you have the header file. */ #define HAVE_STDDEF_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRING_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_EVENTFD_H */ /* Define to 1 if you have the header file. */ #define HAVE_SYS_SOCKET_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TYPES_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_UIO_H 1 /* Define to 1 if you have the header file. */ #define HAVE_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_UNISTD_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_WINDOWS_H */ /* Define to 1 if the system has the type `_Bool'. */ /* #undef HAVE__BOOL */ /* Define to the sub-directory in which libtool stores uninstalled libraries. */ #define LT_OBJDIR ".libs/" /* Name of package */ #define PACKAGE "zeromq" /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "zeromq-dev@lists.zeromq.org" /* Define to the full name of this package. */ #define PACKAGE_NAME "zeromq" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "zeromq 4.1.1" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "zeromq" /* Define to the home page for this package. */ #define PACKAGE_URL "" /* Define to the version of this package. */ #define PACKAGE_VERSION "4.1.1" /* Define as the return type of signal handlers (`int' or `void'). 
*/ #define RETSIGTYPE void /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* Define to 1 if you can safely include both and . */ #define TIME_WITH_SYS_TIME 1 /* Version number of package */ #define VERSION "4.1.1" /* Enable militant API assertions */ /* #undef ZMQ_ACT_MILITANT */ /* Force to use mutexes */ /* #undef ZMQ_FORCE_MUTEXES */ /* Have AIX OS */ /* #undef ZMQ_HAVE_AIX */ /* Have Android OS */ /* #undef ZMQ_HAVE_ANDROID */ /* Have Cygwin */ /* #undef ZMQ_HAVE_CYGWIN */ /* Have eventfd extension. */ /* #undef ZMQ_HAVE_EVENTFD */ /* Have FreeBSD OS */ #define ZMQ_HAVE_FREEBSD 1 /* Have HPUX OS */ /* #undef ZMQ_HAVE_HPUX */ /* Have ifaddrs.h header. */ #define ZMQ_HAVE_IFADDRS 1 /* Have Linux OS */ /* #undef ZMQ_HAVE_LINUX */ /* Have LOCAL_PEERCRED socket option */ /* #undef ZMQ_HAVE_LOCAL_PEERCRED */ /* Have MinGW32 */ /* #undef ZMQ_HAVE_MINGW32 */ /* Have NetBSD OS */ /* #undef ZMQ_HAVE_NETBSD */ /* Have NORM protocol extension */ /* #undef ZMQ_HAVE_NORM */ /* Have OpenBSD OS */ /* #undef ZMQ_HAVE_OPENBSD */ /* Have OpenPGM extension */ /* #undef ZMQ_HAVE_OPENPGM */ /* Have DarwinOSX OS */ /* #undef ZMQ_HAVE_OSX */ /* Have QNX Neutrino OS */ /* #undef ZMQ_HAVE_QNXNTO */ /* Whether SOCK_CLOEXEC is defined and functioning. */ #define ZMQ_HAVE_SOCK_CLOEXEC 1 /* Have Solaris OS */ /* #undef ZMQ_HAVE_SOLARIS */ /* Whether SO_KEEPALIVE is supported. */ #define ZMQ_HAVE_SO_KEEPALIVE 1 /* Have SO_PEERCRED socket option */ /* #undef ZMQ_HAVE_SO_PEERCRED */ /* Whether TCP_KEEPALIVE is supported. */ /* #undef ZMQ_HAVE_TCP_KEEPALIVE */ /* Whether TCP_KEEPCNT is supported. */ #define ZMQ_HAVE_TCP_KEEPCNT 1 /* Whether TCP_KEEPIDLE is supported. */ #define ZMQ_HAVE_TCP_KEEPIDLE 1 /* Whether TCP_KEEPINTVL is supported. */ #define ZMQ_HAVE_TCP_KEEPINTVL 1 /* Have TIPC support */ /* #undef ZMQ_HAVE_TIPC */ /* Have uio.h header. */ #define ZMQ_HAVE_UIO 1 /* Have Windows OS */ /* #undef ZMQ_HAVE_WINDOWS */ /* Define for Solaris 2.5.1 so the uint32_t typedef from , , or is not used. If the typedef were allowed, the #define below would cause a syntax error. */ /* #undef _UINT32_T */ /* Define to empty if `const' does not conform to ANSI C. */ /* #undef const */ /* Define to `__inline__' or `__inline' if that's what the C compiler calls it, or to nothing if 'inline' is not supported under any name. */ #ifndef __cplusplus /* #undef inline */ #endif /* Define to `unsigned int' if does not define. */ /* #undef size_t */ /* Define to `int' if does not define. */ /* #undef ssize_t */ /* Define to the type of an unsigned integer type of width exactly 32 bits if such a type exists and the standard includes do not define it. */ /* #undef uint32_t */ /* Define to empty if the keyword `volatile' does not work. Warning: valid code using `volatile' can become incorrect without. Disable with care. */ /* #undef volatile */ pyzmq-16.0.2/buildutils/include_linux-armv/000077500000000000000000000000001301503633700207465ustar00rootroot00000000000000pyzmq-16.0.2/buildutils/include_linux-armv/platform.hpp000066400000000000000000000172261301503633700233130ustar00rootroot00000000000000/* src/platform.hpp. Generated from platform.hpp.in by configure. */ /* src/platform.hpp.in. Generated from configure.ac by autoheader. */ /* Define to 1 if you have the header file. */ #define HAVE_ALLOCA_H 1 /* Define to 1 if you have the header file. */ #define HAVE_ARPA_INET_H 1 /* Define to 1 if you have the `clock_gettime' function. 
*/ #define HAVE_CLOCK_GETTIME 1 /* Define to 1 if you have the declaration of `LOCAL_PEERCRED', and to 0 if you don't. */ #define HAVE_DECL_LOCAL_PEERCRED 0 /* Define to 1 if you have the declaration of `SO_PEERCRED', and to 0 if you don't. */ #define HAVE_DECL_SO_PEERCRED 1 /* Define to 1 if you have the header file. */ #define HAVE_DLFCN_H 1 /* Define to 1 if you have the header file. */ #define HAVE_ERRNO_H 1 /* Define to 1 if you have the `freeifaddrs' function. */ /* #undef HAVE_FREEIFADDRS */ /* Define to 1 if you have the `gethrtime' function. */ /* #undef HAVE_GETHRTIME */ /* Define to 1 if you have the `getifaddrs' function. */ /* #undef HAVE_GETIFADDRS */ /* Define to 1 if you have the `gettimeofday' function. */ #define HAVE_GETTIMEOFDAY 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_IFADDRS_H */ /* Define to 1 if you have the header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the `gssapi_krb5' library (-lgssapi_krb5). */ /* #undef HAVE_LIBGSSAPI_KRB5 */ /* Define to 1 if you have the `iphlpapi' library (-liphlpapi). */ /* #undef HAVE_LIBIPHLPAPI */ /* Define to 1 if you have the `nsl' library (-lnsl). */ /* #undef HAVE_LIBNSL */ /* Define to 1 if you have the `pthread' library (-lpthread). */ /* #undef HAVE_LIBPTHREAD */ /* Define to 1 if you have the `rpcrt4' library (-lrpcrt4). */ /* #undef HAVE_LIBRPCRT4 */ /* Define to 1 if you have the `rt' library (-lrt). */ /* #undef HAVE_LIBRT */ /* Define to 1 if you have the `socket' library (-lsocket). */ /* #undef HAVE_LIBSOCKET */ /* The libsodium library is to be used. */ /* #undef HAVE_LIBSODIUM */ /* Define to 1 if you have the `ws2_32' library (-lws2_32). */ /* #undef HAVE_LIBWS2_32 */ /* Define to 1 if you have the header file. */ #define HAVE_LIMITS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the `memset' function. */ #define HAVE_MEMSET 1 /* Define to 1 if you have the header file. */ #define HAVE_NETINET_IN_H 1 /* Define to 1 if you have the header file. */ #define HAVE_NETINET_TCP_H 1 /* Define to 1 if you have the `perror' function. */ #define HAVE_PERROR 1 /* Define to 1 if you have the `socket' function. */ #define HAVE_SOCKET 1 /* Define to 1 if stdbool.h conforms to C99. */ #define HAVE_STDBOOL_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDDEF_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRING_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_EVENTFD_H */ /* Define to 1 if you have the header file. */ #define HAVE_SYS_SOCKET_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TYPES_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_UIO_H 1 /* Define to 1 if you have the header file. */ #define HAVE_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_UNISTD_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_WINDOWS_H */ /* Define to 1 if the system has the type `_Bool'. */ /* #undef HAVE__BOOL */ /* Define to the sub-directory in which libtool stores uninstalled libraries. 
*/ #define LT_OBJDIR ".libs/" /* Name of package */ #define PACKAGE "zeromq" /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "zeromq-dev@lists.zeromq.org" /* Define to the full name of this package. */ #define PACKAGE_NAME "zeromq" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "zeromq 4.1.1" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "zeromq" /* Define to the home page for this package. */ #define PACKAGE_URL "" /* Define to the version of this package. */ #define PACKAGE_VERSION "4.1.1" /* Define as the return type of signal handlers (`int' or `void'). */ #define RETSIGTYPE void /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* Define to 1 if you can safely include both and . */ #define TIME_WITH_SYS_TIME 1 /* Version number of package */ #define VERSION "4.1.1" /* Enable militant API assertions */ /* #undef ZMQ_ACT_MILITANT */ /* Force to use mutexes */ /* #undef ZMQ_FORCE_MUTEXES */ /* Have AIX OS */ /* #undef ZMQ_HAVE_AIX */ /* Have Android OS */ #define ZMQ_HAVE_ANDROID 1 /* Have Cygwin */ /* #undef ZMQ_HAVE_CYGWIN */ /* Have eventfd extension. */ /* #undef ZMQ_HAVE_EVENTFD */ /* Have FreeBSD OS */ /* #undef ZMQ_HAVE_FREEBSD */ /* Have HPUX OS */ /* #undef ZMQ_HAVE_HPUX */ /* Have ifaddrs.h header. */ /* #undef ZMQ_HAVE_IFADDRS */ /* Have Linux OS */ #define ZMQ_HAVE_LINUX 1 /* Have LOCAL_PEERCRED socket option */ /* #undef ZMQ_HAVE_LOCAL_PEERCRED */ /* Have MinGW32 */ /* #undef ZMQ_HAVE_MINGW32 */ /* Have NetBSD OS */ /* #undef ZMQ_HAVE_NETBSD */ /* Have NORM protocol extension */ /* #undef ZMQ_HAVE_NORM */ /* Have OpenBSD OS */ /* #undef ZMQ_HAVE_OPENBSD */ /* Have OpenPGM extension */ /* #undef ZMQ_HAVE_OPENPGM */ /* Have DarwinOSX OS */ /* #undef ZMQ_HAVE_OSX */ /* Have QNX Neutrino OS */ /* #undef ZMQ_HAVE_QNXNTO */ /* Whether SOCK_CLOEXEC is defined and functioning. */ /* #undef ZMQ_HAVE_SOCK_CLOEXEC */ /* Have Solaris OS */ /* #undef ZMQ_HAVE_SOLARIS */ /* Whether SO_KEEPALIVE is supported. */ /* #undef ZMQ_HAVE_SO_KEEPALIVE */ /* Whether TCP_KEEPALIVE is supported. */ /* #undef ZMQ_HAVE_TCP_KEEPALIVE */ /* Whether TCP_KEEPCNT is supported. */ /* #undef ZMQ_HAVE_TCP_KEEPCNT */ /* Whether TCP_KEEPIDLE is supported. */ /* #undef ZMQ_HAVE_TCP_KEEPIDLE */ /* Whether TCP_KEEPINTVL is supported. */ /* #undef ZMQ_HAVE_TCP_KEEPINTVL */ /* Have TIPC support */ /* #undef ZMQ_HAVE_TIPC */ /* Have uio.h header. */ #define ZMQ_HAVE_UIO 1 /* Have Windows OS */ /* #undef ZMQ_HAVE_WINDOWS */ /* Define for Solaris 2.5.1 so the uint32_t typedef from , , or is not used. If the typedef were allowed, the #define below would cause a syntax error. */ /* #undef _UINT32_T */ /* Define to empty if `const' does not conform to ANSI C. */ /* #undef const */ /* Define to `__inline__' or `__inline' if that's what the C compiler calls it, or to nothing if 'inline' is not supported under any name. */ #ifndef __cplusplus /* #undef inline */ #endif /* Define to `unsigned int' if does not define. */ /* #undef size_t */ /* Define to `int' if does not define. */ /* #undef ssize_t */ /* Define to the type of an unsigned integer type of width exactly 32 bits if such a type exists and the standard includes do not define it. */ /* #undef uint32_t */ /* Define to empty if the keyword `volatile' does not work. Warning: valid code using `volatile' can become incorrect without. Disable with care. 
*/ /* #undef volatile */ pyzmq-16.0.2/buildutils/include_linux/000077500000000000000000000000001301503633700200035ustar00rootroot00000000000000pyzmq-16.0.2/buildutils/include_linux/platform.hpp000066400000000000000000000174001301503633700223420ustar00rootroot00000000000000/* src/platform.hpp. Generated from platform.hpp.in by configure. */ /* src/platform.hpp.in. Generated from configure.ac by autoheader. */ /* Define to 1 if you have the header file. */ #define HAVE_ALLOCA_H 1 /* Define to 1 if you have the header file. */ #define HAVE_ARPA_INET_H 1 /* Define to 1 if you have the `clock_gettime' function. */ #define HAVE_CLOCK_GETTIME 1 /* Define to 1 if you have the declaration of `LOCAL_PEERCRED', and to 0 if you don't. */ #define HAVE_DECL_LOCAL_PEERCRED 0 /* Define to 1 if you have the declaration of `SO_PEERCRED', and to 0 if you don't. */ #define HAVE_DECL_SO_PEERCRED 1 /* Define to 1 if you have the header file. */ #define HAVE_DLFCN_H 1 /* Define to 1 if you have the header file. */ #define HAVE_ERRNO_H 1 /* Define to 1 if you have the `fork' function. */ #define HAVE_FORK 1 /* Define to 1 if you have the `freeifaddrs' function. */ #define HAVE_FREEIFADDRS 1 /* Define to 1 if you have the `gethrtime' function. */ /* #undef HAVE_GETHRTIME */ /* Define to 1 if you have the `getifaddrs' function. */ #define HAVE_GETIFADDRS 1 /* Define to 1 if you have the `gettimeofday' function. */ #define HAVE_GETTIMEOFDAY 1 /* Define to 1 if you have the header file. */ #define HAVE_IFADDRS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the `gssapi_krb5' library (-lgssapi_krb5). */ /* #undef HAVE_LIBGSSAPI_KRB5 */ /* Define to 1 if you have the `iphlpapi' library (-liphlpapi). */ /* #undef HAVE_LIBIPHLPAPI */ /* Define to 1 if you have the `nsl' library (-lnsl). */ /* #undef HAVE_LIBNSL */ /* Define to 1 if you have the `pthread' library (-lpthread). */ #define HAVE_LIBPTHREAD 1 /* Define to 1 if you have the `rpcrt4' library (-lrpcrt4). */ /* #undef HAVE_LIBRPCRT4 */ /* Define to 1 if you have the `rt' library (-lrt). */ #define HAVE_LIBRT 1 /* Define to 1 if you have the `socket' library (-lsocket). */ /* #undef HAVE_LIBSOCKET */ /* The libsodium library is to be used. */ /* #undef HAVE_LIBSODIUM */ /* Define to 1 if you have the `ws2_32' library (-lws2_32). */ /* #undef HAVE_LIBWS2_32 */ /* Define to 1 if you have the header file. */ #define HAVE_LIMITS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the `memset' function. */ #define HAVE_MEMSET 1 /* Define to 1 if you have the header file. */ #define HAVE_NETINET_IN_H 1 /* Define to 1 if you have the header file. */ #define HAVE_NETINET_TCP_H 1 /* Define to 1 if you have the `perror' function. */ #define HAVE_PERROR 1 /* Define to 1 if you have the `socket' function. */ #define HAVE_SOCKET 1 /* Define to 1 if stdbool.h conforms to C99. */ #define HAVE_STDBOOL_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDDEF_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRING_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_EVENTFD_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_SOCKET_H 1 /* Define to 1 if you have the header file. 
*/ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TYPES_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_UIO_H 1 /* Define to 1 if you have the header file. */ #define HAVE_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_UNISTD_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_WINDOWS_H */ /* Define to 1 if the system has the type `_Bool'. */ /* #undef HAVE__BOOL */ /* Define to the sub-directory in which libtool stores uninstalled libraries. */ #define LT_OBJDIR ".libs/" /* Name of package */ #define PACKAGE "zeromq" /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "zeromq-dev@lists.zeromq.org" /* Define to the full name of this package. */ #define PACKAGE_NAME "zeromq" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "zeromq 4.1.1" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "zeromq" /* Define to the home page for this package. */ #define PACKAGE_URL "" /* Define to the version of this package. */ #define PACKAGE_VERSION "4.1.1" /* Define as the return type of signal handlers (`int' or `void'). */ #define RETSIGTYPE void /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* Define to 1 if you can safely include both and . */ #define TIME_WITH_SYS_TIME 1 /* Version number of package */ #define VERSION "4.1.1" /* Enable militant API assertions */ /* #undef ZMQ_ACT_MILITANT */ /* Force to use mutexes */ /* #undef ZMQ_FORCE_MUTEXES */ /* Have AIX OS */ /* #undef ZMQ_HAVE_AIX */ /* Have Android OS */ /* #undef ZMQ_HAVE_ANDROID */ /* Have Cygwin */ /* #undef ZMQ_HAVE_CYGWIN */ /* Have eventfd extension. */ #define ZMQ_HAVE_EVENTFD 1 /* Have FreeBSD OS */ /* #undef ZMQ_HAVE_FREEBSD */ /* Have HPUX OS */ /* #undef ZMQ_HAVE_HPUX */ /* Have ifaddrs.h header. */ #define ZMQ_HAVE_IFADDRS 1 /* Have Linux OS */ #define ZMQ_HAVE_LINUX 1 /* Have LOCAL_PEERCRED socket option */ /* #undef ZMQ_HAVE_LOCAL_PEERCRED */ /* Have MinGW32 */ /* #undef ZMQ_HAVE_MINGW32 */ /* Have NetBSD OS */ /* #undef ZMQ_HAVE_NETBSD */ /* Have NORM protocol extension */ /* #undef ZMQ_HAVE_NORM */ /* Have OpenBSD OS */ /* #undef ZMQ_HAVE_OPENBSD */ /* Have OpenPGM extension */ /* #undef ZMQ_HAVE_OPENPGM */ /* Have DarwinOSX OS */ /* #undef ZMQ_HAVE_OSX */ /* Have QNX Neutrino OS */ /* #undef ZMQ_HAVE_QNXNTO */ /* Whether SOCK_CLOEXEC is defined and functioning. */ #define ZMQ_HAVE_SOCK_CLOEXEC 1 /* Have Solaris OS */ /* #undef ZMQ_HAVE_SOLARIS */ /* Whether SO_KEEPALIVE is supported. */ #define ZMQ_HAVE_SO_KEEPALIVE 1 /* Have SO_PEERCRED socket option */ #define ZMQ_HAVE_SO_PEERCRED 1 /* Whether TCP_KEEPALIVE is supported. */ /* #undef ZMQ_HAVE_TCP_KEEPALIVE */ /* Whether TCP_KEEPCNT is supported. */ #define ZMQ_HAVE_TCP_KEEPCNT 1 /* Whether TCP_KEEPIDLE is supported. */ #define ZMQ_HAVE_TCP_KEEPIDLE 1 /* Whether TCP_KEEPINTVL is supported. */ #define ZMQ_HAVE_TCP_KEEPINTVL 1 /* Have TIPC support */ /* #undef ZMQ_HAVE_TIPC */ /* Have uio.h header. */ #define ZMQ_HAVE_UIO 1 /* Have Windows OS */ /* #undef ZMQ_HAVE_WINDOWS */ /* Define for Solaris 2.5.1 so the uint32_t typedef from , , or is not used. If the typedef were allowed, the #define below would cause a syntax error. */ /* #undef _UINT32_T */ /* Define to empty if `const' does not conform to ANSI C. 
*/ /* #undef const */ /* Define to `__inline__' or `__inline' if that's what the C compiler calls it, or to nothing if 'inline' is not supported under any name. */ #ifndef __cplusplus /* #undef inline */ #endif /* Define to `unsigned int' if does not define. */ /* #undef size_t */ /* Define to `int' if does not define. */ /* #undef ssize_t */ /* Define to the type of an unsigned integer type of width exactly 32 bits if such a type exists and the standard includes do not define it. */ /* #undef uint32_t */ /* Define to empty if the keyword `volatile' does not work. Warning: valid code using `volatile' can become incorrect without. Disable with care. */ /* #undef volatile */ pyzmq-16.0.2/buildutils/include_win32/000077500000000000000000000000001301503633700176065ustar00rootroot00000000000000pyzmq-16.0.2/buildutils/include_win32/stdint.h000066400000000000000000000003531301503633700212650ustar00rootroot00000000000000#ifndef _stdint_h__ #define _stdint_h__ #include "../../bundled/zeromq/src/stdint.hpp" #define UINT8_MAX 0xff #define UINT16_MAX 0xffff #define UINT32_MAX 0xffffffff #define UINT64_MAX 0xffffffffffffffffull #endif /* _stdint_h__ */ pyzmq-16.0.2/buildutils/initlibzmq.c000066400000000000000000000014311301503633700174660ustar00rootroot00000000000000/* This file is from pyzmq-static by Brandon Craig-Rhodes, and used under the BSD license py3compat from http://wiki.python.org/moin/PortingExtensionModulesToPy3k Provide the init function that Python expects when we compile libzmq by pretending it is a Python extension. */ #include "Python.h" static PyMethodDef Methods[] = { {NULL, NULL, 0, NULL} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "libzmq", NULL, -1, Methods, NULL, NULL, NULL, NULL }; PyMODINIT_FUNC PyInit_libzmq(void) { PyObject *module = PyModule_Create(&moduledef); return module; } #else // py2 PyMODINIT_FUNC initlibzmq(void) { (void) Py_InitModule("libzmq", Methods); } #endif pyzmq-16.0.2/buildutils/misc.py000066400000000000000000000033351301503633700164520ustar00rootroot00000000000000"""misc build utility functions""" # Copyright (c) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
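# Helpers defined below, used by pyzmq's setup machinery:
#   customize_mingw(cc)         -- strip '-mno-cygwin' and the msvcr90 runtime from a
#                                  mingw32 compiler (works around Python issue #12641)
#   get_compiler(compiler, ...) -- create a distutils C compiler, apply distutils'
#                                  customize_compiler, and set any extra attributes
#   get_output_error(cmd)       -- run a command and return (returncode, stdout, stderr),
#                                  with output decoded as utf8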
import os import sys import logging from distutils import ccompiler from distutils.sysconfig import customize_compiler from pipes import quote from subprocess import Popen, PIPE pjoin = os.path.join if sys.version_info[0] >= 3: u = lambda x: x else: u = lambda x: x.decode('utf8', 'replace') def customize_mingw(cc): # strip -mno-cygwin from mingw32 (Python Issue #12641) for cmd in [cc.compiler, cc.compiler_cxx, cc.compiler_so, cc.linker_exe, cc.linker_so]: if '-mno-cygwin' in cmd: cmd.remove('-mno-cygwin') # remove problematic msvcr90 if 'msvcr90' in cc.dll_libraries: cc.dll_libraries.remove('msvcr90') def get_compiler(compiler, **compiler_attrs): """get and customize a compiler""" if compiler is None or isinstance(compiler, str): cc = ccompiler.new_compiler(compiler=compiler) customize_compiler(cc) if cc.compiler_type == 'mingw32': customize_mingw(cc) else: cc = compiler for name, val in compiler_attrs.items(): setattr(cc, name, val) return cc def get_output_error(cmd): """Return the exit status, stdout, stderr of a command""" if not isinstance(cmd, list): cmd = [cmd] logging.debug("Running: %s", ' '.join(map(quote, cmd))) try: result = Popen(cmd, stdout=PIPE, stderr=PIPE) except IOError as e: return -1, u(''), u('Failed to run %r: %r' % (cmd, e)) so, se = result.communicate() # unicode: so = so.decode('utf8', 'replace') se = se.decode('utf8', 'replace') return result.returncode, so, se pyzmq-16.0.2/buildutils/msg.py000066400000000000000000000015161301503633700163040ustar00rootroot00000000000000"""logging""" # Copyright (c) PyZMQ Developers. # Distributed under the terms of the Modified BSD License. from __future__ import division import os import sys import logging #----------------------------------------------------------------------------- # Logging (adapted from h5py: http://h5py.googlecode.com) #----------------------------------------------------------------------------- logger = logging.getLogger() if os.environ.get('DEBUG'): logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) logger.addHandler(logging.StreamHandler(sys.stderr)) def debug(msg): logger.debug(msg) def info(msg): logger.info(msg) def fatal(msg, code=1): logger.error("Fatal: " + msg) exit(code) def warn(msg): logger.error("Warning: " + msg) def line(c='*', width=48): print(c * (width // len(c))) pyzmq-16.0.2/buildutils/patch.py000066400000000000000000000031141301503633700166110ustar00rootroot00000000000000"""utils for patching libraries""" # Copyright (c) PyZMQ Developers. # Distributed under the terms of the Modified BSD License. 
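# The public entry point below is patch_lib_paths(fname, library_dirs).  On OS X it
# lists the libraries a binary links against (`otool -L`), looks up any entry that is
# not an absolute or @-prefixed path in `library_dirs`, and rewrites the load command
# with `install_name_tool -change` so the binary loads the real file.  On other
# platforms it is a no-op.  Illustrative call (paths here are hypothetical):
#   patch_lib_paths('zmq/libzmq.so', ['/usr/local/lib'])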
import re import sys import os import logging from .misc import get_output_error pjoin = os.path.join # LIB_PAT from delocate LIB_PAT = re.compile(r"\s*(.*) \(compatibility version (\d+\.\d+\.\d+), " r"current version (\d+\.\d+\.\d+)\)") def _get_libs(fname): rc, so, se = get_output_error(['otool', '-L', fname]) if rc: logging.error("otool -L %s failed: %r" % (fname, se)) return for line in so.splitlines()[1:]: m = LIB_PAT.match(line) if m: yield m.group(1) def _find_library(lib, path): """Find a library""" for d in path[::-1]: real_lib = os.path.join(d, lib) if os.path.exists(real_lib): return real_lib def _install_name_change(fname, lib, real_lib): rc, so, se = get_output_error(['install_name_tool', '-change', lib, real_lib, fname]) if rc: logging.error("Couldn't update load path: %s", se) def patch_lib_paths(fname, library_dirs): """Load any weakly-defined libraries from their real location (only on OS X) - Find libraries with `otool -L` - Update with `install_name_tool -change` """ if sys.platform != 'darwin': return libs = _get_libs(fname) for lib in libs: if not lib.startswith(('@', '/')): real_lib = _find_library(lib, library_dirs) if real_lib: _install_name_change(fname, lib, real_lib) __all__ = ['patch_lib_paths']pyzmq-16.0.2/buildutils/templates/000077500000000000000000000000001301503633700171375ustar00rootroot00000000000000pyzmq-16.0.2/buildutils/templates/constant_enums.pxi000066400000000000000000000001121301503633700227130ustar00rootroot00000000000000cdef extern from "zmq.h" nogil: enum: PYZMQ_DRAFT_API {ZMQ_ENUMS} pyzmq-16.0.2/buildutils/templates/constants.pxi000066400000000000000000000007001301503633700216720ustar00rootroot00000000000000#----------------------------------------------------------------------------- # Python module level constants #----------------------------------------------------------------------------- DRAFT_API = PYZMQ_DRAFT_API {ASSIGNMENTS} #----------------------------------------------------------------------------- # Symbols to export #----------------------------------------------------------------------------- __all__ = [ "DRAFT_API", {ALL} ] pyzmq-16.0.2/buildutils/templates/zmq_constants.h000066400000000000000000000003261301503633700222140ustar00rootroot00000000000000#ifndef _PYZMQ_CONSTANT_DEFS #define _PYZMQ_CONSTANT_DEFS #ifdef ZMQ_BUILD_DRAFT_API #define PYZMQ_DRAFT_API 1 #else #define PYZMQ_DRAFT_API 0 #endif {ZMQ_IFNDEFS} #endif // ifndef _PYZMQ_CONSTANT_DEFS pyzmq-16.0.2/buildutils/vers.c000066400000000000000000000003631301503633700162660ustar00rootroot00000000000000// check libzmq version #include #include "zmq.h" int main(int argc, char **argv){ int major, minor, patch; zmq_version(&major, &minor, &patch); fprintf(stdout, "vers: %d.%d.%d\n", major, minor, patch); return 0; } pyzmq-16.0.2/docs/000077500000000000000000000000001301503633700137115ustar00rootroot00000000000000pyzmq-16.0.2/docs/Makefile000066400000000000000000000070601301503633700153540ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build SRCDIR = source # Internal variables. 
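# PAPEROPT_* select the LaTeX paper size; ALLSPHINXOPTS combines the doctree cache
# location, the selected paper option, any user-supplied SPHINXOPTS, and the source dir.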
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SRCDIR) .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest default: html help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* -rm -rf $(SRCDIR)/api/generated/* -rm -rf dist -rm -rf gh-pages api: $(SRCDIR)/api/generated/gen.rst $(SRCDIR)/api/generated/gen.rst: python autogen_api.py @echo "Build API docs finished." dist: all mkdir -p dist rm -rf dist/* ln $(BUILDDIR)/latex/PyZMQ.pdf dist/ cp -a $(BUILDDIR)/html dist/ @echo "Build finished. Final docs are in dist/" pdf: latex cd $(BUILDDIR)/latex && make all-pdf all: html pdf html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyZMQ.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyZMQ.qhc" latex: api $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." gh-pages: html sh update_ghpages.sh pyzmq-16.0.2/docs/autogen_api.py000077500000000000000000000026611301503633700165660ustar00rootroot00000000000000#!/usr/bin/env python """Script to auto-generate our API docs. 
""" # stdlib imports import os import sys # local imports sys.path.append(os.path.abspath('sphinxext')) # import sphinx_cython from apigen import ApiDocWriter #***************************************************************************** if __name__ == '__main__': pjoin = os.path.join package = 'zmq' outdir = pjoin('source','api','generated') docwriter = ApiDocWriter(package,rst_extension='.rst') # You have to escape the . here because . is a special char for regexps. # You must do make clean if you change this! docwriter.package_skip_patterns += [ r'\.tests$', r'\.backend$', r'\.auth$', r'\.eventloop\.minitornado$', r'\.green\.eventloop$', r'\.sugar$', r'\.devices$', ] docwriter.module_skip_patterns += [ r'\.eventloop\.stack_context$', r'\.eventloop\.future$', r'\.error$', r'\.green\..+$', r'\.utils\.initthreads$', r'\.utils\.constant_names$', r'\.utils\.garbage$', r'\.utils\.rebuffer$', r'\.utils\.strtypes$', r'\.zmq$', ] # Now, generate the outputs docwriter.write_api_docs(outdir) docwriter.write_index(outdir, 'gen', relative_to = pjoin('source','api') ) print('%d files written' % len(docwriter.written_modules)) pyzmq-16.0.2/docs/requirements.txt000066400000000000000000000001361301503633700171750ustar00rootroot00000000000000cython>=0.20 sphinx>=1.3 https://pypi.python.org/packages/source/g/gevent/gevent-1.1b6.tar.gz pyzmq-16.0.2/docs/source/000077500000000000000000000000001301503633700152115ustar00rootroot00000000000000pyzmq-16.0.2/docs/source/_static/000077500000000000000000000000001301503633700166375ustar00rootroot00000000000000pyzmq-16.0.2/docs/source/_static/default.css000066400000000000000000000213301301503633700207740ustar00rootroot00000000000000/** * Alternate Sphinx design * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl. 
*/ body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; font-size: 14px; letter-spacing: -0.01em; line-height: 150%; text-align: center; /*background-color: #AFC1C4; */ background-color: #BFD1D4; color: black; padding: 0; border: 1px solid #aaa; margin: 0px 80px 0px 80px; min-width: 740px; } a { color: #CA7900; text-decoration: none; } a:hover { color: #2491CF; } pre { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.015em; padding: 0.5em; border: 1px solid #ccc; background-color: #f8f8f8; } td.linenos pre { padding: 0.5em 0; border: 0; background-color: transparent; color: #aaa; } table.highlighttable { margin-left: 0.5em; } table.highlighttable td { padding: 0 0.5em 0 0.5em; } cite, code, tt { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.01em; } hr { border: 1px solid #abc; margin: 2em; } tt { background-color: #f2f2f2; border-bottom: 1px solid #ddd; color: #333; } tt.descname { background-color: transparent; font-weight: bold; font-size: 1.2em; border: 0; } tt.descclassname { background-color: transparent; border: 0; } tt.xref { background-color: transparent; font-weight: bold; border: 0; } a tt { background-color: transparent; font-weight: bold; border: 0; color: #CA7900; } a tt:hover { color: #2491CF; } dl { margin-bottom: 15px; } dd p { margin-top: 0px; } dd ul, dd table { margin-bottom: 10px; } dd { margin-top: 3px; margin-bottom: 10px; margin-left: 30px; } .refcount { color: #060; } dt:target, .highlight { background-color: #fbe54e; } dl.class, dl.function { border-top: 2px solid #888; } dl.method, dl.attribute { border-top: 1px solid #aaa; } dl.glossary dt { font-weight: bold; font-size: 1.1em; } pre { line-height: 120%; } pre a { color: inherit; text-decoration: underline; } .first { margin-top: 0 !important; } div.document { background-color: white; text-align: left; background-image: url(contents.png); background-repeat: repeat-x; } /* div.documentwrapper { width: 100%; } */ div.clearer { clear: both; } div.related h3 { display: none; } div.related ul { background-image: url(navigation.png); height: 2em; list-style: none; border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; margin: 0; padding-left: 10px; } div.related ul li { margin: 0; padding: 0; height: 2em; float: left; } div.related ul li.right { float: right; margin-right: 5px; } div.related ul li a { margin: 0; padding: 0 5px 0 5px; line-height: 1.75em; color: #EE9816; } div.related ul li a:hover { color: #3CA8E7; } div.body { margin: 0; padding: 0.5em 20px 20px 20px; } div.bodywrapper { margin: 0 240px 0 0; border-right: 1px solid #ccc; } div.body a { text-decoration: underline; } div.sphinxsidebar { margin: 0; padding: 0.5em 15px 15px 0; width: 210px; float: right; text-align: left; /* margin-left: -100%; */ } div.sphinxsidebar h4, div.sphinxsidebar h3 { margin: 1em 0 0.5em 0; font-size: 0.9em; padding: 0.1em 0 0.1em 0.5em; color: white; border: 1px solid #86989B; background-color: #AFC1C4; } div.sphinxsidebar ul { padding-left: 1.5em; margin-top: 7px; list-style: none; padding: 0; line-height: 130%; } div.sphinxsidebar ul ul { list-style: square; margin-left: 20px; } p { margin: 0.8em 0 0.5em 0; } p.rubric { font-weight: bold; } h1 { margin: 0; padding: 0.7em 0 0.3em 0; font-size: 1.5em; color: #11557C; } h2 { margin: 1.3em 0 0.2em 0; font-size: 1.35em; padding: 0; } h3 { margin: 1em 0 -0.3em 0; font-size: 1.2em; } h1 a, h2 
a, h3 a, h4 a, h5 a, h6 a { color: black!important; } h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { display: none; margin: 0 0 0 0.3em; padding: 0 0.2em 0 0.2em; color: #aaa!important; } h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, h5:hover a.anchor, h6:hover a.anchor { display: inline; } h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, h5 a.anchor:hover, h6 a.anchor:hover { color: #777; background-color: #eee; } table { border-collapse: collapse; margin: 0 -0.5em 0 -0.5em; } table td, table th { padding: 0.2em 0.5em 0.2em 0.5em; } div.footer { background-color: #E3EFF1; color: #86989B; padding: 3px 8px 3px 0; clear: both; font-size: 0.8em; text-align: right; } div.footer a { color: #86989B; text-decoration: underline; } div.pagination { margin-top: 2em; padding-top: 0.5em; border-top: 1px solid black; text-align: center; } div.sphinxsidebar ul.toc { margin: 1em 0 1em 0; padding: 0 0 0 0.5em; list-style: none; } div.sphinxsidebar ul.toc li { margin: 0.5em 0 0.5em 0; font-size: 0.9em; line-height: 130%; } div.sphinxsidebar ul.toc li p { margin: 0; padding: 0; } div.sphinxsidebar ul.toc ul { margin: 0.2em 0 0.2em 0; padding: 0 0 0 1.8em; } div.sphinxsidebar ul.toc ul li { padding: 0; } div.admonition, div.warning { font-size: 0.9em; margin: 1em 0 0 0; border: 1px solid #86989B; background-color: #f7f7f7; } div.admonition p, div.warning p { margin: 0.5em 1em 0.5em 1em; padding: 0; } div.admonition pre, div.warning pre { margin: 0.4em 1em 0.4em 1em; } div.admonition p.admonition-title, div.warning p.admonition-title { margin: 0; padding: 0.1em 0 0.1em 0.5em; color: white; border-bottom: 1px solid #86989B; font-weight: bold; background-color: #AFC1C4; } div.warning { border: 1px solid #940000; } div.warning p.admonition-title { background-color: #CF0000; border-bottom-color: #940000; } div.admonition ul, div.admonition ol, div.warning ul, div.warning ol { margin: 0.1em 0.5em 0.5em 3em; padding: 0; } div.versioninfo { margin: 1em 0 0 0; border: 1px solid #ccc; background-color: #DDEAF0; padding: 8px; line-height: 1.3em; font-size: 0.9em; } a.headerlink { color: #c60f0f!important; font-size: 1em; margin-left: 6px; padding: 0 4px 0 4px; text-decoration: none!important; visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink { visibility: visible; } a.headerlink:hover { background-color: #ccc; color: white!important; } table.indextable td { text-align: left; vertical-align: top; } table.indextable dl, table.indextable dd { margin-top: 0; margin-bottom: 0; } table.indextable tr.pcap { height: 10px; } table.indextable tr.cap { margin-top: 10px; background-color: #f2f2f2; } img.toggler { margin-right: 3px; margin-top: 3px; cursor: pointer; } img.inheritance { border: 0px } form.pfform { margin: 10px 0 20px 0; } table.contentstable { width: 90%; } table.contentstable p.biglink { line-height: 150%; } a.biglink { font-size: 1.3em; } span.linkdescr { font-style: italic; padding-top: 5px; font-size: 90%; } ul.search { margin: 10px 0 0 20px; padding: 0; } ul.search li { padding: 5px 0 5px 20px; background-image: url(file.png); background-repeat: no-repeat; background-position: 0 7px; } ul.search li a { font-weight: bold; } ul.search li div.context { color: #888; margin: 2px 0 0 30px; text-align: left; } ul.keywordmatches li.goodmatch a { font-weight: bold; } /* from numpy:*/ 
td.field-body > blockquote { margin-top: 0.1em; margin-bottom: 0.5em; } /* spacing in see also definition lists */ dl.last > dd { margin-top: 1px; margin-bottom: 5px; margin-left: 30px; } .field-list th { color: rgb(0,102,204); background: #eee; } table.field-list th { /* border-left: 1px solid #aaa !important;*/ padding-left: 5px; white-space: nowrap; } table.field-list { border-collapse: separate; border-spacing: 10px; } th.field-name { /* border-left: 1px solid #aaa !important;*/ padding-left: .5em; padding-right: .5em; text-align: right; } .rubric { color: rgb(0,50,150); background-color: ; font-style: italic; } .versionadded { font-style: italic; background: #ffe; }
pyzmq-16.0.2/docs/source/_static/logo.png [binary PNG image data omitted]
pyzmq-16.0.2/docs/source/_static/zeromq.ico [binary ICO image data omitted]
pyzmq-16.0.2/docs/source/_templates/000077500000000000000000000000001301503633700173465ustar00rootroot00000000000000pyzmq-16.0.2/docs/source/_templates/layout.html000066400000000000000000000012271301503633700215530ustar00rootroot00000000000000{% extends "!layout.html" %} {% block rootrellink %}
  • home
  • search
  • API »
  • {% endblock %} {% block relbar1 %}
    PyZMQ Documentation
    {{ super() }} {% endblock %} {# put the sidebar before the body #} {% block sidebar1 %}{{ sidebar() }}{% endblock %} {% block sidebar2 %}{% endblock %} pyzmq-16.0.2/docs/source/api/000077500000000000000000000000001301503633700157625ustar00rootroot00000000000000pyzmq-16.0.2/docs/source/api/index.rst000066400000000000000000000007021301503633700176220ustar00rootroot00000000000000.. _api-index: ################### The PyZMQ API ################### :Release: |release| :Date: |today| .. toctree:: zmq zmq.devices zmq.decorators zmq.green zmq.eventloop.ioloop zmq.eventloop.future zmq.asyncio zmq.eventloop.zmqstream zmq.auth zmq.auth.thread zmq.auth.ioloop zmq.log.handlers zmq.ssh.tunnel zmq.utils.jsonapi zmq.utils.monitor zmq.utils.z85 zmq.utils.win32 pyzmq-16.0.2/docs/source/api/zmq.asyncio.rst000066400000000000000000000040701301503633700207700ustar00rootroot00000000000000.. AUTO-GENERATED FILE -- DO NOT EDIT! asyncio ======= Module: :mod:`zmq.asyncio` -------------------------- .. automodule:: zmq.asyncio .. currentmodule:: zmq.asyncio .. versionadded:: 15.0 As of 15.0, pyzmq now supports :mod:`asyncio`, via :mod:`zmq.asyncio`. When imported from this module, blocking methods such as :meth:`zmq.asyncio.Socket.recv_multipart`, :meth:`zmq.asyncio.Socket.poll`, and :meth:`zmq.asyncio.Poller.poll` return :class:`~.asyncio.Future` s. It also provides a :class:`zmq.asyncio.ZMQEventLoop`. .. sourcecode:: python import asyncio import zmq import zmq.asyncio ctx = zmq.asyncio.Context() loop = zmq.asyncio.ZMQEventLoop() asyncio.set_event_loop(loop) @asyncio.coroutine def recv_and_process(): sock = ctx.socket(zmq.PULL) sock.bind(url) msg = yield from sock.recv_multipart() # waits for msg to be ready reply = yield from async_process(msg) yield from sock.send_multipart(reply) loop.run_until_complete(recv_and_process()) Classes ------- :class:`ZMQEventLoop` ~~~~~~~~~~~~~~~~~~~~~ An asyncio event loop using zmq_poll for zmq socket support. .. autoclass:: ZMQEventLoop :class:`Context` ~~~~~~~~~~~~~~~~ Context class that creates Future-returning sockets. See :class:`zmq.Context` for more info. .. autoclass:: Context :noindex: :class:`Socket` ~~~~~~~~~~~~~~~ Socket subclass that returns :class:`asyncio.Future` s from blocking methods, for use in coroutines and async applications. .. seealso:: :class:`zmq.Socket` for the inherited API. .. autoclass:: Socket :noindex: .. automethod:: recv :noindex: .. automethod:: recv_multipart :noindex: .. automethod:: send :noindex: .. automethod:: send_multipart :noindex: .. automethod:: poll :noindex: :class:`Poller` ~~~~~~~~~~~~~~~ Poller subclass that returns :class:`asyncio.Future` s from poll, for use in coroutines and async applications. .. seealso:: :class:`zmq.Poller` for the inherited API. .. autoclass:: Poller :noindex: .. automethod:: poll :noindex: pyzmq-16.0.2/docs/source/api/zmq.auth.ioloop.rst000066400000000000000000000005341301503633700215650ustar00rootroot00000000000000.. AUTO-GENERATED FILE -- DO NOT EDIT! auth.ioloop =========== Module: :mod:`auth.ioloop` -------------------------- .. automodule:: zmq.auth.ioloop :noindex: .. currentmodule:: zmq.auth.ioloop :class:`IOLoopAuthenticator` ---------------------------- .. autoclass:: IOLoopAuthenticator :members: :undoc-members: :inherited-members: pyzmq-16.0.2/docs/source/api/zmq.auth.rst000066400000000000000000000005641301503633700202700ustar00rootroot00000000000000auth ==== Module: :mod:`auth` ------------------- .. automodule:: zmq.auth .. 
currentmodule:: zmq.auth :class:`Authenticator` ---------------------- .. autoclass:: Authenticator :members: :undoc-members: :inherited-members: Functions --------- .. autofunction:: create_certificates .. autofunction:: load_certificate .. autofunction:: load_certificates pyzmq-16.0.2/docs/source/api/zmq.auth.thread.rst000066400000000000000000000005421301503633700215320ustar00rootroot00000000000000.. AUTO-GENERATED FILE -- DO NOT EDIT! auth.thread =========== Module: :mod:`auth.thread` -------------------------- .. automodule:: zmq.auth.thread .. currentmodule:: zmq.auth.thread Classes ------- :class:`ThreadAuthenticator` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: ThreadAuthenticator :members: :undoc-members: :inherited-members: pyzmq-16.0.2/docs/source/api/zmq.decorators.rst000066400000000000000000000004031301503633700214640ustar00rootroot00000000000000decorators ========== Module: :mod:`zmq.decorators` ----------------------------- .. automodule:: zmq.decorators .. currentmodule:: zmq.decorators Decorators ---------- .. autofunction:: zmq.decorators.context .. autofunction:: zmq.decorators.socket pyzmq-16.0.2/docs/source/api/zmq.devices.rst000066400000000000000000000024111301503633700207420ustar00rootroot00000000000000devices ======= Functions --------- .. autofunction:: zmq.device :noindex: .. autofunction:: zmq.proxy :noindex: Module: :mod:`zmq.devices` -------------------------- .. automodule:: zmq.devices .. currentmodule:: zmq.devices Base Devices ------------ :class:`Device` *************** .. autoclass:: Device :members: :exclude-members: context_factory, run, run_device :class:`ThreadDevice` ********************* .. autoclass:: ThreadDevice :members: :class:`ProcessDevice` ********************** .. autoclass:: ProcessDevice :members: Proxy Devices ------------- :class:`Proxy` ******************** .. autoclass:: Proxy :members: bind_mon, connect_mon, setsockopt_mon :class:`ThreadProxy` ******************** .. autoclass:: ThreadProxy :members: :class:`ProcessProxy` ********************* .. autoclass:: ProcessProxy :members: MonitoredQueue Devices ---------------------- .. autofunction:: zmq.devices.monitored_queue :class:`MonitoredQueue` ***************************** .. autoclass:: MonitoredQueue :members: :class:`ThreadMonitoredQueue` ***************************** .. autoclass:: ThreadMonitoredQueue :members: :class:`ProcessMonitoredQueue` ****************************** .. autoclass:: ProcessMonitoredQueue :members: pyzmq-16.0.2/docs/source/api/zmq.eventloop.future.rst000066400000000000000000000036471301503633700226600ustar00rootroot00000000000000.. AUTO-GENERATED FILE -- DO NOT EDIT! eventloop.future ================ Module: :mod:`eventloop.future` ------------------------------- .. automodule:: zmq.eventloop.future .. currentmodule:: zmq.eventloop.future .. versionadded:: 15.0 As of pyzmq 15, there is a new Socket subclass that returns Futures for recv methods, which can be found at :class:`zmq.eventloop.future.Socket`. You can create these sockets by instantiating a :class:`~zmq.eventloop.future.Context` from the same module. These sockets let you easily use zmq with tornado's coroutines. .. seealso:: :mod:`tornado:tornado.gen` .. 
sourcecode:: python from tornado import gen from zmq.eventloop.future import Context ctx = Context() @gen.coroutine def recv_and_process(): sock = ctx.socket(zmq.PULL) sock.bind(url) msg = yield sock.recv_multipart() # waits for msg to be ready reply = yield async_process(msg) yield sock.send_multipart(reply) Classes ------- :class:`Context` ~~~~~~~~~~~~~~~~ Context class that creates Future-returning sockets. See :class:`zmq.Context` for more info. .. autoclass:: Context :noindex: :class:`Socket` ~~~~~~~~~~~~~~~ Socket subclass that returns :class:`~tornado.concurrent.Future` s from blocking methods, for use in coroutines and async applications. .. seealso:: :class:`zmq.Socket` for the inherited API. .. autoclass:: Socket :noindex: .. automethod:: recv :noindex: .. automethod:: recv_multipart :noindex: .. automethod:: send :noindex: .. automethod:: send_multipart :noindex: .. automethod:: poll :noindex: :class:`Poller` ~~~~~~~~~~~~~~~ Poller subclass that returns :class:`~tornado.concurrent.Future` s from poll, for use in coroutines and async applications. .. seealso:: :class:`zmq.Poller` for the inherited API. .. autoclass:: Poller :noindex: .. automethod:: poll :noindex: pyzmq-16.0.2/docs/source/api/zmq.eventloop.ioloop.rst000066400000000000000000000012461301503633700226400ustar00rootroot00000000000000.. AUTO-GENERATED FILE -- DO NOT EDIT! eventloop.ioloop ================ Module: :mod:`eventloop.ioloop` ------------------------------- .. automodule:: zmq.eventloop.ioloop .. currentmodule:: zmq.eventloop.ioloop Classes ------- :class:`DelayedCallback` ~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: DelayedCallback :members: :undoc-members: :inherited-members: :class:`ZMQIOLoop` ~~~~~~~~~~~~~~~~~~ .. autoclass:: ZMQIOLoop :members: :undoc-members: :inherited-members: :class:`ZMQPoller` ~~~~~~~~~~~~~~~~~~ .. autoclass:: ZMQPoller :members: :undoc-members: :inherited-members: Function -------- .. autofunction:: zmq.eventloop.ioloop.install pyzmq-16.0.2/docs/source/api/zmq.eventloop.zmqstream.rst000066400000000000000000000005421301503633700233600ustar00rootroot00000000000000.. AUTO-GENERATED FILE -- DO NOT EDIT! eventloop.zmqstream =================== Module: :mod:`eventloop.zmqstream` ---------------------------------- .. automodule:: zmq.eventloop.zmqstream .. currentmodule:: zmq.eventloop.zmqstream :class:`ZMQStream` ------------------ .. autoclass:: ZMQStream :members: :undoc-members: :inherited-members: pyzmq-16.0.2/docs/source/api/zmq.green.rst000066400000000000000000000001221301503633700204150ustar00rootroot00000000000000green ===== Module: :mod:`green` -------------------- .. automodule:: zmq.green pyzmq-16.0.2/docs/source/api/zmq.log.handlers.rst000066400000000000000000000007111301503633700217010ustar00rootroot00000000000000.. AUTO-GENERATED FILE -- DO NOT EDIT! log.handlers ============ Module: :mod:`log.handlers` --------------------------- .. automodule:: zmq.log.handlers .. currentmodule:: zmq.log.handlers Classes ------- :class:`PUBHandler` ~~~~~~~~~~~~~~~~~~~ .. autoclass:: PUBHandler :members: :undoc-members: :inherited-members: :class:`TopicLogger` ~~~~~~~~~~~~~~~~~~~~ .. autoclass:: TopicLogger :members: :undoc-members: :inherited-members: pyzmq-16.0.2/docs/source/api/zmq.rst000066400000000000000000000040721301503633700173260ustar00rootroot00000000000000zmq === .. automodule:: zmq .. currentmodule:: zmq Basic Classes ------------- :class:`Context` **************** .. 
autoclass:: Context :members: :inherited-members: :exclude-members: sockopts, closed, __del__, __enter__, __exit__, __copy__, __deepcopy__, __delattr__, __getattr__, __setattr__, .. attribute:: closed boolean - whether the context has been terminated. If True, you can no longer use this Context. :class:`Socket` *************** .. autoclass:: Socket :members: :inherited-members: :exclude-members: closed, context, getsockopt_unicode, recv_unicode, setsockopt_unicode, send_unicode, __del__, __enter__, __exit__, __copy__, __deepcopy__, __delattr__, __getattr__, __setattr__, .. attribute:: closed boolean - whether the socket has been closed. If True, you can no longer use this Socket. :class:`Frame` ************** .. autoclass:: Frame :members: :inherited-members: :class:`MessageTracker` *********************** .. autoclass:: MessageTracker :members: :inherited-members: Polling ------- :class:`Poller` *************** .. autoclass:: Poller :members: :inherited-members: .. autofunction:: zmq.select Exceptions ---------- :class:`ZMQError` ***************** .. autoclass:: ZMQError :members: :inherited-members: :class:`ZMQVersionError` ************************ .. autoclass:: ZMQVersionError :members: :inherited-members: :class:`Again` ************** .. autoclass:: Again :class:`ContextTerminated` ************************** .. autoclass:: ContextTerminated :class:`NotDone` **************** .. autoclass:: NotDone :class:`ZMQBindError` ********************* .. autoclass:: ZMQBindError Functions --------- .. autofunction:: zmq.zmq_version .. autofunction:: zmq.pyzmq_version .. autofunction:: zmq.zmq_version_info .. autofunction:: zmq.pyzmq_version_info .. autofunction:: zmq.has .. autofunction:: zmq.device .. autofunction:: zmq.proxy .. autofunction:: zmq.curve_keypair .. autofunction:: zmq.get_includes .. autofunction:: zmq.get_library_dirs pyzmq-16.0.2/docs/source/api/zmq.ssh.tunnel.rst000066400000000000000000000010031301503633700214150ustar00rootroot00000000000000.. AUTO-GENERATED FILE -- DO NOT EDIT! ssh.tunnel ========== Module: :mod:`ssh.tunnel` ------------------------- .. automodule:: zmq.ssh.tunnel .. currentmodule:: zmq.ssh.tunnel Functions --------- .. autofunction:: zmq.ssh.tunnel.open_tunnel .. autofunction:: zmq.ssh.tunnel.openssh_tunnel .. autofunction:: zmq.ssh.tunnel.paramiko_tunnel .. autofunction:: zmq.ssh.tunnel.select_random_ports .. autofunction:: zmq.ssh.tunnel.try_passwordless_ssh .. autofunction:: zmq.ssh.tunnel.tunnel_connection pyzmq-16.0.2/docs/source/api/zmq.utils.jsonapi.rst000066400000000000000000000004651301503633700221310ustar00rootroot00000000000000.. AUTO-GENERATED FILE -- DO NOT EDIT! utils.jsonapi ============= Module: :mod:`utils.jsonapi` ---------------------------- .. automodule:: zmq.utils.jsonapi .. currentmodule:: zmq.utils.jsonapi Functions --------- .. autofunction:: zmq.utils.jsonapi.dumps .. autofunction:: zmq.utils.jsonapi.loads pyzmq-16.0.2/docs/source/api/zmq.utils.monitor.rst000066400000000000000000000005241301503633700221510ustar00rootroot00000000000000.. AUTO-GENERATED FILE -- DO NOT EDIT! utils.monitor ============= Module: :mod:`utils.monitor` ---------------------------- .. automodule:: zmq.utils.monitor .. currentmodule:: zmq.utils.monitor Functions --------- .. autofunction:: zmq.utils.monitor.parse_monitor_message .. 
autofunction:: zmq.utils.monitor.recv_monitor_message pyzmq-16.0.2/docs/source/api/zmq.utils.win32.rst000066400000000000000000000003641301503633700214260ustar00rootroot00000000000000utils.win32 =========== Module: :mod:`zmq.utils.win32` ------------------------------ .. automodule:: zmq.utils.win32 .. currentmodule:: zmq.utils.win32 :class:`allow_interrupt` ------------------------ .. autoclass:: allow_interrupt pyzmq-16.0.2/docs/source/api/zmq.utils.z85.rst000066400000000000000000000004271301503633700211120ustar00rootroot00000000000000.. AUTO-GENERATED FILE -- DO NOT EDIT! utils.z85 ========= Module: :mod:`utils.z85` ------------------------ .. automodule:: zmq.utils.z85 .. currentmodule:: zmq.utils.z85 Functions --------- .. autofunction:: zmq.utils.z85.decode .. autofunction:: zmq.utils.z85.encode pyzmq-16.0.2/docs/source/changelog.rst000066400000000000000000000514121301503633700176750ustar00rootroot00000000000000.. PyZMQ changelog summary, started by Min Ragan-Kelley, 2011 .. _changelog: ================ Changes in PyZMQ ================ This is a coarse summary of changes in pyzmq versions. For a full changelog, consult the `git log `_. 16.0.2 ====== - Workaround bug in libzmq-4.2.0 causing EINVAL on poll. 16.0.1 ====== - Fix erroneous EAGAIN that could happen on async sockets - Bundle libzmq 4.1.6 16.0 ==== - Support for Python 2.6 and Python 3.2 is dropped. For old Pythons, use :command:`pip install "pyzmq<16"` to get the last version of pyzmq that supports these versions. - Include zmq.h - Deprecate ``zmq.Stopwatch``. Native Python timing tools can be used instead. - Better support for using pyzmq as a Cython library - bundle zmq.h when pyzmq bundles libzmq as an extension - add :func:`zmq.get_library_dirs` to find bundled libzmq - Updates to setup.py for Cython 0.25 compatibility - Various asyncio/future fixes: - support raw sockets in pollers - allow cancelling async sends - Fix :meth:`IOLoop.current` in :mod:`zmq.green` 15.4 ==== - Load bundled libzmq extension with import rather than CDLL, which should fix some manifest issues in certain cases on Windows. - Avoid installing asyncio sources on Python 2, which confuses some tools that run `python -m compileall`, which reports errors on the Python 3-only files. - Bundle msvcp.dll in Windows wheels on CPython 3.5, which should fix wheel compatibility systems without Visual C++ 2015 redistributable. - :meth:`zmq.Context.instance` is now threadsafe. - FIX: sync some behavior in zmq_poll and setting LINGER on close/destroy with the CFFI backend. - PERF: resolve send/recv immediately if events are available in async Sockets - Async Sockets (asyncio, tornado) now support ``send_json``, ``send_pyobj``, etc. - add preliminary support for ``zmq.DRAFT_API`` reflecting ZMQ_BUILD_DRAFT_API, which indicates whether new APIs in prereleases are available. 15.3 ==== - Bump bundled libzmq to 4.1.5, using tweetnacl for bundled curve support instead of libsodium - FIX: include .pxi includes in installation for consumers of Cython API - FIX: various fixes in new async sockets - Introduce :mod:`zmq.decorators` API for decorating functions to create sockets or contexts - Add :meth:`zmq.Socket.subscribe` and :meth:`zmq.Socket.unsubscribe` methods to sockets, so that assignment is no longer needed for subscribing. Verbs should be methods! Assignment is still supported for backward-compatibility. 
- Accept text (unicode) input to z85 encoding, not just bytes - :meth:`zmq.Context.socket` forwards keyword arguments to the :class:`Socket` constructor 15.2 ==== - FIX: handle multiple events in a single register call in :mod:`zmq.asyncio` - FIX: unicode/bytes bug in password prompt in :mod:`zmq.ssh` on Python 3 - FIX: workaround gevent monkeypatches in garbage collection thread - update bundled minitornado from tornado-4.3. - improved inspection by setting ``binding=True`` in cython compile options - add asyncio Authenticator implementation in :mod:`zmq.auth.asyncio` - workaround overflow bug in libzmq preventing receiving messages larger than ``MAX_INT`` 15.1 ==== - FIX: Remove inadvertant tornado dependency when using :mod:`zmq.asyncio` - FIX: 15.0 Python 3.5 wheels didn't work on Windows - Add GSSAPI support to Authenticators - Support new constants defined in upcoming libzmq-4.2.dev 15.0 ==== PyZMQ 15 adds Future-returning sockets and pollers for both :mod:`asyncio` and :mod:`tornado`. - add :mod:`asyncio` support via :mod:`zmq.asyncio` - add :mod:`tornado` future support via :mod:`zmq.eventloop.future` - trigger bundled libzmq if system libzmq is found to be < 3. System libzmq 2 can be forced by explicitly requesting ``--zmq=/prefix/``. 14.7.0 ====== Changes: - Update bundled libzmq to 4.1.2. - Following the `lead of Python 3.5 `_, interrupted system calls will be retried. Fixes: - Fixes for CFFI backend on Python 3 + support for PyPy 3. - Verify types of all frames in :meth:`~zmq.Socket.send_multipart` before sending, to avoid partial messages. - Fix build on Windows when both debug and release versions of libzmq are found. - Windows build fixes for Python 3.5. 14.6.0 ====== Changes: - improvements in :meth:`zmq.Socket.bind_to_random_port`: - use system to allocate ports by default - catch EACCES on Windows - include libsodium when building bundled libzmq on Windows (includes wheels on PyPI) - pyzmq no longer bundles external libzmq when making a bdist. You can use `delocate `_ to do this. Bugfixes: - add missing :attr:`ndim` on memoryviews of Frames - allow :func:`copy.copy` and :func:`copy.deepcopy` on Sockets, Contexts 14.5.0 ====== Changes: - use pickle.DEFAULT_PROTOCOL by default in send_pickle - with the release of pip-6, OS X wheels are only marked as 10.6-intel, indicating that they should be installable on any newer or single-arch Python. - raise SSHException on failed check of host key Bugfixes: - fix method name in utils.wi32.allow_interrupt - fork-related fixes in garbage collection thread - add missing import in ``zmq.__init__``, causing failure to import in some circumstances 14.4.1 ====== Bugfixes for 14.4 - SyntaxError on Python 2.6 in zmq.ssh - Handle possible bug in garbage collection after fork 14.4.0 ====== New features: - Experimental support for libzmq-4.1.0 rc (new constants, plus :func:`zmq.has`). - Update bundled libzmq to 4.0.5 - Update bundled libsodium to 1.0.0 - Fixes for SSH dialogs when using :mod:`zmq.ssh` to create tunnels - More build/link/load fixes on OS X and Solaris - Get Frame metadata via dict access (libzmq 4) - Contexts and Sockets are context managers (term/close on ``__exit__``) - Add :class:`zmq.utils.win32.allow_interrupt` context manager for catching SIGINT on Windows Bugs fixed: - Bundled libzmq should not trigger recompilation after install on PyPy 14.3.1 ====== .. 
note:: pyzmq-14.3.1 is the last version to include bdists for Python 3.3 Minor bugfixes to pyzmq 14.3: - Fixes to building bundled libzmq on OS X < 10.9 - Fixes to import-failure warnings on Python 3.4 - Fixes to tests - Pull upstream fixes to zmq.ssh for ssh multiplexing 14.3.0 ====== - PyZMQ no longer calls :meth:`.Socket.close` or :meth:`.Context.term` during process cleanup. Changes to garbage collection in Python 3.4 make this impossible to do sensibly. - :meth:`ZMQStream.close` closes its socket immediately, rather than scheduling a timeout. - Raise the original ImportError when importing zmq fails. Should be more informative than `no module cffi...`. .. warning:: Users of Python 3.4 should not use pyzmq < 14.3, due to changes in garbage collection. 14.2.0 ====== New Stuff --------- - Raise new ZMQVersionError when a requested method is not supported by the linked libzmq. For backward compatibility, this subclasses NotImplementedError. Bugs Fixed ---------- - Memory leak introduced in pyzmq-14.0 in zero copy. - OverflowError on 32 bit systems in zero copy. 14.1.0 ====== Security -------- The headline features for 14.1 are adding better support for libzmq's security features. - When libzmq is bundled as a Python extension (e.g. wheels, eggs), libsodium is also bundled (excluding Windows), ensuring that libzmq security is available to users who install from wheels - New :mod:`zmq.auth`, implementing zeromq's ZAP authentication, modeled on czmq zauth. For more information, see the `examples `_. Other New Stuff --------------- - Add PYZMQ_BACKEND for enabling use of backends outside the pyzmq codebase. - Add :attr:`~.Context.underlying` property and :meth:`~.Context.shadow` method to Context and Socket, for handing off sockets and contexts. between pyzmq and other bindings (mainly pyczmq_). - Add TOS, ROUTER_HANDOVER, and IPC_FILTER constants from libzmq-4.1-dev. - Add Context option support in the CFFI backend. - Various small unicode and build fixes, as always. - :meth:`~.Socket.send_json` and :meth:`~.Socket.recv_json` pass any extra kwargs to ``json.dumps/loads``. .. _pyczmq: https://github.com/zeromq/pyczmq Deprecations ------------ - ``Socket.socket_type`` is deprecated, in favor of ``Socket.type``, which has been available since 2.1. 14.0.1 ====== Bugfix release - Update bundled libzmq to current (4.0.3). - Fix bug in :meth:`.Context.destroy` with no open sockets. - Threadsafety fixes in the garbage collector. - Python 3 fixes in :mod:`zmq.ssh`. 14.0.0 ====== * Update bundled libzmq to current (4.0.1). * Backends are now implemented in ``zmq.backend`` instead of ``zmq.core``. This has no effect on public APIs. * Various build improvements for Cython and CFFI backends (PyPy compiles at build time). * Various GIL-related performance improvements - the GIL is no longer touched from a zmq IO thread. * Adding a constant should now be a bit easier - only zmq/sugar/constant_names should need updating, all other constant-related files should be automatically updated by ``setup.py constants``. * add support for latest libzmq-4.0.1 (includes ZMQ_CURVE security and socket event monitoring). New stuff --------- - :meth:`.Socket.monitor` - :meth:`.Socket.get_monitor_socket` - :func:`zmq.curve_keypair` - :mod:`zmq.utils.monitor` - :mod:`zmq.utils.z85` 13.1.0 ====== The main new feature is improved tornado 3 compatibility. PyZMQ ships a 'minitornado' submodule, which contains a small subset of tornado 3.0.1, in order to get the IOLoop base class. 
zmq.eventloop.ioloop.IOLoop is now a simple subclass, and if the system tornado is ≥ 3.0, then the zmq IOLoop is a proper registered subclass of the tornado one itself, and minitornado is entirely unused. 13.0.2 ====== Bugfix release! A few things were broken in 13.0.0, so this is a quick bugfix release. * **FIXED** EAGAIN was unconditionally turned into KeyboardInterrupt * **FIXED** we used totally deprecated ctypes_configure to generate constants in CFFI backend * **FIXED** memory leak in CFFI backend for PyPy * **FIXED** typo prevented IPC_PATH_MAX_LEN from ever being defined * **FIXED** various build fixes - linking with librt, Cython compatibility, etc. 13.0.1 ====== defunct bugfix. We do not speak of this... 13.0.0 ====== PyZMQ now officially targets libzmq-3 (3.2.2), 0MQ ≥ 2.1.4 is still supported for the indefinite future, but 3.x is recommended. PyZMQ has detached from libzmq versioning, and will just follow its own regular versioning scheme from now on. PyZMQ bdists will include whatever is the latest stable libzmq release (3.2.2 for pyzmq-13.0). .. note:: set/get methods are exposed via get/setattr on all Context, Socket, and Frame classes. This means that subclasses of these classes that require extra attributes **must declare these attributes at the class level**. Experiments Removed ------------------- * The Threadsafe ZMQStream experiment in 2.2.0.1 was deemed inappropriate and not useful, and has been removed. * The :mod:`zmq.web` experiment has been removed, to be developed as a `standalone project `_. New Stuff --------- * Support for PyPy via CFFI backend (requires py, ctypes-configure, and cffi). * Add support for new APIs in libzmq-3 - :meth:`.Socket.disconnect` - :meth:`.Socket.unbind` - :meth:`.Context.set` - :meth:`.Context.get` - :meth:`.Frame.set` - :meth:`.Frame.get` - :func:`zmq.proxy` - :class:`zmq.devices.Proxy` - Exceptions for common zmq errnos: :class:`zmq.Again`, :class:`zmq.ContextTerminated` (subclass :class:`ZMQError`, so fully backward-compatible). * Setting and getting :attr:`.Socket.hwm` sets or gets *both* SNDHWM/RCVHWM for libzmq-3. * Implementation splits core Cython bindings from pure-Python subclasses with sugar methods (send/recv_multipart). This should facilitate non-Cython backends and PyPy support [spoiler: it did!]. Bugs Fixed ---------- * Unicode fixes in log and monitored queue * MinGW, ppc, cross-compilation, and HP-UX build fixes * :mod:`zmq.green` should be complete - devices and tornado eventloop both work in gevent contexts. 2.2.0.1 ======= This is a tech-preview release, to try out some new features. It is expected to be short-lived, as there are likely to be issues to iron out, particularly with the new pip-install support. Experimental New Stuff ---------------------- These features are marked 'experimental', which means that their APIs are not set in stone, and may be removed or changed in incompatible ways in later releases. Threadsafe ZMQStream ******************** With the IOLoop inherited from tornado, there is exactly one method that is threadsafe: :meth:`.IOLoop.add_callback`. With this release, we are trying an experimental option to pass all IOLoop calls via this method, so that ZMQStreams can be used from one thread while the IOLoop runs in another. To try out a threadsafe stream: .. sourcecode:: python stream = ZMQStream(socket, threadsafe=True) pip install pyzmq ***************** PyZMQ should now be pip installable, even on systems without libzmq. 
In these cases, when pyzmq fails to find an appropriate libzmq to link against, it will try to build libzmq as a Python extension. This work is derived from `pyzmq_static `_. To this end, PyZMQ source distributions include the sources for libzmq (2.2.0) and libuuid (2.21), both used under the LGPL. zmq.green ********* The excellent `gevent_zeromq `_ socket subclass which provides `gevent `_ compatibility has been merged as :mod:`zmq.green`. .. seealso:: :ref:`zmq_green` Bugs Fixed ---------- * TIMEO sockopts are properly included for libzmq-2.2.0 * avoid garbage collection of sockets after fork (would cause ``assert (mailbox.cpp:79)``). 2.2.0 ===== Some effort has gone into refining the pyzmq API in this release to make it a model for other language bindings. This is principally made in a few renames of objects and methods, all of which leave the old name for backwards compatibility. .. note:: As of this release, all code outside ``zmq.core`` is BSD licensed (where possible), to allow more permissive use of less-critical code and utilities. Name Changes ------------ * The :class:`~.Message` class has been renamed to :class:`~.Frame`, to better match other zmq bindings. The old Message name remains for backwards-compatibility. Wherever pyzmq docs say "Message", they should refer to a complete zmq atom of communication (one or more Frames, connected by ZMQ_SNDMORE). Please report any remaining instances of Message==MessagePart with an Issue (or better yet a Pull Request). * All ``foo_unicode`` methods are now called ``foo_string`` (``_unicode`` remains for backwards compatibility). This is not only for cross-language consistency, but it makes more sense in Python 3, where native strings are unicode, and the ``_unicode`` suffix was wedded too much to Python 2. Other Changes and Removals -------------------------- * ``prefix`` removed as an unused keyword argument from :meth:`~.Socket.send_multipart`. * ZMQStream :meth:`~.ZMQStream.send` default has been changed to `copy=True`, so it matches Socket :meth:`~.Socket.send`. * ZMQStream :meth:`~.ZMQStream.on_err` is deprecated, because it never did anything. * Python 2.5 compatibility has been dropped, and some code has been cleaned up to reflect no-longer-needed hacks. * Some Cython files in :mod:`zmq.core` have been split, to reduce the amount of Cython-compiled code. Much of the body of these files were pure Python, and thus did not benefit from the increased compile time. This change also aims to ease maintaining feature parity in other projects, such as `pyzmq-ctypes `_. New Stuff --------- * :class:`~.Context` objects can now set default options when they create a socket. These are set and accessed as attributes to the context. Socket options that do not apply to a socket (e.g. SUBSCRIBE on non-SUB sockets) will simply be ignored. * :meth:`~.ZMQStream.on_recv_stream` has been added, which adds the stream itself as a second argument to the callback, making it easier to use a single callback on multiple streams. * A :attr:`~Frame.more` boolean attribute has been added to the :class:`~.Frame` (née Message) class, so that frames can be identified as terminal without extra queires of :attr:`~.Socket.rcvmore`. Experimental New Stuff ---------------------- These features are marked 'experimental', which means that their APIs are not set in stone, and may be removed or changed in incompatible ways in later releases. * :mod:`zmq.web` added for load-balancing requests in a tornado webapp with zeromq. 2.1.11 ====== * remove support for LABEL prefixes. 
A major feature of libzmq-3.0, the LABEL prefix, has been removed from libzmq, prior to the first stable libzmq 3.x release. * The prefix argument to :meth:`~.Socket.send_multipart` remains, but it continue to behave in exactly the same way as it always has on 2.1.x, simply prepending message parts. * :meth:`~.Socket.recv_multipart` will always return a list, because prefixes are once again indistinguishable from regular message parts. * add :meth:`.Socket.poll` method, for simple polling of events on a single socket. * no longer require monkeypatching tornado IOLoop. The :class:`.ioloop.ZMQPoller` class is a poller implementation that matches tornado's expectations, and pyzmq sockets can be used with any tornado application just by specifying the use of this poller. The pyzmq IOLoop implementation now only trivially differs from tornado's. It is still recommended to use :func:`.ioloop.install`, which sets *both* the zmq and tornado global IOLoop instances to the same object, but it is no longer necessary. .. warning:: The most important part of this change is that the ``IOLoop.READ/WRITE/ERROR`` constants now match tornado's, rather than being mapped directly to the zmq ``POLLIN/OUT/ERR``. So applications that used the low-level :meth:`IOLoop.add_handler` code with ``POLLIN/OUT/ERR`` directly (used to work, but was incorrect), rather than using the IOLoop class constants will no longer work. Fixing these to use the IOLoop constants should be insensitive to the actual value of the constants. 2.1.10 ====== * Add support for libzmq-3.0 LABEL prefixes: .. warning:: This feature has been removed from libzmq, and thus removed from future pyzmq as well. * send a message with label-prefix with: .. sourcecode:: python send_multipart([b'msg', b'parts'], prefix=[b'label', b'prefix']) * :meth:`recv_multipart` returns a tuple of ``(prefix,msg)`` if a label prefix is detected * ZMQStreams and devices also respect the LABEL prefix * add czmq-style close&term as :meth:`ctx.destroy`, so that :meth:`ctx.term` remains threadsafe and 1:1 with libzmq. * :meth:`Socket.close` takes optional linger option, for setting linger prior to closing. * add :func:`~zmq.core.version.zmq_version_info` and :func:`~zmq.core.version.pyzmq_version_info` for getting libzmq and pyzmq versions as tuples of numbers. This helps with the fact that version string comparison breaks down once versions get into double-digits. * ioloop changes merged from upstream `Tornado `_ 2.1 2.1.9 ===== * added zmq.ssh tools for tunneling socket connections, copied from IPython * Expanded sockopt support to cover changes in libzmq-4.0 dev. * Fixed an issue that prevented :exc:`KeyboardInterrupts` from being catchable. * Added attribute-access for set/getsockopt. Setting/Getting attributes of :class:`Sockets` with the names of socket options is mapped to calls of set/getsockopt. .. sourcecode:: python s.hwm = 10 s.identity = b'whoda' s.linger # -1 * Terminating a :class:`~Context` closes the sockets it created, matching the behavior in `czmq `_. * :class:`ThreadDevices` use :meth:`Context.instance` to create sockets, so they can use inproc connections to sockets in other threads. * fixed units error on :func:`zmq.select`, where the poll timeout was 1000 times longer than expected. * Add missing ``DEALER/ROUTER`` socket type names (currently aliases, to be replacements for ``XREP/XREQ``). * base libzmq dependency raised to 2.1.4 (first stable release) from 2.1.0. 2.1.7.1 ======= * bdist for 64b Windows only. 
This fixed a type mismatch on the ``ZMQ_FD`` sockopt that only affected that platform. 2.1.7 ===== * Added experimental support for libzmq-3.0 API * Add :func:`zmq.eventloop.ioloop.install` for using pyzmq's IOLoop in a tornado application. 2.1.4 ===== * First version with binary distribution support * Added :meth:`~Context.instance()` method for using a single Context throughout an application without passing references around. pyzmq-16.0.2/docs/source/conf.py000066400000000000000000000160301301503633700165100ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # PyZMQ documentation build configuration file, created by # sphinx-quickstart on Sat Feb 20 23:31:19 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os import string # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../sphinxext')) sys.path.append(os.path.abspath('../..')) # set target libzmq version from buildutils.bundle import bundled_version target_libzmq = '%i.%i.%i' % bundled_version rst_epilog = """ .. |target_libzmq| replace:: {target_libzmq} """.format(**locals()) # patch autodoc to work with Cython Sources import sphinx_cython # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'PyZMQ' copyright = u"""Brian E. Granger & Min Ragan-Kelley. ØMQ logo © iMatix Corportation, used under the Creative Commons Attribution-Share Alike 3.0 License. Python logo ™ of the Python Software Foundation, used by Min RK with permission from the Foundation""" intersphinx_mapping = { 'python': ('http://docs.python.org/3', None), 'http://www.tornadoweb.org/en/stable': None } # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. import zmq # The short X.Y version. version = zmq.__version__.split('-')[0] # The full version, including alpha/beta/rc tags. release = zmq.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. 
exclude_trees = ['build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = '_static/zeromq.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'PyZMQdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). 
latex_documents = [ ('index', 'PyZMQ.tex', u'PyZMQ Documentation', u'Brian E. Granger \\and Min Ragan-Kelley', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True pyzmq-16.0.2/docs/source/devices.rst000066400000000000000000000076721301503633700174010ustar00rootroot00000000000000.. PyZMQ devices doc, by Min Ragan-Kelley, 2011 .. _devices: Devices in PyZMQ ================ .. seealso:: ØMQ Guide `Device coverage `_. ØMQ has a notion of Devices - simple programs that manage a send-recv pattern for connecting two or more sockets. Being full programs, devices include a ``while(True)`` loop and thus block execution permanently once invoked. We have provided in the :mod:`devices` subpackage some facilities for running these devices in the background, as well as a custom three-socket MonitoredQueue_ device. BackgroundDevices ----------------- It seems fairly rare that in a Python program one would actually want to create a zmq device via :func:`.device` in the main thread, since such a call would block execution forever. The most likely model for launching devices is in background threads or processes. We have provided classes for launching devices in a background thread with :class:`.ThreadDevice` and via multiprocessing with :class:`.ProcessDevice`. For threadsafety and running across processes, these methods do not take Socket objects as arguments, but rather socket types, and then the socket creation and configuration happens via the BackgroundDevice's :meth:`foo_in` proxy methods. For each configuration method (bind/connect/setsockopt), there are proxy methods for calling those methods on the Socket objects created in the background thread or process, prefixed with 'in\_' or 'out\_', corresponding to the `in_socket` and `out_socket`:: from zmq.devices import ProcessDevice pd = ProcessDevice(zmq.QUEUE, zmq.ROUTER, zmq.DEALER) pd.bind_in('tcp://*:12345') pd.connect_out('tcp://127.0.0.1:12543') pd.setsockopt_in(zmq.IDENTITY, 'ROUTER') pd.setsockopt_out(zmq.IDENTITY, 'DEALER') pd.start() # it will now be running in a background process MonitoredQueue -------------- One of ØMQ's builtin devices is the ``QUEUE``. This is a symmetric two-socket device that fully supports passing messages in either direction via any pattern. We saw a logical extension of the ``QUEUE`` as one that behaves in the same way with respect to the in/out sockets, but also sends every message in either direction *also* on a third `monitor` socket. For performance reasons, this :func:`.monitored_queue` function is written in Cython, so the loop does not involve Python, and should have the same performance as the basic ``QUEUE`` device. One shortcoming of the ``QUEUE`` device is that it does not support having ``ROUTER`` sockets as both input and output. This is because ``ROUTER`` sockets, when they receive a message, prepend the ``IDENTITY`` of the socket that sent the message (for use in routing the reply). The result is that the output socket will always try to route the incoming message back to the original sender, which is presumably not the intended pattern. 
In order for the queue to support a ROUTER-ROUTER connection, it must swap the first two parts of the message in order to get the right message out the other side. To invoke a monitored queue is similar to invoking a regular ØMQ device:: from zmq.devices import monitored_queue ins = ctx.socket(zmq.ROUTER) outs = ctx.socket(zmq.DEALER) mons = ctx.socket(zmq.PUB) configure_sockets(ins,outs,mons) monitored_queue(ins, outs, mons, in_prefix='in', out_prefix='out') The `in_prefix` and `out_prefix` default to 'in' and 'out' respectively, and a PUB socket is most logical for the monitor socket, since it will never receive messages, and the in/out prefix is well suited to the PUB/SUB topic subscription model. All messages sent on `mons` will be multipart, the first part being the prefix corresponding to the socket that received the message. Or for launching an MQ in the background, there are :class:`.ThreadMonitoredQueue` and :class:`.ProcessMonitoredQueue`, which function just like the base BackgroundDevice objects, but add :meth:`foo_mon` methods for configuring the monitor socket. pyzmq-16.0.2/docs/source/eventloop.rst000066400000000000000000000200621301503633700177560ustar00rootroot00000000000000.. PyZMQ eventloop doc, by Min Ragan-Kelley, 2011 .. _eventloop: ==================== Eventloops and PyZMQ ==================== Integrating zmq with eventloops is *almost* really easy, since most eventloops happily support sockets. What gets messy is that zmq sockets aren't regular sockets, so they need special handling. libzmq provides a :func:`zmq_poll` function that is the same as regular polling, but **also** support zmq sockets. PyZMQ wrapps this in a :class:`~.Poller` class. Most of pyzmq's eventloop support involves setting up existing eventloops (tornado, asyncio) to use :func:`zmq_poll` as the inner poller, rather than the default select/poll/etc. Once that's done, zmq sockets can be happily treated like regular sockets, and regular sockets should continue to work as before. .. note:: It *is* possible to integrate zmq sockets into existing eventloops without modifying the poller by using the ``socket.FD`` attribute. The incredibly unfortunate aspect of this is that it was implemented as an edge-triggered fd, which is highly error prone, and I wouldn't recommend using unless absolutely necessary. This is used in :mod:`zmq.green`, and has been the source of many problems. Tornado IOLoop ============== Facebook's `Tornado`_ includes an eventloop for handing poll events on filedescriptors and native sockets. We have included a small part of Tornado (specifically its :mod:`.ioloop`), and adapted its :class:`IOStream` class into :class:`.ZMQStream` for handling poll events on ØMQ sockets. A ZMQStream object works much like a Socket object, but instead of calling :meth:`~.Socket.recv` directly, you register a callback with :meth:`~.ZMQStream.on_recv`. Callbacks can also be registered for send events with :meth:`~.ZMQStream.on_send`. :func:`install()` ----------------- With PyZMQ's ioloop, you can use zmq sockets in any tornado application. You can tell tornado to use zmq's poller by calling the :func:`.ioloop.install` function: .. sourcecode:: python from zmq.eventloop import ioloop ioloop.install() You can also do the same thing by requesting the global instance from pyzmq: .. sourcecode:: python from zmq.eventloop.ioloop import IOLoop loop = IOLoop.current() This configures tornado's :class:`tornado.ioloop.IOLoop` to use zmq's poller, and registers the current instance. 
Either ``install()`` or retrieving the zmq instance must be done before the global * instance is registered, else there will be a conflict. It is possible to use PyZMQ sockets with tornado *without* registering as the global instance, but it is less convenient. First, you must instruct the tornado IOLoop to use the zmq poller: .. sourcecode:: python from zmq.eventloop.ioloop import ZMQIOLoop loop = ZMQIOLoop() Then, when you instantiate tornado and ZMQStream objects, you must pass the `io_loop` argument to ensure that they use this loop, instead of the global instance. This is especially useful for writing tests, such as this: .. sourcecode:: python from tornado.testing import AsyncTestCase from zmq.eventloop.ioloop import ZMQIOLoop from zmq.eventloop.zmqstream import ZMQStream class TestZMQBridge(AsyncTestCase): # Use a ZMQ-compatible I/O loop so that we can use `ZMQStream`. def get_new_ioloop(self): return ZMQIOLoop() You can also manually install this IOLoop as the global tornado instance, with: .. sourcecode:: python from zmq.eventloop.ioloop import ZMQIOLoop loop = ZMQIOLoop() loop.install() .. _futures: Futures and coroutines ---------------------- PyZMQ 15 adds :mod:`zmq.eventloop.future`, containing a Socket subclass that returns :class:`~.tornado.concurrent.Future` objects for use in :mod:`tornado` coroutines. :class:`ZMQStream` ------------------ :class:`ZMQStream` objects let you register callbacks to handle messages as they arrive, for use with the tornado eventloop. :meth:`send` ************ ZMQStream objects do have :meth:`~.ZMQStream.send` and :meth:`~.ZMQStream.send_multipart` methods, which behaves the same way as :meth:`.Socket.send`, but instead of sending right away, the :class:`.IOLoop` will wait until socket is able to send (for instance if ``HWM`` is met, or a ``REQ/REP`` pattern prohibits sending at a certain point). Messages sent via send will also be passed to the callback registered with :meth:`~.ZMQStream.on_send` after sending. :meth:`on_recv` *************** :meth:`.ZMQStream.on_recv` is the primary method for using a ZMQStream. It registers a callback to fire with messages as they are received, which will *always* be multipart, even if its length is 1. You can easily use this to build things like an echo socket: .. sourcecode:: python s = ctx.socket(zmq.REP) s.bind('tcp://localhost:12345') stream = ZMQStream(s) def echo(msg): stream.send_multipart(msg) stream.on_recv(echo) ioloop.IOLoop.instance().start() on_recv can also take a `copy` flag, just like :meth:`.Socket.recv`. If `copy=False`, then callbacks registered with on_recv will receive tracked :class:`.Frame` objects instead of bytes. .. note:: A callback must be registered using either :meth:`.ZMQStream.on_recv` or :meth:`.ZMQStream.on_recv_stream` before any data will be received on the underlying socket. This allows you to temporarily pause processing on a socket by setting both callbacks to None. Processing can later be resumed by restoring either callback. :meth:`on_recv_stream` ********************** :meth:`.ZMQStream.on_recv_stream` is just like on_recv above, but the callback will be passed both the message and the stream, rather than just the message. This is meant to make it easier to use a single callback with multiple streams. .. 
sourcecode:: python s1 = ctx.socket(zmq.REP) s1.bind('tcp://localhost:12345') stream1 = ZMQStream(s1) s2 = ctx.socket(zmq.REP) s2.bind('tcp://localhost:54321') stream2 = ZMQStream(s2) def echo(stream, msg): stream.send_multipart(msg) stream1.on_recv_stream(echo) stream2.on_recv_stream(echo) ioloop.IOLoop.instance().start() :meth:`flush` ************* Sometimes with an eventloop, there can be multiple events ready on a single iteration of the loop. The :meth:`~.ZMQStream.flush` method allows developers to pull messages off of the queue to enforce some priority over the event loop ordering. flush pulls any pending events off of the queue. You can specify to flush only recv events, only send events, or any events, and you can specify a limit for how many events to flush in order to prevent starvation. .. _Tornado: https://github.com/facebook/tornado .. _asyncio: AsyncIO ======= PyZMQ 15 adds support for :mod:`asyncio` via :mod:`zmq.asyncio`. .. _zmq_green: PyZMQ and gevent ================ PyZMQ ≥ 2.2.0.1 ships with a `gevent `_ compatible API as :mod:`zmq.green`. To use it, simply: .. sourcecode:: python import zmq.green as zmq Then write your code as normal. Socket.send/recv and zmq.Poller are gevent-aware. In PyZMQ ≥ 2.2.0.2, green.device and green.eventloop should be gevent-friendly as well. .. note:: The green device does *not* release the GIL, unlike the true device in zmq.core. zmq.green.eventloop includes minimally patched IOLoop/ZMQStream in order to use the gevent-enabled Poller, so you should be able to use the ZMQStream interface in gevent apps as well, though using two eventloops simultaneously (tornado + gevent) is not recommended. .. warning:: There is a `known issue `_ in gevent ≤ 1.0 or libevent, which can cause zeromq socket events to be missed. PyZMQ works around this by adding a timeout so it will not wait forever for gevent to notice events. The only known solution for this is to use gevent ≥ 1.0, which is currently at 1.0b3, and does not exhibit this behavior. .. seealso:: zmq.green examples `on GitHub `_. :mod:`zmq.green` is simply `gevent_zeromq `_, merged into the pyzmq project. pyzmq-16.0.2/docs/source/index.rst000066400000000000000000000053241301503633700170560ustar00rootroot00000000000000.. PyZMQ documentation master file, created by sphinx-quickstart on Sat Feb 20 23:31:19 2010. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. PyZMQ Documentation =================== :Release: |release| :Date: |today| PyZMQ is the Python bindings for ØMQ_. This documentation currently contains notes on some important aspects of developing PyZMQ and an overview of what the ØMQ API looks like in Python. For information on how to use ØMQ in general, see the many examples in the excellent `ØMQ Guide`_, all of which have a version in Python. PyZMQ works with Python 3 (≥ 3.3), and Python 2.7, with no transformations or 2to3, as well as PyPy (at least 2.0 beta), via CFFI. Please don't hesitate to report pyzmq-specific issues to our tracker_ on GitHub. General questions about ØMQ are better sent to the `ØMQ tracker`_ or `mailing list`_. :doc:`changelog` Supported LibZMQ ================ PyZMQ aims to support all stable ( ≥2.1.4, ≥ 3.2.2, ≥ 4.0.1 ) and active development ( ≥ 4.2.0 ) versions of libzmq. Building the same pyzmq against various versions of libzmq is supported, but only the functionality of the linked libzmq will be available. .. note:: libzmq 3.0-3.1 are not, and will never be supported. 
There never was a stable release of either. Binary distributions (wheels on `PyPI `__ or `GitHub `__) of PyZMQ ship with the stable version of libzmq at the time of release, built with default configuration, and include CURVE support provided by tweetnacl. For pyzmq-|release|, this is |target_libzmq|. Using PyZMQ =========== .. toctree:: :maxdepth: 2 api/index.rst changelog.rst morethanbindings.rst serialization.rst devices.rst eventloop.rst logging.rst ssh.rst Notes from developing PyZMQ =========================== .. toctree:: :maxdepth: 2 pyversions.rst unicode.rst Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` Links ===== * ØMQ_ Home * The `ØMQ Guide`_ * `PyZMQ Installation`_ notes on the ZeroMQ website * PyZMQ on GitHub_ * Issue Tracker_ .. _ØMQ: http://www.zeromq.org .. _ØMQ Guide: http://zguide.zeromq.org .. _ØMQ Tracker: https://github.com/zeromq/libzmq/issues .. _mailing list: http://www.zeromq.org/docs:mailing-lists .. _IRC Channel: http://www.zeromq.org/chatroom .. _Cython: http://cython.org/ .. _GitHub: https://www.github.com/zeromq/pyzmq .. _ØMQ Manual: http://www.zeromq.org/intro:read-the-manual .. _PyZMQ Installation: http://www.zeromq.org/bindings:python .. _tracker: https://www.github.com/zeromq/pyzmq/issues pyzmq-16.0.2/docs/source/logging.rst000066400000000000000000000065241301503633700174000ustar00rootroot00000000000000.. PyZMQ logging doc, by Min Ragan-Kelley, 2011 .. _logging: Asynchronous Logging via PyZMQ ============================== .. seealso:: * The ØMQ guide `coverage `_ of PUB/SUB messaging * Python logging module `documentation `_ Python provides extensible logging facilities through its :py:mod:`logging` module. This module allows for easily extensible logging functionality through the use of :py:class:`~logging.Handler` objects. The most obvious case for hooking up pyzmq to logging would be to broadcast log messages over a PUB socket, so we have provided a :class:`.PUBHandler` class for doing just that. PUB/SUB and Topics ------------------ The ØMQ PUB/SUB pattern consists of a PUB socket broadcasting messages, and a collection of SUB sockets that receive those messages. Each PUB message is a multipart-message, where the first part is interpreted as a topic. SUB sockets can subscribe to topics by setting their ``SUBSCRIBE`` sockopt, e.g.:: sub = ctx.socket(zmq.SUB) sub.setsockopt(zmq.SUBSCRIBE, 'topic1') sub.setsockopt(zmq.SUBSCRIBE, 'topic2') When subscribed, the SUB socket will only receive messages where the first part *starts with* one of the topics set via ``SUBSCRIBE``. The default behavior is to exclude all messages, and subscribing to the empty string '' will receive all messages. PUBHandler ---------- The :class:`.PUBHandler` object is created for allowing the python logging to be emitted on a PUB socket. The main difference between a PUBHandler and a regular logging Handler is the inclusion of topics. For the most basic logging, you can simply create a PUBHandler with an interface or a configured PUB socket, and just let it go:: pub = context.socket(zmq.PUB) pub.bind('tcp://*:12345') handler = PUBHandler(pub) logger = logging.getLogger() logger.addHandler(handler) At this point, all messages logged with the default logger will be broadcast on the pub socket. the PUBHandler does work with topics, and the handler has an attribute ``root_topic``:: handler.root_topic = 'myprogram' Python loggers also have loglevels. The base topic of messages emitted by the PUBHandler will be of the form: ``.``, e.g. 
'myprogram.INFO' or 'whatever.ERROR'. This way, subscribers can easily subscribe to subsets of the logging messages. Log messages are always two-part, where the first part is the topic tree, and the second part is the actual log message. >>> logger.info('hello there') >>> print sub.recv_multipart() ['myprogram.INFO', 'hello there'] Subtopics ********* You can also add to the topic tree below the loglevel on an individual message basis. Assuming your logger is connected to a PUBHandler, you can add as many additional topics on the front of the message, which will be added always after the loglevel. A special delimiter defined at ``zmq.log.handlers.TOPIC_DELIM`` is scanned by the PUBHandler, so if you pass your own subtopics prior to that symbol, they will be stripped from the message and added to the topic tree:: >>> log_msg = "hello there" >>> subtopic = "sub.topic" >>> msg = zmq.log.handlers.TOPIC_DELIM.join([subtopic, log_msg]) >>> logger.warn(msg) >>> print sub.recv_multipart() ['myprogram.WARN.sub.topic', 'hello there'] pyzmq-16.0.2/docs/source/morethanbindings.rst000066400000000000000000000155311301503633700213030ustar00rootroot00000000000000.. PyZMQ Bindings doc, by Min Ragan-Kelley, 2011 .. _bindings: More Than Just Bindings ======================= PyZMQ is ostensibly the Python bindings for `ØMQ`_, but the project, following Python's 'batteries included' philosophy, provides more than just Python methods and objects for calling into the ØMQ C++ library. The Core as Bindings -------------------- PyZMQ is currently broken up into four subpackages. First, is the Core. :mod:`zmq.core` contains the actual bindings for ZeroMQ, and no extended functionality beyond the very basic. The core modules are split, such that each basic ZeroMQ object (or function, if no object is associated) is a separate module, e.g. :mod:`zmq.core.context` contains the :class:`.Context` object, :mod:`zmq.core.poll` contains a :class:`.Poller` object, as well as the :func:`.select` function, etc. ZMQ constants are, for convenience, all kept together in :mod:`zmq.core.constants`. There are two reasons for breaking the core into submodules: *recompilation* and *derivative projects*. The monolithic PyZMQ became quite tedious to have to recompile everything for a small change to a single object. With separate files, that's no longer necessary. The second reason has to do with Cython. PyZMQ is written in Cython, a tool for efficiently writing C-extensions for Python. By separating out our objects into individual `pyx` files, each with their declarations in a `pxd` header, other projects can write extensions in Cython and call directly to ZeroMQ at the C-level without the penalty of going through our Python objects. Thread Safety ------------- In ØMQ, Contexts are threadsafe objects, but Sockets are **not**. It is safe to use a single Context (e.g. via :meth:`zmq.Context.instance`) in your entire multithreaded application, but you should create sockets on a per-thread basis. If you share sockets across threads, you are likely to encounter uncatchable c-level crashes of your application unless you use judicious application of :py:class:`threading.Lock`, but this approach is not recommended. .. seealso:: ZeroMQ API note on threadsafety on `2.2 `_ or `3.2 `_ Socket Options as Attributes ---------------------------- .. versionadded:: 2.1.9 In 0MQ, socket options are set/retrieved with the :meth:`set/getsockopt` methods. 
With the class-based approach in pyzmq, it would be logical to perform these operations with simple attribute access, and this has been added in pyzmq 2.1.9. Simply assign to or request a Socket attribute with the (case-insensitive) name of a sockopt, and it should behave just as you would expect: .. sourcecode:: python s = ctx.socket(zmq.DEALER) s.identity = b'dealer' s.hwm = 10 s.events # 0 s.fd # 16 Default Options on the Context ****************************** .. versionadded:: 2.1.11 Just like setting socket options as attributes on Sockets, you can do the same on Contexts. This affects the default options of any *new* sockets created after the assignment. .. sourcecode:: python ctx = zmq.Context() ctx.linger = 0 rep = ctx.socket(zmq.REP) req = ctx.socket(zmq.REQ) Socket options that do not apply to a socket (e.g. SUBSCRIBE on non-SUB sockets) will simply be ignored. Core Extensions --------------- We have extended the core functionality in two ways that appear inside the :mod:`core` bindings, and are not general ØMQ features. Builtin Serialization ********************* First, we added common serialization with the builtin :py:mod:`json` and :py:mod:`pickle` as first-class methods to the :class:`Socket` class. A socket has the methods :meth:`~.Socket.send_json` and :meth:`~.Socket.send_pyobj`, which correspond to sending an object over the wire after serializing with :mod:`json` and :mod:`pickle` respectively, and any object sent via those methods can be reconstructed with the :meth:`~.Socket.recv_json` and :meth:`~.Socket.recv_pyobj` methods. Unicode strings are other objects that are not unambiguously sendable over the wire, so we include :meth:`~.Socket.send_string` and :meth:`~.Socket.recv_string` that simply send bytes after encoding the message ('utf-8' is the default). .. seealso:: * :ref:`Further information ` on serialization in pyzmq. * :ref:`Our Unicode discussion ` for more information on the trials and tribulations of working with Unicode in a C extension while supporting Python 2 and 3. MessageTracker ************** The second extension of basic ØMQ functionality is the :class:`MessageTracker`. The MessageTracker is an object used to track when the underlying ZeroMQ is done with a message buffer. One of the main use cases for ØMQ in Python is the ability to perform non-copying sends. Thanks to Python's buffer interface, many objects (including NumPy arrays) provide the buffer interface, and are thus directly sendable. However, as with any asynchronous non-copying messaging system like ØMQ or MPI, it can be important to know when the message has actually been sent, so it is safe again to edit the buffer without worry of corrupting the message. This is what the MessageTracker is for. The MessageTracker is a simple object, but there is a penalty to its use. Since by its very nature, the MessageTracker must involve threadsafe communication (specifically a builtin :py:class:`~Queue.Queue` object), instantiating a MessageTracker takes a modest amount of time (10s of µs), so in situations instantiating many small messages, this can actually dominate performance. As a result, tracking is optional, via the ``track`` flag, which is optionally passed, always defaulting to ``False``, in each of the three places where a Frame object (the pyzmq object for wrapping a segment of a message) is instantiated: The :class:`.Frame` constructor, and non-copying sends and receives. A MessageTracker is very simple, and has just one method and one attribute. 
The property :attr:`MessageTracker.done` will be ``True`` when the Frame(s) being tracked are no longer in use by ØMQ, and :meth:`.MessageTracker.wait` will block, waiting for the Frame(s) to be released. .. Note:: A Frame cannot be tracked after it has been instantiated without tracking. If a Frame is to even have the *option* of tracking, it must be constructed with ``track=True``. Extensions ---------- So far, PyZMQ includes four extensions to core ØMQ that we found basic enough to be included in PyZMQ itself: * :ref:`zmq.log ` : Logging handlers for hooking Python logging up to the network * :ref:`zmq.devices ` : Custom devices and objects for running devices in the background * :ref:`zmq.eventloop ` : The `Tornado`_ event loop, adapted for use with ØMQ sockets. * :ref:`zmq.ssh ` : Simple tools for tunneling zeromq connections via ssh. .. _ØMQ: http://www.zeromq.org .. _Tornado: https://github.com/facebook/tornado pyzmq-16.0.2/docs/source/pyversions.rst000066400000000000000000000163561301503633700201770ustar00rootroot00000000000000.. PyZMQ Version compatibility doc, by Min Ragan-Kelley, 2010 .. _pyversions: PyZMQ, Python2.5, and Python3 ============================= PyZMQ is a fairly light, low-level library, so supporting as many versions as is reasonable is our goal. Currently, we support at least Python 2.5-3.1. Making the changes to the codebase required a few tricks, which are documented here for future reference, either by us or by other developers looking to support several versions of Python. .. Note:: It is far simpler to support 2.6-3.x than to include 2.5. Many of the significant syntax changes have been backported to 2.6, so just writing new-style code would work in many cases. I will try to note these points as they come up. pyversion_compat.h ------------------ Many functions we use, primarily involved in converting between C-buffers and Python objects, are not available on all supported versions of Python. In order to resolve missing symbols, we added a header :file:`utils/pyversion_compat.h` that defines missing symbols with macros. Some of these macros alias new names to old functions (e.g. ``PyBytes_AsString``), so that we can call new-style functions on older versions, and some simply define the function as an empty exception raiser. The important thing is that the symbols are defined to prevent compiler warnings and linking errors. Everywhere we use C-API functions that may not be available in a supported version, at the top of the file is the code: .. sourcecode:: guess cdef extern from "pyversion_compat.h": pass This ensures that the symbols are defined in the Cython generated C-code. Higher level switching logic exists in the code itself, to prevent actually calling unavailable functions, but the symbols must still be defined. Bytes and Strings ----------------- .. Note:: If you are using Python >= 2.6, to prepare your PyZMQ code for Python3 you should use the ``b'message'`` syntax to ensure all your string literal messages will still be :class:`bytes` after you make the upgrade. The most cumbersome part of PyZMQ compatibility from a user's perspective is the fact that, since ØMQ uses C-strings, and would like to do so without copying, we must use the Py3k :class:`bytes` object, which is backported to 2.6. In order to do this in a Python-version independent way, we added a small utility that unambiguously defines the string types: :class:`bytes`, :class:`unicode`, :obj:`basestring`. 
This is important, because :class:`str` means different things on 2.x and 3.x, and :class:`bytes` is undefined on 2.5, and both :class:`unicode` and :obj:`basestring` are undefined on 3.x. All typechecking in PyZMQ is done against these types: ================= ================= ==================== Explicit Type 2.x 3.x ================= ================= ==================== :obj:`bytes` :obj:`str` :obj:`bytes` :obj:`unicode` :obj:`unicode` :obj:`str` :obj:`basestring` :obj:`basestring` :obj:`(str, bytes)` ================= ================= ==================== .. Note:: 2.5 specific Where we really noticed the issue of :class:`bytes` vs :obj:`strings` coming up for users was in updating the tests to run on every version. Since the ``b'bytes literal'`` syntax was not backported to 2.5, we must call ``"message".encode()`` for *every* string in the test suite. .. seealso:: :ref:`Unicode discussion ` for more information on strings/bytes. ``PyBytes_*`` ************* The standard C-API function for turning a C-string into a Python string was a set of functions with the prefix ``PyString_*``. However, with the Unicode changes made in Python3, this was broken into ``PyBytes_*`` for bytes objects and ``PyUnicode_*`` for unicode objects. We changed all our ``PyString_*`` code to ``PyBytes_*``, which was backported to 2.6. .. Note:: 2.5 Specific: Since Python 2.5 doesn't support the ``PyBytes_*`` functions, we had to alias them to the ``PyString_*`` methods in utils/pyversion_compat.h. .. sourcecode:: c++ #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromString PyString_FromString #define PyBytes_AsString PyString_AsString #define PyBytes_Size PyString_Size Buffers ------- The layer that is most complicated for developers, but shouldn't trouble users, is the Python C-Buffer APIs. These are the methods for converting between Python objects and C buffers. The reason it is complicated is that it keeps changing. There are two buffer interfaces for converting an object to a C-buffer, known as new-style and old-style. Old-style buffers were introduced long ago, but the new-style is only backported to 2.6. The old-style buffer interface is not available in 3.x. There is also an old- and new-style interface for creating Python objects that view C-memory. The old-style object is called a :class:`buffer`, and the new-style object is :class:`memoryview`. Unlike the new-style buffer interface for objects, :class:`memoryview` has only been backported to *2.7*. This means that the available buffer-related functions are not the same in any two versions of Python 2.5, 2.6, 2.7, or 3.1. We have a :file:`utils/buffers.pxd` file that defines our :func:`asbuffer` and :func:`frombuffer` functions. :file:`utils/buffers.pxd` was adapted from mpi4py_'s :file:`asbuffer.pxi`. The :func:`frombuffer` functionality was added. These functions internally switch based on Python version to call the appropriate C-API functions. .. seealso:: `Python Buffer API `_ .. _bufferapi: http://docs.python.org/c-api/buffer.html ``__str__`` ----------- As discussed, :class:`str` is not a platform independent type. The two places where we are required to return native str objects are :func:`error.strerror`, and :func:`Message.__str__`. In both of these cases, the natural return is actually a :class:`bytes` object. In the methods, the native :class:`str` type is checked, and if the native str is actually unicode, then we decode the bytes into unicode: .. sourcecode:: py # ... 
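# sketch: natural_result() stands in for the internal call that produces the raw bytes
# (the strerror text or the Message buffer mentioned above); decode to unicode only when
# the native str type is unicode (Python 3), otherwise return the bytes unchanged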
b = natural_result() if str is unicode: return b.decode() else: return b Exceptions ---------- .. Note:: This section is only relevant for supporting Python 2.5 and 3.x, not for 2.6-3.x. The syntax for handling exceptions has `changed `_ in Python 3. The old syntax: .. sourcecode:: py try: s.send(msg) except zmq.ZMQError, e: handle(e) is no longer valid in Python 3. Instead, the new syntax for this is: .. sourcecode:: py try: s.send(msg) except zmq.ZMQError as e: handle(e) This new syntax is backported to Python 2.6, but is invalid on 2.5. For 2.6-3.x compatible code, we could just use the new syntax. However, the only method we found to catch an exception for handling on both 2.5 and 3.1 is to get the exception object inside the exception block: .. sourcecode:: py try: s.send(msg) except zmq.ZMQError: e = sys.exc_info()[1] handle(e) This is certainly not as elegant as either the old or new syntax, but it's the only way we have found to work everywhere. .. seealso:: PEP-3110_ .. _PEP-3110: http://www.python.org/dev/peps/pep-3110/ .. _mpi4py: http://mpi4py.googlecode.compyzmq-16.0.2/docs/source/serialization.rst000066400000000000000000000072041301503633700206230ustar00rootroot00000000000000.. PyZMQ serialization doc, by Min Ragan-Kelley, 2011 .. _serialization: Serializing messages with PyZMQ =============================== When sending messages over a network, you often need to marshall your data into bytes. Builtin serialization --------------------- PyZMQ is primarily bindings for libzmq, but we do provide three builtin serialization methods for convenience, to help Python developers learn libzmq. Python has two primary packages for serializing objects: :py:mod:`json` and :py:mod:`pickle`, so we provide simple convenience methods for sending and receiving objects serialized with these modules. A socket has the methods :meth:`~.Socket.send_json` and :meth:`~.Socket.send_pyobj`, which correspond to sending an object over the wire after serializing with json and pickle respectively, and any object sent via those methods can be reconstructed with the :meth:`~.Socket.recv_json` and :meth:`~.Socket.recv_pyobj` methods. These methods designed for convenience, not for performance, so developers who do want to emphasize performance should use their own serialized send/recv methods. Using your own serialization ---------------------------- In general, you will want to provide your own serialization that is optimized for your application or library availability. This may include using your own preferred serialization ([msgpack]_, [protobuf]_), or adding compression via [zlib]_ in the standard library, or the super fast [blosc]_ library. There are two simple models for implementing your own serialization: write a function that takes the socket as an argument, or subclass Socket for use in your own apps. For instance, pickles can often be reduced substantially in size by compressing the data. The following will send *compressed* pickles over the wire: .. sourcecode:: python import zlib, cPickle as pickle def send_zipped_pickle(socket, obj, flags=0, protocol=-1): """pickle an object, and zip the pickle before sending it""" p = pickle.dumps(obj, protocol) z = zlib.compress(p) return socket.send(z, flags=flags) def recv_zipped_pickle(socket, flags=0, protocol=-1): """inverse of send_zipped_pickle""" z = socket.recv(flags) p = zlib.decompress(z) return pickle.loads(p) A common data structure in Python is the numpy array. 
PyZMQ supports sending numpy arrays without copying any data, since they provide the Python buffer interface. However just the buffer is not enough information to reconstruct the array on the receiving side. Here is an example of a send/recv that allow non-copying sends/recvs of numpy arrays including the dtype/shape data necessary for reconstructing the array. .. sourcecode:: python import numpy def send_array(socket, A, flags=0, copy=True, track=False): """send a numpy array with metadata""" md = dict( dtype = str(A.dtype), shape = A.shape, ) socket.send_json(md, flags|zmq.SNDMORE) return socket.send(A, flags, copy=copy, track=track) def recv_array(socket, flags=0, copy=True, track=False): """recv a numpy array""" md = socket.recv_json(flags=flags) msg = socket.recv(flags=flags, copy=copy, track=track) buf = buffer(msg) A = numpy.frombuffer(buf, dtype=md['dtype']) return A.reshape(md['shape']) .. [msgpack] Message Pack serialization library http://msgpack.org .. [protobuf] Google Protocol Buffers http://code.google.com/p/protobuf .. [zlib] Python stdlib module for zip compression: :py:mod:`zlib` .. [blosc] Blosc: A blocking, shuffling and loss-less (and crazy-fast) compression library http://www.blosc.org pyzmq-16.0.2/docs/source/ssh.rst000066400000000000000000000057131301503633700165460ustar00rootroot00000000000000.. PyZMQ ssh doc, by Min Ragan-Kelley, 2011 .. _ssh: Tunneling PyZMQ Connections with SSH ==================================== .. versionadded:: 2.1.9 You may want to connect ØMQ sockets across machines, or untrusted networks. One common way to do this is to tunnel the connection via SSH. IPython_ introduced some tools for tunneling ØMQ connections over ssh in simple cases. These functions have been brought into pyzmq as :mod:`zmq.ssh` under IPython's BSD license. PyZMQ will use the shell ssh command via pexpect_ by default, but it also supports using paramiko_ for tunnels, so it should work on Windows. An SSH tunnel has five basic components: * server : the SSH server through which the tunnel will be created * remote ip : the IP of the remote machine *as seen from the server* (remote ip may be, but is not not generally the same machine as server). * remote port : the port on the remote machine that you want to connect to. * local ip : the interface on your local machine you want to use (default: 127.0.0.1) * local port : the local port you want to forward to the remote port (default: high random) So once you have established the tunnel, connections to ``localip:localport`` will actually be connections to ``remoteip:remoteport``. In most cases, you have a zeromq url for a remote machine, but you need to tunnel the connection through an ssh server. This is So if you would use this command from the same LAN as the remote machine: .. sourcecode:: python sock.connect("tcp://10.0.1.2:5555") to make the same connection from another machine that is outside the network, but you have ssh access to a machine ``server`` on the same LAN, you would simply do: .. sourcecode:: python from zmq import ssh ssh.tunnel_connection(sock, "tcp://10.0.1.2:5555", "server") Note that ``"server"`` can actually be a fully specified ``"user@server:port"`` ssh url. Since this really just launches a shell command, all your ssh configuration of usernames, aliases, keys, etc. will be respected. If necessary, :func:`tunnel_connection` does take arguments for specific passwords, private keys (the ssh ``-i`` option), and non-default choice of whether to use paramiko. 
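For example, a minimal sketch of passing those options explicitly (the keyword names ``keyfile``, ``password``, and ``paramiko`` shown here are assumptions based on the description above, not a definitive signature):

.. sourcecode:: python

    from zmq import ssh

    # use a specific private key (the ssh -i option) instead of the default
    ssh.tunnel_connection(sock, "tcp://10.0.1.2:5555", "user@server",
                          keyfile="~/.ssh/id_rsa")

    # or force the paramiko backend (e.g. on Windows) and supply a password
    ssh.tunnel_connection(sock, "tcp://10.0.1.2:5555", "user@server",
                          password="s3cret", paramiko=True)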
If you are on the same network as the machine, but it is only listening on localhost, you can still connect by making the machine itself the server, and using loopback as the remote ip: .. sourcecode:: python from zmq import ssh ssh.tunnel_connection(sock, "tcp://127.0.0.1:5555", "10.0.1.2") The :func:`tunnel_connection` function is a simple utility that forwards a random localhost port to the real destination, and connects a socket to the new local url, rather than the remote one that wouldn't actually work. .. seealso:: A short discussion of ssh tunnels: http://www.revsys.com/writings/quicktips/ssh-tunnel.html .. _IPython: http://ipython.org .. _pexpect: http://www.noah.org/wiki/pexpect .. _pexpect-u: http://pypi.python.org/pypi/pexpect-u .. _paramiko: http://www.lag.net/paramiko/ pyzmq-16.0.2/docs/source/unicode.rst000066400000000000000000000212361301503633700173750ustar00rootroot00000000000000.. PyZMQ Unicode doc, by Min Ragan-Kelley, 2010 .. _unicode: PyZMQ and Unicode ================= PyZMQ is built with an eye towards an easy transition to Python 3, and part of that is dealing with unicode strings. This is an overview of some of what we found, and what it means for PyZMQ. First, Unicode in Python 2 and 3 ******************************** In Python < 3, a ``str`` object is really a C string with some sugar - a specific series of bytes with some fun methods like ``endswith()`` and ``split()``. In 2.0, the ``unicode`` object was added, which handles different methods of encoding. In Python 3, however, the meaning of ``str`` changes. A ``str`` in Python 3 is a full unicode object, with encoding and everything. If you want a C string with some sugar, there is a new object called ``bytes``, that behaves much like the 2.x ``str``. The idea is that for a user, a string is a series of *characters*, not a series of bytes. For simple ascii, the two are interchangeable, but if you consider accents and non-Latin characters, then the character meaning of byte sequences can be ambiguous, since it depends on the encoding scheme. They decided to avoid the ambiguity by forcing users who want the actual bytes to specify the encoding every time they want to convert a string to bytes. That way, users are aware of the difference between a series of bytes and a collection of characters, and don't confuse the two, as happens in Python 2.x. The problems (on both sides) come from the fact that regardless of the language design, users are mostly going to use ``str`` objects to represent collections of characters, and the behavior of that object is dramatically different in certain aspects between the 2.x ``bytes`` approach and the 3.x ``unicode`` approach. The ``unicode`` approach has the advantage of removing byte ambiguity - it's a list of characters, not bytes. However, if you really do want the bytes, it's very inefficient to get them. The ``bytes`` approach has the advantage of efficiency. A ``bytes`` object really is just a char* pointer with some methods to be used on it, so when interacting with, so interacting with C code, etc is highly efficient and straightforward. However, understanding a bytes object as a string with extended characters introduces ambiguity and possibly confusion. To avoid ambiguity, hereafter we will refer to encoded C arrays as 'bytes' and abstract unicode objects as 'strings'. 
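A short illustration of that distinction, using only standard Python behavior (a sketch, nothing pyzmq-specific):

.. sourcecode:: python

    s = u'héllo'               # a string: a sequence of characters
    b = s.encode('utf-8')      # bytes: the utf-8 encoded representation
    assert isinstance(b, bytes)
    assert b.decode('utf-8') == s

    # the same bytes mean different characters under a different encoding,
    # which is exactly the ambiguity an explicit encoding argument resolves
    b.decode('latin-1')        # u'hÃ©llo'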
Unicode Buffers --------------- Since unicode objects have a wide range of representations, they are not stored as the bytes according to their encoding, but rather in a format called UCS (an older fixed-width Unicode format). On some platforms (OS X, Windows), the storage is UCS-2, which is 2 bytes per character. On most \*ix systems, it is UCS-4, or 4 bytes per character. The contents of the *buffer* of a ``unicode`` object are not encoding dependent (always UCS-2 or UCS-4), but they are *platform* dependent. As a result of this, and the further insistence on not interpreting ``unicode`` objects as bytes without specifying encoding, ``str`` objects in Python 3 don't even provide the buffer interface. You simply cannot get the raw bytes of a ``unicode`` object without specifying the encoding for the bytes. In Python 2.x, you can get to the raw buffer, but the platform dependence and the fact that the encoding of the buffer is not the encoding of the object makes it very confusing, so this is probably a good move. The efficiency problem here comes from the fact that simple ascii strings are 4x as big in memory as they need to be (on most Linux, 2x on other platforms). Also, to translate to/from C code that works with char*, you always have to copy data and encode/decode the bytes. This really is horribly inefficient from a memory standpoint. Essentially, Where memory efficiency matters to you, you should never ever use strings; use bytes. The problem is that users will almost always use ``str``, and in 2.x they are efficient, but in 3.x they are not. We want to make sure that we don't help the user make this mistake, so we ensure that zmq methods don't try to hide what strings really are. What This Means for PyZMQ ************************* PyZMQ is a wrapper for a C library, so it really should use bytes, since a string is not a simple wrapper for ``char *`` like it used to be, but an abstract sequence of characters. The representations of bytes in Python are either the ``bytes`` object itself, or any object that provides the buffer interface (aka memoryview). In Python 2.x, unicode objects do provide the buffer interface, but as they do not in Python 3, where pyzmq requires bytes, we specifically reject unicode objects. The relevant methods here are ``socket.send/recv``, ``socket.get/setsockopt``, ``socket.bind/connect``. The important consideration for send/recv and set/getsockopt is that when you put in something, you really should get the same object back with its partner method. We can easily coerce unicode objects to bytes with send/setsockopt, but the problem is that the pair method of recv/getsockopt will always be bytes, and there should be symmetry. We certainly shouldn't try to always decode on the retrieval side, because if users just want bytes, then we are potentially using up enormous amounts of excess memory unnecessarily, due to copying and larger memory footprint of unicode strings. Still, we recognize the fact that users will quite frequently have unicode strings that they want to send, so we have added ``socket._string()`` wrappers. These methods simply wrap their bytes counterpart by encoding to/decoding from bytes around them, and they all take an `encoding` keyword argument that defaults to utf-8. Since encoding and decoding are necessary to translate between unicode and bytes, it is impossible to perform non-copying actions with these wrappers. 
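As a rough sketch of how the pairs line up in practice (illustrative only; ``req`` and ``rep`` stand in for any connected pair of sockets):

.. sourcecode:: python

    req.send(b"some bytes")       # bytes go over the wire unchanged
    msg = rep.recv()              # recv always returns bytes

    req.send_string(u"héllo")     # encoded to utf-8 bytes for you
    s = rep.recv_string()         # decoded back to a unicode string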
``socket.bind/connect`` methods are different from these, in that they are strictly setters and there is not corresponding getter method. As a result, we feel that we can safely coerce unicode objects to bytes (always to utf-8) in these methods. .. note:: For cross-language symmetry (including Python 3), the ``_unicode`` methods are now ``_string``. Many languages have a notion of native strings, and the use of ``_unicode`` was wedded too closely to the name of such objects in Python 2. For the time being, anywhere you see ``_string``, ``_unicode`` also works, and is the only option in pyzmq ≤ 2.1.11. The Methods ----------- Overview of the relevant methods: .. py:function:: socket.bind(self, addr) `addr` is ``bytes`` or ``unicode``. If ``unicode``, encoded to utf-8 ``bytes`` .. py:function:: socket.connect(self, addr) `addr` is ``bytes`` or ``unicode``. If ``unicode``, encoded to utf-8 ``bytes`` .. py:function:: socket.send(self, object obj, flags=0, copy=True) `obj` is ``bytes`` or provides buffer interface. if `obj` is ``unicode``, raise ``TypeError`` .. py:function:: socket.recv(self, flags=0, copy=True) returns ``bytes`` if `copy=True` returns ``zmq.Message`` if `copy=False`: `message.buffer` is a buffer view of the ``bytes`` `str(message)` provides the ``bytes`` `unicode(message)` decodes `message.buffer` with utf-8 .. py:function:: socket.send_string(self, unicode s, flags=0, encoding='utf-8') takes a ``unicode`` string `s`, and sends the ``bytes`` after encoding without an extra copy, via: `socket.send(s.encode(encoding), flags, copy=False)` .. py:function:: socket.recv_string(self, flags=0, encoding='utf-8') always returns ``unicode`` string there will be a ``UnicodeError`` if it cannot decode the buffer performs non-copying `recv`, and decodes the buffer with `encoding` .. py:function:: socket.setsockopt(self, opt, optval) only accepts ``bytes`` for `optval` (or ``int``, depending on `opt`) ``TypeError`` if ``unicode`` or anything else .. py:function:: socket.getsockopt(self, opt) returns ``bytes`` (or ``int``), never ``unicode`` .. py:function:: socket.setsockopt_string(self, opt, unicode optval, encoding='utf-8') accepts ``unicode`` string for `optval` encodes `optval` with `encoding` before passing the ``bytes`` to `setsockopt` .. py:function:: socket.getsockopt_string(self, opt, encoding='utf-8') always returns ``unicode`` string, after decoding with `encoding` note that `zmq.IDENTITY` is the only `sockopt` with a string value that can be queried with `getsockopt` pyzmq-16.0.2/docs/sphinxext/000077500000000000000000000000001301503633700157435ustar00rootroot00000000000000pyzmq-16.0.2/docs/sphinxext/apigen.py000066400000000000000000000376321301503633700175730ustar00rootroot00000000000000"""Attempt to generate templates for module reference with Sphinx XXX - we exclude extension modules To include extension modules, first identify them as valid in the ``_uri2path`` method, then handle them in the ``_parse_module`` script. We get functions and classes by parsing the text of .py files. Alternatively we could import the modules for discovery, and we'd have to do that for extension modules. This would involve changing the ``_parse_module`` method to work via import and introspection, and might involve changing ``discover_modules`` (which determines which files are modules, and therefore which module URIs will be passed to ``_parse_module``). NOTE: this is a modified version of a script originally shipped with the PyMVPA project, which we've adapted for NIPY use. 
PyMVPA is an MIT-licensed project.""" from __future__ import print_function import os import re class ApiDocWriter(object): ''' Class for automatic detection and parsing of API docs to Sphinx-parsable reST format''' # only separating first two levels rst_section_levels = ['*', '=', '-', '~', '^'] def __init__(self, package_name, rst_extension='.rst', package_skip_patterns=None, module_skip_patterns=None, ): ''' Initialize package for parsing Parameters ---------- package_name : string Name of the top-level package. *package_name* must be the name of an importable package rst_extension : string, optional Extension for reST files, default '.rst' package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving URIs of packages to be excluded Operates on the package path, starting at (including) the first dot in the package path, after *package_name* - so, if *package_name* is ``sphinx``, then ``sphinx.util`` will result in ``.util`` being passed for earching by these regexps. If is None, gives default. Default is: ['\.tests$'] module_skip_patterns : None or sequence Sequence of strings giving URIs of modules to be excluded Operates on the module name including preceding URI path, back to the first dot after *package_name*. For example ``sphinx.util.console`` results in the string to search of ``.util.console`` If is None, gives default. Default is: ['\.setup$', '\._'] ''' if package_skip_patterns is None: package_skip_patterns = ['\\.tests$'] if module_skip_patterns is None: module_skip_patterns = ['\\.setup$', '\\._'] self.package_name = package_name self.rst_extension = rst_extension self.package_skip_patterns = package_skip_patterns self.module_skip_patterns = module_skip_patterns def get_package_name(self): return self._package_name def set_package_name(self, package_name): ''' Set package_name >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> docwriter.root_path == sphinx.__path__[0] True >>> docwriter.package_name = 'docutils' >>> import docutils >>> docwriter.root_path == docutils.__path__[0] True ''' # It's also possible to imagine caching the module parsing here self._package_name = package_name self.root_module = __import__(package_name) self.root_path = self.root_module.__path__[0] self.written_modules = None package_name = property(get_package_name, set_package_name, None, 'get/set package_name') def _get_object_name(self, line): ''' Get second token in line >>> docwriter = ApiDocWriter('sphinx') >>> docwriter._get_object_name(" def func(): ") 'func' >>> docwriter._get_object_name(" class Klass(object): ") 'Klass' >>> docwriter._get_object_name(" class Klass: ") 'Klass' ''' if line.startswith('cdef'): line = line.split(None,1)[1] name = line.split()[1].split('(')[0].strip() # in case we have classes which are not derived from object # ie. 
old style classes return name.rstrip(':') def _uri2path(self, uri): ''' Convert uri to absolute filepath Parameters ---------- uri : string URI of python module to return path for Returns ------- path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI Examples -------- >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> modpath = sphinx.__path__[0] >>> res = docwriter._uri2path('sphinx.builder') >>> res == os.path.join(modpath, 'builder.py') True >>> res = docwriter._uri2path('sphinx') >>> res == os.path.join(modpath, '__init__.py') True >>> docwriter._uri2path('sphinx.does_not_exist') ''' if uri == self.package_name: return os.path.join(self.root_path, '__init__.py') path = uri.replace('.', os.path.sep) path = path.replace(self.package_name + os.path.sep, '') path = os.path.join(self.root_path, path) # XXX maybe check for extensions as well? if os.path.exists(path + '.py'): # file path += '.py' elif os.path.exists(path + '.pyx'): # file path += '.pyx' elif os.path.exists(os.path.join(path, '__init__.py')): path = os.path.join(path, '__init__.py') else: return None return path def _path2uri(self, dirpath): ''' Convert directory path to uri ''' relpath = dirpath.replace(self.root_path, self.package_name) if relpath.startswith(os.path.sep): relpath = relpath[1:] return relpath.replace(os.path.sep, '.') def _parse_module(self, uri): ''' Parse module defined in *uri* ''' filename = self._uri2path(uri) if filename is None: # nothing that we could handle here. return ([],[]) f = open(filename, 'rt') functions, classes = self._parse_lines(f) f.close() return functions, classes def _parse_lines(self, linesource): ''' Parse lines of text for functions and classes ''' functions = [] classes = [] for line in linesource: if line.startswith('def ') and line.count('('): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): functions.append(name) elif line.startswith('class '): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): classes.append(name) elif line.startswith('cpdef ') and line.count('('): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): functions.append(name) elif line.startswith('cdef class '): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): classes.append(name) else: pass functions.sort() classes.sort() return functions, classes def generate_api_doc(self, uri): '''Make autodoc documentation template string for a module Parameters ---------- uri : string python location of module - e.g 'sphinx.builder' Returns ------- S : string Contents of API doc ''' # get the names of all classes and functions functions, classes = self._parse_module(uri) if not len(functions) and not len(classes): print('WARNING: Empty -', uri) return '' # Make a shorter version of the uri that omits the package name for # titles uri_short = re.sub(r'^%s\.' % self.package_name,'',uri) ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' chap_title = uri_short ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title) + '\n\n') # Set the chapter title to read 'module' for all modules except for the # main packages if '.' in uri: title = 'Module: :mod:`' + uri_short + '`' else: title = ':mod:`' + uri_short + '`' ad += title + '\n' + self.rst_section_levels[2] * len(title) # if len(classes): # ad += '\nInheritance diagram for ``%s``:\n\n' % uri # ad += '.. 
inheritance-diagram:: %s \n' % uri # ad += ' :parts: 3\n' ad += '\n.. automodule:: ' + uri + '\n' ad += '\n.. currentmodule:: ' + uri + '\n' multi_class = len(classes) > 1 multi_fx = len(functions) > 1 if multi_class: ad += '\n' + 'Classes' + '\n' + \ self.rst_section_levels[2] * 7 + '\n' elif len(classes) and multi_fx: ad += '\n' + 'Class' + '\n' + \ self.rst_section_levels[2] * 5 + '\n' for c in classes: ad += '\n:class:`' + c + '`\n' \ + self.rst_section_levels[multi_class + 2 ] * \ (len(c)+9) + '\n\n' ad += '\n.. autoclass:: ' + c + '\n' # must NOT exclude from index to keep cross-refs working ad += ' :members:\n' \ ' :undoc-members:\n' \ ' :inherited-members:\n' \ '\n' # skip class.__init__() # ' .. automethod:: __init__\n' if multi_fx: ad += '\n' + 'Functions' + '\n' + \ self.rst_section_levels[2] * 9 + '\n\n' elif len(functions) and multi_class: ad += '\n' + 'Function' + '\n' + \ self.rst_section_levels[2] * 8 + '\n\n' for f in functions: # must NOT exclude from index to keep cross-refs working ad += '\n.. autofunction:: ' + uri + '.' + f + '\n\n' return ad def _survives_exclude(self, matchstr, match_type): ''' Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples -------- >>> dw = ApiDocWriter('sphinx') >>> dw._survives_exclude('sphinx.okpkg', 'package') True >>> dw.package_skip_patterns.append('^\\.badpkg$') >>> dw._survives_exclude('sphinx.badpkg', 'package') False >>> dw._survives_exclude('sphinx.badpkg', 'module') True >>> dw._survives_exclude('sphinx.badmod', 'module') True >>> dw.module_skip_patterns.append('^\\.badmod$') >>> dw._survives_exclude('sphinx.badmod', 'module') False ''' if match_type == 'module': patterns = self.module_skip_patterns elif match_type == 'package': patterns = self.package_skip_patterns else: raise ValueError('Cannot interpret match type "%s"' % match_type) # Match to URI without package name L = len(self.package_name) if matchstr[:L] == self.package_name: matchstr = matchstr[L:] for pat in patterns: try: pat.search except AttributeError: pat = re.compile(pat) if pat.search(matchstr): return False return True def discover_modules(self): ''' Return module sequence discovered from ``self.package_name`` Parameters ---------- None Returns ------- mods : sequence Sequence of module names within ``self.package_name`` Examples -------- >>> dw = ApiDocWriter('sphinx') >>> mods = dw.discover_modules() >>> 'sphinx.util' in mods True >>> dw.package_skip_patterns.append('\.util$') >>> 'sphinx.util' in dw.discover_modules() False >>> ''' modules = [self.package_name] # raw directory parsing for dirpath, dirnames, filenames in os.walk(self.root_path): # Check directory names for packages root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) for dirname in dirnames[:]: # copy list - we modify inplace package_uri = '.'.join((root_uri, dirname)) if (self._uri2path(package_uri) and self._survives_exclude(package_uri, 'package')): modules.append(package_uri) else: dirnames.remove(dirname) # Check filenames for modules for filename in filenames: module_name = filename[:-3] module_uri = '.'.join((root_uri, module_name)) if (self._uri2path(module_uri) and self._survives_exclude(module_uri, 'module')): modules.append(module_uri) return sorted(modules) def write_modules_api(self, modules,outdir): # write the list written_modules = [] for m in modules: api_str = self.generate_api_doc(m) if not api_str: continue # write out to file outfile = os.path.join(outdir, m + self.rst_extension) fileobj = 
open(outfile, 'wt') fileobj.write(api_str) fileobj.close() written_modules.append(m) self.written_modules = written_modules def write_api_docs(self, outdir): """Generate API reST files. Parameters ---------- outdir : string Directory name in which to store files We create automatic filenames for each module Returns ------- None Notes ----- Sets self.written_modules to list of written modules """ if not os.path.exists(outdir): os.mkdir(outdir) # compose list of modules modules = self.discover_modules() self.write_modules_api(modules,outdir) def write_index(self, outdir, froot='gen', relative_to=None): """Make a reST API index file from written files Parameters ---------- path : string Filename to write index to outdir : string Directory to which to write generated index file froot : string, optional root (filename without extension) of filename to write to Defaults to 'gen'. We add ``self.rst_extension``. relative_to : string path to which written filenames are relative. This component of the written file path will be removed from outdir, in the generated index. Default is None, meaning, leave path as it is. """ if self.written_modules is None: raise ValueError('No modules written') # Get full filename path path = os.path.join(outdir, froot+self.rst_extension) # Path written into index is relative to rootpath if relative_to is not None: relpath = outdir.replace(relative_to + os.path.sep, '') else: relpath = outdir idx = open(path,'wt') w = idx.write w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n') w('.. toctree::\n\n') for f in self.written_modules: w(' %s\n' % os.path.join(relpath,f)) idx.close() pyzmq-16.0.2/docs/sphinxext/docscrape.py000066400000000000000000000347661301503633700203000ustar00rootroot00000000000000"""Extract reference documentation from the NumPy source tree. """ import inspect import textwrap import re import pydoc from StringIO import StringIO from warnings import warn 4 class Reader(object): """A line-based string reader. """ def __init__(self, data): """ Parameters ---------- data : str String with lines separated by '\n'. 
""" if isinstance(data,list): self._str = data else: self._str = data.split('\n') # store string as list of lines self.reset() def __getitem__(self, n): return self._str[n] def reset(self): self._l = 0 # current line nr def read(self): if not self.eof(): out = self[self._l] self._l += 1 return out else: return '' def seek_next_non_empty_line(self): for l in self[self._l:]: if l.strip(): break else: self._l += 1 def eof(self): return self._l >= len(self._str) def read_to_condition(self, condition_func): start = self._l for line in self[start:]: if condition_func(line): return self[start:self._l] self._l += 1 if self.eof(): return self[start:self._l+1] return [] def read_to_next_empty_line(self): self.seek_next_non_empty_line() def is_empty(line): return not line.strip() return self.read_to_condition(is_empty) def read_to_next_unindented_line(self): def is_unindented(line): return (line.strip() and (len(line.lstrip()) == len(line))) return self.read_to_condition(is_unindented) def peek(self,n=0): if self._l + n < len(self._str): return self[self._l + n] else: return '' def is_empty(self): return not ''.join(self._str).strip() class NumpyDocString(object): def __init__(self,docstring): docstring = textwrap.dedent(docstring).split('\n') self._doc = Reader(docstring) self._parsed_data = { 'Signature': '', 'Summary': [''], 'Extended Summary': [], 'Parameters': [], 'Returns': [], 'Raises': [], 'Warns': [], 'Other Parameters': [], 'Attributes': [], 'Methods': [], 'See Also': [], 'Notes': [], 'Warnings': [], 'References': '', 'Examples': '', 'index': {} } self._parse() def __getitem__(self,key): return self._parsed_data[key] def __setitem__(self,key,val): if not self._parsed_data.has_key(key): warn("Unknown section %s" % key) else: self._parsed_data[key] = val def _is_at_section(self): self._doc.seek_next_non_empty_line() if self._doc.eof(): return False l1 = self._doc.peek().strip() # e.g. Parameters if l1.startswith('.. 
index::'): return True l2 = self._doc.peek(1).strip() # ---------- or ========== return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) def _strip(self,doc): i = 0 j = 0 for i,line in enumerate(doc): if line.strip(): break for j,line in enumerate(doc[::-1]): if line.strip(): break return doc[i:len(doc)-j] def _read_to_next_section(self): section = self._doc.read_to_next_empty_line() while not self._is_at_section() and not self._doc.eof(): if not self._doc.peek(-1).strip(): # previous line was empty section += [''] section += self._doc.read_to_next_empty_line() return section def _read_sections(self): while not self._doc.eof(): data = self._read_to_next_section() name = data[0].strip() if name.startswith('..'): # index section yield name, data[1:] elif len(data) < 2: yield StopIteration else: yield name, self._strip(data[2:]) def _parse_param_list(self,content): r = Reader(content) params = [] while not r.eof(): header = r.read().strip() if ' : ' in header: arg_name, arg_type = header.split(' : ')[:2] else: arg_name, arg_type = header, '' desc = r.read_to_next_unindented_line() desc = dedent_lines(desc) params.append((arg_name,arg_type,desc)) return params _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) def _parse_see_also(self, content): """ func_name : Descriptive text continued text another_func_name : Descriptive text func_name1, func_name2, :meth:`func_name`, func_name3 """ items = [] def parse_item_name(text): """Match ':role:`name`' or 'name'""" m = self._name_rgx.match(text) if m: g = m.groups() if g[1] is None: return g[3], None else: return g[2], g[1] raise ValueError("%s is not a item name" % text) def push_item(name, rest): if not name: return name, role = parse_item_name(name) items.append((name, list(rest), role)) del rest[:] current_func = None rest = [] for line in content: if not line.strip(): continue m = self._name_rgx.match(line) if m and line[m.end():].strip().startswith(':'): push_item(current_func, rest) current_func, line = line[:m.end()], line[m.end():] rest = [line.split(':', 1)[1].strip()] if not rest[0]: rest = [] elif not line.startswith(' '): push_item(current_func, rest) current_func = None if ',' in line: for func in line.split(','): push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: rest.append(line.strip()) push_item(current_func, rest) return items def _parse_index(self, section, content): """ .. 
index: default :refguide: something, else, and more """ def strip_each_in(lst): return [s.strip() for s in lst] out = {} section = section.split('::') if len(section) > 1: out['default'] = strip_each_in(section[1].split(','))[0] for line in content: line = line.split(':') if len(line) > 2: out[line[1]] = strip_each_in(line[2].split(',')) return out def _parse_summary(self): """Grab signature (if given) and summary""" if self._is_at_section(): return summary = self._doc.read_to_next_empty_line() summary_str = " ".join([s.strip() for s in summary]).strip() if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): self['Signature'] = summary_str if not self._is_at_section(): self['Summary'] = self._doc.read_to_next_empty_line() else: self['Summary'] = summary if not self._is_at_section(): self['Extended Summary'] = self._read_to_next_section() def _parse(self): self._doc.reset() self._parse_summary() for (section,content) in self._read_sections(): if not section.startswith('..'): section = ' '.join([s.capitalize() for s in section.split(' ')]) if section in ('Parameters', 'Attributes', 'Methods', 'Returns', 'Raises', 'Warns'): self[section] = self._parse_param_list(content) elif section.startswith('.. index::'): self['index'] = self._parse_index(section, content) elif section == 'See Also': self['See Also'] = self._parse_see_also(content) else: self[section] = content # string conversion routines def _str_header(self, name, symbol='-'): return [name, len(name)*symbol] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): if self['Signature']: return [self['Signature'].replace('*','\*')] + [''] else: return [''] def _str_summary(self): if self['Summary']: return self['Summary'] + [''] else: return [] def _str_extended_summary(self): if self['Extended Summary']: return self['Extended Summary'] + [''] else: return [] def _str_param_list(self, name): out = [] if self[name]: out += self._str_header(name) for param,param_type,desc in self[name]: out += ['%s : %s' % (param, param_type)] out += self._str_indent(desc) out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += self[name] out += [''] return out def _str_see_also(self, func_role): if not self['See Also']: return [] out = [] out += self._str_header("See Also") last_had_desc = True for func, desc, role in self['See Also']: if role: link = ':%s:`%s`' % (role, func) elif func_role: link = ':%s:`%s`' % (func_role, func) else: link = "`%s`_" % func if desc or last_had_desc: out += [''] out += [link] else: out[-1] += ", %s" % link if desc: out += self._str_indent([' '.join(desc)]) last_had_desc = True else: last_had_desc = False out += [''] return out def _str_index(self): idx = self['index'] out = [] out += ['.. 
index:: %s' % idx.get('default','')] for section, references in idx.iteritems(): if section == 'default': continue out += [' :%s: %s' % (section, ', '.join(references))] return out def __str__(self, func_role=''): out = [] out += self._str_signature() out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters','Returns','Raises'): out += self._str_param_list(param_list) out += self._str_section('Warnings') out += self._str_see_also(func_role) for s in ('Notes','References','Examples'): out += self._str_section(s) out += self._str_index() return '\n'.join(out) def indent(str,indent=4): indent_str = ' '*indent if str is None: return indent_str lines = str.split('\n') return '\n'.join(indent_str + l for l in lines) def dedent_lines(lines): """Deindent a list of lines maximally""" return textwrap.dedent("\n".join(lines)).split("\n") def header(text, style='-'): return text + '\n' + style*len(text) + '\n' class FunctionDoc(NumpyDocString): def __init__(self, func, role='func', doc=None): self._f = func self._role = role # e.g. "func" or "meth" if doc is None: doc = inspect.getdoc(func) or '' try: NumpyDocString.__init__(self, doc) except ValueError, e: print '*'*78 print "ERROR: '%s' while parsing `%s`" % (e, self._f) print '*'*78 #print "Docstring follows:" #print doclines #print '='*78 if not self['Signature']: func, func_name = self.get_func() try: # try to read signature argspec = inspect.getargspec(func) argspec = inspect.formatargspec(*argspec) argspec = argspec.replace('*','\*') signature = '%s%s' % (func_name, argspec) except TypeError, e: signature = '%s()' % func_name self['Signature'] = signature def get_func(self): func_name = getattr(self._f, '__name__', self.__class__.__name__) if inspect.isclass(self._f): func = getattr(self._f, '__call__', self._f.__init__) else: func = self._f return func, func_name def __str__(self): out = '' func, func_name = self.get_func() signature = self['Signature'].replace('*', '\*') roles = {'func': 'function', 'meth': 'method'} if self._role: if not roles.has_key(self._role): print "Warning: invalid role %s" % self._role out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), func_name) out += super(FunctionDoc, self).__str__(func_role=self._role) return out class ClassDoc(NumpyDocString): def __init__(self,cls,modulename='',func_doc=FunctionDoc,doc=None): if not inspect.isclass(cls): raise ValueError("Initialise using a class. Got %r" % cls) self._cls = cls if modulename and not modulename.endswith('.'): modulename += '.' self._mod = modulename self._name = cls.__name__ self._func_doc = func_doc if doc is None: doc = pydoc.getdoc(cls) NumpyDocString.__init__(self, doc) @property def methods(self): return [name for name,func in inspect.getmembers(self._cls) if not name.startswith('_') and callable(func)] def __str__(self): out = '' out += super(ClassDoc, self).__str__() out += "\n\n" #for m in self.methods: # print "Parsing `%s`" % m # out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n' # out += '.. index::\n single: %s; %s\n\n' % (self._name, m) return out pyzmq-16.0.2/docs/sphinxext/docscrape_sphinx.py000066400000000000000000000102071301503633700216510ustar00rootroot00000000000000import re, inspect, textwrap, pydoc from docscrape import NumpyDocString, FunctionDoc, ClassDoc class SphinxDocString(NumpyDocString): # string conversion routines def _str_header(self, name, symbol='`'): return ['.. 
rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param,param_type,desc in self[name]: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) out += [''] out += self._str_indent(desc,8) out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. index:: %s' % idx.get('default','')] for section, references in idx.iteritems(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] return out def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Attributes', 'Methods', 'Returns','Raises'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_section('Examples') out = self._str_indent(out,indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): pass class SphinxClassDoc(SphinxDocString, ClassDoc): pass def get_doc_object(obj, what=None, doc=None): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif callable(obj): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc, doc=doc) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, '', doc=doc) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxDocString(doc) pyzmq-16.0.2/docs/sphinxext/inheritance_diagram.py000066400000000000000000000325201301503633700222740ustar00rootroot00000000000000""" Defines a docutils directive for inserting inheritance diagrams. Provide the directive with one or more classes or modules (separated by whitespace). For modules, all of the classes in that module will be used. Example:: Given the following classes: class A: pass class B(A): pass class C(A): pass class D(B, C): pass class E(B): pass .. inheritance-diagram: D E Produces a graph like the following: A / \ B C / \ / E D The graph is inserted as a PNG+image map into HTML and a PDF in LaTeX. 
""" import inspect import os import re import subprocess try: from hashlib import md5 except ImportError: from md5 import md5 from docutils.nodes import Body, Element from docutils.parsers.rst import directives from sphinx.roles import xfileref_role def my_import(name): """Module importer - taken from the python documentation. This function allows importing names with dots in them.""" mod = __import__(name) components = name.split('.') for comp in components[1:]: mod = getattr(mod, comp) return mod class DotException(Exception): pass class InheritanceGraph(object): """ Given a list of classes, determines the set of classes that they inherit from all the way to the root "object", and then is able to generate a graphviz dot graph from them. """ def __init__(self, class_names, show_builtins=False): """ *class_names* is a list of child classes to show bases from. If *show_builtins* is True, then Python builtins will be shown in the graph. """ self.class_names = class_names self.classes = self._import_classes(class_names) self.all_classes = self._all_classes(self.classes) if len(self.all_classes) == 0: raise ValueError("No classes found for inheritance diagram") self.show_builtins = show_builtins py_sig_re = re.compile(r'''^([\w.]*\.)? # class names (\w+) \s* $ # optionally arguments ''', re.VERBOSE) def _import_class_or_module(self, name): """ Import a class using its fully-qualified *name*. """ try: path, base = self.py_sig_re.match(name).groups() except: raise ValueError( "Invalid class or module '%s' specified for inheritance diagram" % name) fullname = (path or '') + base path = (path and path.rstrip('.')) if not path: path = base try: module = __import__(path, None, None, []) # We must do an import of the fully qualified name. Otherwise if a # subpackage 'a.b' is requested where 'import a' does NOT provide # 'a.b' automatically, then 'a.b' will not be found below. This # second call will force the equivalent of 'import a.b' to happen # after the top-level import above. my_import(fullname) except ImportError: raise ValueError( "Could not import class or module '%s' specified for inheritance diagram" % name) try: todoc = module for comp in fullname.split('.')[1:]: todoc = getattr(todoc, comp) except AttributeError: raise ValueError( "Could not find class or module '%s' specified for inheritance diagram" % name) # If a class, just return it if inspect.isclass(todoc): return [todoc] elif inspect.ismodule(todoc): classes = [] for cls in todoc.__dict__.values(): if inspect.isclass(cls) and cls.__module__ == todoc.__name__: classes.append(cls) return classes raise ValueError( "'%s' does not resolve to a class or module" % name) def _import_classes(self, class_names): """ Import a list of classes. """ classes = [] for name in class_names: classes.extend(self._import_class_or_module(name)) return classes def _all_classes(self, classes): """ Return a list of all classes that are ancestors of *classes*. """ all_classes = {} def recurse(cls): all_classes[cls] = None for c in cls.__bases__: if c not in all_classes: recurse(c) for cls in classes: recurse(cls) return all_classes.keys() def class_name(self, cls, parts=0): """ Given a class object, return a fully-qualified name. This works for things I've tested in matplotlib so far, but may not be completely general. 
""" module = cls.__module__ if module == '__builtin__': fullname = cls.__name__ else: fullname = "%s.%s" % (module, cls.__name__) if parts == 0: return fullname name_parts = fullname.split('.') return '.'.join(name_parts[-parts:]) def get_all_class_names(self): """ Get all of the class names involved in the graph. """ return [self.class_name(x) for x in self.all_classes] # These are the default options for graphviz default_graph_options = { "rankdir": "LR", "size": '"8.0, 12.0"' } default_node_options = { "shape": "box", "fontsize": 10, "height": 0.25, "fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans", "style": '"setlinewidth(0.5)"' } default_edge_options = { "arrowsize": 0.5, "style": '"setlinewidth(0.5)"' } def _format_node_options(self, options): return ','.join(["%s=%s" % x for x in options.items()]) def _format_graph_options(self, options): return ''.join(["%s=%s;\n" % x for x in options.items()]) def generate_dot(self, fd, name, parts=0, urls={}, graph_options={}, node_options={}, edge_options={}): """ Generate a graphviz dot graph from the classes that were passed in to __init__. *fd* is a Python file-like object to write to. *name* is the name of the graph *urls* is a dictionary mapping class names to http urls *graph_options*, *node_options*, *edge_options* are dictionaries containing key/value pairs to pass on as graphviz properties. """ g_options = self.default_graph_options.copy() g_options.update(graph_options) n_options = self.default_node_options.copy() n_options.update(node_options) e_options = self.default_edge_options.copy() e_options.update(edge_options) fd.write('digraph %s {\n' % name) fd.write(self._format_graph_options(g_options)) for cls in self.all_classes: if not self.show_builtins and cls in __builtins__.values(): continue name = self.class_name(cls, parts) # Write the node this_node_options = n_options.copy() url = urls.get(self.class_name(cls)) if url is not None: this_node_options['URL'] = '"%s"' % url fd.write(' "%s" [%s];\n' % (name, self._format_node_options(this_node_options))) # Write the edges for base in cls.__bases__: if not self.show_builtins and base in __builtins__.values(): continue base_name = self.class_name(base, parts) fd.write(' "%s" -> "%s" [%s];\n' % (base_name, name, self._format_node_options(e_options))) fd.write('}\n') def run_dot(self, args, name, parts=0, urls={}, graph_options={}, node_options={}, edge_options={}): """ Run graphviz 'dot' over this graph, returning whatever 'dot' writes to stdout. *args* will be passed along as commandline arguments. *name* is the name of the graph *urls* is a dictionary mapping class names to http urls Raises DotException for any of the many os and installation-related errors that may occur. """ try: dot = subprocess.Popen(['dot'] + list(args), stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True) except OSError: raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?") except ValueError: raise DotException("'dot' called with invalid arguments") except: raise DotException("Unexpected error calling 'dot'") self.generate_dot(dot.stdin, name, parts, urls, graph_options, node_options, edge_options) dot.stdin.close() result = dot.stdout.read() returncode = dot.wait() if returncode != 0: raise DotException("'dot' returned the errorcode %d" % returncode) return result class inheritance_diagram(Body, Element): """ A docutils node to use as a placeholder for the inheritance diagram. 
""" pass def inheritance_diagram_directive(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): """ Run when the inheritance_diagram directive is first encountered. """ node = inheritance_diagram() class_names = arguments # Create a graph starting with the list of classes graph = InheritanceGraph(class_names) # Create xref nodes for each target of the graph's image map and # add them to the doc tree so that Sphinx can resolve the # references to real URLs later. These nodes will eventually be # removed from the doctree after we're done with them. for name in graph.get_all_class_names(): refnodes, x = xfileref_role( 'class', ':class:`%s`' % name, name, 0, state) node.extend(refnodes) # Store the graph object so we can use it to generate the # dot file later node['graph'] = graph # Store the original content for use as a hash node['parts'] = options.get('parts', 0) node['content'] = " ".join(class_names) return [node] def get_graph_hash(node): return md5(node['content'] + str(node['parts'])).hexdigest()[-10:] def html_output_graph(self, node): """ Output the graph for HTML. This will insert a PNG with clickable image map. """ graph = node['graph'] parts = node['parts'] graph_hash = get_graph_hash(node) name = "inheritance%s" % graph_hash path = '_images' dest_path = os.path.join(setup.app.builder.outdir, path) if not os.path.exists(dest_path): os.makedirs(dest_path) png_path = os.path.join(dest_path, name + ".png") path = setup.app.builder.imgpath # Create a mapping from fully-qualified class names to URLs. urls = {} for child in node: if child.get('refuri') is not None: urls[child['reftitle']] = child.get('refuri') elif child.get('refid') is not None: urls[child['reftitle']] = '#' + child.get('refid') # These arguments to dot will save a PNG file to disk and write # an HTML image map to stdout. image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'], name, parts, urls) return ('%s' % (path, name, name, image_map)) def latex_output_graph(self, node): """ Output the graph for LaTeX. This will insert a PDF. """ graph = node['graph'] parts = node['parts'] graph_hash = get_graph_hash(node) name = "inheritance%s" % graph_hash dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images')) if not os.path.exists(dest_path): os.makedirs(dest_path) pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf")) graph.run_dot(['-Tpdf', '-o%s' % pdf_path], name, parts, graph_options={'size': '"6.0,6.0"'}) return '\n\\includegraphics{%s}\n\n' % pdf_path def visit_inheritance_diagram(inner_func): """ This is just a wrapper around html/latex_output_graph to make it easier to handle errors and insert warnings. 
""" def visitor(self, node): try: content = inner_func(self, node) except DotException, e: # Insert the exception as a warning in the document warning = self.document.reporter.warning(str(e), line=node.line) warning.parent = node node.children = [warning] else: source = self.document.attributes['source'] self.body.append(content) node.children = [] return visitor def do_nothing(self, node): pass def setup(app): setup.app = app setup.confdir = app.confdir app.add_node( inheritance_diagram, latex=(visit_inheritance_diagram(latex_output_graph), do_nothing), html=(visit_inheritance_diagram(html_output_graph), do_nothing)) app.add_directive( 'inheritance-diagram', inheritance_diagram_directive, False, (1, 100, 0), parts = directives.nonnegative_int) pyzmq-16.0.2/docs/sphinxext/ipython_console_highlighting.py000066400000000000000000000101301301503633700242510ustar00rootroot00000000000000"""reST directive for syntax-highlighting ipython interactive sessions. XXX - See what improvements can be made based on the new (as of Sept 2009) 'pycon' lexer for the python console. At the very least it will give better highlighted tracebacks. """ #----------------------------------------------------------------------------- # Needed modules # Standard library import re # Third party from pygments.lexer import Lexer, do_insertions from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer, PythonTracebackLexer) from pygments.token import Comment, Generic from sphinx import highlighting #----------------------------------------------------------------------------- # Global constants line_re = re.compile('.*?\n') #----------------------------------------------------------------------------- # Code begins - classes and functions class IPythonConsoleLexer(Lexer): """ For IPython console output or doctests, such as: .. sourcecode:: ipython In [1]: a = 'foo' In [2]: a Out[2]: 'foo' In [3]: print a foo In [4]: 1 / 0 Notes: - Tracebacks are not currently supported. - It assumes the default IPython prompts, not customized ones. """ name = 'IPython console session' aliases = ['ipython'] mimetypes = ['text/x-ipython-console'] input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)") output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)") continue_prompt = re.compile(" \.\.\.+:") tb_start = re.compile("\-+") def get_tokens_unprocessed(self, text): pylexer = PythonLexer(**self.options) tblexer = PythonTracebackLexer(**self.options) curcode = '' insertions = [] for match in line_re.finditer(text): line = match.group() input_prompt = self.input_prompt.match(line) continue_prompt = self.continue_prompt.match(line.rstrip()) output_prompt = self.output_prompt.match(line) if line.startswith("#"): insertions.append((len(curcode), [(0, Comment, line)])) elif input_prompt is not None: insertions.append((len(curcode), [(0, Generic.Prompt, input_prompt.group())])) curcode += line[input_prompt.end():] elif continue_prompt is not None: insertions.append((len(curcode), [(0, Generic.Prompt, continue_prompt.group())])) curcode += line[continue_prompt.end():] elif output_prompt is not None: # Use the 'error' token for output. We should probably make # our own token, but error is typically in a bright color like # red, so it works fine for our output prompts. 
insertions.append((len(curcode), [(0, Generic.Error, output_prompt.group())])) curcode += line[output_prompt.end():] else: if curcode: for item in do_insertions(insertions, pylexer.get_tokens_unprocessed(curcode)): yield item curcode = '' insertions = [] yield match.start(), Generic.Output, line if curcode: for item in do_insertions(insertions, pylexer.get_tokens_unprocessed(curcode)): yield item def setup(app): """Setup as a sphinx extension.""" # This is only a lexer, so adding it below to pygments appears sufficient. # But if somebody knows that the right API usage should be to do that via # sphinx, by all means fix it here. At least having this setup.py # suppresses the sphinx warning we'd get without it. pass #----------------------------------------------------------------------------- # Register the extension as a valid pygments lexer highlighting.lexers['ipython'] = IPythonConsoleLexer() pyzmq-16.0.2/docs/sphinxext/sphinx_cython.py000066400000000000000000000075521301503633700212230ustar00rootroot00000000000000''' sphinx_cython.py This module monkeypatches sphinx autodoc to support Cython generated function signatures in the first line of the docstring of functions implemented as C extensions. Copyright (C) Nikolaus Rath This file is part of LLFUSE (http://python-llfuse.googlecode.com). LLFUSE can be distributed under the terms of the GNU LGPL. It has been slightly modified by MinRK. ''' import sphinx.ext.autodoc as SphinxAutodoc from sphinx.util.docstrings import prepare_docstring import inspect import re from sphinx.util import force_decode TYPE_RE = re.compile(r'(?:int|char)(?:\s+\*?\s*|\s*\*?\s+)([a-zA-Z_].*)') ClassDocumenter = SphinxAutodoc.ClassDocumenter MethodDocumenter = SphinxAutodoc.MethodDocumenter FunctionDocumenter = SphinxAutodoc.FunctionDocumenter class MyDocumenter(SphinxAutodoc.Documenter): ''' Overwrites `get_doc()` to remove function and method signatures and `format_args` to parse and give precedence to function signatures in the first line of the docstring. 
''' def get_doc(self, encoding=None): docstr = self.get_attr(self.object, '__doc__', None) if docstr: docstr = force_decode(docstr, encoding) myname = self.fullname[len(self.modname)+1:] if myname.endswith('()'): myname = myname[:-2] if (docstr and (myname + '(') in docstr and '\n' in docstr and docstr[docstr.index('\n')-1] == ')'): docstr = docstr[docstr.index('\n')+1:] if docstr: # make sure we have Unicode docstrings, then sanitize and split # into lines return [prepare_docstring(force_decode(docstr, encoding))] return [] def format_args(self): myname = self.fullname[len(self.modname)+1:] if myname.endswith('()'): myname = myname[:-2] # Try to parse docstring docstr = self.get_attr(self.object, '__doc__', None) if docstr: docstr = force_decode(docstr, 'utf-8') if (docstr and (myname + '(') in docstr and '\n' in docstr and docstr[docstr.index('\n')-1] == ')'): args = docstr[len(myname)+1:docstr.index('\n')-1] # Get rid of Cython style types declarations argl = [] for arg in [ x.strip() for x in args.split(',') ]: if (arg in ('cls', 'self') and isinstance(self, SphinxAutodoc.MethodDocumenter)): continue hit = TYPE_RE.match(arg) if hit: argl.append(hit.group(1)) else: argl.append(arg) args = '(%s)' % ', '.join(argl) else: # super seems to get this wrong: for cls in (MethodDocumenter, FunctionDocumenter, ClassDocumenter): if isinstance(self, cls): return cls.format_args(self) # return super(self.__class__, self).format_args() # escape backslashes for reST args = args.replace('\\', '\\\\') return args class MyFunctionDocumenter(MyDocumenter, SphinxAutodoc.FunctionDocumenter): pass class MyMethodDocumenter(MyDocumenter, SphinxAutodoc.MethodDocumenter): pass class MyClassDocumenter(MyDocumenter, SphinxAutodoc.ClassDocumenter): def format_signature(self): return self.format_args() or "()" SphinxAutodoc.ClassDocumenter = MyClassDocumenter SphinxAutodoc.MethodDocumenter = MyMethodDocumenter SphinxAutodoc.FunctionDocumenter = MyFunctionDocumenter # don't use AttributeDocumenter on 'method_descriptor' members: AD = SphinxAutodoc.AttributeDocumenter AD.method_types = tuple(list(AD.method_types) + [type(str.count)]) pyzmq-16.0.2/docs/update_ghpages.sh000077500000000000000000000016241301503633700172330ustar00rootroot00000000000000#!/usr/bin/env sh # pick repo for gh-pages branch repo=origin if [ ! -d gh-pages ]; then echo "setting up gh-pages subdir" mkdir gh-pages || exit -1 cp -r ../.git gh-pages/ || exit -1 cd gh-pages || exit -1 init=0 git checkout $repo/gh-pages || init=1 if [ "$init" != "0" ]; then echo "initializing gh-pages repo" git symbolic-ref HEAD refs/heads/gh-pages || exit -1 rm .git/index || exit -1 git clean -fdx || exit -1 touch index.html git add . git commit -a -m 'init gh-pages' || exit -1 git push origin HEAD:gh-pages fi cd .. fi echo "updating local gh-pages with html build" rsync -va build/html/ gh-pages/ --delete --exclude .git --exclude .nojekyll || exit -1 cd gh-pages touch .nojekyll git add .nojekyll git add . 
pyzmq-16.0.2/docs/update_ghpages.sh

#!/usr/bin/env sh

# pick repo for gh-pages branch
repo=origin

if [ ! -d gh-pages ]; then
    echo "setting up gh-pages subdir"
    mkdir gh-pages || exit -1
    cp -r ../.git gh-pages/ || exit -1
    cd gh-pages || exit -1
    init=0
    git checkout $repo/gh-pages || init=1
    if [ "$init" != "0" ]; then
        echo "initializing gh-pages repo"
        git symbolic-ref HEAD refs/heads/gh-pages || exit -1
        rm .git/index || exit -1
        git clean -fdx || exit -1
        touch index.html
        git add .
        git commit -a -m 'init gh-pages' || exit -1
        git push origin HEAD:gh-pages
    fi
    cd ..
fi

echo "updating local gh-pages with html build"
rsync -va build/html/ gh-pages/ --delete --exclude .git --exclude .nojekyll || exit -1

cd gh-pages
touch .nojekyll
git add .nojekyll
git add .
git commit -a || exit -1

echo "pushing to remote gh-pages"
# pwd
git push $repo HEAD:gh-pages

pyzmq-16.0.2/examples/

pyzmq-16.0.2/examples/LICENSE

PyZMQ examples are copyright their respective authors, and licensed under the
New BSD License as described in COPYING.BSD unless otherwise specified in the
file.

pyzmq-16.0.2/examples/README_PY3K

These examples use Python2 syntax.

Due to the change in Python from bytestring str objects to unicode str
objects, 2to3 does not perform an adequate transform of the code. Examples
can be valid on both Python2.5 and Python3, but such code is less readable
than it should be.

As a result, the Python3 examples are kept in a separate repo:

https://github.com/minrk/pyzmq-py3k-examples

The differences are very small, but important.
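For illustration of the str/bytes point above, a minimal sketch (not taken
from the examples; the endpoint is an assumption):

import zmq

ctx = zmq.Context()
push = ctx.socket(zmq.PUSH)
push.connect('tcp://127.0.0.1:5555')  # illustrative endpoint only

# Python 2: str is a bytestring, so  push.send("hello")  is enough.
# Python 3: str is unicode, so the message must be bytes, or sent via the
# helper that encodes it:
push.send(b"hello")
push.send_string("hello")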
pyzmq-16.0.2/examples/asyncio/

pyzmq-16.0.2/examples/asyncio/coroutines.py

"""Example using zmq with asyncio coroutines"""
# Copyright (c) PyZMQ Developers.
# This example is in the public domain (CC-0)

import time

import zmq
from zmq.asyncio import Context, Poller, ZMQEventLoop
import asyncio

url = 'tcp://127.0.0.1:5555'

loop = ZMQEventLoop()
asyncio.set_event_loop(loop)

ctx = Context()

@asyncio.coroutine
def ping():
    """print dots to indicate idleness"""
    while True:
        yield from asyncio.sleep(0.5)
        print('.')


@asyncio.coroutine
def receiver():
    """receive messages with polling"""
    pull = ctx.socket(zmq.PULL)
    pull.connect(url)
    poller = Poller()
    poller.register(pull, zmq.POLLIN)
    while True:
        events = yield from poller.poll()
        if pull in dict(events):
            print("recving", events)
            msg = yield from pull.recv_multipart()
            print('recvd', msg)


@asyncio.coroutine
def sender():
    """send a message every second"""
    tic = time.time()
    push = ctx.socket(zmq.PUSH)
    push.bind(url)
    while True:
        print("sending")
        yield from push.send_multipart([str(time.time() - tic).encode('ascii')])
        yield from asyncio.sleep(1)


loop.run_until_complete(asyncio.wait([
    ping(),
    receiver(),
    sender(),
]))

pyzmq-16.0.2/examples/asyncio/tornado_asyncio.py

"""Example showing ZMQ with asyncio and tornadoweb integration."""
# Copyright (c) PyZMQ Developers.
# This example is in the public domain (CC-0)

import asyncio
import zmq.asyncio

from tornado.ioloop import IOLoop
from tornado.platform.asyncio import AsyncIOMainLoop

# Tell asyncio to use zmq's eventloop
zmq.asyncio.install()
# Tell tornado to use asyncio
AsyncIOMainLoop().install()

# This must be instantiated after installing the IOLoop
queue = asyncio.Queue()
ctx = zmq.asyncio.Context()


async def pushing():
    server = ctx.socket(zmq.PUSH)
    server.bind('tcp://*:9000')
    while True:
        await server.send(b"Hello")
        await asyncio.sleep(1)


async def pulling():
    client = ctx.socket(zmq.PULL)
    client.connect('tcp://127.0.0.1:9000')
    while True:
        greeting = await client.recv()
        print(greeting)


def zmq_tornado_loop():
    loop = IOLoop.current()
    loop.spawn_callback(pushing)
    loop.spawn_callback(pulling)
    loop.start()


if __name__ == '__main__':
    zmq_tornado_loop()

pyzmq-16.0.2/examples/bench/

pyzmq-16.0.2/examples/bench/benchmark.py

from timeit import default_timer as timer

def benchmark(f, size, reps):
    # average time per call to f() with a message of `size` bytes, in microseconds
    msg = size*'0'
    t1 = timer()
    for i in range(reps):
        msg2 = f(msg)
        assert msg == msg2
    t2 = timer()
    diff = (t2-t1)
    latency = diff/reps
    return latency*1000000

kB = [1000*2**n for n in range(10)]
MB = [1000000*2**n for n in range(8)]
sizes = [1] + kB + MB

def benchmark_set(f, sizes, reps):
    # run benchmark() once per (size, rep) pair and collect the latencies
    latencies = []
    for size, rep in zip(sizes, reps):
        print "Running benchmark with %r reps of %r bytes" % (rep, size)
        lat = benchmark(f, size, rep)
        latencies.append(lat)
    return sizes, latencies

pyzmq-16.0.2/examples/bench/jsonrpc_client.py

from timeit import default_timer as timer
from jsonrpclib import Server

client = Server('http://localhost:10000')

pyzmq-16.0.2/examples/bench/jsonrpc_server.py

from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer

def echo(x):
    return x

server = SimpleJSONRPCServer(('localhost',10000))
server.register_function(echo)
server.serve_forever()

pyzmq-16.0.2/examples/bench/latency.png
[binary PNG image data omitted]
pyzmq-16.0.2/examples/bench/msgs_sec.png
[binary PNG image data omitted]

pyzmq-16.0.2/examples/bench/msgs_sec_log.png
[binary PNG image data omitted]

pyzmq-16.0.2/examples/bench/msgs_sec_ratio.png
[binary PNG image data omitted]
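The bench directory in this archive carries the result plots but no driver for
benchmark.py. A minimal sketch of how benchmark_set could be pointed at a zmq
echo service follows; the endpoint, the REP echo server it assumes to be
running, and the repetition counts are illustrative assumptions, not part of
the original benchmarks.

# Sketch only (assumes a REP echo server is already bound on port 10001;
# repetition counts are arbitrary). Python 2, to match benchmark.py above.
import zmq

from benchmark import benchmark_set, sizes

ctx = zmq.Context()
req = ctx.socket(zmq.REQ)
req.connect('tcp://127.0.0.1:10001')

def zmq_echo(msg):
    # round-trip one message through the echo server
    req.send(msg)
    return req.recv()

reps = [100] * len(sizes)
sizes_out, latencies = benchmark_set(zmq_echo, sizes, reps)
for size, lat in zip(sizes_out, latencies):
    print("%s bytes: %.1f us per echo" % (size, lat))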
6 i&l6~)Nruxu!qʙ5mVMq/}K]v&;M>~t5 :vtuDN6y'Gp}8WrժU+L@@@QáC2rdN:E@@QQQɎO0z+ٱXꫯ M?dž HW_eƌɎݻ7ݯ#+>s뀬֎:nɪu|-nmCױVY -+̙5'_4?I` M,VBz!Fw+SR8iѣM0_55f3tH0###.'3 ôl{衇L0̥KiM41 09W_}eav!ؖ-[L0&M^^^f6m޽ᅴׯ_7ׯoaV^8pꫯ%J0 0~yوރ""9Ӿ}k_YS_?o6ܼvs̈D844'M܍~f8 )Ǔ7oެIe׮]8pƣ> @>}زe SNeO2_~9E_[la… Ɏ|WtRl2dueԨQiӆ'{CysT y& ZRLmV|UC;־ .]hTR.,Q 8[4 4M /_@ʔ)Ü9s㏓ ?%KPrePݺuS3gà &$%J?$883gq}ҥaz(\>~<#Z*k->3j*SGyݻwSnݤ㱱VʈKPPAAANk;􊌄ΔCފ0 )Bz޽;1Ã={²e˒9spuzjߏ?xc ?e˖MqYf۷/ïM'|aqʙ5K~$q#+x?]fsQpyֽ^U#G!9# %44K.91I*VHzy`ĉܸq`gܸq<ثTYJL0 նgϞ|GL:5:u*^^^iR8O-5j'9ϨQAʙ5ʛ3t1uT~w S`A*VѣGR#yYp!#FpfȒ{]3q]jaذjִw$oWn\ՂV8;m]U3)Z /ΝK>,ӧOwuH""".>aZXܾ쮣輢3kaiVB'%"y>~BȓI:9:wLbb"W\aҥiI(Wv3k7)g/1ues[NmZyK4鹺'.&](M'YmV&M/ o^WG$P"" :Y9N9KLB`gn>Iu6sЮZ6p'Ͻ\8H`8`ĉ(g(oS4at=>7N741IMbS 9Z{7j> +V@|H,P i,K|:N9Fys\nϙi0q"Lb#=m!2xz Zg}ǃWk,Q i,qСЫ>FEȶF4A97ؿjU<<"""">Qźay;m7̠'97G^*ٗ-XIqȑ#]QάQ[s6f >|׏9{gw]_<>F!ҪÇiS(W֯d#,ƺ:Y9.7l$xMx]{b7'a+n>0UGؗ-Q6nE]8 do݄rfܖ??+uC0V^E;3S|(Rľ{⮎HHH6UDDDvͳO4>JݰZ^Xgz،ld^|(L*@!%"""i ž ֊{f6iɯڋOOؼJvuDI dRW16-ُ'wu͚5cNv?6lX=z_|Ԯ~1MKҶm[ʔ)ŋ~;k׮e5u.\punG9Fys\nʕб#Y$M.y,n?Zq挽HLw$hDܖa :D=JXX7o&22QFYwС}ٳ|A5R/`ϟ?iӦI|||t=ҪU+x"ׯgL05kPZ5KQ֭VrunE9Fys\NٺuС<}s NlMhӐ-%G쑷gG\lx$smDFFv9afKq|Νaa<Ã.]{n:tC-Y&k:'%ش TquDETHk.8fGO>L:5E)S/߶ߕ+WRxqVZElkݻw0d ̙֭34S\7|a4oTVRJqus۶""-ᡇ`j5?z?R /S oڿТ}݈xWG$YHг$6!([+34MBBB0MD~g{˗/@`` eʔaΜ9|{?`ɒ%T\MyL޼yYt)*Tcl*Tq̛7#G)X /ΝKqMLL 7o0 ʕ+w{+WhVgƌtaq9)g?c IDAT͛ڵʚ&wi6V/)U(vY+W੧;o>u<*@$*6ڑNN"k3[A"EWݻwcǎIm<<<ٳ'!!!,[^x9spuzfΝM62e4hzAǎYbO<k׮k׮y;=;j$ݛc>dq9%gQQд)/_~ Rn4۔r˱>x=|lyzZWXFvE0U 0gϞ|GL:5:u*^^^t5k_N`` +mH3}:G%!!}-Z: qq L>S#$t59N?͛G*UXxqJM=HLL$:::]۷FHƌC\\\؇JliȇŒ:q4&lzi%p~VܸaAq6lzgI# k4i҄￟ٳgsUV꜎[+^-ZիWS÷Qܸqƍ߱/ٳٲe ۷gs JR-&""Ct}#>o+O4ӘyfJ,@y^xժU* Uzo'N @ڵ7omUm۶m-[;>Ʋeh߾=TX֭[S|ybbbX~='N{eʕu][# UV: Y9sv3+{˿xK͝7ScL->aw5ǟPڛ$j+J4բVU[;F3v#v^U2RAqwQQ9wNr;y?+sy[MLڷ{CD*^^^ 6 㑑$$$`ccî]صk3eccףFP988on:-[oFll ]taڵвȴ]HfHn3ZfnݼNz#銉$%s*d1$4x]X:w{ÒDd'O( =zxbhCZ|1c0f̘ =J*}=zУGGi^{5v#G24KX%df>#ev玺`Tۧa i5xUh⹙L{ WC׮=0<.rS-@Yv-н{w.I!б#?Nhc&vY[n'f^p$+L0h,] ˗υbj'*$$M6Ķmx7iҤei9uuջ$!fHL.]}vҶ߭h11w?^eBP2.ŋMs r_ 9y$cƌa׮][,^X2ݝѣG;Ed=???K0L|֞í0m_6qIq.w%Nn"") |̙A7|||.9&x֯_!2W G2Fr35g ={+]m ͛ae"nG[p,h2ȑ0cE9 B3YF G2Fr3fFڹ6mR74W|r<Wv$F8aن/ӹ 'E!B,Fڴ 6lPW2WbJ"Wu&ZمKywjzW# B!B,`2V5kcO;'`gߝ\Qđݏ /&\JJһÓD!%Kh~c$&mM7Da{V5שٴiW0f |d?Ի ÓIBa]HfHn泖 sa|c$%c]_fͼ^us=>\"2)֧NQV.3KB3iek!i#2S #%-^{p!??TkcBcVn{}|:& "QIIqmmezi@ ?$3m$7YCfǫS#MӶ T)>6H-_d8Ed\|\}qvt:Wd|Ҁ!BXĉꈤW3W) /?/~=+zSN/R+dƌ\|\KNup0I&{T%2 ]!"f̀/T+_oRL 5~{5oyEjfp긲\|HNMp0wSS9ШU.)ǐ; "{wz"rC]HfHn+oo1Ǝ5&Ġ-Xݖv= Kfeq:O%o.$ؔ4lH i>,*wE"DZ}+o޼*Um> 65"M4I G2Fr3-^ }Cs?FQo ,~s1?噹m УAtkHW<`3u һG`Y!ooo uƆ1,g28<7ndϞ=> ~૯|z+rիW]HfHn|}U>@]JKӛ'}!eMϑnn۷&,[kq>{6~ח8+lEQ.Bpqq!00ƍgy9-666=#GвeK"""Txr !rgOW]ܑI0|pϝ~fPAYSvA.о=[dէ4BBKH`3.E<70dQ5k#&'N֖I^(Y$vvv4i҄͛7{;wMŊnݺLNNl>e,c=~sYs@.]/Lz4hZ]k.ZnIIInݚ'OҸqcy8q"B3@йIC?2u|Z<~-[< ]QH2x3,ɉջOѣ`kkK/io>+˄ 7o۷oGQLs'Oɓ'۷/'N~`ܹ:u,\z >\ G2Fr3_Vgot Ր/ǘp`ڀ^հYH>|8= ;/.L;wkŊ]R w@r4&fS<,?yMQƍ(L&.\ƍu"FԬYoo'׼ysWhCK.%o޼L:WX#Fg #yE̴̗9x5X^ȤI'oշl>EjT 5RW%>%L<}=qqlqt%Ҁ"g tԻPq֌7n}*^8͚5c;CoҤI/WݻDDDPn]J*7on~ w #i#/2;~:tشÍG3򷑌j9Q|Z<ɐ ~}غ[)'KUz-4mzHS@]7+ -#tV`@ bLi>._V?= JsMQ?ϢfI:*]Zr5C 2L;v'NpUʗ/O&MxWZ(^wĈb N>Mvs֭W_eؚ֭0ٳg]HfHnDf/7s-שVEFi\\6ylY kم ̽zk?[ǪX|||Xp!׮]K9K 6,Չ1/^S`AѣY~=>>>TV/777nJ\267Q'i#/]6{@rcÙ ؗ>}/X/_W?܃ꆃ+>z('k0""̭UZPYOӦNJYd {f\xx"7oO>Xի??ȹL&SWZx1iiiO[JL&-J5{MŊcƌ\r$Μ9_|APPuiByR|tv6g,~s1yl-&7oWeQLbF|XXm2o<͛ǥK:u*:uraggGʕԩSN%22`~=)R"EȮ9@zw]w}Gqwwס*!U?ƶ豮o~eݖYO#갫5(یd¥KL^Ջ-'k?^H&TZng,e%?!O˗/_&{"06OOOV\ɲe˸wŊ㥗^bȐ!r#HLLԻÑ̴gnf/^{y}\Ûߤmy{ XɵHb"tnfoOY(`L*|]t(BӼza^=x-[2{l*TB`` 7*r !.!Am>;MaeWjΦ^( ") f(t ϟJj,YL <o>>>tؑ'OLӦM_>>>>zLw&::K.Q|yvwIB!Dw>&ښ#QG踲#T|??i>ՕlU/W2yUe͇ 4 \vF1h ظq#%J`ƌ{4N>;}t3w-~wg>iӦbooϴi 
ʕ+U!y@>?rnW_5\nw?v,_л{jwEf39Wեri@nݺE޼y}6ǏgԮ]~qLƍ4mڔ"EH֭YdS3L :ekQn]8sLd2eV!~bcc.p$3m$7=/Krn{rW8V /Z KK//ش ֯WW k+]c2|yf֨!͇_>{{O-wjoo㧥Q\9Fŋ=ǏϜ9s?>)))?1r e˖gN!Ǝw #i#YL0`x[:a]ޖjū(b-EQ``X-=>Qϵ_o3<2e_6|aFѨQG_lYM=J||S5jċ/H@@W^!BcO'i#/>VTu37i T  :XZ Ps;,Poh`sm͛x9C%YRy0C5 6k֬l}PVQƣ3cǎ4mݸq#G>[0s/<ƍ3pG !C>G Oa|;6UҶyö-tx$xct>ff8Ѫ<,u^uaaݹ2%>/RX1_FĉI&)=7eܸqO|ɊkvTT)SJ``>ܹs _׹s2u !DV1/eעE K.ʪU2u5U:VZtEiѢ $ö/lݺ+Vd{VVf͚1w\߿g&99W7Bয়/૯Ͳ6|8III4mڔ͛ӠA9qEܹsЪU+N> )))899q)fΜɱcppp`͚5t"5!LOaԕh'O]j{ IDATN-oGzűL\Aж-lYԿ5kS('6t6mq`^<7 ʕ+W\ޥMNh!"c&N/TaWu+gcϲk5jq0o͚C Իl'OR֖} R@KJ\e$"***Ϗb͛א͇B!2chmGBrWu̍3zgu5'N@ǎ~~JJS䳵e72y&5jԠwlٲ֭[ӧ5j:Tjy޸w)B!PuQoa8ĔD:vT)vIMX-ׇ-[P!+W̖[h^yUЦ +o׹vNj*{fgK"IF&B')0s䮹Ӓy{fʹju"l ?.wEVJ 7jD 7̓]]BaL&3΅?4)i)Zߋv&j>; ݹKM5$װaҀ!Ljñ`5pH5׾l9qfBQI|y+wSSBiؐ ]Ё4 V[vBBk{ҥx1xyi8)wecFXGZ-_V7nG|<8*]QOKSh(቉vv%EX$JV2_.p$3m$7IfRSՆc2X¼af&{7̗UokYToKƪîW׻l9ʩxv89ᒍ|Ye9rDz`86$3mrcn));~=BϞbbЖA, ^nQGݻQQ갫:us-dkXܽNgg^)Z4KOX?Cu)6lwYNVUB[%'CްiYoe1EmYbjU#, \6''ZeYA2C7nLYz5z#B z@6ho>>sOe~|ܿN;v#dvűA|0D`߿O޽Tcǎֻ,!BdRRp~~uL>gƱ45,UJл$aE р 0@:DV\2_x׋/x{{])7NPbkk8;;+W.-S@ Իҥ%df>L[|n( ) ^q[6:tue(={*J|u'Kf2)ΜQۧ~bǵ&ryo xۣ( 7o>VZ;vLV^w #i#O2&'8v o֭?Flb,Cl ֓Z֬vk'Y*709V\:u^E+rC5 O?||8;;sA8y$ʕ>лLB ]HfHn̴ɩ=\I-?ƥK4_ԜȸHymd(+WtwEeEa,oשg2LTdO߿^xSbGqvvf„ XB!%.Nm>Ξ]?FPVQ0oA8L5,_V~ Ò%Ыe EQ\ڵW% +ggϞ8::2|C}^*UK~؅B#u ڷ  ]\?Kۅūv[х_¬Ymn`02"̩Yʕӻ$a{n8p3ի|lLXÇ]HfHn̴IB6p╖cMpqw͇& _d&ёL§F >PU ow UTI G2Fr3dMNuu+j A xk[tՙ}S@t[f?6 ?2KT>6,^QEѻ Ņ-Z'z%BhySQyW '0j(>~cf$m)V+33Ǝջl3e໪U_z-|}}%..H\֭[Ko3gPdIJ.CeZ!]wOm>j6i4>)?o[}7-&kᣏ`.՗EGE1FW̸U.'Zb֠A}l޼y|z*BrQQ되|UOW0FV*U.G!#Gжmtk׮抄9{%df>L6))jQUr>KUӟ =7~mez5 |2x_bE~ZBah@ܹ /cܾ};+fĈz`86$3m[D|`蜘Z-iE@ kf>[2WW;d,%11 :wO*T`j|L1DRlYBCC},,,,G3{lK0L|6Fy(P@aJn]EiY-ҳΝ;S~}EoջD!"W:}Z%|y^;b7/y*U8UdAto5/e XzQ jזCX!rqqGpp0˗/'44///?Nr.Q!uBBk2eM˔1kOkRKeMЩ*lܨ1޼IӧV$K%4‚ рڄ̛7˗/s}.]?Lٲe.MX']HfHn̴NTXQ磔?fk}/<x}~Ld̂ ,{|+n⭰0:+%4 3q]hRSS}_Qlll?tβe't3%&&]HfHn̴NvfMعkEaQL88acrX,, \]MLm…-wl+xn{nͰ0ڕ(zȗC&[;1N&M/RJԭ[%kg; fb̟?_~B!tr0t蠎Hڶ kܧ׆^l=]Ϲ_.@6P$;t 啢Ek@ р%͇Bչ/7Kfq_NA{ӱ.:v6bA{ݿFaihoze+w #i#O2ƚr۽[꫰uyǕWh%gnawY|d*W#OEnN޻G_0[),͇h@&MOTTޥ+5`K0L|6֒Νй:% kƞ٢fIáx+YW(uuՃswR43 fd;9Q$m(!δqK͚5qrr>_ٳGʄ;v%df>LkmVuoWWX kEӪN/ˎ;X4/5ev&m qqꆃUX,D`όɓ)&͇F8nܸA%(^8O-'sCBa>L|6ziXkަQFlLqmvfqq^NrY3k 2גq -;(K6W HXX%!Ɔ Ыt VA|e HǚY}5vt{ԉ-6]QJqQ#i>. 1sa6m$!Y`P~|L92/?/s6Ė3gI.zW-Ť$v:9Q-.aiӦQlYZh[oEDD]veĉ:W'pBK0L|6zb~-_`RL|w -.k,2YRzk'0PI񥗲0+(:sϨnh@-Zݻ7~~~7dxm8xlfͲ0mn~}^3*ռrttCL4Tω'hܸ1xxxpm̴ \\\ }Bc|4>́nOu=ݖ#k ͌Tubz]QP/\':uY%\e!?.]X…抄Bac;7Glb,mClͺ4RZ>4?\hfլ)͇XH"4::ҥKgsEY<==Ի!9Ԍ~M9u)Wvm&>^~=9ѼWŋRO*TлWBC4 Zbڴitq`ڶmcu#Bd)S`p1~1GPVQ0oA8L5P:w!*0reb,!`7'OҰaC{V\I6m8vFҹB7wwwK0L|6Y?g^qAZ,nA¥x"3E?WǕ͟}WX6 u.S5(f^z8pŋ3n8ONRRF Pd]HfHn̴ɪܾƎ3|v4.ט},/3o`t=ԯlݻt õD ׮mgTd7C۷osu^xJ,w9%*!*Gw jQ^;˻-@^+=`(u]M9@'W0NNZ,ryŋSxqB! 
CQ૯`D4I~)L88Q{GK0mylv?>W5IIblqtCX5C zj<"B') ||L#͔ƐCwϬ=[_%%'.8`,X B!ZZ +V{etnEIJM`8qB-x-puU75 1) Ϟe[lvtYbz$YypssYf,Xz= PfM3 !9Ej*xy5r%ꕱD(^8{졲v NIuV.( _kXY.n%J]fbŊTX^~e)sUB!uJIQG"+^ oM5]W[{od!_]j4}0/_':jijLBCܫl۶-ݻs2mڴD?#6$3m-9<V{q7nܠ}:u6lX6Vdٳg3tPv؁H$3m$7Ifd4Gu.ƍйsƎ?,>)}}`͛ꝏ4Rx8'kk_sZ*[v\NMX'C/1L۷ӧOMZ8>ۍ77oԕ<==.p$3m$7Ifd$$Mߴ)c͇(|ʰWRc4Ap*=zu.S5jX|O^h@oΈ#Z*ԭ[iӦʸqtپKkrT!Y/1a>زY?RyL88&3u 6HJ]?aWk]Q] õD ׮m.YfXl[GPjUɃ- ֭۶mӱg;vfA!E%$w;m۠m~},뺌/}ZBj*xzvfhX:иH֫G\̰ q&/^xlll(S O~Xll,:V>ħ~.%W8t%df>Lgp$̀Vo•"~ÿ?8cbァYZϧs-2) `*,fGG ɓer`aÆĉٺu+=WZZOf߾}\tϻuy)pB4h@~SpWway&MһÑ̴'i^nw@Nhǹr -$F8{CYPmP6LMq2uϏ)ڵdc';pțk܄(i&e̙(ʕ+WZj)666RT)ĉ~ׯ+/bggcǎ}yiiiʐ!C|)訜>}W)ZRdIdɒJ޼ybŊ)ӧOf f6 z`86$3mۊҴ88(d7•J+)WRogAYh8EE3'/ Z\JqJDbbgN-;Z;C |򄅅q!vMddEQ\9Fŋ]eb̙3瓒ɓ'nnn$&&>zެY'88SNQF ̙{wZ *w #i#O2n݂6mv^zE-P#Pd,f͂1c` 4N''f6FM!|ѬY3lٲlڴ P畤'))S;@YhR) IDAT...,]G͛7/%Kޢu !nPݻrgfmk/B-er:>JjMɄǙ3޻o8 qZ=zxv5jċ/H@@3_*!0к5ĨfX ֖j>8&O\IQlu c Ym2uTMpZj=X5,!bǎqwwW_?.\pwwΘ1c8q߻|2={Ϛ5Ç?DݟZAח?UG|f͚ϑÇcРA9sds89?s%xl|v޽г'..OM}xF:Eҥ,=׭[ˮ1|p|9?羾ŪVJÆ ~8LzOBy3eI7nPlllq=ɓ'+666JTTSڵRRLLj""$3m$7If拎VRf**(ʹs~)M| cQb2HK:~\Q];EIJ|#kEF*ݫ֭̉#'^<r]KxJ|HMM}Tϟ%<\@ddf><#솰gTOIKaVdVY n:8{ pu `F(P@vͽro.^*UB0Znʕ &&*U>>zbxҀƏWv,^ G ~!3xvnBi) 8Brr IHN *JVء\ KHSh(.E^=Ze&rs@V8}4666ĩS9s&ǎ5kн{wwy$3m$7If?n߆9%Kr;x ]|Px5N2Y\%'oCpõ-ĚϵLwt.OKzĚs9(EUPP...h<==eBɤN( "B1n;Jǯ-P4k *iiл73lX&\KN浓'8ԨeeLCח8EGv5^/p!tw5&1- P.%%LU;;K* ٙyQD ^}Uqvv&44T&Nw #i#/7ev>-_?ȫ_Xge>5~ϲg1hFkg1k9L€?d[lvtb.?YKn9 "s 1 PB,[~֯_O~r12I!]<MSZ}֑6ww hQVw_͛u޴leZ >fT77E ͪzU% 3"E /X%抄Bfܹw~xqmǻz*+CV4x)o| Ee cзՌp23S4Bk/ϝ~G8tx\Cr޴:ݳ~OΟӊcU04C4 DEE;ټgϞƆlMXw"i#:xu8z^yżc\}eXT.VO_~QF1\nϫw=Wa2Avܺ;-Siիcc!g;ׄyFn `oرz`86Yx}89w@2>?K9pQѢr lm{hrV7=)^\5TT]ȝ;t íD ֮0ع#Yr Vn!*!aXu‹/{T4S{.aiR6o$)56E:(z,6ZTg:K) fvy O0\e4i;;;lllxVdcc#B۷f{{ui?T}, ^ʊ\w:%0ޝEU ;({nii746K[r7SoEWp˜oW)[* 1 Ypf/orp}&5FNIAg|H6 ̓oDks-H=F`j`rXX..?æp!rihL;:ʀ22]`+M^EMI#~+N5Sy뭷!9seh 3̪-ӭ.^yrn=J|-6݄o2u ߏ-1\Hj %I,+<VUaLJ &6}=9z_#sFhV͇Y0#=W06T`Li7ҥK1o#Թ:y{3oi9=X3!geӋ$"DEEE CϞ=ѦMAN/`DD&YONN7s >9 6lF֕,zbΐ9x" hChfԫ W^QYY37cRJCdT]???NNN(..]v ׮]SDRXTT%h3'gf v[j̚ei>_ÆpƻXwFSyl24vf̰tp+V4R vfqxkW<۱U[MTQ#G(]ٳ.As4M<92>XrA^{ hruJ#z76ߌ !T{urUk?l9 ED?^ *5jV4qzQϪ@D8f3'OSX.]'jNSmzbjߩ>ݒ:}70|8иKۉ\n{/ךNS|M?o\) gO8 xBG}qȿhcǀ1c,Kf|ZT ZF|Ϟl>l@# ؿ?\~ lV2u@n/)ɲ%~a:k&1O}?` ڽ3g;ڴ?*f"w90wwWGGK"u@G# h4©iM@ll,|}vKf& sZeeYF;x{a YGuƒ=WOb=+pHNýtȾgB+)21:%]\\uXXj>{qXK<.BOMMEjj*h~deeyLEElقnݺ)Q"^.CS4Mf,[] ryw+N܄3R}"CVիрV"5 D߇ûjlHnxb,]sqq{Lln(.V~ 2{=}RZ DF'N*]l pב#ȯEHxtOb̘1Ç?F޽mB>G;C\8<ZjkjUT'))?6`2Td|D ?2d<==ld,،Λ=}@p`K?&0yz}4p{Ui6cǐ|~}y="{{GF`۰&Ǿ0޹'84u% ضr͏f,+WMXzy)]QdOt ̤an5Y0S?b'x 8pل1f||_{`: >ȍLdeaŋ֭eL{䦉ۉTaf07jg^MG7ac\0.@ Ǧ`@o7t_[XNȬ1-;{ssAp0i^ԏHn= Vsij*=bnp6"QMŊ9C:4K.(oZg7,QYٙ[׮xsg!ZqDbbbx%t"RAPZY²BY>[>l+q_pa&t/q_]0kR Ԧ?}3XXjdxs231*]iP+SpDEQ-UP\^\ay&6`j[8[z7<}p%~y1Tv / 2yx :ظph>k|Qڷdž^~<^k:Վ?^;vhJH ك#F(]03iMU5<&vqDt耞m{o,rc*]ñc@Zr 8u ;74yqmXu_۳0;ֲN2p_gO6 $76 N t05+W/O4Rs3 fzR#JseswK: wKotoƶ7G)[zѩQh\,Ʊ덆lyZv<`ׯD\53zp!)XN֢3K-)T {N$7Nk>퍇~۷ǥKm6+}YIg0t, @s۫Ux"**DO]*./ݶtlY!ḧC==jHc-5Ʊ7{[>!!Wq_*ef#F.jgvh?3*ךN?O>d篿:p׋^{ =(df(*+ղjeWkdnv|O^ަc׫>,}0yZzk:tM7\ZX)4#j4n649{֭-W9Y^E=z>.>h \ZmڢG5۪|}VFP\ \bY|~ӚJ{f0pj*rK"ݚ&~aٲe1b|||jbٲe߿i 0V.knXely5:!}hMQ~==4}…Xjo4jMKZ2I@TaԨlF3qiLط/Bݕ.I33JrDo{APP"##Ѿ}{\x(++K ((7MiL 5׵kQ>]1onNnV_XhLsRq+פ[e%0ipeU~)Lrtz:z{P i]?$7՞fXd ~g\z[ƨQW<>[^W: FaY!U >G 7Bh(9klQS{[nji4HAf30u4 )X̀Yx);:"gOmF,XM öm۔.C7x7`#QqiH~F#^HKkѨ`AHO4ix:=㙎Vnٮ4A3?۶m_|TU<`ujApt@PJ݁B pw<<,?wsFdx1z5G)Xvlƛgl~0P"R?M4 +W+@ѱZw`+ѓ%%7>v,5)R>wuUOcs K24Issnl4n4-6Xx-)LA 2jk` Z:E?5iMk@0zh[ڪ9>>>Ftt~  7&b>oh[ck65&** ⾩Ž̪ Bܦ//~: c$a5uSi0mKեx)||`n,WפanQXXݻws HhquuEbb"Ft)6EM`94qkp!iL\ o@ͨsxFCleG?mvᇁ3,Ss |ʂdŠ <[_k07qtU+]FYF"\\kPD^ IDAT4ŋO 
t:4jۢ54:]7k.6_5h620m>|f&|ڶ=бe?k07&+Wb޼y>|8.@quڶU""9xxA`(`5$x?7> ->}0aM4 K,AAAzpxxx{O?@eDDD2IK׿gg+\JdP^n9nj*O ؁3ee\۽;|p1=1M4 #Fu:Ll7ٵiEaf07H(ص m4H銚J^N8}mMXY"Sq_4qz ӦMC׮]cK$͘1C4Icf"dg[|lߎ}Zq]d,~x̞=?~"5Yx%h3oS`n,ԤR Ϝ99us0˦}MFr8q">S ,13ix6*+ 8|:Tӹ%^C&7EDؼIHno@P\\tcСhݺ5ѯ_?|wJEDDjr,p@l,;@JW%Y~E?))Ar` $^׃&`?6Ʉ,\T@~)7nģ>"+#""Uk˔+//``+L|r"^8y `c^xCxj]"j&$_ֹsg<(++Ú5k.''l6 We.As4M3˱[J9L&3z7ЪUU{nf3VdgϡC2}x8՞Z17NA"dBll,֮]4h̷dDDD )) ˆ͎i7JW$١b<Ԓ.]tYDZib/ݾ};ΝCBB_d2!-- vٳgo+W`HOOs:::_`@RR5%""^x?rÇ5|Lpp(" bADhbhĸqk.h^^^(..ի1j(ܹ...Mz|7;-¢E}gwf 򈈚3gI,MGl,0w.|22PPYAA}/D,M,ZD||H2M4 _|.]ӧ׌t`ڴiXt)&/v؁Eaܸq > o6LSAAAX~=rrriӦ:}ꩧ:_~4QQQJ9L&^ˬ?x!`(臄+ pďWbKH CWWWE#JgU̍䦉 .`-8¾}PRR޿ 11f 6 )))Eٳ.As4MfӖ)WG̙#yC2 <]닷IzYkVHni߾-OwQtA:233޽{M5vXDEEչ 6 ۷oj;wrr2PPPPgEbŊ:۲'Nپzj,\6(ٳv^ӧ׫mҤI6y{!Gdd]@^z#22.p矷L*(EzA#22R1ʕ7+/\'NĿGdd$;ۯdxv>{c]_~<$s<==>L0L lK[;wU_/??_t’%Kl_js Mzݤ$Ԥ!"" ¼y„ pIHskɓJ针TkM)X˖-Á3gM6(((hĐ!CM:?WUUջ ^|peUJ?UUxib'""C鲈Ȏib vލO>&MB裏O?ݻ!/J???@^^^+KTçt{L&fW˖\͞mC !lûݻc߀n>v_1FrDXFqc툏Gtt4;Rﯳ`0 55ATTzU9`V13ixvYyz'$'V_-sP^ ̄ ypn_ sk \t:ARرh׮&LVZ /` ę3gh߾=/^\##F@~~>9"\r%^y:t_799HJJB)GTwԔ+ `݅ x)88ݻ/"D"xT$;;#GĹsjkؽ{7/+WĜ9s-˗ksN\t :K.EV;ĉq)lݺ111Mj>H/` m[_?^Z32OaenhSQmx뭷0`9sK,C=cǎgcȨYq]wҠPYY Ǒ#G>sL8ju k ntUVn6ly,~p$"KS1|5aԨQxf!#GшV,"IYRSwfԔ=EEx:=F#^ k;A3?TECףw&Po/bذau 2Ed6"4tq!cL&f3۶rB`>{4%ª*<çE 8˺vC˜[DGG#!!Jye2ТET$RR13ixˬ2- ׭d/CJn ૂDɄ5=z`VǎpШMShn_S FrSm'NӄT_!捣E13ix,+tqq*6Jln9嘝xm[-[ڨ:uԾ"̍dڴi n2eJu:L& #&&k@O:+]Q?:GG+4mSYY5 4m@֯_t \ʀ^>|ZE\I{i)JOb<۱# -T_;UaA$jKj!{#.CS4MSr̍4GCzR唫sRYNB 퍵qwWBQݾM5j`_13ixʬ>zJUU&L > Ɠ~~Ժbj_FrcB<  |8pWn+ efbx];{w5Sςe="#"?2m ښkrsiq=z m[""x… .As4M̤an*]!K/)]03ix6 `Ox`fv'B K00) >z**%$' xfq954̍)Xd֬Yt ̤an,O?yl:"LX2ӪqOVs_IHnl@.13ixV`εxLũbbEV".7&&kBC_̤an$76 DD'N p$~=0mgJ7s|"Ztx1h]DDjOYnLGraev6._3wgIѺԎ.XB4IkrfzO<L:Xa;?Ó1adg+Vm>̤an$7"`Paf07ٱc :l`rɄ/si4Noo|[FS̤anXNA""99HJJ€.HY7[\u |%*{ ++չ\Y aA@xy^ ך# DD.0{5kwwYK8m4❜pf3|}B@\]e!""ru4q#0u/۵kXmh䄅xcGsv""{Ed .As4MFgi0h 25fA˗qב#Cbq.5cf07 3fPaf07nYi)0}eդISmflpaT1S'9:ڼ?}M>rfGGl|T ^${(.UVӐd M5cf0BLLҥhEBB.E3.As4M,g8p kLAxqsrErDNxL8HHH@llҥhNA""99HJJ0"6 8wr ,!0yU_&ϣd£cA@zxXux3MDDb6[FƍO**, Lj/}`?Ν'yypvp~~G^l ݚ /h,j7YYhyХ У?> [ŲɥSTU˗쌥]♎ᣡDDcprڷkn<.0Tq=zXnݺ6)$^PU@q1za}ϞxC" م($$$(]03i4[QQӥ23WoԨkTQQ7KuXy *+~n.Je%&k!!eպN 13iɍgRUh,nh]ƅ 7s[ 꿅F#9w30AF]lkM""5NnxԹse9nѦ J9|NNx%0uꄶNNJFDD6H)UUٳ O:sr[{wKScu4d,/EE1CZYDD^l@.l߾ǏW Maf҈lX44]i8gg"=1]GcGin5vff3>x8w  ¿BC`۶p`#eKIHnl@.zI`n`Y{ilO,stGʵ2HLJ99PQ6mgOgTut,}:йs[.@3ZPj2!`Tx/I㵦 шV"xx[eeӷ[9l[!C)S^eKy]TJp 2F 5甗|ZN06X!zz*]qDEj:>=,}TCS99! 
ax]ьnL"" 2U{#LtspN"""%6 dك#F(]03i[ejVгS'k|5ixLFrcBvaʕ)3vU NNen0|EE㪧L7)Sפan13iɍU3 pkFf&`k͔Oe{ڋ vuEf4e4M뽆3Zx:7/TߘH|< IDAT6ӧOcΜ9طo***0l0|֭mDDDPXp!VZt̤an13ixLFr)))#}z&Mkźv~!&&HՓ/t:aɒ%uZJt¹s}&nRR@HJJj4Go%h3̤an13i8<^k:w_UU.}jf& sIcf07yyy˃%uK,Qaf074M|8><35gZf ***0ydNll, `""""Ftt4t9E(++ܹ.]NҥKѪU+;1qD:u [nELL wFm۶J)L&3̤an$7N%##iii8y$.#-- iiiȑ#{qTVV?;Cpی3.As4M>>Ftt5{zzJyQvDDDD㵦 {Qaf074Mz GGGܹSrZdd%h3̤an13iɍ ~!zNt9DDDDDktl̙ggg+!""""R]ؾ}%h3̤an13iɍ م+V(]03ixL&38&& 'N@~~>vΝ;7+W ##ZBϞ=en֮];Kf& sIcf07G@鉰0}ظqcǙf̝;;p;vL4 u& ~~~x嗱ahU˖-C\\֭[Jdffcƌ`ت*ATTTLr+z^uuJnkfR55}k|}}c,Zƍk1eeex1eL: 둓M6yTL04)5i}Mkq_5k@DطoJJJ0~:$&&b֬Y5ۿQ_=BrۀB$''乥>cv;xޛ-s܍>{Ls_&s_&s_Sf_>N3_|A K,>t:pرzsw Mz-[x7x7xSm˖-M:k8"Bqq1ӳ}^^^j=[lA.]ڤ"""""+++ӧ1zhK,6 "899,.YUUUx޶m[Ι֬Y L͟?={#vܩt9pihӦ <==SN)]cСhݺ5ѯ_?|wJ)k֬.E<<<OOOxzzb…J & :u///NÇcvޭti}v .I]>05^UUн{waݺu o-W?~'wΝ;.GRRRM6 %%%BEE0|aJjBVVV6lH[.]$G ~Q=!==]24g…BttPXX( 9rDኴeΝ`6.Eծ^*slݺUL=8!Gee%|I@LL Ο?T+S3gbԨQM>Mrs'xprrBLL .K՜0prrB``Ui+W_}@AKДb[k׮7o߾ W-7oFtt4t:ҥZNNZlc&MR\tIԃ ;v }!!!HKKS*j݋PЄPbϔ.G8,DGG+]5 mڴC=\QÇڷoݻcƍJO(]M6رcL&lق޽{# @T ݽ6*T5gϞ /J iii0 X`&LKٌywUMgϢcǎ?~%ޕ+Wp #//_~%ΝFڶm8jΈɓyW,Ua"Ʉ4ڵ gϞ\#=====5%%%jj`̚k疟ѣGW_ѣ]*b_sttċ/`7->>}j"kk<<<; %%nGA[:пv8>Vnǎӱk.TVVo#<“Ԧ"{v%aРAN',^L&0gIpppt:&y޽{.]|]UU%kNHMM{3_~v ,Y"[-j;v8`c&O,xyy m۶ڶm+hB}]ޒz_+++l[EX;\A(---6/rվvYI8߂"իxζѣG 6}Z2L/ 6@ -[8[̄#ƌPC :vXg]Y;3BYYA@EEEu^쉵s3 ;v,9Y;Ç0(//\/ڹ^ǏѣGqtqqq5'۰,++ )))Xp]/vn;vĽދ˗l6#%%;wĘ1cd{OfCO?ŨQgk֫W/ݻGCCCO>Ħr{oc8A@xAPBa )0 9(u)Jh2DCiXjָ8]d*El (9?x9g`~mΝ{s!=Zƌ#ٳ_+ ٳhoo7xpڴiptt4 ;***ގRrO?~:nnn\AInaaathooG[[Խ"UIf>>>B[[~wbҤI^AK|(l̙E{{;~7;vl]x\Jɓ{.3 ƨpM'?ׯ_GLL)c._ +YOan13urL3S. 7\5L'3S)an13urLֻX3sss0.~fsSܔcf07嘙:̭wg=ohjj2{{{S/an13urL3S. l2?~?fsSܔcf07嘙:̭wgNNN5kۇ=55O['af07嘙:M9fsSz0%$$E݉' Am6 >;w.:bڵ30=fsSܔcf07嘙:ʹI^@)/UMI > [[[@}}=݋ 6  u'fsSܔcf07嘙:ʹX"""""2 DDDDDd2,@dXɰ!"""""aBDDDDD&L """""2 DD(33( +!"~_˗/# C(}`_Qz^{nIDD!"7lbt &^Ӛ5kP]]qFhZlڴ!" Dy{֭EXx1}Y 66ݨ1tPaF@ll,0x`Ҡ_yy9.\V +++xzzȑ#z}zn9o`Ĉ2d:;;x ,--憏>H~ϭb8y$DQ(Foi$I8p&NKKKL<yyyzYk4$''{#G`֭ظq#`gΜ<  kkkh"hZh4hZ̝;ϟy t,@5lقSNtĉXjك+W",, * { 999;wk׮}0{ltvv"33| fΜdggob8z(;Fc|nݺYfv·~lذ gϞ=^|E Ͱ}vڅǏcҤI_|7n.\CQF!,, ˗/'ꫯMرc0}t ??3gjjj:AAAtQRRCϏuIDDk222$A Srrr^xy.o_vMA2K)11QNHHAy{{K HrNHA3g񍍍|rU5kK#G48߷zH$m޼YEQkDQ.]$;Vo#4l0EntҴi$XA7nH嶔Iz444HFڴi^{GG(EDDc"""I">bnn$#?? ۞8q"1|ML0z3gW\Amm-ݍ.ׂ 믿VoZӧ1uTL2E_$IԘ7o{9y[ʕ+hlliiirCEXs:u ݈E_egg1c`Νػw/~'tww>7"Qz-[W2d޶,,, n2333:Aۈ#p͛7WƠA~-[ zkZ{KK[j;]f ~\|vʔ)(//7Zއyc IJep]$$$qqqr`Xh.] {{{Q֭Cvv6c(,,ݻrJ8;;@cHOOGUU>lwʕHIIAEE>sO>K,\]]1vXl۶ .^@X[[֭[aiixTUUa +A@qq1*++r&"z!"eƮFL:5ؿo>8tww# 7nV.v܉s!** 1c1~xΝ;$Z ---/^}"..k׮Ekk+&Lƪ^yahhh3rrr FٳqDEEE\\.? 
?w\l޼nnn㏑?orppvލHgggر6lPt^DD )qh}6cÄDDwx7nիWB &&DDkxqAŋɁC/[dxL """""2 DDDDDd2,@dXɰ!"""""aBDDDDD&L HRhIENDB`pyzmq-16.0.2/examples/bench/msgs_sec.png000066400000000000000000001562641301503633700202050ustar00rootroot00000000000000PNG  IHDR XvpsBIT|d pHYsaa?i IDATxwT e"HQT+=*6(%KbIOC,MXbEƊ-1M "6a.+s'{|?jՂ xk׮E6mP\9-[w}ĉo]6ƌhÇ=z4TKKKԭ[sիW]3fQF-~|sS[okkkTP ?7wWzz:&NZjAPɓ'ѻwo8;;>|8ܹ|DEEAeZJmΝ;Ѻuke?Y]wu 0Df/Xxyyׯ._ZDz~e%rD?BDFc;v,/_.](׮]4`ڵǞ:u ]tGУG 4Gtt4ڴiM6 tnp1k}􁙙nݺرchڴ)`1bT!C駟*cį8qΝ۷ĉQOOOGpi4iC Ajj*f͚P_n:̙3hݺ5F!mۆꫯ_H>-Z۷o߾ڵ+?gΜs`nn6W^믿6oތ~}N8͛q!{^ׯaeeRJ8|0ڶm ;ѢE ԫW~!vZajݻѻwoۍ7"..;j׮ձxxx <<|g>\"GD:' $IjժB!* HNNVnoڴpww999bСB$w^ϟ5j;;;qIݻ'jԨ!*V(ӅB={VH${=񤦦*?7nX؈ !q~AH$fΜ>e!I2dJ[DŊ$I"00Pe[/]T=++K Yӧ5QyWZop!Iprr*ۂ$I⧟~Rio߾$I4o\| pppwlZ0zh6mڠF;#nݺ1už}^XJ .""ҦMԩSϰl2Ȳɑ#G} Ղ8L2-ʕ+L6M /7|$sAƍ߿?bccQF bŊP(B`޼yP}|㵷8=G8KBPk˽:;;[[[[@W!*TOc\xqy ˼;9"҂'Nᅬ"88Klق=zjNNN?>ϟ+WXd {ܹs?oHHBBB#>|6mŠ+K.'ODll,|}}i&%[/?1\r^TPYOJJy(M/&ϯ$= DdtѣG?pvvF^ 쟻ZF `mm-[hW\9Xt)c߿p%@=ԊgϪ}-W<<$IYQxqNQ,ioغu+LM ۫W/ԨQ .ٳgU~򾐼=zlRi/[R%_=z>HCEvv6Ǝ~-̞=[ 'Ob֬YsZ ǏW6o<\x:uRY~{+TXf>z;;;XΝS mm߾]힒 … AժUE=ŲYYY7QQp %www)bbbеkWthڴ)pm=z/^DRR,,,p-Z@&Md"##_|rnUVEvv6:#GiӦԩ77lׯ۷Ѻuk| 6UVضm2  11Ǐի;;;ܿgΜ'f͚Rus~Ɗ+Я_?kCժUݻwRJXdʘ@|Ǚ3gPN\| z쉍7… 1dxyywEŊٳhӦ :TrSϞ=ѫW/F8s ~WTP-zcN:ɓ޽;Zn 4jԨKLDT<U,*u=1n8Q~}aee%ͅ+VYYYB -[D888N:M6̹pBѳgOQjUaff&,--EEXX{-_ B5kƍO<ߐ*>* [3g?$IژL`%섩pvv>>>b̙*P)ܼɓ' YŁԶ;x{{\'NIjժÇ;wh_wyG #<(=;v///aee%텟8ׯ$I-\tj4h˗/#33*X"qye۟T%F^www۷=z􀛛}X`˪Amx\\\p}( XZZW)))Ιw/KNNΝ;q?DDDD_Ϟ=ѵkW8::;d7| .D pq#55Wwjv܉; """"zիWcРA m_@P`ǎ033hNNN8p 26>%%$PBddd ==j}7o{ 3w&^й7ѣGc޼y>_1קOlܸH.̛sfLy\*_|:v̫رd޴05gs皮sMW\߹@TxF{;Ν; #GU111x䉲͛7޽{+ۺu DEEIlGBB?lիѲeKTX8_1WN"ﻰJ2o]qƚ3msMW\㹦y\$Զmۄ,ˢ}"66V>^ş#׳gO}`p30oEǜiy+:L;[3+ ݻwǶm 2}t=8X}VN߿fff۷/QvmV^u9*rpWXQDpp0(̙vcδSVVn2,Mdv30oEǜiy#"c/Wcδüs捈 """"" DDDDD3,@HgXQw9Vt̙v7"2V,@(9R!L;[1gaʞHȲݻwށ,X|9`e,۷oT7''Ge{n{a+˲ /_Zw},eprrBNO?廿5 ׇ *W=z`Ŋ,TTz= dt s30oeOpp0~lٲ-U/^;v@Ϟ=W\о}{W\";;[%I*RGF!?7>Þ={/仏p@NN}!>>gV?eDDD@///t HJJŋĉELP/x}BDDS½{/^T_xQXYY q=e{\\$IBP͙#W.5j$݅,";;[$IBBXZ5!˲qJ7$I߿_m\~8|011&&&*sN:UH$U&?1_UtرPqJaAKJ ''',[ iii}^z577}!n:}`p30oEǜiy+͛L>3f@w8SNpwwGddE>}`ggW1|5ƍػw/$Iw9rΝ;YW:~7@ǎ-^*=x:A1gaފ9V8ii@B XY6664i I-ZkkW BXXN>ƍDffMKyBܾ}7nӧO1a4i$q7ʕ+v777@RRJ*7,@`%$M~@>߱ųg0k,/oذPOF @DD"##`,_jBv=FMWd+L8q^,Ӳ1pbJ' DDDd<=_oI ŋ1zh߿/!+]vڵkѻwo_9sf'I]777dff">>FBXX0dȐ|*U:,@`YY}صk.\7|f¥KЬY3?Cッ}v :fff:thknnVZa۶mW;U9۶m8ݻAAA- B``C08̙vcδüM))) Bիaff7x_}r={oF⢃7O>ɓ_k@aƍpB}&t\t̙vcδüM}ܹS~1cƠm۶ذa֯___i&̝;CV1rH ** /_zjժa;#|رݺuz?\EFw9Vt̙vDtt4ڷo1cƨl$ V›o#F}TRs5k,mPreiiY7n>3aڵ*ǏGVV"""мysxyyiӦݻwqA\|9ߗRHn޼O>vvv4 E IDATqwwܹs1l0`۶mEޏ$I$P$}K!$Iw}KK^\ř3g~gL0AJNQM4 âE(51x`'`BDDDgnnnx+#88Xwoooz?׮]^sR(^U}^T:2 w9Vt̙v7"2V,@(̞=[!L;[1gaވX!':L;[1gaވX!`ee s30oDdxz)4௿&&W~m>ˬ3t`R 5fgiv6nge)?|. SIʷH)p) ^8>""""\,@Jqժ+ !,'G()sZv6Nfss?k~͐*IR#y MrU'y! ?s)d `δżs捈 &IrhfV !0ysAORfjg{a W]PDB+=[r Vt̙v7"2V,@(_$&&@ 8BMiٸ ѻ7)qRh)6RX5J!$30oDdXH I,B 8+p^򒳂?] 
rr^p^G1e* d$I$\a_B>5/otN\Ñ"5V G33YX)(oj"J 2 ,čD`cb?w6EMYY'#CeۿWyժ}٘ W*0as͐1gaވX!-[;bwC1/S9Kq#=7q##CȣG)=M$ U TS(WN+n.B41s$1gaވX!`}`p,Xhni8+ 7s y#=ӱ?532.G33+'y'33_s30oDdl2 |\e&gx+TGQ:֫CVVHoZāF'FWHΔLv }ϟGx; Ǐٳv"~?$%aj*={煼_xse-;;˖-C333١~2dbcc5'@tt4zWWW( 8::m۶;w.={qɓ,^Xc(ȲI&;6%jժ?7n8_@@8曘8q">|qӧO1oiٸ2bأGt k֬0̘1;wƩS⢶5k˘9s&NZ̙3ѷo_lڴ ӧOʕ+5￯3кuk9sǗ_~ pv튴4̚5 cƌQzt-|xqb%Xdf͚ Niə XYB*UWXU.7j-["];ݪ7nubZ!T|uZ:GA34lPB( {C ٳg-ZoM\>Osh 剪ѣG[oN:P;v Сkxxx?>.\ׯ0aL_1޽{ Ŷmې bԩСC(BQ5,-5n>}(ӑ;Qիκ < :߯'̬tz ׯǧ~5k`͚5x{{#88oJJ ?IP5KgSjU$%%Ν;K9s[nϱk׮BSٳgϞEÆ 1y|mڴ W^F-[Y&FHJJTRPPaHXX&@Ϟ=aii͛xYooo4i6lgm̙3pttT6m0~xtǏǗ_~K,Q@ǎ#|pvvƂ Э[7ٳڵMPdI9\V>}ge!E|P/$"$$'Ҧ:ohRNٷo_!..ӧq1 &&AAAX|ڸ,u*L.]K.صkvءR8pJ[ƍq|7o͛X~P7N Xd^:Wv$''cҤI`ii[*Chڴ)jժ3gܿSN||Dvs|=z4֭ qeBooo4l8zNrP$''jYؚbAZk\(OO}d0xiy+OGOꃩ):w\V%͛+Vdee͛QFWW͙3{Ahh(vx'O| ֭[0w\̛7}o,Mrc}I$22,#88[nE@@JlڴIY@``ʜ8q"bccȦM,>OVA 50ocef+$I1i$۷055E˖-oaϞ= }6,,,ЬYנA :+WĊ+`VUVŷ~$_'|R/kӦ Oz1 e1>Dtt4:v|+W7|S ped?SٞWŊ+|җGAHI3L;ӧC%M~i9NӦMND \Fy[{Q)0r%''+E{A˗/o߾HMMU"uUYJJ0f$%%a޼yE>*U`ذaHII7|Sy}(Wf̘o~1tMGy}Q)3Hdd$P^{5ZjX~v킯#F 22RԩSErrJ{xxˮn޼ ___$$>UdUC[Nmߟ0_n30<:ҥ *Ww}8q"5k… C>}cdYƍѡCl߾իWǐ!C0qD|ǨUN ڵ Ex_hh(\\\peg„ ܹs_عs'*UcǢzGj֬-[|ȑ#>֭[.FaZKeٳg$I>SiOHH$ŋ;veYddd!7n$I<{L4h*UWuV!Iؽ{8_#"{#8!8!2$IBP߼yS|7gϞ]( ann.\]];#֬YS?ٳT077*Tmڴ~6B1yd!˲Ը}ɒ%B$!˲4iƱ4f!˲;v- @Ȳ,VZUɓ'bܹG8;; 333Q|yѬY31aqWQs+'|"$IϟWiژ]:u(vZ!I8vJ;wIČ3m]tuUsƌB$qqaδ7o Ō7QsM;e-o۷$Ijժa;F+##WF-P^=mٳ'bbbe͛7޽{+ۺu ;sEEE)@ 8~-++ WF˖-Qbb>J:uꔾC08̙v歉-W &_Ĵ4=FU\V6e˖zcxccc ^͛G7nE3f=KL4 ܹ3N8 6 y  _~9s&h"$&&bϞ=%~e… aδr&cbإKװ!B̫Ṧl ױ~zaر+V иN:ؿ?зo_v8x *Tw„ 7oѵkW,\ǏW!ann{F/޽;vm۶%vD{V&&XZbER!"W_ah۶-vڅzK!sWiҤ v]FQFr-"2NX"^wPIwHDd`rrr""]F ($ |$%QYS3733̯U WQn<״übBFaȑ0g)(oгBHLDjV*xiy#"cB.]aδSP$I¢ڵ$;^aT50oDdX* fV%o`j!""*uX\]U]t>نH 2 0g)LdI²:up==SoAT50oDdXQXnC08̙v z֘XfݼsOpT50oDdXQ駟aδSssC++\l!J0ҍv7"2V,@J,cY81!""*XVvvU2&^J)YU :u02:M@lr2>t ;4$IJ!IAbb"bbbo>czx @}`p3h7[SS_ؙw@T50oeSXX0ydY{,oqM}GT,XQ9ӎy{B tv˗q/3*xiy#B 'O"!!,CiРq_x߿,#""u{{{Ȳ,j1c 4hְCvJ 2 C08̙v^'ojք$Ib㹦^&'|||~$&&9|0Ο????l;x :w sss9AAA077Gff&t邉'F!Cŋx0n8HG1F A..^C"Rȑ#8wdYFÇG\\.]9s_d ?T+..k׮ŀTڧOCѐ C˖-1{l[.C2 vqwѥK8߼9lMaג$$~UN)@DD+W=z4~~~puuŪU0}t`Ŋ0118;;cԩhҤاBpp0cbbaÆ!""7nTVBFF>sjJǸz*ԩʕ+mԩӯ}lDXQ={6Psț%zx`̕+x-˕+J'ka E13I]Æ ôiӰtRetR(|fggC1 2 ׯw9Nq*U=\SM\6g\VHVV%r%suu/bbbp%$%%!!!*ܢ$%%EvM ')Vż,`δS\y3$,S0ȟsM;QEՑGm}M`G%4Ahժz.<}ptuATTbbbPn]k׮#''~)rrrɘ4i$IBPPPqNe ""=w t 9B;"20}RRR UƎ6m && 6Dhh(F7xW^Ehh(9r*XQaδSye,S>Ē۷u҂v7*CB$XZZbСZaffݻwcڴi D:uvZ̘18C&MdrNǜi$־|y|Pz=QE(}50oeKޥOEqi!Я_?/_^cooWίP(0~x?^8W@(5J!L;%Y5j/]0X<״üQa|7#G9"R)֪#Y!Q)u9l޼NѫW/4kLa "R 121`f:}4aee޽{cHBBBC08̙vJ:okBʕݏ.\F:t(rrrDGG{QiBhhC08̙vJ:o TIIAKWxiy#"c‚ aδWK]+i<״übBF,:L;ț,IXZ6@%sM;+ DDPm++Wo?w8DDDņQ)5jUaeed!"Yf;ÜiGy3e,Sg<[tsM;+ daδ5/WTAN]\xiy#"cBDDC08̙v)hn/^0X<״übBDTY`Iؗ$}CDDZX.xc\ARf!"" 2 0gg޾YO6xiy+[dYVcjj '''t ?S]ПK&Ol wYٯm۶!^z h۶-ΝgziT lٲEaL;[33|W&^AutKEsM;[#I999HLLDLL ۇx̞=[y!IR޽ŋC$4h@m)~g|wV۾l2"++K~RSSbϞ=(_|8`ҥj,Y w͘0aeW$C$ ;v۷EYYY8v$IBΝ ^zX"222pMd"##0 sҒ7I:uILq)_%gy+l$VV21)9999r bbbGXjO333aԬY;vw?Νannhx?Yf˗/- {III!IV}TZIIIH 2 N"bδS5ƻa͛NSp4>^oMlm}J{{{xyy!88T111a7bUV!##|A߻w={DZZ,Yv: 8hݺ5mۆ@XYYrlAOzOvvvc"dȡC0}t=zO< |ٳ&M¹s`ee=z`prrR>}:V\$xxx`Ĉ9r~^c"..YYYhժf͚ƍ1E .w9Niۄj"En&!k-gy+O++7m7I {ذa6m.],@.] 
B@c222[nO?-˗/Gbb" fޝpOvYȞД ؕZcJIRKPVZQ#Ok-[5k$Dǩ9d朓fΜϼ33s߷5rrrUp7nTXѠud4h틟θx"8p:uB.]0sL$%%ah۶-N8#GĪU0sL4l۷oG}4|ѢE ᧟~-ѪU+?~@DKɓX]%)ҲTD;hZ_qqqubccѿP=z:tO[[[ 08}4j׮F=w}+++4iݻ_͛7aooU.E7nbԨQ/\aÆv"77W?$E闝9sFXXXٳg|hh7uXX e<o߾ϭJ\#/\kjB$KY7yȞ={$IO?$I8puÅ$If͚Ey@"##N:u޼yW\)t+W IDE ~?=/Rm(lGZt)1~ĉx`a(6m lذAlƍB l߾]lÆ hӦMWNNNѣ6oތ_NϜ9OOOxzzXΞ= ˗ѳgBjժرji&0)LcͭVg߷oKWff@ϯ^?p{ovu:vX,,,n: [nyΝ;رc^ի~+̓6@o?jԨ|Z-RRR-:u*̙oYBBZ-bcc-?>-KOOVÇ-*t}q9r,CGPPYFF}ƏGݻߏ W2#((,0QV~ɓBwpvv.~V{+77˖-vԩ-ңV+WF^^#5UR;I.E;vkkk"Ν;j-섮R^=(ג$vt3>rOԩSHJJ*ʕCj՞M{{{=," Fgg|GЋ:u _~%w[k׮hРekbر8vB̶m-ߺu+qAFjժ|#S=zqqqѣ~Yn IVXo{˗/}Yݻw޽{@ZZ֯_Vo-*>^@/1$I”+W.$23F̍ɓ:u*vڅ=zয়~Rbj2eJ}X}PN|Ҿk:t(NoIdL<9s};~ömйsgdeeaڴiJGQ&DEE]afJnU˕Ìʕ1 @ZL%3cy<<|k׮-QSҔ!&L |}}\4iʷޮ]DӦMEr儛-M&^}Uakk+jԨ!,XP/]$w.*T Dɓ'_X/)$'8~\=vLd=5*1⿁6Q vvvGxx k׮ڵkYYYaԩ:uK\+QI$, @̽~%#XDDwr§*akKOW"""6@ʕQqq{fd@"""BfɷŘ<%{LLT|1`nDd!v &cqqoo]+oSLm̍̕YwBj`r<7Ub˝;uOMZL935Ο?v TFS DDe5W>a}r2zxx]`*WBeݓsJ DDeH/ha|<ڸي q^:␖v)T999zja/Ç7T 1$IB?^;v /]OSL-e1^Js#:ZL3rdk9~~X!350713y)M"::W{{{0)Ls-OO© agQz2̔ anZ43\r$,ǵL|yj\2Ss33!"*j:8`ÇjCDDe DDex__ 'rHlY S1l,,4 iiƍRهef8f&s#BfWL3skR~5n]HvIDDFk; DD'++wbURc'^YYjCDDf 2 j`r<ۼj I>xĶif8f&s#Bfaܸqj`r<枛 _ժX}66= an46@,,X@L3,6 A..bo,dVᘙ<̍!h8f&OYM$lLr+ f8f&s#BDDT)W3TW""2#lQTNNxd] 6@,̙3GL3,f)IX NHYIbncf07R d.03yZn;:b+`k8葬mJ s33$!P҉A`` Q~}!"dnmCo IR$""zx^ǃM!""[h0e\T""2alQ :8}p|q}{x %%EL3,6jU8ZZbd|< yz,gVpLFJR*İaom ^>ce3C MJt掙SssBDq,$'g>W3+f8f&s#b6lWtw9 +zm;ƒl4`'0{688=Q2ޑvڴij`zn=</LYL.f8f&s#q,#b*|CxXטzPhZVˏ協4d dDxqpwDz5.HQFfxNtOYYʩ]"Q(˒ _''.ن,-K??7~~û^^hvIDDdB!ceT{,3O/{HL,|ܗֶd2'UE#yj4nlQBf!22C-=wRB(5Oݹ\^zT)qRclJz*zxx1GDDcGooݫE7n|kV` klgTVxefT))p{^gDD#Vv &G,,tdm \dd$[f-]'_~ Z5P L~uZ,-?ݻn3HiBf?TzfVVժ:/3|9#]qqC͛[OT͈b>t o3HiˈpT"xP7R׳t))[O)q‘JUjv6j;VV-!"*q^+>!"ԫ{=݂MN|H]O7N*W=sfmUû㷔tuwW$""22lpq5ҽ&pvɡCĪ$C+ggTDDkJfaƍj`rTfM7!Cp`:) @κmƌڷ^}pp4Wh$IXu f8f&s#md(uر|޽M6<<<Qre١f͚X`A|2z899!(('O,c% **JL3+׍Եy3EH]s#j56zvvRn+.$gpLFJ3NG6m֭[{VZppp8pڵC.]0j($%%apqq'`(.0l0Z 3gDÆ }v̙33g_/99Ճf̘[[[㯿_hDdn7=u0vEF-W4$N#"33sWF>0ԨQk׮?wWR͛7Dze0|pٳgl2̚5 ~)e˖HMM̙31|pѣxWo&V)Swi2oo`N]#{''2Zqu,WNO􄧧g˗' WشI7֠A@n-k x{ceR2 ...?~o`ZhYٝDO{99X[|53̶#<<;vDFСCxW0~x{~Z6߫iӦ۹s'ZmϏ5 @"院L:sɷ,!!Zϟ?aaaCGEE o߾r<0Pd(_[7`b`B:%=;u۷I#((,PdRd#**J-VJԫWcǎ-2(cƎ+$IbB$m۶KTXQu~g>|($I&MB.,,, `!I/h@DGG=<"RCxjWb~IJطO}PRkgw@^F$Ԯ]pԩ>}Z>뿑Я[\9TV۴_く?>W1Jam%jBDD*+S bÆ x7`cc+QFXjU=8C[n$ +Vȷ˗;v/޽;݋7n藥ajF""3 Iܹ׮]jWdtl,,n!#if{%3f`֭8|0V\f͚!11|~9s 66{ݻzjukCbԩoqL4 K,ɓ_>yoضm:w,L6Mʌgcf<77 2h8~\Œؓhp7'H3j8f&s#ũ Xi5kxׅgϞĉݵkhڴ(Wpss"99zbڴiW_FbҥK{B Ao^yRQK3D IDATQ13yaxV|;X 00Ѩ_嘔t۫]Iaf);w-4R%e3ROg۷9o5xF anZ#XTᘙ@}%S{^f98 81QLF an46@,7NL3DrVZvN,6؋2 h]\P"Q13y) 2 ,P)llu뀚5KfFE.H53Ҍr \pAuvv6O>} 22RTpA13yJ47GG` "E //,vFχ?cf07RQ4@ ŋ뿞:u*O{bذalrwv͒ޱ#p)nnggcSjڥ_e˖!"##' %%GƢET;v7nZ-PCvp@屄3 Fs<==ϟGrr2BBB]vŹs,L9s.03yJ-^= ~TPB}|]\)cϨᘙ<̍f ܾ}B U jG& ==]L3TskD1}6`0@ۗYoT`g|3j8f&s#IB/\v퐞ɓ'#,, 5kڵk˗/nj3pe,}111 Dtt4ׯv9Dիq2X&M`ma#"*kg+={;wƍ70yd{7oFÆ UHaᆱ믁sծF14NDd֬.7nk׮ܹsY&\\\ 2իWW:""$'aa0xh\<'&Q)QG}ÇvvvFf5>wށ呉IIIQQ43P`P`f[ ,T;wp53+2 53Tk:t-[F={ PLC QQ47I.v)1$pD$;gTf&s#K#&&AAAСCuVdggUiӦ]af(%M;N)`HfD?F3j8f&s#(Xp [֭GGGt={DN`ggv*s=xjܺ9TvEχƉX6]Q>^+> *U>č7[no߾pwwG޽.H=qp$+*5 93:Y2Ӽ1b޽8Is'Щ ;wDDfǨ Bȑ#x! CŖ-[TYdd%f&U\tyzn*L bf07R4@V\J*^C˖-0`.\rudbbb.03y":ut?ssծdhie3Qk&HiFYf ѬY3,]OW֬Ybud """.03y&7~7`H/f~N Wd\3!LFJ3Hxx85k`РAޫSNఓDDK 2X2EjJ\}''wtdgt""3c ϣo߾fђX9{)q>>ؒ] h?q-)\ 3`8࣏t(gielEy戈@as"hժEIjj`r9.\/^:~2s$&&F.LQFnsC^SbB~t4^ڵ.8^<Ѻuk?/^DRRvYDD\9!ot9]UHP/^ߏ%Q1#XOV7o\κ~ ֺ!))jWTlzyVS\EdŊXreUVMO/qFK09LMv yPRJ" VVOg(('N-߿?&N FdBBB.03yL> cQ5keIgJf&ygt?Tan4h̛7hժPV-xzzM6pttļy: ~'*WKI#cIw.K:ˣ'&vɟk*`f07RQ=Š+p Zaoovy DlkW!`>A+27nKpiSxب]!^+> 0rH9RR̟vNÇ*^^2ߺjCDD0GH-'5z{x3: 2ȁгgOԪU UT_\K. 
v ...ٳ'\Rvϟ5j~~~1cF߾}5k{8pV1\]:G;%,22޽Rپ\S3ҌuVm8ruXYY᭷*o|g[hժrrrf,[ qqqhѢRRRW_aرիv܉#Gb֬Y5jT?~mb߾}i&xyycǎF_]afen*;wII@@zzn2k^ֲѣGj`rJ 3kgw@OtUw"//)S`ƌj*:t ={Q[Ɔ ˶oߎǏ+;$$BlܸQlÆ Q7n_fiiرcH4ӿ֩V0xc`4ϑQ +v TNJ8bjߏ08@Ǔ}qÆ C52ݻ7fϞ ,\ؽ{ck _m t9TvUj4h_8t>Z:;]=Qٸq#_ ĦM/D#IR~X[[W^ ?<77|N8ڵkѡCDDD?GDDDlllgnGVERRmۆ-ZȱP~vLcfܜ-[Cʬ3+f2R=׊Hqj?&vvvСCB]tIĎ;B[NYbL|]tQ)]*DŊBw#Jfkj&3kgw@򐓓8zhǤp=5#`K09L2۫;v @׮@FK?df'~6&3ҌRjUlٲ/7ހ 99...jG&WL3UK8@~?@z%3Aw3ᘙ<̍f ~-<<>8?83ݤP#`ooU&ᘙ<̍& #襧j~0w\7nףk׮j"::W"B䧟ΝծΞŅtjРPDDrzHLL ڶmzx|KIh{]B5yGȨE$)) *U\|ׯ_ǘ1c`_*WHDD/ee^ 4it>j9\\PlfF'"2FO6[ZʕCff HIIQan/agP СpjYHi4m{\%ƈᘙ<̍f ub…8w"""ТE ׯ_2d%f&s+m{{ (C Pood `gtkcf07RQ4@ l߾kƉ'oǎC/5m4K09LVD^^Ν@ZedVZ77ᘙ<̍fv gϞʼn'{͛7GݺuULcf07s@\J>>tQ 513y)(?}| E&MTo_,VB{DDFh Я_?o~ /^T:""`r gb)I퍨$<0DD( )))hРFGbϞ=HKKl۶ *WH.22RL3.MR! 2mj0513y)( ƍCzz:N:UnG˖-q!*#Sv & sЧ1\Ujhknnф:\33$a===1o<Ɂ N8СCh߾} $&&f0"2/ǎ7;R–Tt>}Izȃ[{99902HFt Sdk7oVIr8zh8qXDDdƌvΝSe5}iNDh 8_~%vءN>sS:""*z 02DD( ƍCfЩS'TT jQ^=4h}jj`rCK09Lf|FRqPiigwcspLFJ3QH*Q \XY)!PQtqs"ODkŧon:_B@$ݻWԍX{wwo |S*yHQ5@8'''/S 7uFWC5|y~}C5Uj "*T燴4/_1=3g:}Zeظq%f&s3\{ )_;;ttu59AxHi5@w^aСCqJ"v & Whfݺ*:$oχUExHiF ²e`R<ŰS)gӧׯ>;/=`gt"*"^Q [B >ǎ_6m`ĉ1bڥQi6L%KTٽh4XG@DTEcŊXn @劈Ը ٪0i3)FHZZ/^&MVZ0f\v jGDDih `Uv_\9DDB1h x{{?F@@ۇxL8+VT42A!!!j`r

    h 0P ^66Xի+f8f&s#6 EoH2A*Q T ntL| oͦMaϙщxV|YlZ&""cbi |!0i׀ ï |XL3HYY_T!i48r>>zspLFJc_v & WFn֮-݂;|شiбcG_""*QwK݇`{ggt"a I&ĉسg>C|6meo Vմi|yΝ;j |~ԨQ̷,&&Z@SbΜ9%$$@6CGEEx\ q8xХ N9g+~=be~8xe8bUTAz0v!Eq|򉰴7oB$hѢ}gB<~X!Ą $I"##b5۷o~w!IصkWS|gVan+boKⱂy 3; D^^PjU+WN*ӧQzuOdz޺u ]~Y:uM֥G# an+ZjVmHa 񛂝yHierAXZZZjB.]~z<|PNBBۇ=zuvvvX|y-_$[neݻwGll,;_UVI&.,Ov & W"InbML[j98y ΌspLFJR2j(8;;I&puuEjj*֯_+Wbܸqpsskذ!:w & ##SL'>S\\\0yd|puuEqqL>Æ C52ݻ7fϞ ,\ؽ{Yえ`\w`pl,.edj!Qј@߿[o`$$$`ժUT'ƀ߿իBBB)OL8ڵkѡCDDD?GDDDlllgnGVERRmۆ-Z(rDDf6 XxꮵRz{xDD$$jA:111 Dtt4ׯv9&%%%jaR'kcf07R d}ncan+ƌbcqtDspLFJco W♵hԫ|]nB5sW22Ju?< an46@ȴ<w >^􄓥%"oR|DD ""2=!y`i^^X}%h39NfNR QQ[8㘙<̍<8f&ss*\{-曊Έx 's_s3x,7»*9 L $'+ (n"r-πg &FΈ±j|_\""L0s&v-w@5 y;v]03yTx}EW{n0ȭ kcf07R?.AsvDD yW^yE4継u-yý11<]ncf07RfY4֙͞  ;.1ݭssSLFJ b޽0`y>Qぞ=WD{`h` >SQx< w}SK.Fpp0n&ܹѼɸa2I&ѣM.wҥ/t|yyy{ :?ӷZ@ 6يV'[T"F'Vc;w.{|Ǩñm6|6l, >s|5\?9s`غu+z!,Y3gl0_MM nl۶ o6n܈=۷oWd  VPtDF*X'y C9sѴj}7hv뭷m۶ӲEoooqi4X%KDN'8p>m ݻ,/45ݻW ݻױ%1??_4̞xBEBNNKm6[4af07x< H۶mMA=po- 6mڄI&h4狉ñ~zo555>}zeN>(bÆ iׯG\\ d1m4ٳOvv{Uaf07i"3R`EW;#* 镕UV=Mf<̍ HSJKK#>>}i4oBB>Z@ZZ}z} Czz}ZZZE =%h39Nur [ޒ`t{MbJf&s#d̙… F󆄄@E񁟟_y.zyx 13y4@z: C F,/ FLnnHiy駱vZ߿\Ҙ1c5dȐy֭[3gʕ+LKNNFbbb>l?~h0}ҥ;wnifرcGIII.W)Sp;n{nu}HgA܎QQŵcσpHJJuÜ9s- %j' NDk-g@/^l-X{]vRRR}.55ݻw7t\8onn. ѻwo.@y9. ]39NSq,_jgDG#ۧi*77an4n@^x,^-O?}q/@EE}DZm6L8>mŪU,cժUǏO0a222g4łիWctV %03y40cr%PWx/'>MS f&s# > sѣ#F~O?xwb(++/pX7okɚ1p@ݻˆrй3yl,FС2[/9k- ñ}Fj999Ǯ]`0p 7W_EΝ}vҥX|9;(L> .^o0_^^͛M6l6xp_fDDnۀ@955ٽKuÃ)N"rZc-MD&v``hV;>- Ǫo@zxr=HCUʌ(쯨y6 8]39N `-[,V;*$||MeLFJcBa֬Yj9L8fv@۶ҥR/?* Iyy[쾦2FJcBaȑj9L8fU@ib[T|EF S4؀]̃*||M>HĴ4lH l@.&*J%ҥg# ޽Si)<+oXID y 6]03y4#Ҁt> @VVϵ4؀GHJJRaf07i>Aכo*ڤ${Oc#lB.CJ)"t#| ZN =Kտy$>tODxxr&Oƃ(xKiv'99@D,l@.[#Ūlǎݮ§yy@D l@c bVTe n0-"w< U؀G>}%h39c2N-k*7 `euhk cn46 W13y<*r.N1807b_yk Hi ]4/mT-b#;G<^k9!""r#?Z`>}Du5"""Goڕ [ ZK"",6 vءv an̼35kw}bM(S`}M!̍ t39c3~o#G"N&'?n k]g}Řcxr_&XqL8llI`z,^NnW޽MQ̄c5cn46 DDDr ={b͙3xaB"r7l@=عػWJm[eNc.6 Νv an]r-:t糳ɓNʽyB̍㖴4ZVvIDԊ!v ankܼu:7>FIIAZeSZž̍<¼y.AscCC1)=?ZOsV17R 㘙 Cqsj*]%CfZHil@\GTծD_zFJIA&AD-ȕ㥳 e2O{ya8Q]vIDal@#\R43v7G6c[ `DJ kk.wL+) ydKf&ss30m,_쏸cn||]߾(X0:%%5is#ylRQQyaȑNŋ7997x#L&1i$o.]8K.xai༼hG5n~~ڧTW#15UV%ٹkf<)((ᅬ:L0t_ edd`ذaX,"++ \sM9s0ydlݺ=,Y3g67pmۆz 7nDDDF۷n}=P^^v%-h^^@ͦvID! pN:PXX>yaӦM0{xW/ۗ/bƌx^{-h"̙3={ ]K]vaРAaÆo߾7ovm'""7Ա#0a4ƆhР |ѻ7ƥ➌ |ҳ't&"RǞOȽ- 6mڄI&ٛñ~zo555>}zeL>(bÆ iׯG\\^iӦaϞ=8}6dlAծ)F`MϞ4/DDbOTWWO>KHHÇQ{.iiiEFF",, iiii]&s$&&]03yY=\egJnmw{S̱c֢ s#轐2.5788ؾ,(**2믗g֬Yj9L8fVϹ[n :tYQQg.x1;8ZZ̝07RZn@՘1c5dȐy֭[̙3;99?xWL;~8`ҥK1wf3cǎӓ]SLqvܹ#C?#Gzvyyv(1rHImj;FqܘHJJ,m1תq/@EC?~۶mĉF ___Z2VZA0~x & ##{OX,Xz5HmT+uSXE;G%ZΣ/"""rk DDHcA<̍ȝ<(p й3+@y) X۫ ¸T$&j؀ 38xiS'R+s9_))4.\ ys]03y㘙 :atI֋/;<]K |ݧz{c8^]r_؀GQaf0713ybbbKG_R#`v.六}@/?jk9k07RFxW""|׀ex`\ *J\Ϫ*e>Dy{c[~2.kN3 DDDZ-5~( V5 8qBꜮ郣*U품 ؀iMHxԈ,Z$%]tI0uBPg]j9L8f&Or cǤ6ݻӧF †޽Ȁ"Ws_؀G7o%h39Pn&0otWo;p] z᳼<<|}MFJ t7Z5"PW@eџf 9?I|\ ?8""4뫫 8:wcqqeGV]-yeI`$LI߾-R+OXر#^ܹ{ani-kIPg4}| @zuHl&mCsXNw9`ALlΝ_RGF <pK^q U}QQ(Xğ"`:&s#јZ4v9Xcp9K5SD|C"9g|gsj^--Vecҕ.))ƍҝQP)ߨDFA!y{KM=kpR#2tx(7!zm<7KM5K{(u<RjJ꿎֯ ӸAߤDDi 1!. Xj륧_w:ΝQlo20)<\품؀no!$; Ft6mD A&`BшP F?BF56 ϟvi* >^z5\[ѣ ~{Aimנp_%&u/ \stF5 eݻb`SB ;JJcj[P^S*K :*n8Nmlb15{MMkju^f%h3GNn&лjJiԠ;srAٽ[zCiy.~S' ,t:i`ĉMR#2r$0hԈ~;RtPj`Bz: aE5P;QRFfEEmk˥5<_ĥnb.l^zo%#X%%/}yy>{rMWDغUjDv u-lblj*~,)AƅbBx8nh~r9xuz!7)E:s UxF.No% ͥGPԊi#_Ԡ\xyѣO?VIcT1>srU`(,ȏ??/[jD&Ovk{zo_V^XssctHƇalH.@"ZkoX.oo4ҧ/mN8͉GR{O\P)A:Ag^Y=:A?/?{73)~ HMCpK "PTɱc'}E`/jD`pctFۥ>0u4 1(0/w  w< `X6[G풉Z-^FxJOr^zVQ[*6x2nK͚3]g5]uה"DQMٿq{-f}>R3rFg| E˻νtKaaaN5qM]F䮻{Nev6#?"d„0 COV}fm5Z˱q#ܡKLLƍ.CSZsfr͊;n>^*Kuf3gsiyyrPSsMڵҾƻ:'ݾ/[-XL.[M4'_bCAjEw???ی  5#n97Zdf f&+s6TU5aq96e???)E~(-Gq? 
NÚW T܌8@8bhZ@>yMEͬfX_PȫC7n 0\  o)Z MDJڬ.mpufTU5AhOE,jrbQ{:(Ev+ڛNؔhBf&d ft'x@ӁYEʰ gULz=n p 7BZ˱q#ܡȓY`3tidd"0 YEY,DF~UF–(oe,bCzα֯35q7?^>H$cYMQ^orx "ʍ.=#ex؀DԚVJMdL sjq6=PXDb#,Wu r@9sٳAw_mF~*-U180>n%LAk-čpoʕ.CS6"!bCc% | ڸtFZvoO> _oJt٪, wEFHV|_\ xit8qfdX6Hxč&"lv+w{,dg!ג 1$ |tXEP1 EvN{ SP 5$KҝڵS2"vǍFg>:$&b<^k96 n;4޽"v?=fPQLxEf'D7{hw{CR9 8o%5#f3pҳD:vT2E);j Ƴv$Z MDZ $'K,{(*}3%3> ,sM6&]CiPY|9k@I pZݺ]—g~sI DCػ3Txrl@wh""l#盒s dm,DgƘ3LוةM&AxIץTV# XXS2ʯŦbTl`oF46Z˱q#ܡKLLƍ.CSnᛓz+h𢴖YՊEEX_PM(XaBX صxr Ea֬Yj9L8-f&@Lss}$'GHOc{-60=RL.8Qv¾P&n!%]dn;c/ L 5"i-1!<u6~7}٩S6psh(ƇaTHzԡHxč&"ҞgJ΢H*{bHe˜Y%]1A1K#6J$2E!Hc$<`)cO? veEf$:F~sh(9]5<^k96 n;4g(/oؔ8X%5%h[SCES.?mppR͉1c : _`~ @F0bԈ\sڕ)pUaBx8n EgbWZ MD乪ԆMIjt`Az8ߔ DE@`*EtU'@@1Qƨi9;m@[u)V^|Q gke⫳ؿ+*B(h7 ]k-čpoÆ ?~eh 39]\mtf\Coc2 L& _Zp :S>|r`6䠸4NW4nZTMk :D#lNԽlСQHk-|ST \TR|}AA04imZ UTT`ѢEQTT8,XSLir~ 2vR Maf0713Xm'L^څBaⅅ _5^EP+| ꟃ9(sPXsJ^eD?D0 2FǢRJ_k/-R#{7pR#2n jZ ?`}A,(@Nm-B0.40"8~οZiP7qD^;IDATxW5k`ԩl:uypKf&ss3s^ttq.?(M7&N;z(,ԣ0 Q6XNKJ:#',XmQeANUށU4X޻Ψm#Q`ছOjDn55Qz}|<.o#BB0"$˺w'ͅNQ!!A!^^() |d?qu!;;sŔ)SSDDADd:ujjj7* /Gaj{bi@_\ˁ_x@}&T؊|{)@AnQzt׬,\N׼Fr]8_@P^ :AU*0KtAٌ/bkF>}Lh(, ނ#6 .~zL&z O>wq~W 2Dњ\zEW׮ֺ.9.5}-:nVf1g_$o ҳ0TӀ)0堮MO8$sPut'l*Q+mN_?".0: ipe@HLƉX@JUQzbLkz y{;$cᗞ_j.s}_~Sb~L rjj }T WO 4g&n܈'N2|-]!Z6 .={6:ˑpɮl@;GNv<}q䑻nxILLƍ-}QX&&~W"bNmD~i% kNRjSϾ۷؃Bx6, B_`=zAztQ: B(gà3e[qxPgVZWgZS[:>5ȇW)xWjj`0W!\+W^@4`ma!n{uL& fJL={'hx ;b(Y??5jD5EQfk(g_Pv-V;U)A~p7LMͳz5N}=9),,DnM j]RSII ]log.7ߞ={\mM9OLq_Sj.lA‡5~;JJ(-K=7P U"E5(G#d  D V@gٯ{&933t,}ڵK/7xo͚56me WƝwީv3 .Y"5jV^N:O5%"""r;8z(Fv)E郤$l@RSS{n0vDDDDnnСjiL0Xn]VBv0h *#""""Rπѣ1b<(++C׮][b͚56JBXp!={ēO>nM҈T1 d2aСR$裏GؼyhѣGq7#44& #Gđ#G.˭a A@@-[],[ :[nUg4a4a2`20w\KՊ ]v W\vInm߾}}d2h4BV4ao.ɭՊI&Dyy9&O˹wxwDZ7TQQn ǏGQQz)S][Ú5kPTTJ̙3__X~~>}]tER4A$''?K҄'|GŁPVVxxkֿ>V^^?_Ksk%%%:u*x f,ZSLAMMڥ 6 {n̙3999[R> >j ///̙3{U,兮]l6rUڱ`\&//O+E???QQk4j~aKt bBB`;w:ulXp155ۢggV_~͛7|ո"ŋŹr_;'::Z_]Qj۴i@1,,L  $Jm˹z_ǏjTN:%t:>mԩ%K\-Jqվ-zyy999U8;Kƍk0mԨQʕ+]Z3 .dZ749p^+^ѣa6 <^^^Xr%DQěoBٙbAuu5DQDmm-%9;7ٌ1cꫯ3}ٲemwh 39ancf072]{gQXXh͛A#88+V^ &M‘#GvZ̙3Ty ancf0713y㘙Smp1[8x ڴiɓ'cҤIխ&f&ss39anbBDDDDD"""""R """""R """""R """""R """""R """""R """""R """""R """""R ""'Zjt:pF6 *Tt/TY:n& :=Etxᇝ_~/FiiӖIDD6 DD.PSSE5  WMgFjj*>3޽>%wfl@\  =k׮EJJڥ()UW]1cફB\G:<""7oBCC1Kw1t:|GtXx{:[0ɹsʰ0VDD$%!FFFÇQQQ 9vtt֯_ĄǫW 9VFUUٳghkkCDD144|kK߿nnnXxN|ΝR*9?fΜ !gϞWWW+ a``؟q cǎ:Ӯٳgc޼yӧ# {nDD "?hC044[>}αJr-J5zYfa``سgu>BOz33_ʽԥTgVΏh}޽{q=<}_~EQQBCCGF޽)++SQT>OOOa&o?^HyWߎaLc14Oz{{"7ڇann7o|7ǣo H$''#//˗/GOOi*++N.]@EEgxDD "?֭CffV066ۮWьiիW555899ax1Ɣ!''Xp/++ݻPaiZ\v Rڙ 66VXӗ [044DGG6l˹!%%ׯ_GKKxHD4)!"ǎ'z{{h"%nlleŋÇn#{2Zሎƻw*mΝ; lٲQQQ܌F)Gii)6l؀#G`Μ9}6N<8888_ߝiӦMMMr^۸8梡/^; 8s "##R dffx9`jjo2e Ԅ$BTWWQ>Xf=pssCxx8._wٳB 55F^=٨GDD|rbJkעYYYG?LMMm۶tcnnZb߾}=rss8~`pqqARR:;;r|V“'Ow())AAA5k:u #,,,,RP8y$:;;!=i^DDcY8LDD4 aܹHHH ""ktuu8q DD6DD(**/ZZZP^^ZDD,"""""0|BDDDDDM DDDDD4aXфaBDDDDDM DDDDD4aXфaBDDDDDC2}aIENDB`pyzmq-16.0.2/examples/bench/msgs_sec_log.png000066400000000000000000001650761301503633700210470ustar00rootroot00000000000000PNG  IHDR XvpsBIT|d pHYsaa?i IDATxy\,}( '(*}VjjWXY}J*W]Fi坊e&Zydex ( *r@P|he6m±c0|ptСʺ'y'"!"аaC4l+V_&kҥ@uV4 &Lٳȳ>+CTDDd,L :u*.\;wVY 6`ذau/^/f͚6lm{m̜9-Z5ヰ0$$$Tvݺuٳ'aaaWWWt_u / 00vvvA˖-o͛5|mDDD 666hӦ ̙g}:t{{{888{X~}&Q?sΰƎK.U۶Waa!}]hVVVU+>>#GD `mm ___KrJʕ+P(jժjv؁=zT;bE0m4xyyapwwǭ[c1,[ ۷oG>}0b!.._}n݊8Wl_XXG}GEpp0Ə[n/ݻTons-#@jpBlݺO>$q9r?pqXZZV{ϰap)1hذ!`=z4OqqqXx16oތ}U&H_~=ƍ[[[3ؿ?zvպ txuaعsgflO?aΝ9rdvƍصk8-[u.~~~Btt4^؆MHD""#'-(N0A+ևR'L _KJJDIׯbFBQEرc bxxxܺu;bNNEQxb[z(WY Ǐ<33SlԨ(8iҤ*ᆱT1bP(ģGǃwժUn% zxxUM2EA,ӧ(ةS'1//ʺ;wx*fϞ- ߿ EX9΢x*ۿ Bϟ?_ܹs  W_}U={A T m۶*,X >c(bӦME??CD/lED&eԩ(..իIIIHLLĤIjٳg1}'<<<0c \vbbT4iҤ?՚Z 
;wn^^^7n`͚5ի{*駟BZiKDD7o^eɓnήʲXܼy&L@׮]+o>pdټy3n߾ɓ'W{WWZۢE DDDTYֳgOzO<XeٴiЦM `,"2)={DVl2kP(*npkrA@zz:f͚Umٳg8{,ڴiSzYmY\\J%JJJjϟ3g`Xd \r?cŶSNԩScӦMX|9틴44l񈍍EXX6mT#f9:::פ/yɩq}Mj5 :OOC k\k.P =E/_}\\\Ǩ)g8tV'+{nP剙HODQJDDbBD&iܹos?6lz3{رO322*T*Rڵzzz~35c„ (++Ì3,ٳmၧ~/j+WUVȑ#UEGGׯ_w՗/_^e`ڵՎ1l0899a8~xu|AO,Եuj}J,X<#XR,j5n""U $___i[sss/xѿ!!!˗q!>}W^50rHtpuuEvv6bccQTT7|b#F oooa߾}8x BBBЯ_?R'c|2z聛7o"66~~~hܸqO###5k 99۷at {f: ,@zz:~mY<p $%%!..k׮5ε}bogg˗cݻ7F ooo$$$`ΝĒ%KgҤI/$j /~W :7n[?ݻwSO=F8vz}=9*4tP 6 #F?}va… >>˗(JuDFF]v]\\D333UׯiӦ*ošCޢhcc#mV|k%N8Qtww͛o'8í[W_}Ulܸhee%iF3gx Q_bqbE'''\lР#y9T<d֬YBw]m][ҷoZ('1B---ŦM/xʕ?uO#{W\Y9l۶M޽hkk+ÇѣG =Sy;~w[n">MQ\_|QEBQkDD""իWcĉXdI9?H;DQDQXXXedu\'Oʕ+j)B""> z`( rBD&ڵk5.aii02-U@.saԨQ2DEDdDDzd4kLP1(sppp@ff&6oތ;w`hذ!g燢"߿/Ȩѱz >SC!"kbոsЩS'+|%[c=_~ ""NńA2ѡÇ#22wFv0w\ 0@d' :T*1}j&g,@SVVTdeeM6qZ~ٲeh۶mx;Nڵã>+WVNTW_EFУGiAAA8uTك 6HMMŘ1c3"""""2<,@SVVOOOXb!>… ߣ033QPPP󑒒cǎ!)) ͛7… 1uTa4j7odgg׸Maa!Νc„ ,_!!!Xj^|E3+knnwwwL  |x;v7㏊~'NxcF+1ܽ{x.w8F -[yյc<3탈to͚5x(QAnn.:GGGtAiF}$""B]F7tew]g9SwxkxkRRR3TܷXPZZZm]ii),--5ڿ5M6UgggW=Rg˼ﺼTsyZyZywHuKWV[wU4jH!$<<_㏘7oo!L=̛30ocü (rAGz lDLE̙z71gaTǜyS 4Ā_нzsE[C""""WLL bbbp-ݻXVDDDDkc"""""e8bA-?aڎm0;֬a&^+e-[)SQaü9SF tqA EQe+J,ǿt(oPxo м}iqߪiP`7q"lt;Sț50ocüaGLd^)+}-VAvv>!"" 4' (""`M*-+z(@ĩ|<ǼL,-8U)HSKDd*EŠKIONY[W+L'A0ͭbyRԂ˫(J]B5 \BDDLBXX!L=u͛BШ6ozڽ; ‡~~hmk;w0MJ9xCN;gb8R#PkM=̛30oo|B&_;ÜGӼ5DWWwuXVT"*͸~v /\X)F 50ocüaB",'+H|U*HL傤=ZJDD5 (XD>Dgޭ, Z0DpcKKLD=|b@XQrKKq!NYܼZ@;;rBE""~Ms|NO&!66VsC͛9;9ca˖ױ#ns]"m[Dxysr蜘{aNNcKv62 ϥ 5gySsC0:̙z)o /6t. G}Vİ'wCϣGRZ_o#aL93$̛30oole@*5o`[{b ½*k^mKq(.Է}U_fGzEE\eBajPџ*Q=&Xc'tC xxH_⽗*k^UuK8ʚ}:M}{LBQ@19< hbe&VVTiBŢ*W|w yBvvvp儊DDT  7"Wm MU*2Dw0Z#)dll-vWӦ&J@{{{ZqqpV_ p/+d+zUɋ7 DPNmq꫰sbc۷8@i^aRӄ5DCKKsqXVT"!W_/\77|ެd  y@T7i$XB0AL4iV|ppo|<@QQՋ`1a `g <:nNM8Lhl?[Ccп9SV7D{XhvjRр0g0`% ߪ֭7ԬY¤E yχadX|2>k7fz{ ՆcüMI}ˀpT"TP:u()i%кuբm[Ho&Vi)>p/ |}1lHDFkGODDdk Jnܨ^+wt ¤J#T#gss|ެ^jgd`Zz:t 7kannL""X GzE 3S*F C+YiI@Tkkl^^x8y=0]tϼ$۷OsMWO<f p$'SHFl,0iЩԹeK`H * ذHM嬃=voAA--EDNNFzA#4LUsٳga#K,,c}$gHC> ,]*-%SOmHIǎs۶/}̺l+CCukE@\^MOGGUs ;5FEn̙z"oYYU<)m TCTݲ2|s>="||z6Q\k9Sޯi}@$3E<1 u^8!Gq1V@F^*f==8s.9{?eefqEDzUy"½{ X>#"qrQr-?"/9g`xs5CG6_;I9s!L=&o@׮s_ 7p:p*s'@ IDAT$<ԄxeCkZ #vwmRQ0>% CSo5-büC0:̙zuhPzwoR ;Wiɮ]@Y|<<__- ܰDed`CV^moBcz}9SF&XȤGO? ۷KOM( s331'3kӚ4H7x9&""ݰq84n -="{33D"sg<ՠ"ϞE#Gs| !""jK`@য়~O++,n'BCdgq))蒘nH3,@$aü.55Uoر7Xm찹];ӡ$%aHtך30oo,@$DFFaü"goHz Q3>8p*?Aqqxi\.*tך30oon@ةI}.\(*büƜ͛'"> ,] p"/_Ƈ(T*7fz{ܰ䵦:L=̛jx9>!?cüƜ:nyy:;B^^8ӵ+^L4?|.]BRjך30oo,@H֭}YY:=9>o i;c]|Xٺ5CCce'OwRDdbbbC1z,@ Ptt4lقpC1_|!L=̛BIL}P:#(薘H/(ך30ou-[ ::ZP 2 r`L30oSΚ5||>}?}`PlrsWӑU\jkMu̙z77e@8{yy\!\ -+7.!x^^53kDdx9>!""aol"uPii^=13Û>>8ӵ+&{zbVFZ9Wi ""2,ӏޒ 1pn);#&>؞4LBvv!L=̛ʙ }|gd@ 9?Ǐ;:?651gaHXI \T&qS',n;oD#Gy nhxTǜy#}(X*=cA cм䕕anf&dfRMbZ&R="S5$ -, e QH=<0>r1׮Aj/o4qa߾ғyZYaIV8٩0.%]ϭ[rFDdXIXl!L=̛괞3wwi>}?6k:t$ =qjך30oo,@$$&&aüN'9bc!z SggƏ88`C"oX!&&FPǨQ R+W䎨kkjˆ';) `"bbbC1z,@ Ptt4lقpC1aaar`t30okzBwltcGPrKK-1^PP 51ga&<<[lAttܡ= d^~eC0:̙z7=gI=z*PlrsWӑU\kM ̙z77e@8CIIRK%Zal2GEDL+@~@Xr=]O´4q,"21,@t7'&}f0k۴->վ=ܽM""R ""z퓆 HM;:쌣hgoǎdC"22,@~  ;:i`iAAMsx dQ!0i$C0:̙z7tμ{bǀ~; ʛ ??l B;o1:dךcHXI0`!L=̛ >g..Re@6GCCceIIxyl FQ GU "QY+EG+ kJJ}ffb;n 'ss"2I_OXXBԶ-u ! 
H˓;,"!""*'ғKF6 +8kb"|^7""L¾}0gaTg92Epv`@97͛ cbFx>- RSo$Or4eך`HXI={!L=̛6gC$'z/Z-C6ؘ.H-(bth51oo,@t$77]v+СCl۶ML0gaTg9 ؿsG)Zy{aCą@)MH@̵kZp&#`ڵA~~>"""0vX2Yr`t30o3n-MR"͠^HHa\J ^JKCR}d¼ J%,,,#sTDDƍ={ _?73Ú6meK,r=Źw) :̘18)}`(`#R _8%%NHrED KNNFAAf̘#GTL̙30gaTgR9֯Y s88 !$}v$ |˼XX`S` cŋx4) tv<}1kMO77A E,<8y$ t*** QQQUS*ŋQVVQѶm[_%/T""S2q"JsrHe=p44A>ȀMOFyfDEE'qB̝;ǏDŽ P(˗ŋXjUm{9899aÆسgosss}àA]@Z4L rG}}'N DDQÇWYޱcGx{{?X֭[7?~w[m6jJ!r`t30o9Y]{wIw9 fb{PAx}[1h^\k:QAzz:e˖5o޼b*nݺ!qU{iӰlٲ*\_|Qeم V󫍔QPP0۷L4Zlcƌy 6$C?H8@?_~$C?H8!?{gO$.]yDFFr6hh(|;) olj4?Ȋ0JihSq/: ""~H5^l4hf_/Ddd$.\//*1b8rðS.\Q cf͚{Xh޽[|(..3 (ׯG oggg<('qT""  ۷rG2/- k]Ëa` 2}_TR>7o!""*2p4am@rG{33к5z99tsgc#whDdQb't""ּ4G'л7kMи1# rrDеb't?*=sMuY 6EzO61;8 14}1ID9RqS V7섮=,@$L޼c FLGPsMuC(dsLuz89hH:cc0#J= i9F:nIe<,--(QAǏ#X1@ n:hLAAHCDŽ}[Q(X[l!L=̛3,ZeK˗VwuERh(|') 23yyy=,@$$&&aü9SĉH ^z 05®7Μd*-1xy#}D]|AjILLDHH!+,znWW#ҚlLLM9~ DGC"3ޯiO@ 5q# 0 wwGbHQ|wNdabBDDd6֮m>Dh cbFx!- RS_V&wXDr@EDDNDT DEI.]Hk ,nHÆ;4jbbb[nQT&wF9S:L=U{R!2np|A ".$(Sb"b]Sk?ü G d^~eC0:̙z71g꩒7XF LGp$$#1.%/H~/üq,Q=IEF'DQWE[;;laU R I<앗a嗙4}![`n݀g;"7nPNNFpBVn0wwC#"-aBZURܹSA;w|[[QzI7JeWYYݖe[cU^T( ھ:U5XZ@ht"d0u*p @;8 !4RS1IƧ~~P8cBE9ECn.pn^Prr/wb:͑. 9=q"QS*ulkhQe=[\\`aQR;]~mQ`( >pvopzln_ŋxY4q{^kaHX1QS8~s -^8IBLLNx  aj̘̜pT5fL 9~| 3zUғl?:d(>N}ÿko,@ PDD#w(F#22RsMu˙Scg"8 XP,SEך9~1 ܈5gV 6"_(y˿ku0DDDc'tNMpGPsMuL5kw;C.8`zq t۵o4]}D`C99t8>{ jx9>!?cüN 0l1$/-xvs7G?]k={JǗ_7:jwuΜEew`e`*/@W>7 q0aӥ'MN;ziSwqdQ @r20Ol~_;˻Үt)Ф4BV^鍙 `m@l i7OD}o+oud1$˵֦ l,XkHuqtļͥKu-Few`@DT7'NSqq/|e|--f$^:yw;\kRgЭOEKIo7n .$mm 4' DDdtڵ͓>o81>ydc込;- ODg]Gi U+x[Yd䗕IbBDDFLIU+3wsbr*s\YgO8wN45Z U==$H s,Cf2UA!=e3)|gVnTK*l}__y1 =~+[.}P&޽f@ÇJp2$t&k'W@BdJsÎp6T&@bɤl=Od䶑CM\"[o9"Zpw'LJ11|zqd""SkH`8Qxu'>7__5 -~;]5˗1%K2E~;O "S3gLzFX榬k'RwN]ͩg>#)8tGq:e>zEQQ\IHǚdb@AAAfBCC0pfH3R^z /PzC*U~zk=ܞ\O$KDvf$# SBЩroزfN&}|DHl X3Pf3AAAzGqx҄n $s<2_B|M|+\Jv >S2OId6WZ5f&eU,48r\9Z!YR2m2;֚5 Y˗wu"XaQZȼ#fHb|? Yl ZrUٲ|sn;I B!, S0l}{v{ȴ8rHZȕZOf7Vk>֯;] "`A>ͩxpd""+RVTf=p*VT&zzmn 9ӃNt^љr٥ODwFAvУ\w1Lxl5=kٙU\z7OƑ[jX @D0h #86R7fy(]r+V琔'Nwwd)MU}"S= RV|B)CuOO:GDp'ʄd"BqpzuIXe\B!4*ZVVޭ,`8gh1{O܉C&Z7 ZAnpidbNNt$η d"Bd;+ o@kp}_7EeyzI7wn Nd7]]YKALJ B!֭pႲnɐmՊTcmZv^I=HIMn`{țV2D4vU+W.˖eڵk,uK8B @DЧO#86R7Z͚6'YԁGO>RvZʪUmjU>]Yp<ю,XOweO "Sh֬LzYf{Bb"ԨD qWhϬ|6Jlc}( m4NF;L&3)-Ħ*&2?@dV!|a$eFڒ% Ϥ=m_!vhbb;DQ3P //+gݻn?#dD <%Ǘ&-ʕʍ={}B=xx0ۛ[u;Hll,]tG@@ޱBvT Y|}aZu/SzF%aذ&N;],E :wG]F*UDϞ=Օ >wLkzGp8R3mnIpad$TA@ܼkU7̶3i]5wb=vHme-[* ~UwmcmJ2TsDwu @dϞ=#Ӛ4iLzR/AHrud'08Jhچ7O)}14opvkcɉ+J(R ҆kGҥKԩSӼy}45iO9PfHԓ۷a0嶬f` (UupW؛黇RyK1ܹիCBʽiٲexrC[$,wQZHII!""_K.bbbطoOEGGӼysF1Iʹ'5o ( oNC]m3n4]ԔϸWҏUF`V(Z|[$c/^dkLqn"_UTTƍ3^ʐ!C(\0LJʕ+kݻGyBGբaرߦPB'i9TN5WFaYȇ/H|x=*+j"_RRR(RFb_+>k &0}tf͚ERRgϞٙ-ZxZjEc]8J!]yz_mYS2OIʥ{0/50ZSл7&?wq2Xデ##IB !.\0ƌC֭NJ=իNNN)SsrU,XM6w^͛'ʕ1bLzR3F?v=~^;HU]INM}Hk1`teX __}<- &ۙs1xL9ߏvea3~? LHMA6?[΂ t>#~bs@:KâEѧfͧsYsv.VT)TBPPS4pm ,رcߘ2e #G+Vm:t@xxΜ<2B:wj׆^͛L?!y`CZӇ*4nwKIXիYH_Z\]]HN~d[A!PlYXvwwڃy>!i8eѵ+\wpvfW=ӧ:!T EƍO=w .lH/,+OjM=6[Æ0g5yrXțǨm%ݡsgHLL~Uȑ9ބ޺^7xdB5طosĉ3*((LhhU9RGjM=6[G0jdйbgW6Jj/+VC{,3k] dHѢ ;w=3Cq?XE<%::b2,ƍ{[ʗ/oO{/L&ˡC2Çgh?YѥKpfHԓi󬺥Z,]Z,ٳ[,o?Y-jfYNˡk=ewӧ[,`X{b{oYm~n"kg&W7III|gDDDмysg3f w!!!sRfMj֬db͛ǏӨQ#/?.\ $$!C_fI!$$ŋp/mbcymk\|gs4$zT.8z'Q!{zR%8Їeo۷/3gL1c7n  xjZ8s ?W^y""" )) ʕ+sQ6mIJJbٲe|!4C6間 o@4]Ԕ>5LP tv%eF[cb$3l aDRP!fΜIvX,.\=z0eʞ={8t1BFB!+"Ճ , \\W?>9rNop,ԨM*!Yjs"*Wy|z5 HLL   **贅dڵkGddlBEfHԓi*[g\}+n:5VHjjqAf<>xEZG(.'$=2clAЭnݺoFܹPRRRg]@8x#86R7fڤn͚w7ߦom/'1%}{e Q`g$3kN&||prKd$VX7[ dڵ^{x>CF+W`? 
.:䒞B-}֯-ӷ/B%-ӑ%d2$!9YuEF‘#\Yipoxy1\9dirqO?ڴiիWӞ[n5k1Bɓuk'ҷMR eyrl+o@hXa.1YA\.[o]c_w ?]6.]b\x*U=׷o_&Lc:!B/SFƍmѧ#3Zໃ1n8ۆB}۱VO&2.N8BhfСCٽ{wڿCzț7kݺ5;\¦n߾w#5FꦞLu˙֭SRj{[d"vl$[zJ3W_)dcd2CtwSDVucȮ]hԨ^^^ 8T4]̂^߾}pfHԓinŊ)'W/HQGnwi0'wϠA}BT55ggVr#>m[BݬAf6\UVqAˇlߟMwD&åf*Iʹ'5&#u ҷM%>a}9ºu(B{Ul,Ԯ~gdcmŭ[trxh K~FՑ3^ʪUXj9sM6ӲeKhr@ !S`\kJJI??3zln i O+n4gE ΝckZڹr'ː3-XTX1Ν;z*'NƍP@:wwD!P}x|~5}۸:2N` IDATD܊iFy`2e.,dRsDЌ`9 .oͶm?e!L&eq—_̙m5;kR"w -nŻm*:uR.l߮wqsrb/SSEojpbԩSٳXO~ذa̙w#5FꦞLkU$pa;wҷ]]t'`w#5FꦞLk-OذS=Jvsfk&bq ?o<6Bxݺ;ARw"[bbTo/?#`YWXA@@@Zy9tժUclܸr45 !~ƍOi4UTeSM|'Ne1%KLSzB'իS"LcDrq2qDz-VXAϞ=xRJ?~\dB!WO^&NLv UfwuINM]HkT ,PfĚ:U4vd2LJt$1 & QQQ<9OOO7%a! a8X<5(рW:\UN`h5 n;sue/G<`z_~#Gdܸq9B>w=a߾o׺|k淟#symZƏ-kWB'5<=\9v`%~R~}geѢE+% l6Hʹ'5Vu3`lYSiP3aT n̤=ˈ'j(+go)BB4qq} {3?f4hЀ~ 6ЩS'l|sBa ;Ñi#uSOj--^ 9sB6 Y5P>h#~#lQj'Kл71+1L(_t 6%?_/? ? 8p ON{\r ̪ žNR&][%}Y,6gciWmfTXrO,t|<5u`"3ٚeC @;w7oޤ@x{{ 6$O<w,!?+wߥڔʺ} /|ٶA3j8kZO2:L+WAEǡݻwٵk @2 Bale CGɏhچWj /pd?daݵkZ:rZd,\_/f\~]Bgk֬;Ñi#uSOj=ֿ? Æu.K6~# TŒsv!?k,R}M*]tvRSϨ7@C޽ճgOf3ŋw$'|q$a3zGp8R3mnIʹw&NT$o͓6/{>-nQ55k C*:9חTEF_gT؛o:x ]v]vtܙ^xh-[ƺu={6Geь5cY3'BOqqmܻB힛w7{^ͨMukeQqNc7?'͎㣒%[qeᯀ|_Rn]ʖ-Kݺu &00aÆ{w\!ayx(=&m Hҫxl?A6%Z>S+\Z4v$o^Ɨ*w~a7o5kƖ-[xWt= !_g(+KB l춑c7yEgR70QsgeIJe|trBqDeHJJ .\xs.\H[!''!^z .UmkŚkvazj1hARJ˟.L&Be>a,?co֬GfΝO<믿2zh5k@TTK#0>}Hʹ'5Fﺵn _}'ìY}kz"AثTu<<xuxΊE>WWVr46~`r+BB`4nܘ9s (: fJとH?6R7fn׶mT߷iOl4լtiX nRr5תwl&((H(`ű`v;wȟ?? z"GzdzUA!$'+ {*_>>t|C5߶MHkK>\, @4vaXp odg*ԒE E2E8pQB,E@Pr[ ~tDGwȟ#?]|.lF G@>PəL&+Wȸ8:<ɡ)-ޱD`[B\CP3yd_ҭr7؝.hFL0sr}{}[DvɉUL&:JSر|}})UKti[ݻwHʹ'5hu{EeVÇ_?Psɉ4+ӌ:~dpͲgWSnJNN0; }}9gmu~qF4iÇJ\r^~e# 4iLzR3mXڵaB QSٕ块SpUZ"2:R%`JعFI+ʗgލ|{ޑD&g& PJqssT^ӧOӢE ڶmwL&3Մ 5FꦞL#ϔ /nm{7.5"a {cƫȔKȤfHԓicЫ {6{t߂7#:.jZ8`:dy4ɓ\xPT"33$99ɖ-VT)NkY0gX,"#j;nBCC ݻڵKfDEEQvmϯwi݄B8>;:tP{dLm6wkMX^^z'ꅇw$]Z#G2jԨoܸӦMʕ+'` !1a ҭbZ}5÷g᱅Qnn"NN.< T{))DFxcȚ5kNG}DYv-K棏>10SNHʹ'5 @ж-\~67/NS]jV^ G;oz1ԭ;+}}~.#ΟC*~rʗ/@tt4GaiӆѣGk. #9rLzR3mnٳCXB6n{]vtYم]{n5YSYe1>iC+yuٲ_7lJdf }pvvQF*Tj'ҷ~w#5FꦞLG[B~@a` ]ə%Px=ڄ؍cή5!CnPo>mJdf)S 6l2V;\ɛ7A8tF"5FꦞLG[J|9lܨmV,ww\r4_ܜ1Ͽ5<6T_\{[L&-WrIg>a]ߟS /BӞ۳g~~~:B!ĿiM`e 2@kjxz2ۛ7n͵kVN%23@>|Ƚ{xl9m۶JM|'5FꦞLT7gg %nT/Td9BRJSѵf0b #uV#glO+WB7xyy1iL6]d]vzFYYS!Dfv Ԫ/۷+jm=6!m `A8 ԔhJYA(UJDvb ߿)@|- Sl4i$ӦMGܿ&Oc:!BWn?zfe"_Í{ROlٲ-6w^<<<.weϞ= }LzR3m2s:t/O? o3|<<0PlVYn>THیt* +WfDFFwѰaCL&W\p:'Fзo_#86R7fdrعS>>l!k o2rjֱ#|!>lެwdͺɟOKbŋmEGa6ٰa...l$[lرcpfHԓif2)rEd>(WN>L;t_7ѣ YBٲz'z&kk(8z:E٩3U/g8s zx{{=>sL*WL:utLg=gUhذ!y!00@c !6PҐoBVbJ"햶cp[?VAJ}w"KI#KN`wu;Rݻwٵk̂1*dZ7!Y9l nnq/ub~r~PNRnV'oq0=sc0|H _'99777:DjصkM64k=B!:E^ ;v(aך^\>UAz@OJ zUl(15ǎqCUNl􎤙eᯀܿ%J bTwL(Vᔎ? 
y ֎;8|0DGGs֭g~=~^!3,Y)3ciYT;_|u kM/R- I2&OP-hCr bEݹ؋#ĐҥKrŰaطo'O| qYFGjM=6Ryxuf3?|zj[З%:j5vLQR=_?8rno:~>+] .R%rrY~J._~ٳGh B_EjM=6Rg+ZT72zxrmf«c,V(&̘JSNƍp*^X3|{ eܹi Ӈ^zQP!Y45 !vr>r$|- +xv#{RJՕ+JS/lz'9Ÿ^rqws歷;vƍ3zh~m !B'f3L4ϙ~{ļv(햶vmԪxqe]Bά#.%.$GGX EEE`VZΉB x-K˄n XÃtYх$ԪaCkX@4vSݝUwG؈ <`̙ԩS___2d.]bĉz{aÆ퍳36l;BL9?:~/yU]V.on[п?&8wi'ߖ+Ǵkטe3axٓ… o߾g2zh-wtcƌTP!]w#5FꦞL[Šj]hb#ƴӘ~.[1%V 7xˋϜ{#̐ 6nܘ\rѭ[7ɕ+&gި @M$_fpfHԓi#uKܹ^jF`2K}U-<ަB /Q6aʖ VR;uR5yQ˖%".^bٲIX!grrJDJJ d\ժUOiժNfUB!ۺZŤk _̝3p⹋[?V/C߾z[O} IDAT8|BnnRjG6 kg+ s;B!LfʬX#F(w-ٍ]VRsVM:,>6aժSOWzBV7;]tsc 3gX(g [NIIԩSDGGST)^|g.&&3gΐ7o^K!0ÕEooRE= 5zsom?t\b+~~Pމ좚's+T 02*9s2L M لhjժ'*Uq̟?ץ2d .LrDFF?H{n#86R7fH۽{7&̚*( ְGU~>'Cd͈ui].XZ (wA2KJJ EaԨQ̛70aӧOg֬Y$%%qYiѢO699, $$$dE&M;Ñi#uSOjM5ˑ֬x4,ŷ 4Ƴ4Ǒes G9>)U528"m1Lq=Ç-޽{?xxxd2YO[L&S#""}>l,އ"pfHԓi#uS5۱bqqXնKې\sYЊX-}- ʑII->[%%A2Nw^bcci߾WZŋu'_~=bŊeȑCGjM=6R7YF 8XbN&'w\L\hw4,2b+ի̙0wҜtvq!Ϗ뉉t"xt gϞ|O=Wlٴ3UV'֭˚5kx֭[1Om;0gΓ )c6anj_|c/_l6sKN6#FNq46V8BPHnܸs7nܠ?(c-R3mnIʹ_5T mCkK迮?Ҙ P:uJYP-zL&f{{S!GڟL{o%11ݻ[}Yv-VٟB!};k]Y (Y\#Fmz{v'00kl*\`$cƌΝ;i nذ[na2?~ }Yf;֢q0\]U*9PBCC{.vu@2@ " !7b2ٲ^{M>ڳk~{iZ&Ъihl,P}|ڣ#k'= B!>\x ϛ=/a]ø|2=$b`U3ڴ_3gNc7Urd~ ܺŔ+W#A B!Ҝ!4IwS|^!?Sa1!rrŋHݿw"R K`ԅ l;L/;Ñi#uSOjM,_> Sz1qڔoç?e\9ʥ~ X/3kJutL|q_d"2x?դfHԓi#uS/5 )z?\>o>\ӓ7g(Uy{+k*=!5'>>qs|$$&tCYB}ߺuкi0&)CjgvNc7gNܹ YCŜe=21UA!_j.c8\T8#y0ף㲎\wٺ3Y3<)#,"3kxd)"D-DH,XB!X,*7¾}tjEESsVMeAXW/Xv,T=^=zE0|^f̪ BS\ԭAeB8Fh[-4Lk/CPމfq 3ʗM/tm#k'` !B<Ҕ~.t Zֳ{K,le|ƕm!{vXZP"xyNѢ :{]YhF0Dd s;Ñi#uSOjM=[׬T)X~?mG>_>` ќ9shQA4H5+L΍D%Dd zGp8R3mnIʹg5iS(_KhWҮB;^_:QQ AZօ3`,Y+|}L'OI;R'= " !o{ سG[G;.RqfkZwmJoHq,6z  _{t|- B! &2VeQmlu #a ]Wu%9UCSL <:uNc7/ Xz\;N&!BgWZ&=Ν!)I>+Nϼ}-SoN+\ "pw'Ӓa6 B !xqX ~ Ӷ&e/w* -nȟ_y;K5+Y6CCC1.svZYP٬w#5FꦞLzzԬA6Mi: wq5_綩D Mdd!c ٓ]h0[;75JAc,2Y2!ƒPL}Eto.cs95O?m&«sSn%PdnM۷.ZTEIHqѨjUwewww!$$D 5M@VCC GC 'Ufc~.qgZZĀx,F gqqeW*#I<#]C* U!=Х p.x 7ѫGh*QN CgXRWTfed+ƒuk@w@!BJА8~3waifR%8߈e2`6~' +*3=*W2[[,Ǐ.GkB!D ,,{$q,15[cf]곫_XMϟÇjlMU jԀH|JrM@VCC GC ie ~1F6L=<GnQn(qnu/= LڢqؠLMx&feM@V%ġ܄ġ܄S<=I O7⮋bap+r ܜ`Jg5Z6XŌId3۷.GQ&B!D;|\ $&x_n2}9 F_c;v3/2sqndKQC!! ݻSS h?3/gx!FE)_o|ҪLoZ \eԥh<F!].]ww@ Yga~/RtU 8~(&VztB!DE7nÁ+čѻ~o, ~[={[`iXX11n7mRB4!ZK4e&&e&&f6lhl&;/)Rlh`hB%hLM8LM8ulBŅBC"Ja&UC]V[s9NSjFBB!D{=bd9 ?:5Vm#G`gBΞΟlmHeң; B!eR%~3{B"]6F̃3$) Uc8)M@!BHjX{e޺=Y K<<<ͰN  NCC GC )̛̝.oK_ă۽JURsIB A-[&u 2r2rN23ߠp(uqctY0h {qOt-JϭO`"`| 2Rc@Mj̄ehLM8LM8M+m[9 &f>E֨hTOT*ɍ1`Nș3@Ӧ_BZ4ġ܄ġ܄ӴW>FU[1WVInl 4l?sA4M@Ԑ?\]]!˥.B!*Tx/eP?{[`i;y `rG`G!e#"Cō1<FX6tUnq$е+0v,vՔVzthK88p8pY@Kp1vv02z$$ sЁx[ZD#hkkkK88p8pYnDXϞ Cacn#mF+ƎƏ&LNVZG#Bt۽{#hԡC1<֡Ҳ%<}AT[7 %p][DJB!/NōQR]C/HrmL+" !BXgqc8qFHJXKaJTUNI|}BΡ  )))Rq(3q(7(3q(7)3??`huqcLh=>-|0c0<́m//k@ާa8p8pڔ6͚úh刁*$m`,` >5!MMxUUaaa!u~JH3ܿ_Wڑe&&6f_|S-p(`h(|ǯ1VfV81d)|/ogkjB/=5Y H]!yMB헛@C լ DE}!&wEQb૭_a쾱s qyKLm ssij!e& (ǎ;ШQ#!5j݅#27v,ТOZY-=-cJ)/T 6m#}=="*F ֨Q#G!1c$9h^2=zBŨ_zB,"bԄN!,]T4e&&g?4h1:/Dz=1SK88pҥ@.СݻƘ~:\k3Km[#fpwwG\\BBB.Eh͛7C&wEӧOd2lڴ pqd2db7KKK~ޱ "M2 zzz077Gvzjdggzd2Zj֭"""^JJ &N&MbŊ022B͚5ѷo_lٲvvx{{K]ơġ܄ӵ]4 ޼>qޛ:u /^(P'MDX>>>طo~z|߰a<~7{}6N8N:{֭P(GNNNxْ9c믿ɓ'#>>+AA ѣGe˖;Gpp0cprrBn`fftoņ pyA7o%hLM8]̬J)];LJ_V'~>*TD=q'S``Fꊈ206t'OXՙ)qF7n`&&&Fɓ'y;vqzŌبQ P(XݺuY͙ d,'''11LV &ؽ{~=VbEq;~xӧ7 q/`Ν+_~uڵDu>^a `lc ج_g)0exukڔׯ~*=E4ZjL5 999ƨQ7ZjΫR '{Gݻw1f06Y[[p…(;wEpp0 qnݺs{xB!je`~7Fz=Y ҫT;wѣզ)G\]] . . 
[binary PNG image data omitted: examples/bench benchmark plot figures (including msgs_sec_ratio.png) generated by plot_latency.py below]

pyzmq-16.0.2/examples/bench/plot_latency.py
"""Plot latency data from messaging benchmarks.

To generate the data for each library, I started the server and then did
the following for each client::

    from xmlrpc_client import client
    for i in range(9):
        s = '0'*10**i
        print s
        %timeit client.echo(s)

"""

from matplotlib.pylab import *

rawdata = """# Data in milliseconds
Bytes	JSONRPC	PYRO	XMLRPC	pyzmq_copy	pyzmq_nocopy
1	2.15	0.186	2.07	0.111	0.136
10	2.49	0.187	1.87	0.115	0.137
100	2.5	0.189	1.9	0.126	0.138
1000	2.54	0.196	1.91	0.129	0.141
10000	2.91	0.271	2.77	0.204	0.197
100000	6.65	1.44	9.17	0.961	0.546
1000000	50.2	15.8	81.5	8.39	2.25
10000000	491	159	816	91.7	25.2
100000000	5010	1560	8300	893	248
"""

with open('latency.csv','w') as f:
    f.writelines(rawdata)

data = csv2rec('latency.csv',delimiter='\t')

loglog(data.bytes, data.xmlrpc*1000, label='XMLRPC')
loglog(data.bytes, data.jsonrpc*1000, label='JSONRPC')
loglog(data.bytes, data.pyro*1000, label='Pyro')
loglog(data.bytes, data.pyzmq_nocopy*1000, label='PyZMQ')
loglog(data.bytes, len(data.bytes)*[60], label='Ping')
legend(loc=2)
title('Latency')
xlabel('Number of bytes')
ylabel('Round trip latency ($\mu s$)')
grid(True)
show()
savefig('latency.png')
clf()

semilogx(data.bytes, 1000/data.xmlrpc, label='XMLRPC')
semilogx(data.bytes, 1000/data.jsonrpc, label='JSONRPC')
semilogx(data.bytes, 1000/data.pyro, label='Pyro')
semilogx(data.bytes, 1000/data.pyzmq_nocopy, label='PyZMQ')
legend(loc=1)
xlabel('Number of bytes')
ylabel('Message/s')
title('Message Throughput')
grid(True)
show()
savefig('msgs_sec.png')
clf()

loglog(data.bytes, 1000/data.xmlrpc, label='XMLRPC')
loglog(data.bytes, 1000/data.jsonrpc, label='JSONRPC')
loglog(data.bytes, 1000/data.pyro, label='Pyro')
loglog(data.bytes, 1000/data.pyzmq_nocopy, label='PyZMQ')
legend(loc=3)
xlabel('Number of bytes')
ylabel('Message/s')
title('Message Throughput')
grid(True)
show()
savefig('msgs_sec_log.png')
clf()

semilogx(data.bytes, data.pyro/data.pyzmq_nocopy, label="No-copy")
semilogx(data.bytes, data.pyro/data.pyzmq_copy, label="Copy")
xlabel('Number of bytes')
ylabel('Ratio throughputs')
title('PyZMQ Throughput/Pyro Throughput')
grid(True)
legend(loc=2)
show()
savefig('msgs_sec_ratio.png')
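The docstring above measures per-message latency with IPython's %timeit magic; the same loop can be written in plain Python. The following is only a rough sketch, assuming the REP echo server from pyzmq_server.py below is already listening on tcp://127.0.0.1:10001, and using an arbitrary repetition count:

import time

import zmq

# Sketch only: time round-trip echoes for growing message sizes.
# Assumes pyzmq_server.py (below) is running; n is an arbitrary choice.
ctx = zmq.Context()
s = ctx.socket(zmq.REQ)
s.connect('tcp://127.0.0.1:10001')

for i in range(9):
    msg = b'0' * 10**i
    n = 100
    tic = time.time()
    for _ in range(n):
        s.send(msg, copy=False)   # REQ sockets strictly alternate send/recv
        s.recv(copy=False)
    print("%9i bytes: %.3f ms per echo" % (len(msg), 1000.0 * (time.time() - tic) / n))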
pyzmq-16.0.2/examples/bench/pyro_client.py
import Pyro.core

client = Pyro.core.getProxyForURI("PYROLOC://localhost:7766/echo")

pyzmq-16.0.2/examples/bench/pyro_server.py
import Pyro.core

class Echo(Pyro.core.ObjBase):
    def __init__(self):
        Pyro.core.ObjBase.__init__(self)
    def echo(self, x):
        return x

Pyro.core.initServer()
daemon=Pyro.core.Daemon()
uri=daemon.connect(Echo(),"echo")
daemon.requestLoop()

pyzmq-16.0.2/examples/bench/pyzmq_client.py
import zmq

c = zmq.Context()
s = c.socket(zmq.REQ)
s.connect('tcp://127.0.0.1:10001')

def echo(msg):
    s.send(msg, copy=False)
    msg2 = s.recv(copy=False)
    return msg2

class Client(object):
    pass

client = Client()
client.echo = echo

pyzmq-16.0.2/examples/bench/pyzmq_server.py
import zmq

c = zmq.Context()
s = c.socket(zmq.REP)
s.bind('tcp://127.0.0.1:10001')

while True:
    msg = s.recv(copy=False)
    s.send(msg)

pyzmq-16.0.2/examples/bench/xmlrpc_client.py
from timeit import default_timer as timer
from xmlrpclib import ServerProxy

client = ServerProxy('http://localhost:10002')

pyzmq-16.0.2/examples/bench/xmlrpc_server.py
from SimpleXMLRPCServer import SimpleXMLRPCServer

def echo(x):
    return x

server = SimpleXMLRPCServer(('localhost',10002))
server.register_function(echo)
server.serve_forever()

pyzmq-16.0.2/examples/chat/display.py
"""The display part of a simple two-process chat app."""

#
# Copyright (c) 2010 Andrew Gwozdziewycz
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import zmq

def main(addrs):
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.setsockopt(zmq.SUBSCRIBE, "")
    for addr in addrs:
        print "Connecting to: ", addr
        socket.connect(addr)

    while True:
        msg = socket.recv_pyobj()
        print "%s: %s" % (msg[1], msg[0])

if __name__ == '__main__':
    import sys
    if len(sys.argv) < 2:
        print "usage: display.py <address> [,<address> ...]"
        raise SystemExit
    main(sys.argv[1:])

pyzmq-16.0.2/examples/chat/prompt.py
"""The prompt part of a simple two-process chat app."""

#
# Copyright (c) 2010 Andrew Gwozdziewycz
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import zmq

def main(addr, who):
    ctx = zmq.Context()
    socket = ctx.socket(zmq.PUB)
    socket.bind(addr)

    while True:
        msg = raw_input("%s> " % who)
        socket.send_pyobj((msg, who))

if __name__ == '__main__':
    import sys
    if len(sys.argv) != 3:
        print "usage: prompt.py <address> <username>"
    " raise SystemExit main(sys.argv[1], sys.argv[2]) pyzmq-16.0.2/examples/device/000077500000000000000000000000001301503633700160365ustar00rootroot00000000000000pyzmq-16.0.2/examples/device/device.py000066400000000000000000000031361301503633700176520ustar00rootroot00000000000000"""Demonstrate usin zmq.proxy device for message relay""" # This example is places in the Public Domain # It may also be used under the Creative Commons CC-0 License, (C) PyZMQ Developers from __future__ import print_function from threading import Thread import time import zmq MSGS = 10 PRODUCERS = 2 def produce(url, ident): """Produce messages""" ctx = zmq.Context.instance() s = ctx.socket(zmq.PUSH) s.connect(url) print("Producing %s" % ident) for i in range(MSGS): s.send((u'%s: %i' % (ident, time.time())).encode('utf8')) time.sleep(1) print("Producer %s done" % ident) s.close() def consume(url): """Consume messages""" ctx = zmq.Context.instance() s = ctx.socket(zmq.PULL) s.connect(url) print("Consuming") for i in range(MSGS * PRODUCERS): msg = s.recv() print(msg.decode('ascii')) print("Consumer done") s.close() def proxy(in_url, out_url): ctx = zmq.Context.instance() in_s = ctx.socket(zmq.PULL) in_s.bind(in_url) out_s = ctx.socket(zmq.PUSH) out_s.bind(out_url) try: zmq.proxy(in_s, out_s) except zmq.ContextTerminated: print("proxy terminated") in_s.close() out_s.close() in_url = 'tcp://127.0.0.1:5555' out_url = 'tcp://127.0.0.1:5556' consumer = Thread(target=consume, args=(out_url,)) proxy_thread = Thread(target=proxy, args=(in_url, out_url)) producers = [ Thread(target=produce, args=(in_url, i)) for i in range(PRODUCERS) ] consumer.start() proxy_thread.start() [ p.start() for p in producers ] consumer.join() zmq.Context.instance().term() pyzmq-16.0.2/examples/eventloop/000077500000000000000000000000001301503633700166125ustar00rootroot00000000000000pyzmq-16.0.2/examples/eventloop/asyncweb.py000066400000000000000000000047121301503633700210030ustar00rootroot00000000000000"""Async web request example with tornado. Requests to localhost:8888 will be relayed via 0MQ to a slow responder, who will take 1-5 seconds to respond. The tornado app will remain responsive duriung this time, and when the worker replies, the web request will finish. A '.' is printed every 100ms to demonstrate that the zmq request is not blocking the event loop. """ import sys import random import threading import time import zmq from zmq.eventloop import ioloop, zmqstream """ ioloop.install() must be called prior to instantiating *any* tornado objects, and ideally before importing anything from tornado, just to be safe. install() sets the singleton instance of tornado.ioloop.IOLoop with zmq's IOLoop. If this is not done properly, multiple IOLoop instances may be created, which will have the effect of some subset of handlers never being called, because only one loop will be running. 
""" ioloop.install() import tornado from tornado import web def slow_responder(): """thread for slowly responding to replies.""" ctx = zmq.Context.instance() socket = ctx.socket(zmq.REP) socket.linger = 0 socket.bind('tcp://127.0.0.1:5555') i=0 while True: msg = socket.recv() print "\nworker received %r\n" % msg time.sleep(random.randint(1,5)) socket.send(msg + " to you too, #%i" % i) i+=1 def dot(): """callback for showing that IOLoop is still responsive while we wait""" sys.stdout.write('.') sys.stdout.flush() class TestHandler(web.RequestHandler): @web.asynchronous def get(self): ctx = zmq.Context.instance() s = ctx.socket(zmq.REQ) s.connect('tcp://127.0.0.1:5555') # send request to worker s.send('hello') self.stream = zmqstream.ZMQStream(s) self.stream.on_recv(self.handle_reply) def handle_reply(self, msg): # finish web request with worker's reply reply = msg[0] print "\nfinishing with %r\n" % reply, self.stream.close() self.write(reply) self.finish() def main(): worker = threading.Thread(target=slow_responder) worker.daemon=True worker.start() application = web.Application([(r"/", TestHandler)]) beat = ioloop.PeriodicCallback(dot, 100) beat.start() application.listen(8888) try: ioloop.IOLoop.instance().start() except KeyboardInterrupt: print ' Interrupted' if __name__ == "__main__": main() pyzmq-16.0.2/examples/eventloop/coroutines.py000066400000000000000000000025341301503633700213620ustar00rootroot00000000000000"""Example using zmq with tornado coroutines""" # Copyright (c) PyZMQ Developers. # This example is in the public domain (CC-0) import time import zmq from zmq.eventloop.future import Context, Poller from zmq.eventloop.ioloop import IOLoop from tornado import gen url = 'tcp://127.0.0.1:5555' ctx = Context() @gen.coroutine def ping(): """print dots to indicate idleness""" while True: yield gen.sleep(0.25) print('.') @gen.coroutine def receiver(): """receive messages with poll and timeout""" pull = ctx.socket(zmq.PULL) pull.connect(url) poller = Poller() poller.register(pull, zmq.POLLIN) while True: events = yield poller.poll(timeout=500) if pull in dict(events): print("recving", events) msg = yield pull.recv_multipart() print('recvd', msg) else: print("nothing to recv") @gen.coroutine def sender(): """send a message every second""" tic = time.time() push = ctx.socket(zmq.PUSH) push.bind(url) poller = Poller() poller.register(push, zmq.POLLOUT) while True: print("sending") yield push.send_multipart([str(time.time() - tic).encode('ascii')]) yield gen.sleep(1) loop = IOLoop.instance() loop.add_callback(ping) loop.add_callback(receiver) loop.add_callback(sender) loop.start() pyzmq-16.0.2/examples/eventloop/echo.py000066400000000000000000000010311301503633700200750ustar00rootroot00000000000000#!/usr/bin/env python """A trivial ZMQ echo server using the eventloop. Authors ------- * MinRK """ import zmq from zmq.eventloop import ioloop loop = ioloop.IOLoop.instance() ctx = zmq.Context() s = ctx.socket(zmq.REP) s.bind('tcp://127.0.0.1:5555') def rep_handler(sock, events): # We don't know how many recv's we can do? msg = sock.recv() # No guarantee that we can do the send. We need a way of putting the # send in the event loop. sock.send(msg) loop.add_handler(s, rep_handler, zmq.POLLIN) loop.start()pyzmq-16.0.2/examples/eventloop/echostream.py000066400000000000000000000006571301503633700213260ustar00rootroot00000000000000#!/usr/bin/env python """Adapted echo.py to put the send in the event loop using a ZMQStream. 
Authors ------- * MinRK """ import zmq from zmq.eventloop import ioloop, zmqstream loop = ioloop.IOLoop.instance() ctx = zmq.Context() s = ctx.socket(zmq.REP) s.bind('tcp://127.0.0.1:5555') stream = zmqstream.ZMQStream(s, loop) def echo(msg): print " ".join(msg) stream.send_multipart(msg) stream.on_recv(echo) loop.start()pyzmq-16.0.2/examples/eventloop/web.py000066400000000000000000000022521301503633700177420ustar00rootroot00000000000000import zmq from zmq.eventloop import ioloop, zmqstream """ ioloop.install() must be called prior to instantiating *any* tornado objects, and ideally before importing anything from tornado, just to be safe. install() sets the singleton instance of tornado.ioloop.IOLoop with zmq's IOLoop. If this is not done properly, multiple IOLoop instances may be created, which will have the effect of some subset of handlers never being called, because only one loop will be running. """ ioloop.install() import tornado import tornado.web """ this application can be used with echostream.py, start echostream.py, start web.py, then every time you hit http://localhost:8888/, echostream.py will print out 'hello' """ def printer(msg): print (msg) ctx = zmq.Context() s = ctx.socket(zmq.REQ) s.connect('tcp://127.0.0.1:5555') stream = zmqstream.ZMQStream(s) stream.on_recv(printer) class TestHandler(tornado.web.RequestHandler): def get(self): print ("sending hello") stream.send("hello") self.write("hello") application = tornado.web.Application([(r"/", TestHandler)]) if __name__ == "__main__": application.listen(8888) ioloop.IOLoop.instance().start() pyzmq-16.0.2/examples/gevent/000077500000000000000000000000001301503633700160675ustar00rootroot00000000000000pyzmq-16.0.2/examples/gevent/poll.py000066400000000000000000000022011301503633700174020ustar00rootroot00000000000000import gevent from zmq import green as zmq # Connect to both receiving sockets and send 10 messages def sender(): sender = context.socket(zmq.PUSH) sender.connect('inproc://polltest1') sender.connect('inproc://polltest2') for i in xrange(10): sender.send('test %d' % i) gevent.sleep(1) # create zmq context, and bind to pull sockets context = zmq.Context() receiver1 = context.socket(zmq.PULL) receiver1.bind('inproc://polltest1') receiver2 = context.socket(zmq.PULL) receiver2.bind('inproc://polltest2') gevent.spawn(sender) # Create poller and register both receiver sockets poller = zmq.Poller() poller.register(receiver1, zmq.POLLIN) poller.register(receiver2, zmq.POLLIN) # Read 10 messages from both receiver sockets msgcnt = 0 while msgcnt < 10: socks = dict(poller.poll()) if receiver1 in socks and socks[receiver1] == zmq.POLLIN: print "Message from receiver1: %s" % receiver1.recv() msgcnt += 1 if receiver2 in socks and socks[receiver2] == zmq.POLLIN: print "Message from receiver2: %s" % receiver2.recv() msgcnt += 1 print "%d messages received" % msgcnt pyzmq-16.0.2/examples/gevent/reqrep.py000066400000000000000000000021241301503633700177360ustar00rootroot00000000000000""" Complex example which is a combination of the rr* examples from the zguide. 
""" from gevent import spawn import zmq.green as zmq # server context = zmq.Context() socket = context.socket(zmq.REP) socket.connect("tcp://localhost:5560") def serve(socket): while True: message = socket.recv() print "Received request: ", message socket.send("World") server = spawn(serve, socket) # client context = zmq.Context() socket = context.socket(zmq.REQ) socket.connect("tcp://localhost:5559") # Do 10 requests, waiting each time for a response def client(): for request in range(1,10): socket.send("Hello") message = socket.recv() print "Received reply ", request, "[", message, "]" # broker frontend = context.socket(zmq.ROUTER) backend = context.socket(zmq.DEALER); frontend.bind("tcp://*:5559") backend.bind("tcp://*:5560") def proxy(socket_from, socket_to): while True: m = socket_from.recv_multipart() socket_to.send_multipart(m) a = spawn(proxy, frontend, backend) b = spawn(proxy, backend, frontend) spawn(client).join() pyzmq-16.0.2/examples/gevent/simple.py000066400000000000000000000016621301503633700177370ustar00rootroot00000000000000from gevent import spawn, spawn_later import zmq.green as zmq # server print zmq.Context ctx = zmq.Context() sock = ctx.socket(zmq.PUSH) sock.bind('ipc:///tmp/zmqtest') spawn(sock.send_pyobj, ('this', 'is', 'a', 'python', 'tuple')) spawn_later(1, sock.send_pyobj, {'hi': 1234}) spawn_later(2, sock.send_pyobj, ({'this': ['is a more complicated object', ':)']}, 42, 42, 42)) spawn_later(3, sock.send_pyobj, 'foobar') spawn_later(4, sock.send_pyobj, 'quit') # client ctx = zmq.Context() # create a new context to kick the wheels sock = ctx.socket(zmq.PULL) sock.connect('ipc:///tmp/zmqtest') def get_objs(sock): while True: o = sock.recv_pyobj() print 'received python object:', o if o == 'quit': print 'exiting.' break def print_every(s, t=None): print s if t: spawn_later(t, print_every, s, t) print_every('printing every half second', 0.5) spawn(get_objs, sock).join() pyzmq-16.0.2/examples/heartbeat/000077500000000000000000000000001301503633700165365ustar00rootroot00000000000000pyzmq-16.0.2/examples/heartbeat/heart.py000066400000000000000000000013731301503633700202170ustar00rootroot00000000000000#!/usr/bin/env python """This launches an echoing rep socket device, and runs a blocking numpy action. The rep socket should remain responsive to pings during this time. Use heartbeater.py to ping this heart, and see the responsiveness. Authors ------- * MinRK """ import time import numpy import zmq from zmq import devices ctx = zmq.Context() dev = devices.ThreadDevice(zmq.FORWARDER, zmq.SUB, zmq.DEALER) dev.setsockopt_in(zmq.SUBSCRIBE, "") dev.connect_in('tcp://127.0.0.1:5555') dev.connect_out('tcp://127.0.0.1:5556') dev.start() #wait for connections time.sleep(1) A = numpy.random.random((2**11,2**11)) print "starting blocking loop" while True: tic = time.time() numpy.dot(A,A.transpose()) print "blocked for %.3f s"%(time.time()-tic) pyzmq-16.0.2/examples/heartbeat/heartbeater.py000066400000000000000000000051061301503633700214000ustar00rootroot00000000000000#!/usr/bin/env python """ For use with heart.py A basic heartbeater using PUB and ROUTER sockets. pings are sent out on the PUB, and hearts are tracked based on their DEALER identities. You can start many hearts with heart.py, and the heartbeater will monitor all of them, and notice when they stop responding. 
Authors ------- * MinRK """ import time import zmq from zmq.eventloop import ioloop, zmqstream class HeartBeater(object): """A basic HeartBeater class pingstream: a PUB stream pongstream: an ROUTER stream""" def __init__(self, loop, pingstream, pongstream, period=1000): self.loop = loop self.period = period self.pingstream = pingstream self.pongstream = pongstream self.pongstream.on_recv(self.handle_pong) self.hearts = set() self.responses = set() self.lifetime = 0 self.tic = time.time() self.caller = ioloop.PeriodicCallback(self.beat, period, self.loop) self.caller.start() def beat(self): toc = time.time() self.lifetime += toc-self.tic self.tic = toc print self.lifetime # self.message = str(self.lifetime) goodhearts = self.hearts.intersection(self.responses) heartfailures = self.hearts.difference(goodhearts) newhearts = self.responses.difference(goodhearts) # print newhearts, goodhearts, heartfailures map(self.handle_new_heart, newhearts) map(self.handle_heart_failure, heartfailures) self.responses = set() print "%i beating hearts: %s"%(len(self.hearts),self.hearts) self.pingstream.send(str(self.lifetime)) def handle_new_heart(self, heart): print "yay, got new heart %s!"%heart self.hearts.add(heart) def handle_heart_failure(self, heart): print "Heart %s failed :("%heart self.hearts.remove(heart) def handle_pong(self, msg): "if heart is beating" if msg[1] == str(self.lifetime): self.responses.add(msg[0]) else: print "got bad heartbeat (possibly old?): %s"%msg[1] # sub.setsockopt(zmq.SUBSCRIBE) if __name__ == '__main__': loop = ioloop.IOLoop() context = zmq.Context() pub = context.socket(zmq.PUB) pub.bind('tcp://127.0.0.1:5555') router = context.socket(zmq.ROUTER) router.bind('tcp://127.0.0.1:5556') outstream = zmqstream.ZMQStream(pub, loop) instream = zmqstream.ZMQStream(router, loop) hb = HeartBeater(loop, outstream, instream) loop.start() pyzmq-16.0.2/examples/heartbeat/ping.py000066400000000000000000000013041301503633700200430ustar00rootroot00000000000000#!/usr/bin/env python """For use with pong.py This script simply pings a process started by pong.py or tspong.py, to demonstrate that zmq remains responsive while Python blocks. Authors ------- * MinRK """ from __future__ import print_function import sys import time import numpy import zmq ctx = zmq.Context() req = ctx.socket(zmq.REQ) req.connect('tcp://127.0.0.1:10111') #wait for connects time.sleep(1) n=0 while True: time.sleep(numpy.random.random()) for i in range(4): n+=1 msg = 'ping %i' % n tic = time.time() req.send_string(msg) resp = req.recv_string() print("%s: %.2f ms" % (msg, 1000*(time.time()-tic))) assert msg == resp pyzmq-16.0.2/examples/heartbeat/pong.py000066400000000000000000000013751301503633700200610ustar00rootroot00000000000000#!/usr/bin/env python """This launches an echoing rep socket device using zmq.devices.ThreadDevice, and runs a blocking numpy action. The rep socket should remain responsive to pings during this time. Use ping.py to see how responsive it is. 
Authors ------- * MinRK """ from __future__ import print_function import time import numpy import zmq from zmq import devices ctx = zmq.Context() dev = devices.ThreadDevice(zmq.FORWARDER, zmq.REP, -1) dev.bind_in('tcp://127.0.0.1:10111') dev.setsockopt_in(zmq.IDENTITY, b"whoda") dev.start() #wait for connections time.sleep(1) A = numpy.random.random((2**11,2**12)) print("starting blocking loop") while True: tic = time.time() numpy.dot(A,A.transpose()) print("blocked for %.3f s"%(time.time()-tic)) pyzmq-16.0.2/examples/logger/000077500000000000000000000000001301503633700160565ustar00rootroot00000000000000pyzmq-16.0.2/examples/logger/zmqlogger.py000066400000000000000000000041131301503633700204360ustar00rootroot00000000000000""" Simple example of using zmq log handlers This starts a number of subprocesses with PUBHandlers that generate log messages at a regular interval. The main process has a SUB socket, which aggregates and logs all of the messages to the root logger. """ import logging from multiprocessing import Process import os import random import sys import time import zmq from zmq.log.handlers import PUBHandler LOG_LEVELS = (logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, logging.CRITICAL) def sub_logger(port, level=logging.DEBUG): ctx = zmq.Context() sub = ctx.socket(zmq.SUB) sub.bind('tcp://127.0.0.1:%i' % port) sub.setsockopt(zmq.SUBSCRIBE, b"") logging.basicConfig(level=level) while True: level, message = sub.recv_multipart() message = message.decode('ascii') if message.endswith('\n'): # trim trailing newline, which will get appended again message = message[:-1] log = getattr(logging, level.lower().decode('ascii')) log(message) def log_worker(port, interval=1, level=logging.DEBUG): ctx = zmq.Context() pub = ctx.socket(zmq.PUB) pub.connect('tcp://127.0.0.1:%i' % port) logger = logging.getLogger(str(os.getpid())) logger.setLevel(level) handler = PUBHandler(pub) logger.addHandler(handler) print("starting logger at %i with level=%s" % (os.getpid(), level)) while True: level = random.choice(LOG_LEVELS) logger.log(level, "Hello from %i!" % os.getpid()) time.sleep(interval) if __name__ == '__main__': if len(sys.argv) > 1: n = int(sys.argv[1]) else: n = 2 if len(sys.argv) > 2: port = int(sys.argv[2]) else: port = 5558 # start the log generators workers = [Process(target=log_worker, args=(port,), kwargs=dict(level=random.choice(LOG_LEVELS))) for i in range(n)] [w.start() for w in workers] # start the log watcher try: sub_logger(port) except KeyboardInterrupt: pass finally: [w.terminate() for w in workers] pyzmq-16.0.2/examples/mongodb/000077500000000000000000000000001301503633700162245ustar00rootroot00000000000000pyzmq-16.0.2/examples/mongodb/client.py000066400000000000000000000025701301503633700200600ustar00rootroot00000000000000#----------------------------------------------------------------------------- # Copyright (c) 2010 Justin Riley # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. 
#----------------------------------------------------------------------------- import json import zmq class MongoZMQClient(object): """ Client that connects with MongoZMQ server to add/fetch docs """ def __init__(self, connect_addr='tcp://127.0.0.1:5000'): self._context = zmq.Context() self._socket = self._context.socket(zmq.DEALER) self._socket.connect(connect_addr) def _send_recv_msg(self, msg): self._socket.send_multipart(msg) return self._socket.recv_multipart()[0] def get_doc(self, keys): msg = ['get', json.dumps(keys)] json_str = self._send_recv_msg(msg) return json.loads(json_str) def add_doc(self, doc): msg = ['add', json.dumps(doc)] return self._send_recv_msg(msg) def main(): client = MongoZMQClient() for i in range(10): doc = {'job': str(i)} print "Adding doc", doc print client.add_doc(doc) for i in range(10): query = {'job': str(i)} print "Getting doc matching query:", query print client.get_doc(query) if __name__ == "__main__": main() pyzmq-16.0.2/examples/mongodb/controller.py000066400000000000000000000057541301503633700207740ustar00rootroot00000000000000#----------------------------------------------------------------------------- # Copyright (c) 2010 Justin Riley # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import sys import zmq import pymongo import pymongo.json_util import json class MongoZMQ(object): """ ZMQ server that adds/fetches documents (ie dictionaries) to a MongoDB. NOTE: mongod must be started before using this class """ def __init__(self, db_name, table_name, bind_addr="tcp://127.0.0.1:5000"): """ bind_addr: address to bind zmq socket on db_name: name of database to write to (created if doesn't exist) table_name: name of mongodb 'table' in the db to write to (created if doesn't exist) """ self._bind_addr = bind_addr self._db_name = db_name self._table_name = table_name self._conn = pymongo.Connection() self._db = self._conn[self._db_name] self._table = self._db[self._table_name] def _doc_to_json(self, doc): return json.dumps(doc,default=pymongo.json_util.default) def add_document(self, doc): """ Inserts a document (dictionary) into mongo database table """ print 'adding docment %s' % (doc) try: self._table.insert(doc) except Exception,e: return 'Error: %s' % e def get_document_by_keys(self, keys): """ Attempts to return a single document from database table that matches each key/value in keys dictionary. 
""" print 'attempting to retrieve document using keys: %s' % keys try: return self._table.find_one(keys) except Exception,e: return 'Error: %s' % e def start(self): context = zmq.Context() socket = context.socket(zmq.ROUTER) socket.bind(self._bind_addr) while True: msg = socket.recv_multipart() print "Received msg: ", msg if len(msg) != 3: error_msg = 'invalid message received: %s' % msg print error_msg reply = [msg[0], error_msg] socket.send_multipart(reply) continue id = msg[0] operation = msg[1] contents = json.loads(msg[2]) # always send back the id with ROUTER reply = [id] if operation == 'add': self.add_document(contents) reply.append("success") elif operation == 'get': doc = self.get_document_by_keys(contents) json_doc = self._doc_to_json(doc) reply.append(json_doc) else: print 'unknown request' socket.send_multipart(reply) def main(): MongoZMQ('ipcontroller','jobs').start() if __name__ == "__main__": main() pyzmq-16.0.2/examples/monitoring/000077500000000000000000000000001301503633700167645ustar00rootroot00000000000000pyzmq-16.0.2/examples/monitoring/simple_monitor.py000066400000000000000000000043701301503633700224020ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Simple example demonstrating the use of the socket monitoring feature.""" # This file is part of pyzmq. # # Distributed under the terms of the New BSD License. The full # license is in the file COPYING.BSD, distributed as part of this # software. from __future__ import print_function __author__ = 'Guido Goldstein' import threading import time import zmq from zmq.utils.monitor import recv_monitor_message line = lambda: print('-' * 40) print("libzmq-%s" % zmq.zmq_version()) if zmq.zmq_version_info() < (4, 0): raise RuntimeError("monitoring in libzmq version < 4.0 is not supported") EVENT_MAP = {} print("Event names:") for name in dir(zmq): if name.startswith('EVENT_'): value = getattr(zmq, name) print("%21s : %4i" % (name, value)) EVENT_MAP[value] = name def event_monitor(monitor): while monitor.poll(): evt = recv_monitor_message(monitor) evt.update({'description': EVENT_MAP[evt['event']]}) print("Event: {}".format(evt)) if evt['event'] == zmq.EVENT_MONITOR_STOPPED: break monitor.close() print() print("event monitor thread done!") ctx = zmq.Context.instance() rep = ctx.socket(zmq.REP) req = ctx.socket(zmq.REQ) monitor = req.get_monitor_socket() t = threading.Thread(target=event_monitor, args=(monitor,)) t.start() line() print("bind req") req.bind("tcp://127.0.0.1:6666") req.bind("tcp://127.0.0.1:6667") time.sleep(1) line() print("connect rep") rep.connect("tcp://127.0.0.1:6667") time.sleep(0.2) rep.connect("tcp://127.0.0.1:6666") time.sleep(1) line() print("disconnect rep") rep.disconnect("tcp://127.0.0.1:6667") time.sleep(1) rep.disconnect("tcp://127.0.0.1:6666") time.sleep(1) line() print("close rep") rep.close() time.sleep(1) line() print("disabling event monitor") req.disable_monitor() line() print("event monitor thread should now terminate") # Create a new socket to connect to listener, no more # events should be observed. 
rep = ctx.socket(zmq.REP) line() print("connect rep") rep.connect("tcp://127.0.0.1:6667") time.sleep(0.2) line() print("disconnect rep") rep.disconnect("tcp://127.0.0.1:6667") time.sleep(0.2) line() print("close rep") rep.close() line() print("close req") req.close() print("END") ctx.term() pyzmq-16.0.2/examples/poll/000077500000000000000000000000001301503633700155455ustar00rootroot00000000000000pyzmq-16.0.2/examples/poll/pair.py000066400000000000000000000025701301503633700170560ustar00rootroot00000000000000"""A thorough test of polling PAIR sockets.""" #----------------------------------------------------------------------------- # Copyright (c) 2010 Brian Granger # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import time import zmq print "Running polling tests for PAIR sockets..." addr = 'tcp://127.0.0.1:5555' ctx = zmq.Context() s1 = ctx.socket(zmq.PAIR) s2 = ctx.socket(zmq.PAIR) s1.bind(addr) s2.connect(addr) # Sleep to allow sockets to connect. time.sleep(1.0) poller = zmq.Poller() poller.register(s1, zmq.POLLIN|zmq.POLLOUT) poller.register(s2, zmq.POLLIN|zmq.POLLOUT) # Now make sure that both are send ready. socks = dict(poller.poll()) assert socks[s1] == zmq.POLLOUT assert socks[s2] == zmq.POLLOUT # Now do a send on both, wait and test for zmq.POLLOUT|zmq.POLLIN s1.send('msg1') s2.send('msg2') time.sleep(1.0) socks = dict(poller.poll()) assert socks[s1] == zmq.POLLOUT|zmq.POLLIN assert socks[s2] == zmq.POLLOUT|zmq.POLLIN # Make sure that both are in POLLOUT after recv. s1.recv() s2.recv() socks = dict(poller.poll()) assert socks[s1] == zmq.POLLOUT assert socks[s2] == zmq.POLLOUT poller.unregister(s1) poller.unregister(s2) # Wait for everything to finish. time.sleep(1.0) print "Finished."pyzmq-16.0.2/examples/poll/pubsub.py000066400000000000000000000025741301503633700174270ustar00rootroot00000000000000"""A thorough test of polling PUB/SUB sockets.""" #----------------------------------------------------------------------------- # Copyright (c) 2010 Brian Granger # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import time import zmq print "Running polling tets for PUB/SUB sockets..." addr = 'tcp://127.0.0.1:5555' ctx = zmq.Context() s1 = ctx.socket(zmq.PUB) s2 = ctx.socket(zmq.SUB) s2.setsockopt(zmq.SUBSCRIBE, '') s1.bind(addr) s2.connect(addr) # Sleep to allow sockets to connect. time.sleep(1.0) poller = zmq.Poller() poller.register(s1, zmq.POLLIN|zmq.POLLOUT) poller.register(s2, zmq.POLLIN|zmq.POLLOUT) # Now make sure that both are send ready. socks = dict(poller.poll()) assert socks[s1] == zmq.POLLOUT assert not socks.has_key(s2) # Make sure that s1 stays in POLLOUT after a send. s1.send('msg1') socks = dict(poller.poll()) assert socks[s1] == zmq.POLLOUT # Make sure that s2 is POLLIN after waiting. time.sleep(0.5) socks = dict(poller.poll()) assert socks[s2] == zmq.POLLIN # Make sure that s2 goes into 0 after recv. s2.recv() socks = dict(poller.poll()) assert not socks.has_key(s2) poller.unregister(s1) poller.unregister(s2) # Wait for everything to finish. time.sleep(1.0) print "Finished." 
pyzmq-16.0.2/examples/poll/reqrep.py000066400000000000000000000033601301503633700174170ustar00rootroot00000000000000"""A thorough test of polling REQ/REP sockets.""" #----------------------------------------------------------------------------- # Copyright (c) 2010 Brian Granger # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import time import zmq print "Running polling tests for REQ/REP sockets..." addr = 'tcp://127.0.0.1:5555' ctx = zmq.Context() s1 = ctx.socket(zmq.REP) s2 = ctx.socket(zmq.REQ) s1.bind(addr) s2.connect(addr) # Sleep to allow sockets to connect. time.sleep(1.0) poller = zmq.Poller() poller.register(s1, zmq.POLLIN|zmq.POLLOUT) poller.register(s2, zmq.POLLIN|zmq.POLLOUT) # Make sure that s1 is in state 0 and s2 is in POLLOUT socks = dict(poller.poll()) assert not socks.has_key(s1) assert socks[s2] == zmq.POLLOUT # Make sure that s2 goes immediately into state 0 after send. s2.send('msg1') socks = dict(poller.poll()) assert not socks.has_key(s2) # Make sure that s1 goes into POLLIN state after a time.sleep(). time.sleep(0.5) socks = dict(poller.poll()) assert socks[s1] == zmq.POLLIN # Make sure that s1 goes into POLLOUT after recv. s1.recv() socks = dict(poller.poll()) assert socks[s1] == zmq.POLLOUT # Make sure s1 goes into state 0 after send. s1.send('msg2') socks = dict(poller.poll()) assert not socks.has_key(s1) # Wait and then see that s2 is in POLLIN. time.sleep(0.5) socks = dict(poller.poll()) assert socks[s2] == zmq.POLLIN # Make sure that s2 is in POLLOUT after recv. s2.recv() socks = dict(poller.poll()) assert socks[s2] == zmq.POLLOUT poller.unregister(s1) poller.unregister(s2) # Wait for everything to finish. time.sleep(1.0) print "Finished." pyzmq-16.0.2/examples/pubsub/000077500000000000000000000000001301503633700160775ustar00rootroot00000000000000pyzmq-16.0.2/examples/pubsub/publisher.py000066400000000000000000000027311301503633700204510ustar00rootroot00000000000000"""A test that publishes NumPy arrays. Uses REQ/REP (on PUB/SUB socket + 1) to synchronize """ #----------------------------------------------------------------------------- # Copyright (c) 2010 Brian Granger # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import sys import time import zmq import numpy def sync(bind_to): # use bind socket + 1 sync_with = ':'.join(bind_to.split(':')[:-1] + [str(int(bind_to.split(':')[-1]) + 1)]) ctx = zmq.Context.instance() s = ctx.socket(zmq.REP) s.bind(sync_with) print "Waiting for subscriber to connect..." s.recv() print " Done." s.send('GO') def main(): if len (sys.argv) != 4: print 'usage: publisher ' sys.exit (1) try: bind_to = sys.argv[1] array_size = int(sys.argv[2]) array_count = int (sys.argv[3]) except (ValueError, OverflowError), e: print 'array-size and array-count must be integers' sys.exit (1) ctx = zmq.Context() s = ctx.socket(zmq.PUB) s.bind(bind_to) sync(bind_to) print "Sending arrays..." for i in range(array_count): a = numpy.random.rand(array_size, array_size) s.send_pyobj(a) print " Done." if __name__ == "__main__": main() pyzmq-16.0.2/examples/pubsub/subscriber.py000066400000000000000000000036211301503633700206160ustar00rootroot00000000000000"""A test that subscribes to NumPy arrays. 
Uses REQ/REP (on PUB/SUB socket + 1) to synchronize """ #----------------------------------------------------------------------------- # Copyright (c) 2010 Brian Granger # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import sys import time import zmq import numpy def sync(connect_to): # use connect socket + 1 sync_with = ':'.join(connect_to.split(':')[:-1] + [str(int(connect_to.split(':')[-1]) + 1)] ) ctx = zmq.Context.instance() s = ctx.socket(zmq.REQ) s.connect(sync_with) s.send('READY') s.recv() def main(): if len (sys.argv) != 3: print 'usage: subscriber ' sys.exit (1) try: connect_to = sys.argv[1] array_count = int (sys.argv[2]) except (ValueError, OverflowError), e: print 'array-count must be integers' sys.exit (1) ctx = zmq.Context() s = ctx.socket(zmq.SUB) s.connect(connect_to) s.setsockopt(zmq.SUBSCRIBE,'') sync(connect_to) start = time.clock() print "Receiving arrays..." for i in range(array_count): a = s.recv_pyobj() print " Done." end = time.clock() elapsed = (end - start) * 1000000 if elapsed == 0: elapsed = 1 throughput = (1000000.0 * float (array_count)) / float (elapsed) message_size = a.nbytes megabits = float (throughput * message_size * 8) / 1000000 print "message size: %.0f [B]" % (message_size, ) print "array count: %.0f" % (array_count, ) print "mean throughput: %.0f [msg/s]" % (throughput, ) print "mean throughput: %.3f [Mb/s]" % (megabits, ) time.sleep(1.0) if __name__ == "__main__": main() pyzmq-16.0.2/examples/pubsub/topics_pub.py000077500000000000000000000036401301503633700206260ustar00rootroot00000000000000#!/usr/bin/env python """Simple example of publish/subscribe illustrating topics. Publisher and subscriber can be started in any order, though if publisher starts first, any messages sent before subscriber starts are lost. More than one subscriber can listen, and they can listen to different topics. Topic filtering is done simply on the start of the string, e.g. listening to 's' will catch 'sports...' and 'stocks' while listening to 'w' is enough to catch 'weather'. """ #----------------------------------------------------------------------------- # Copyright (c) 2010 Brian Granger # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import itertools import sys import time import zmq def main(): if len (sys.argv) != 2: print 'usage: publisher ' sys.exit (1) bind_to = sys.argv[1] all_topics = ['sports.general','sports.football','sports.basketball', 'stocks.general','stocks.GOOG','stocks.AAPL', 'weather'] ctx = zmq.Context() s = ctx.socket(zmq.PUB) s.bind(bind_to) print "Starting broadcast on topics:" print " %s" % all_topics print "Hit Ctrl-C to stop broadcasting." print "Waiting so subscriber sockets can connect..." print time.sleep(1.0) msg_counter = itertools.count() try: for topic in itertools.cycle(all_topics): msg_body = str(msg_counter.next()) print ' Topic: %s, msg:%s' % (topic, msg_body) s.send_multipart([topic, msg_body]) # short wait so we don't hog the cpu time.sleep(0.1) except KeyboardInterrupt: pass print "Waiting for message queues to flush..." time.sleep(0.5) print "Done." 
if __name__ == "__main__": main() pyzmq-16.0.2/examples/pubsub/topics_sub.py000077500000000000000000000031751301503633700206340ustar00rootroot00000000000000#!/usr/bin/env python """Simple example of publish/subscribe illustrating topics. Publisher and subscriber can be started in any order, though if publisher starts first, any messages sent before subscriber starts are lost. More than one subscriber can listen, and they can listen to different topics. Topic filtering is done simply on the start of the string, e.g. listening to 's' will catch 'sports...' and 'stocks' while listening to 'w' is enough to catch 'weather'. """ #----------------------------------------------------------------------------- # Copyright (c) 2010 Brian Granger, Fernando Perez # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import sys import time import zmq import numpy def main(): if len (sys.argv) < 2: print 'usage: subscriber [topic topic ...]' sys.exit (1) connect_to = sys.argv[1] topics = sys.argv[2:] ctx = zmq.Context() s = ctx.socket(zmq.SUB) s.connect(connect_to) # manage subscriptions if not topics: print "Receiving messages on ALL topics..." s.setsockopt(zmq.SUBSCRIBE,'') else: print "Receiving messages on topics: %s ..." % topics for t in topics: s.setsockopt(zmq.SUBSCRIBE,t) print try: while True: topic, msg = s.recv_multipart() print ' Topic: %s, msg:%s' % (topic, msg) except KeyboardInterrupt: pass print "Done." if __name__ == "__main__": main() pyzmq-16.0.2/examples/security/000077500000000000000000000000001301503633700164465ustar00rootroot00000000000000pyzmq-16.0.2/examples/security/asyncio-ironhouse.py000066400000000000000000000065211301503633700225020ustar00rootroot00000000000000#!/usr/bin/env python ''' Ironhouse extends Stonehouse with client public key authentication. This is the strongest security model we have today, protecting against every attack we know about, except end-point attacks (where an attacker plants spyware on a machine to capture data before it's encrypted, or after it's decrypted). Author: Steven Armstrong Based on ./ironhouse.py by Chris Laws ''' import logging import os import sys import asyncio import zmq import zmq.auth from zmq.auth.asyncio import AsyncioAuthenticator from zmq.asyncio import Context, Poller, ZMQEventLoop @asyncio.coroutine def run(): ''' Run Ironhouse example ''' # These directories are generated by the generate_certificates script base_dir = os.path.dirname(__file__) keys_dir = os.path.join(base_dir, 'certificates') public_keys_dir = os.path.join(base_dir, 'public_keys') secret_keys_dir = os.path.join(base_dir, 'private_keys') if not (os.path.exists(keys_dir) and os.path.exists(public_keys_dir) and os.path.exists(secret_keys_dir)): logging.critical("Certificates are missing - run generate_certificates.py script first") sys.exit(1) ctx = Context.instance() # Start an authenticator for this context. 
auth = AsyncioAuthenticator(ctx) auth.start() auth.allow('127.0.0.1') # Tell authenticator to use the certificate in a directory auth.configure_curve(domain='*', location=public_keys_dir) server = ctx.socket(zmq.PUSH) server_secret_file = os.path.join(secret_keys_dir, "server.key_secret") server_public, server_secret = zmq.auth.load_certificate(server_secret_file) server.curve_secretkey = server_secret server.curve_publickey = server_public server.curve_server = True # must come before bind server.bind('tcp://*:9000') client = ctx.socket(zmq.PULL) # We need two certificates, one for the client and one for # the server. The client must know the server's public key # to make a CURVE connection. client_secret_file = os.path.join(secret_keys_dir, "client.key_secret") client_public, client_secret = zmq.auth.load_certificate(client_secret_file) client.curve_secretkey = client_secret client.curve_publickey = client_public server_public_file = os.path.join(public_keys_dir, "server.key") server_public, _ = zmq.auth.load_certificate(server_public_file) # The client must know the server's public key to make a CURVE connection. client.curve_serverkey = server_public client.connect('tcp://127.0.0.1:9000') yield from server.send(b"Hello") if (yield from client.poll(1000)): msg = yield from client.recv() if msg == b"Hello": logging.info("Ironhouse test OK") else: logging.error("Ironhouse test FAIL") # close sockets server.close() client.close() # stop auth task auth.stop() if __name__ == '__main__': if zmq.zmq_version_info() < (4,0): raise RuntimeError("Security is not supported in libzmq version < 4.0. libzmq version {0}".format(zmq.zmq_version())) if '-v' in sys.argv: level = logging.DEBUG else: level = logging.INFO logging.basicConfig(level=level, format="[%(levelname)s] %(message)s") loop = ZMQEventLoop() asyncio.set_event_loop(loop) loop.run_until_complete(run()) loop.close() pyzmq-16.0.2/examples/security/generate_certificates.py000066400000000000000000000034771301503633700233520ustar00rootroot00000000000000#!/usr/bin/env python """ Generate client and server CURVE certificate files then move them into the appropriate store directory, private_keys or public_keys. The certificates generated by this script are used by the stonehouse and ironhouse examples. In practice this would be done by hand or some out-of-band process. 
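For quick experiments it can be handy to skip certificate files entirely and create a CURVE keypair in memory. The following is a hedged sketch, not part of this script (it still requires libzmq >= 4.0 built with CURVE support, and the port is arbitrary); zmq.curve_keypair() returns Z85-encoded public and secret keys that can be assigned to the same curve_* socket options the stonehouse and ironhouse examples set:

import zmq

public_key, secret_key = zmq.curve_keypair()     # Z85-encoded bytes
ctx = zmq.Context.instance()
server = ctx.socket(zmq.PUSH)
server.curve_publickey = public_key
server.curve_secretkey = secret_key
server.curve_server = True                       # must be set before bind()
server.bind('tcp://127.0.0.1:9001')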
Author: Chris Laws """ import os import shutil import zmq.auth def generate_certificates(base_dir): ''' Generate client and server CURVE certificate files''' keys_dir = os.path.join(base_dir, 'certificates') public_keys_dir = os.path.join(base_dir, 'public_keys') secret_keys_dir = os.path.join(base_dir, 'private_keys') # Create directories for certificates, remove old content if necessary for d in [keys_dir, public_keys_dir, secret_keys_dir]: if os.path.exists(d): shutil.rmtree(d) os.mkdir(d) # create new keys in certificates dir server_public_file, server_secret_file = zmq.auth.create_certificates(keys_dir, "server") client_public_file, client_secret_file = zmq.auth.create_certificates(keys_dir, "client") # move public keys to appropriate directory for key_file in os.listdir(keys_dir): if key_file.endswith(".key"): shutil.move(os.path.join(keys_dir, key_file), os.path.join(public_keys_dir, '.')) # move secret keys to appropriate directory for key_file in os.listdir(keys_dir): if key_file.endswith(".key_secret"): shutil.move(os.path.join(keys_dir, key_file), os.path.join(secret_keys_dir, '.')) if __name__ == '__main__': if zmq.zmq_version_info() < (4,0): raise RuntimeError("Security is not supported in libzmq version < 4.0. libzmq version {0}".format(zmq.zmq_version())) generate_certificates(os.path.dirname(__file__)) pyzmq-16.0.2/examples/security/grasslands.py000066400000000000000000000010721301503633700211610ustar00rootroot00000000000000#!/usr/bin/env python ''' No protection at all. All connections are accepted, there is no authentication, and no privacy. This is how ZeroMQ always worked until we built security into the wire protocol in early 2013. Internally, it uses a security mechanism called "NULL". Author: Chris Laws ''' import zmq ctx = zmq.Context.instance() server = ctx.socket(zmq.PUSH) server.bind('tcp://*:9000') client = ctx.socket(zmq.PULL) client.connect('tcp://127.0.0.1:9000') server.send(b"Hello") msg = client.recv() if msg == b"Hello": print("Grasslands test OK") pyzmq-16.0.2/examples/security/ioloop-ironhouse.py000066400000000000000000000076321301503633700223420ustar00rootroot00000000000000#!/usr/bin/env python ''' Ironhouse extends Stonehouse with client public key authentication. This is the strongest security model we have today, protecting against every attack we know about, except end-point attacks (where an attacker plants spyware on a machine to capture data before it's encrypted, or after it's decrypted). This example demonstrates using the IOLoopAuthenticator. 
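Once the generate_certificates script above has run, each certificate lives in two files: a *.key file holding only the public key (moved into public_keys/) and a *.key_secret file holding both halves (moved into private_keys/). A small hedged sketch of how the other examples read them back with zmq.auth.load_certificate(), assuming the directory layout that script creates:

import zmq.auth

# the secret certificate contains both keys
server_public, server_secret = zmq.auth.load_certificate('private_keys/server.key_secret')

# the public certificate contains only the public key; the secret half comes back as None
client_public, no_secret = zmq.auth.load_certificate('public_keys/client.key')
assert no_secret is None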
Author: Chris Laws ''' import logging import os import sys import zmq import zmq.auth from zmq.auth.ioloop import IOLoopAuthenticator from zmq.eventloop import ioloop, zmqstream def echo(server, msg): logging.debug("server recvd %s", msg) reply = msg + [b'World'] logging.debug("server sending %s", reply) server.send_multipart(reply) def setup_server(server_secret_file, endpoint='tcp://127.0.0.1:9000'): """setup a simple echo server with CURVE auth""" server = zmq.Context.instance().socket(zmq.ROUTER) server_public, server_secret = zmq.auth.load_certificate(server_secret_file) server.curve_secretkey = server_secret server.curve_publickey = server_public server.curve_server = True # must come before bind server.bind(endpoint) server_stream = zmqstream.ZMQStream(server) # simple echo server_stream.on_recv_stream(echo) return server_stream def client_msg_recvd(msg): logging.debug("client recvd %s", msg) logging.info("Ironhouse test OK") # stop the loop when we get the reply ioloop.IOLoop.instance().stop() def setup_client(client_secret_file, server_public_file, endpoint='tcp://127.0.0.1:9000'): """setup a simple client with CURVE auth""" client = zmq.Context.instance().socket(zmq.DEALER) # We need two certificates, one for the client and one for # the server. The client must know the server's public key # to make a CURVE connection. client_public, client_secret = zmq.auth.load_certificate(client_secret_file) client.curve_secretkey = client_secret client.curve_publickey = client_public server_public, _ = zmq.auth.load_certificate(server_public_file) # The client must know the server's public key to make a CURVE connection. client.curve_serverkey = server_public client.connect(endpoint) client_stream = zmqstream.ZMQStream(client) client_stream.on_recv(client_msg_recvd) return client_stream def run(): '''Run Ironhouse example''' # These direcotries are generated by the generate_certificates script base_dir = os.path.dirname(__file__) keys_dir = os.path.join(base_dir, 'certificates') public_keys_dir = os.path.join(base_dir, 'public_keys') secret_keys_dir = os.path.join(base_dir, 'private_keys') if not (os.path.exists(keys_dir) and os.path.exists(public_keys_dir) and os.path.exists(secret_keys_dir)): logging.critical("Certificates are missing - run generate_certificates script first") sys.exit(1) # Start an authenticator for this context. auth = IOLoopAuthenticator() auth.allow('127.0.0.1') # Tell authenticator to use the certificate in a directory auth.configure_curve(domain='*', location=public_keys_dir) server_secret_file = os.path.join(secret_keys_dir, "server.key_secret") server = setup_server(server_secret_file) server_public_file = os.path.join(public_keys_dir, "server.key") client_secret_file = os.path.join(secret_keys_dir, "client.key_secret") client = setup_client(client_secret_file, server_public_file) client.send(b'Hello') auth.start() ioloop.IOLoop.instance().start() if __name__ == '__main__': if zmq.zmq_version_info() < (4,0): raise RuntimeError("Security is not supported in libzmq version < 4.0. libzmq version {0}".format(zmq.zmq_version())) if '-v' in sys.argv: level = logging.DEBUG else: level = logging.INFO logging.basicConfig(level=level, format="[%(levelname)s] %(message)s") run() pyzmq-16.0.2/examples/security/ironhouse.py000066400000000000000000000060151301503633700210350ustar00rootroot00000000000000#!/usr/bin/env python ''' Ironhouse extends Stonehouse with client public key authentication. 
This is the strongest security model we have today, protecting against every attack we know about, except end-point attacks (where an attacker plants spyware on a machine to capture data before it's encrypted, or after it's decrypted). Author: Chris Laws ''' import logging import os import sys import zmq import zmq.auth from zmq.auth.thread import ThreadAuthenticator def run(): ''' Run Ironhouse example ''' # These directories are generated by the generate_certificates script base_dir = os.path.dirname(__file__) keys_dir = os.path.join(base_dir, 'certificates') public_keys_dir = os.path.join(base_dir, 'public_keys') secret_keys_dir = os.path.join(base_dir, 'private_keys') if not (os.path.exists(keys_dir) and os.path.exists(public_keys_dir) and os.path.exists(secret_keys_dir)): logging.critical("Certificates are missing - run generate_certificates.py script first") sys.exit(1) ctx = zmq.Context.instance() # Start an authenticator for this context. auth = ThreadAuthenticator(ctx) auth.start() auth.allow('127.0.0.1') # Tell authenticator to use the certificate in a directory auth.configure_curve(domain='*', location=public_keys_dir) server = ctx.socket(zmq.PUSH) server_secret_file = os.path.join(secret_keys_dir, "server.key_secret") server_public, server_secret = zmq.auth.load_certificate(server_secret_file) server.curve_secretkey = server_secret server.curve_publickey = server_public server.curve_server = True # must come before bind server.bind('tcp://*:9000') client = ctx.socket(zmq.PULL) # We need two certificates, one for the client and one for # the server. The client must know the server's public key # to make a CURVE connection. client_secret_file = os.path.join(secret_keys_dir, "client.key_secret") client_public, client_secret = zmq.auth.load_certificate(client_secret_file) client.curve_secretkey = client_secret client.curve_publickey = client_public server_public_file = os.path.join(public_keys_dir, "server.key") server_public, _ = zmq.auth.load_certificate(server_public_file) # The client must know the server's public key to make a CURVE connection. client.curve_serverkey = server_public client.connect('tcp://127.0.0.1:9000') server.send(b"Hello") if client.poll(1000): msg = client.recv() if msg == b"Hello": logging.info("Ironhouse test OK") else: logging.error("Ironhouse test FAIL") # stop auth thread auth.stop() if __name__ == '__main__': if zmq.zmq_version_info() < (4,0): raise RuntimeError("Security is not supported in libzmq version < 4.0. libzmq version {0}".format(zmq.zmq_version())) if '-v' in sys.argv: level = logging.DEBUG else: level = logging.INFO logging.basicConfig(level=level, format="[%(levelname)s] %(message)s") run() pyzmq-16.0.2/examples/security/stonehouse.py000066400000000000000000000060421301503633700212160ustar00rootroot00000000000000#!/usr/bin/env python ''' Stonehouse uses the "CURVE" security mechanism. This gives us strong encryption on data, and (as far as we know) unbreakable authentication. Stonehouse is the minimum you would use over public networks, and assures clients that they are speaking to an authentic server, while allowing any client to connect. 
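In code, the difference between Stonehouse and the Ironhouse example above comes down to how the authenticator is told to validate client keys. A self-contained hedged sketch of the two variants ('public_keys' is a stand-in for the directory the generate_certificates script produces):

import zmq
import zmq.auth
from zmq.auth.thread import ThreadAuthenticator

auth = ThreadAuthenticator(zmq.Context.instance())
auth.start()
auth.allow('127.0.0.1')

# Stonehouse: encrypt and authenticate the server, but accept any client key
auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY)

# Ironhouse: additionally require each client's public key to be on file
auth.configure_curve(domain='*', location='public_keys')

auth.stop()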
Author: Chris Laws ''' import logging import os import sys import time import zmq import zmq.auth from zmq.auth.thread import ThreadAuthenticator def run(): ''' Run Stonehouse example ''' # These directories are generated by the generate_certificates script base_dir = os.path.dirname(__file__) keys_dir = os.path.join(base_dir, 'certificates') public_keys_dir = os.path.join(base_dir, 'public_keys') secret_keys_dir = os.path.join(base_dir, 'private_keys') if not (os.path.exists(keys_dir) and os.path.exists(public_keys_dir) and os.path.exists(secret_keys_dir)): logging.critical("Certificates are missing: run generate_certificates.py script first") sys.exit(1) ctx = zmq.Context.instance() # Start an authenticator for this context. auth = ThreadAuthenticator(ctx) auth.start() auth.allow('127.0.0.1') # Tell the authenticator how to handle CURVE requests auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) server = ctx.socket(zmq.PUSH) server_secret_file = os.path.join(secret_keys_dir, "server.key_secret") server_public, server_secret = zmq.auth.load_certificate(server_secret_file) server.curve_secretkey = server_secret server.curve_publickey = server_public server.curve_server = True # must come before bind server.bind('tcp://*:9000') client = ctx.socket(zmq.PULL) # We need two certificates, one for the client and one for # the server. The client must know the server's public key # to make a CURVE connection. client_secret_file = os.path.join(secret_keys_dir, "client.key_secret") client_public, client_secret = zmq.auth.load_certificate(client_secret_file) client.curve_secretkey = client_secret client.curve_publickey = client_public # The client must know the server's public key to make a CURVE connection. server_public_file = os.path.join(public_keys_dir, "server.key") server_public, _ = zmq.auth.load_certificate(server_public_file) client.curve_serverkey = server_public client.connect('tcp://127.0.0.1:9000') server.send(b"Hello") if client.poll(1000): msg = client.recv() if msg == b"Hello": logging.info("Stonehouse test OK") else: logging.error("Stonehouse test FAIL") # stop auth thread auth.stop() if __name__ == '__main__': if zmq.zmq_version_info() < (4,0): raise RuntimeError("Security is not supported in libzmq version < 4.0. libzmq version {0}".format(zmq.zmq_version())) if '-v' in sys.argv: level = logging.DEBUG else: level = logging.INFO logging.basicConfig(level=level, format="[%(levelname)s] %(message)s") run() pyzmq-16.0.2/examples/security/strawhouse.py000066400000000000000000000042111301503633700212220ustar00rootroot00000000000000#!/usr/bin/env python ''' Allow or deny clients based on IP address. Strawhouse, which is plain text with filtering on IP addresses. It still uses the NULL mechanism, but we install an authentication hook that checks the IP address against a whitelist or blacklist and allows or denies it accordingly. Author: Chris Laws ''' import logging import sys import zmq import zmq.auth from zmq.auth.thread import ThreadAuthenticator def run(): '''Run strawhouse client''' allow_test_pass = False deny_test_pass = False ctx = zmq.Context.instance() # Start an authenticator for this context. 
auth = ThreadAuthenticator(ctx) auth.start() # Part 1 - demonstrate allowing clients based on IP address auth.allow('127.0.0.1') server = ctx.socket(zmq.PUSH) server.zap_domain = b'global' # must come before bind server.bind('tcp://*:9000') client_allow = ctx.socket(zmq.PULL) client_allow.connect('tcp://127.0.0.1:9000') server.send(b"Hello") msg = client_allow.recv() if msg == b"Hello": allow_test_pass = True client_allow.close() # Part 2 - demonstrate denying clients based on IP address auth.stop() auth = ThreadAuthenticator(ctx) auth.start() auth.deny('127.0.0.1') client_deny = ctx.socket(zmq.PULL) client_deny.connect('tcp://127.0.0.1:9000') if server.poll(50, zmq.POLLOUT): server.send(b"Hello") if client_deny.poll(50): msg = client_deny.recv() else: deny_test_pass = True else: deny_test_pass = True client_deny.close() auth.stop() # stop auth thread if allow_test_pass and deny_test_pass: logging.info("Strawhouse test OK") else: logging.error("Strawhouse test FAIL") if __name__ == '__main__': if zmq.zmq_version_info() < (4,0): raise RuntimeError("Security is not supported in libzmq version < 4.0. libzmq version {0}".format(zmq.zmq_version())) if '-v' in sys.argv: level = logging.DEBUG else: level = logging.INFO logging.basicConfig(level=level, format="[%(levelname)s] %(message)s") run() pyzmq-16.0.2/examples/security/woodhouse.py000066400000000000000000000043651301503633700210440ustar00rootroot00000000000000#!/usr/bin/env python ''' Woodhouse extends Strawhouse with a name and password check. This uses the PLAIN mechanism which does plain-text username and password authentication). It's not really secure, and anyone sniffing the network (trivial with WiFi) can capture passwords and then login. Author: Chris Laws ''' import logging import sys import zmq import zmq.auth from zmq.auth.thread import ThreadAuthenticator def run(): '''Run woodhouse example''' valid_client_test_pass = False invalid_client_test_pass = False ctx = zmq.Context.instance() # Start an authenticator for this context. auth = ThreadAuthenticator(ctx) auth.start() auth.allow('127.0.0.1') # Instruct authenticator to handle PLAIN requests auth.configure_plain(domain='*', passwords={'admin': 'secret'}) server = ctx.socket(zmq.PUSH) server.plain_server = True # must come before bind server.bind('tcp://*:9000') client = ctx.socket(zmq.PULL) client.plain_username = b'admin' client.plain_password = b'secret' client.connect('tcp://127.0.0.1:9000') server.send(b"Hello") if client.poll(): msg = client.recv() if msg == b"Hello": valid_client_test_pass = True client.close() # now use invalid credentials - expect no msg received client2 = ctx.socket(zmq.PULL) client2.plain_username = b'admin' client2.plain_password = b'bogus' client2.connect('tcp://127.0.0.1:9000') server.send(b"World") if client2.poll(50): msg = client.recv() if msg == "World": invalid_client_test_pass = False else: # no message is expected invalid_client_test_pass = True # stop auth thread auth.stop() if valid_client_test_pass and invalid_client_test_pass: logging.info("Woodhouse test OK") else: logging.error("Woodhouse test FAIL") if __name__ == '__main__': if zmq.zmq_version_info() < (4,0): raise RuntimeError("Security is not supported in libzmq version < 4.0. 
libzmq version {0}".format(zmq.zmq_version())) if '-v' in sys.argv: level = logging.DEBUG else: level = logging.INFO logging.basicConfig(level=level, format="[%(levelname)s] %(message)s") run() pyzmq-16.0.2/examples/serialization/000077500000000000000000000000001301503633700174545ustar00rootroot00000000000000pyzmq-16.0.2/examples/serialization/serialsocket.py000066400000000000000000000044041301503633700225200ustar00rootroot00000000000000"""A Socket subclass that adds some serialization methods.""" import zlib import pickle import numpy import zmq class SerializingSocket(zmq.Socket): """A class with some extra serialization methods send_zipped_pickle is just like send_pyobj, but uses zlib to compress the stream before sending. send_array sends numpy arrays with metadata necessary for reconstructing the array on the other side (dtype,shape). """ def send_zipped_pickle(self, obj, flags=0, protocol=-1): """pack and compress an object with pickle and zlib.""" pobj = pickle.dumps(obj, protocol) zobj = zlib.compress(pobj) print('zipped pickle is %i bytes' % len(zobj)) return self.send(zobj, flags=flags) def recv_zipped_pickle(self, flags=0): """reconstruct a Python object sent with zipped_pickle""" zobj = self.recv(flags) pobj = zlib.decompress(zobj) return pickle.loads(pobj) def send_array(self, A, flags=0, copy=True, track=False): """send a numpy array with metadata""" md = dict( dtype = str(A.dtype), shape = A.shape, ) self.send_json(md, flags|zmq.SNDMORE) return self.send(A, flags, copy=copy, track=track) def recv_array(self, flags=0, copy=True, track=False): """recv a numpy array""" md = self.recv_json(flags=flags) msg = self.recv(flags=flags, copy=copy, track=track) A = numpy.frombuffer(msg, dtype=md['dtype']) return A.reshape(md['shape']) class SerializingContext(zmq.Context): _socket_class = SerializingSocket def main(): ctx = SerializingContext() req = ctx.socket(zmq.REQ) rep = ctx.socket(zmq.REP) rep.bind('inproc://a') req.connect('inproc://a') A = numpy.ones((1024,1024)) print ("Array is %i bytes" % (A.nbytes)) # send/recv with pickle+zip req.send_zipped_pickle(A) B = rep.recv_zipped_pickle() # now try non-copying version rep.send_array(A, copy=False) C = req.recv_array(copy=False) print ("Checking zipped pickle...") print ("Okay" if (A==B).all() else "Failed") print ("Checking send_array...") print ("Okay" if (C==B).all() else "Failed") if __name__ == '__main__': main() pyzmq-16.0.2/examples/win32-interrupt/000077500000000000000000000000001301503633700175735ustar00rootroot00000000000000pyzmq-16.0.2/examples/win32-interrupt/display.py000066400000000000000000000024111301503633700216100ustar00rootroot00000000000000"""The display part of a simply two process chat app.""" # This file has been placed in the public domain. import zmq from zmq.utils.win32 import allow_interrupt def main(addrs): context = zmq.Context() control = context.socket(zmq.PUB) control.bind('inproc://control') updates = context.socket(zmq.SUB) updates.setsockopt(zmq.SUBSCRIBE, "") updates.connect('inproc://control') for addr in addrs: print "Connecting to: ", addr updates.connect(addr) def interrupt_polling(): """Fix CTRL-C on Windows using "self pipe trick".""" control.send_multipart(['', 'quit']) with allow_interrupt(interrupt_polling): message = '' while message != 'quit': message = updates.recv_multipart() if len(message) < 2: print 'Invalid message.' continue account = message[0] message = ' '.join(message[1:]) if message == 'quit': print 'Killed by "%s".' 
% account break print '%s: %s' % (account, message) if __name__ == '__main__': import sys if len(sys.argv) < 2: print "usage: display.py
    [,
    ...]" raise SystemExit main(sys.argv[1:]) pyzmq-16.0.2/examples/win32-interrupt/prompt.py000066400000000000000000000023131301503633700214650ustar00rootroot00000000000000"""The prompt part of a simply two process chat app.""" # # Copyright (c) 2010 Andrew Gwozdziewycz # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . import zmq def main(addr, account): ctx = zmq.Context() socket = ctx.socket(zmq.PUB) socket.bind(addr) while True: message = raw_input("%s> " % account) socket.send_multipart((account, message)) if __name__ == '__main__': import sys if len(sys.argv) != 3: print "usage: prompt.py
    " raise SystemExit main(sys.argv[1], sys.argv[2]) pyzmq-16.0.2/perf/000077500000000000000000000000001301503633700137155ustar00rootroot00000000000000pyzmq-16.0.2/perf/perf.py000066400000000000000000000122371301503633700152300ustar00rootroot00000000000000#!/usr/bin/env python # coding: utf-8 # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. # # Some original test code Copyright (c) 2007-2010 iMatix Corporation, # Used under LGPLv3 import argparse from multiprocessing import Process import time try: now = time.monotonic except AttributeError: now = time.time import zmq def parse_args(argv=None): parser = argparse.ArgumentParser(description='Run a zmq performance test') parser.add_argument('-p', '--poll', action='store_true', help='use a zmq Poller instead of raw send/recv') parser.add_argument('-c', '--copy', action='store_true', help='copy messages instead of using zero-copy') parser.add_argument('-s', '--size', type=int, default=10240, help='size (in bytes) of the test message') parser.add_argument('-n', '--count', type=int, default=10240, help='number of test messages to send') parser.add_argument('--url', dest='url', type=str, default='tcp://127.0.0.1:5555', help='the zmq URL on which to run the test') parser.add_argument(dest='test', type=str, default='lat', choices=['lat', 'thr'], help='which test to run') return parser.parse_args(argv) def latency_echo(url, count, poll, copy): """echo messages on a REP socket Should be started before `latency` """ ctx = zmq.Context() s = ctx.socket(zmq.REP) if poll: p = zmq.Poller() p.register(s) s.bind(url) block = zmq.NOBLOCK if poll else 0 for i in range(count): if poll: res = p.poll() msg = s.recv(block, copy=copy) if poll: res = p.poll() s.send(msg, block, copy=copy) msg = s.recv() assert msg == b'done' s.close() ctx.term() def latency(url, count, size, poll, copy): """Perform a latency test""" ctx = zmq.Context() s = ctx.socket(zmq.REQ) s.setsockopt(zmq.LINGER, -1) s.connect(url) if poll: p = zmq.Poller() p.register(s) msg = b' ' * size block = zmq.NOBLOCK if poll else 0 time.sleep(1) start = now() for i in range (0, count): if poll: res = p.poll() assert(res[0][1] & zmq.POLLOUT) s.send(msg, block, copy=copy) if poll: res = p.poll() assert(res[0][1] & zmq.POLLIN) msg = s.recv(block, copy=copy) assert len(msg) == size elapsed = now() - start s.send(b'done') latency = 1e6 * elapsed / (count * 2.) print ("message size : %8i [B]" % (size, )) print ("roundtrip count: %8i [msgs]" % (count, )) print ("mean latency : %12.3f [µs]" % (latency, )) print ("test time : %12.3f [s]" % (elapsed, )) def pusher(url, count, size, poll, copy): """send a bunch of messages on a PUSH socket""" ctx = zmq.Context() s = ctx.socket(zmq.PUSH) # Add your socket options here. # For example ZMQ_RATE, ZMQ_RECOVERY_IVL and ZMQ_MCAST_LOOP for PGM. if poll: p = zmq.Poller() p.register(s) s.connect(url) msg = zmq.Message(b' ' * size) block = zmq.NOBLOCK if poll else 0 for i in range(count): if poll: res = p.poll() assert(res[0][1] & zmq.POLLOUT) s.send(msg, block, copy=copy) s.close() ctx.term() def throughput(url, count, size, poll, copy): """recv a bunch of messages on a PULL socket Should be started before `pusher` """ ctx = zmq.Context() s = ctx.socket(zmq.PULL) # Add your socket options here. # For example ZMQ_RATE, ZMQ_RECOVERY_IVL and ZMQ_MCAST_LOOP for PGM. if poll: p = zmq.Poller() p.register(s) s.bind(url) block = zmq.NOBLOCK if poll else 0 # Wait for the other side to connect. 
msg = s.recv() assert len (msg) == size start = now() for i in range (count-1): if poll: res = p.poll() msg = s.recv(block, copy=copy) elapsed = now() - start throughput = (float(count)) / float(elapsed) megabits = float(throughput * size * 8) / 1e6 print ("message size : %8i [B]" % (size, )) print ("message count : %8i [msgs]" % (count, )) print ("mean throughput: %8.0f [msg/s]" % (throughput, )) print ("mean throughput: %12.3f [Mb/s]" % (megabits, )) print ("test time : %12.3f [s]" % (elapsed, )) def main(): args = parse_args() tic = time.time() if args.test == 'lat': bg = Process(target=latency_echo, args=(args.url, args.count, args.poll, args.copy)) bg.start() latency(args.url, args.count, args.size, args.poll, args.copy) elif args.test == 'thr': bg = Process(target=throughput, args=(args.url, args.count, args.size, args.poll, args.copy)) bg.start() pusher(args.url, args.count, args.size, args.poll, args.copy) bg.join() toc = time.time() if (toc - tic) < 3: print ("For best results, tests should take at least a few seconds.") if __name__ == '__main__': main() pyzmq-16.0.2/setup.cfg.android000066400000000000000000000006671301503633700162320ustar00rootroot00000000000000[global] # the prefix with which libzmq was configured / installed zmq_prefix = /tmp/zeromq-android have_sys_un_h = False [build_ext] libraries = python2.6 # path to your python-for-android # the result of: # wget http://python-for-android.googlecode.com/files/python-lib_r16.zip # unzip python-lib_r16.zip -dpython-lib library_dirs = ../python-lib/lib/ include_dirs = ../python-lib/include/python2.6 [bdist_egg] plat-name = linux-armv pyzmq-16.0.2/setup.cfg.template000066400000000000000000000012161301503633700164140ustar00rootroot00000000000000[global] # zmq_prefix = /usr/local # (adds zmq_prefix/include to include_dirs and zmq_prefix/lib to library_dirs) # have_sys_un_h = False # does sys/un.h exist? pyzmq will try to detect it, but you can override # skip_check_zmq = True # skip checking zmq verson (if it doesn't work for some reason) # libzmq_extension = True # force building libzmq as an extension (same as --zmq=bundled) # no_libzmq_extension = True # prevent fallback on building libzmq as an extension if regular build fails [build_ext] # Edit these to add any paths you need to include (e.g. where libzmq is defined) library_dirs = /usr/local/lib include_dirs = /usr/local/include pyzmq-16.0.2/setup.py000077500000000000000000001324041301503633700145020ustar00rootroot00000000000000#!/usr/bin/env python #----------------------------------------------------------------------------- # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
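The throughput figures reported by the perf script above are simple arithmetic over the receive loop; a small worked sketch with hypothetical numbers (these are illustrative, not measured results):

# hypothetical numbers, not measured results
count, size, elapsed = 100000, 1024, 2.0          # messages, bytes per message, seconds
throughput = count / elapsed                       # 50000.0 [msg/s]
megabits = throughput * size * 8 / 1e6             # 409.6 [Mb/s]
print("%.0f msg/s, %.3f Mb/s" % (throughput, megabits))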
# # The `configure` subcommand is copied and adaped from h5py # h5py source used under the New BSD license # # h5py: # # The code to bundle libzmq as an Extension is from pyzmq-static # pyzmq-static source used under the New BSD license # # pyzmq-static: #----------------------------------------------------------------------------- from __future__ import with_statement, print_function import copy import os import shutil import subprocess import sys import time import errno import platform from traceback import print_exc # whether any kind of bdist is happening # do this before importing anything from distutils doing_bdist = any(arg.startswith('bdist') for arg in sys.argv[1:]) if any(bdist in sys.argv for bdist in ['bdist_wheel', 'bdist_egg']): import setuptools import distutils from distutils.core import setup, Command from distutils.ccompiler import get_default_compiler from distutils.ccompiler import new_compiler from distutils.extension import Extension from distutils.command.build_ext import build_ext from distutils.command.sdist import sdist from distutils.version import LooseVersion as V from glob import glob from os.path import splitext, basename, join as pjoin from subprocess import Popen, PIPE, check_call, CalledProcessError # local script imports: from buildutils import ( discover_settings, v_str, save_config, detect_zmq, merge, config_from_prefix, info, warn, fatal, debug, line, copy_and_patch_libzmq, localpath, fetch_libzmq, stage_platform_hpp, bundled_version, customize_mingw, compile_and_run, patch_lib_paths, ) # name of the libzmq library - can be changed by --libzmq libzmq_name = 'libzmq' #----------------------------------------------------------------------------- # Flags #----------------------------------------------------------------------------- pypy = 'PyPy' in sys.version # reference points for zmq compatibility min_legacy_zmq = (2,1,4) min_good_zmq = (3,2) target_zmq = bundled_version dev_zmq = (target_zmq[0], target_zmq[1] + 1, 0) # set dylib ext: if sys.platform.startswith('win'): lib_ext = '.dll' elif sys.platform == 'darwin': lib_ext = '.dylib' else: lib_ext = '.so' # allow `--zmq=foo` to be passed at any point, # but always assign it to configure configure_idx = -1 fetch_idx = -1 for idx, arg in enumerate(list(sys.argv)): # track index of configure and fetch_libzmq if arg == 'configure': configure_idx = idx elif arg == 'fetch_libzmq': fetch_idx = idx if arg.startswith('--zmq='): sys.argv.pop(idx) if configure_idx < 0: if fetch_idx < 0: configure_idx = 1 else: configure_idx = fetch_idx + 1 sys.argv.insert(configure_idx, 'configure') sys.argv.insert(configure_idx + 1, arg) break for idx, arg in enumerate(list(sys.argv)): if arg.startswith('--libzmq='): sys.argv.pop(idx) libzmq_name = arg.split("=",1)[1] break #----------------------------------------------------------------------------- # Configuration (adapted from h5py: http://h5py.googlecode.com) #----------------------------------------------------------------------------- # --- compiler settings ------------------------------------------------- def bundled_settings(debug): """settings for linking extensions against bundled libzmq""" settings = {} settings['libraries'] = [] settings['library_dirs'] = [] settings['include_dirs'] = [pjoin("bundled", "zeromq", "include")] settings['runtime_library_dirs'] = [] # add pthread on freebsd # is this necessary? 
if sys.platform.startswith('freebsd'): settings['libraries'].append('pthread') elif sys.platform.startswith('win'): # link against libzmq in build dir: plat = distutils.util.get_platform() temp = 'temp.%s-%i.%i' % (plat, sys.version_info[0], sys.version_info[1]) suffix = '' if sys.version_info >= (3,5): # Python 3.5 adds EXT_SUFFIX to libs ext_suffix = distutils.sysconfig.get_config_var('EXT_SUFFIX') suffix = os.path.splitext(ext_suffix)[0] if debug: suffix = '_d' + suffix release = 'Debug' else: release = 'Release' settings['libraries'].append(libzmq_name + suffix) settings['library_dirs'].append(pjoin('build', temp, release, 'buildutils')) return settings def check_pkgconfig(): """ pull compile / link flags from pkg-config if present. """ pcfg = None zmq_config = None try: check_call(['pkg-config', '--exists', 'libzmq']) # this would arguably be better with --variable=libdir / # --variable=includedir, but would require multiple calls pcfg = Popen(['pkg-config', '--libs', '--cflags', 'libzmq'], stdout=subprocess.PIPE) except OSError as osexception: if osexception.errno == errno.ENOENT: info('pkg-config not found') else: warn("Running pkg-config failed - %s." % osexception) except CalledProcessError: info("Did not find libzmq via pkg-config.") if pcfg is not None: output, _ = pcfg.communicate() output = output.decode('utf8', 'replace') bits = output.strip().split() zmq_config = {'library_dirs':[], 'include_dirs':[], 'libraries':[]} for tok in bits: if tok.startswith("-L"): zmq_config['library_dirs'].append(tok[2:]) if tok.startswith("-I"): zmq_config['include_dirs'].append(tok[2:]) if tok.startswith("-l"): zmq_config['libraries'].append(tok[2:]) info("Settings obtained from pkg-config: %r" % zmq_config) return zmq_config def settings_from_prefix(prefix=None, bundle_libzmq_dylib=False): """load appropriate library/include settings from ZMQ prefix""" settings = {} settings['libraries'] = [] settings['include_dirs'] = [] settings['library_dirs'] = [] settings['runtime_library_dirs'] = [] settings['extra_link_args'] = [] if sys.platform.startswith('win'): settings['libraries'].append(libzmq_name) if prefix: settings['include_dirs'] += [pjoin(prefix, 'include')] settings['library_dirs'] += [pjoin(prefix, 'lib')] else: # add pthread on freebsd if sys.platform.startswith('freebsd'): settings['libraries'].append('pthread') if sys.platform.startswith('sunos'): if platform.architecture()[0] == '32bit': settings['extra_link_args'] += ['-m32'] else: settings['extra_link_args'] += ['-m64'] if prefix: settings['libraries'].append('zmq') settings['include_dirs'] += [pjoin(prefix, 'include')] if not bundle_libzmq_dylib: if sys.platform.startswith('sunos') and platform.architecture()[0] == '64bit': settings['library_dirs'] += [pjoin(prefix, 'lib/amd64')] settings['library_dirs'] += [pjoin(prefix, 'lib')] else: # If prefix is not explicitly set, pull it from pkg-config by default. 
# this is probably applicable across platforms, but i don't have # sufficient test environments to confirm pkgcfginfo = check_pkgconfig() if pkgcfginfo is not None: # we can get all the zmq-specific values from pkgconfg for key, value in pkgcfginfo.items(): settings[key].extend(value) else: settings['libraries'].append('zmq') if sys.platform == 'darwin' and os.path.isdir('/opt/local/lib'): # allow macports default settings['include_dirs'] += ['/opt/local/include'] settings['library_dirs'] += ['/opt/local/lib'] if os.environ.get('VIRTUAL_ENV', None): # find libzmq installed in virtualenv env = os.environ['VIRTUAL_ENV'] settings['include_dirs'] += [pjoin(env, 'include')] settings['library_dirs'] += [pjoin(env, 'lib')] if bundle_libzmq_dylib: # bdist should link against bundled libzmq settings['library_dirs'].append('zmq') if sys.platform == 'darwin': pass # unused rpath args for OS X: # settings['extra_link_args'] = ['-Wl,-rpath','-Wl,$ORIGIN/..'] else: settings['runtime_library_dirs'] += ['$ORIGIN/..'] elif sys.platform != 'darwin': info("%r" % settings) settings['runtime_library_dirs'] += [ os.path.abspath(x) for x in settings['library_dirs'] ] return settings class LibZMQVersionError(Exception): pass #----------------------------------------------------------------------------- # Extra commands #----------------------------------------------------------------------------- class Configure(build_ext): """Configure command adapted from h5py""" description = "Discover ZMQ version and features" user_options = build_ext.user_options + [ ('zmq=', None, "libzmq install prefix"), ('build-base=', 'b', "base directory for build library"), # build_base from build ] def initialize_options(self): build_ext.initialize_options(self) self.zmq = None self.build_base = 'build' # DON'T REMOVE: distutils demands these be here even if they do nothing. def finalize_options(self): build_ext.finalize_options(self) self.tempdir = pjoin(self.build_temp, 'scratch') self.has_run = False self.config = discover_settings(self.build_base) if self.zmq is not None: merge(self.config, config_from_prefix(self.zmq)) self.init_settings_from_config() def save_config(self, name, cfg): """write config to JSON""" save_config(name, cfg, self.build_base) # write to zmq.utils.[name].json save_config(name, cfg, os.path.join('zmq', 'utils')) # also write to build_lib, because we might be run after copying to # build_lib has already happened. 
build_lib_utils = os.path.join(self.build_lib, 'zmq', 'utils') if os.path.exists(build_lib_utils): save_config(name, cfg, build_lib_utils) def init_settings_from_config(self): """set up compiler settings, based on config""" cfg = self.config if sys.platform == 'win32' and cfg.get('bundle_msvcp') is None: # default bundle_msvcp=True on: # Windows Python 3.5 bdist *without* DISTUTILS_USE_SDK if os.environ.get("PYZMQ_BUNDLE_CRT") or ( sys.version_info >= (3,5) and self.compiler_type == 'msvc' and not os.environ.get('DISTUTILS_USE_SDK') and doing_bdist ): cfg['bundle_msvcp'] = True if cfg['libzmq_extension']: settings = bundled_settings(self.debug) else: settings = settings_from_prefix(cfg['zmq_prefix'], self.bundle_libzmq_dylib) if 'have_sys_un_h' not in cfg: # don't link against anything when checking for sys/un.h minus_zmq = copy.deepcopy(settings) try: minus_zmq['libraries'] = [] except Exception: pass try: compile_and_run(self.tempdir, pjoin('buildutils', 'check_sys_un.c'), **minus_zmq ) except Exception as e: warn("No sys/un.h, IPC_PATH_MAX_LEN will be undefined: %s" % e) cfg['have_sys_un_h'] = False else: cfg['have_sys_un_h'] = True self.save_config('config', cfg) if cfg['have_sys_un_h']: settings['define_macros'] = [('HAVE_SYS_UN_H', 1)] settings.setdefault('define_macros', []) # include internal directories settings.setdefault('include_dirs', []) settings['include_dirs'] += [pjoin('zmq', sub) for sub in ( 'utils', pjoin('backend', 'cython'), 'devices', )] if sys.platform.startswith('win') and sys.version_info < (3, 3): settings['include_dirs'].insert(0, pjoin('buildutils', 'include_win32')) for ext in self.distribution.ext_modules: if ext.name.startswith('zmq.lib'): continue for attr, value in settings.items(): setattr(ext, attr, value) self.compiler_settings = settings self.save_config('compiler', settings) def create_tempdir(self): self.erase_tempdir() os.makedirs(self.tempdir) if sys.platform.startswith('win'): # fetch libzmq.dll into local dir local_dll = pjoin(self.tempdir, libzmq_name + '.dll') if not self.config['zmq_prefix'] and not os.path.exists(local_dll): fatal("ZMQ directory must be specified on Windows via setup.cfg" " or 'python setup.py configure --zmq=/path/to/zeromq2'") try: shutil.copy(pjoin(self.config['zmq_prefix'], 'lib', libzmq_name + '.dll'), local_dll) except Exception: if not os.path.exists(local_dll): warn("Could not copy " + libzmq_name + " into zmq/, which is usually necessary on Windows." 
"Please specify zmq prefix via configure --zmq=/path/to/zmq or copy " + libzmq_name + " into zmq/ manually.") def erase_tempdir(self): try: shutil.rmtree(self.tempdir) except Exception: pass @property def compiler_type(self): compiler = self.compiler if compiler is None: return get_default_compiler() elif isinstance(compiler, str): return compiler else: return compiler.compiler_type @property def cross_compiling(self): return self.config['bdist_egg'].get('plat-name', sys.platform) != sys.platform @property def bundle_libzmq_dylib(self): """ bundle_libzmq_dylib flag for whether external libzmq library will be included in pyzmq: only relevant when not building libzmq extension """ if 'bundle_libzmq_dylib' in self.config: return self.config['bundle_libzmq_dylib'] elif (sys.platform.startswith('win') or self.cross_compiling) \ and not self.config['libzmq_extension']: # always bundle libzmq on Windows and cross-compilation return True else: return False def check_zmq_version(self): """check the zmq version""" cfg = self.config # build test program zmq_prefix = cfg['zmq_prefix'] detected = self.test_build(zmq_prefix, self.compiler_settings) # now check the libzmq version vers = tuple(detected['vers']) vs = v_str(vers) if cfg['allow_legacy_libzmq']: min_zmq = min_legacy_zmq else: min_zmq = min_good_zmq if vers < min_zmq: msg = [ "Detected ZMQ version: %s, but require ZMQ >= %s" % (vs, v_str(min_zmq)), ] if zmq_prefix: msg.append(" ZMQ_PREFIX=%s" % zmq_prefix) if vers >= min_legacy_zmq: msg.append(" Explicitly allow legacy zmq by specifying `--zmq=/zmq/prefix`") raise LibZMQVersionError('\n'.join(msg)) if vers < min_good_zmq: msg = [ "Detected legacy ZMQ version: %s. It is STRONGLY recommended to use ZMQ >= %s" % (vs, v_str(min_good_zmq)), ] if zmq_prefix: msg.append(" ZMQ_PREFIX=%s" % zmq_prefix) warn('\n'.join(msg)) elif vers < target_zmq: warn("Detected ZMQ version: %s, but pyzmq targets ZMQ %s." % ( vs, v_str(target_zmq)) ) warn("libzmq features and fixes introduced after %s will be unavailable." % vs) line() elif vers >= dev_zmq: warn("Detected ZMQ version: %s. Some new features in libzmq may not be exposed by pyzmq." % vs) line() if sys.platform.startswith('win'): # fetch libzmq.dll into local dir local_dll = localpath('zmq', libzmq_name + '.dll') if not zmq_prefix and not os.path.exists(local_dll): fatal("ZMQ directory must be specified on Windows via setup.cfg or 'python setup.py configure --zmq=/path/to/zeromq2'") try: shutil.copy(pjoin(zmq_prefix, 'lib', libzmq_name + '.dll'), local_dll) except Exception: if not os.path.exists(local_dll): warn("Could not copy " + libzmq_name + " into zmq/, which is usually necessary on Windows." 
"Please specify zmq prefix via configure --zmq=/path/to/zmq or copy " + libzmq_name + " into zmq/ manually.") def bundle_libzmq_extension(self): bundledir = "bundled" ext_modules = self.distribution.ext_modules if ext_modules and any(m.name == 'zmq.libzmq' for m in ext_modules): # I've already been run return line() info("Using bundled libzmq") # fetch sources for libzmq extension: if not os.path.exists(bundledir): os.makedirs(bundledir) fetch_libzmq(bundledir) stage_platform_hpp(pjoin(bundledir, 'zeromq')) sources = [pjoin('buildutils', 'initlibzmq.c')] sources += glob(pjoin(bundledir, 'zeromq', 'src', '*.cpp')) includes = [ pjoin(bundledir, 'zeromq', 'include') ] if bundled_version < (4, 2, 0): tweetnacl = pjoin(bundledir, 'zeromq', 'tweetnacl') tweetnacl_sources = glob(pjoin(tweetnacl, 'src', '*.c')) randombytes = pjoin(tweetnacl, 'contrib', 'randombytes') if sys.platform.startswith('win'): tweetnacl_sources.append(pjoin(randombytes, 'winrandom.c')) else: tweetnacl_sources.append(pjoin(randombytes, 'devurandom.c')) sources += tweetnacl_sources includes.append(pjoin(tweetnacl, 'src')) includes.append(randombytes) else: # >= 4.2 sources += glob(pjoin(bundledir, 'zeromq', 'src', 'tweetnacl.c')) # construct the Extensions: libzmq = Extension( 'zmq.libzmq', sources=sources, include_dirs=includes, ) # register the extension: self.distribution.ext_modules.insert(0, libzmq) # use tweetnacl to provide CURVE support libzmq.define_macros.append(('ZMQ_HAVE_CURVE', 1)) libzmq.define_macros.append(('ZMQ_USE_TWEETNACL', 1)) # select polling subsystem based on platform if sys.platform == 'darwin' or 'bsd' in sys.platform: libzmq.define_macros.append(('ZMQ_USE_KQUEUE', 1)) elif 'linux' in sys.platform: libzmq.define_macros.append(('ZMQ_USE_EPOLL', 1)) elif sys.platform.startswith('win'): libzmq.define_macros.append(('ZMQ_USE_SELECT', 1)) else: # this may not be sufficiently precise libzmq.define_macros.append(('ZMQ_USE_POLL', 1)) if sys.platform.startswith('win'): # include defines from zeromq msvc project: libzmq.define_macros.append(('FD_SETSIZE', 16384)) libzmq.define_macros.append(('DLL_EXPORT', 1)) libzmq.define_macros.append(('_CRT_SECURE_NO_WARNINGS', 1)) # When compiling the C++ code inside of libzmq itself, we want to # avoid "warning C4530: C++ exception handler used, but unwind # semantics are not enabled. Specify /EHsc". if self.compiler_type == 'msvc': libzmq.extra_compile_args.append('/EHsc') elif self.compiler_type == 'mingw32': libzmq.define_macros.append(('ZMQ_HAVE_MINGW32', 1)) # And things like sockets come from libraries that must be named. 
libzmq.libraries.extend(['rpcrt4', 'ws2_32', 'advapi32']) # bundle MSCVP redist if self.config['bundle_msvcp']: cc = new_compiler(compiler=self.compiler_type) cc.initialize() # get vc_redist location via private API try: cc._vcruntime_redist except AttributeError: # fatal error if env set, warn otherwise msg = fatal if os.environ.get("PYZMQ_BUNDLE_CRT") else warn msg("Failed to get cc._vcruntime via private API, not bundling CRT") if cc._vcruntime_redist: redist_dir, dll = os.path.split(cc._vcruntime_redist) to_bundle = [ pjoin(redist_dir, dll.replace('vcruntime', name)) for name in ('msvcp', 'concrt') ] for src in to_bundle: dest = localpath('zmq', basename(src)) info("Copying %s -> %s" % (src, dest)) # copyfile to avoid permission issues shutil.copyfile(src, dest) else: libzmq.include_dirs.append(bundledir) # check if we need to link against Realtime Extensions library cc = new_compiler(compiler=self.compiler_type) cc.output_dir = self.build_temp if not sys.platform.startswith(('darwin', 'freebsd')): line() info("checking for timer_create") if not cc.has_function('timer_create'): info("no timer_create, linking librt") libzmq.libraries.append('rt') else: info("ok") if pypy: # seem to need explicit libstdc++ on linux + pypy # not sure why libzmq.libraries.append("stdc++") # copy the header files to the source tree. bundledincludedir = pjoin('zmq', 'include') if not os.path.exists(bundledincludedir): os.makedirs(bundledincludedir) if not os.path.exists(pjoin(self.build_lib, bundledincludedir)): os.makedirs(pjoin(self.build_lib, bundledincludedir)) for header in glob(pjoin(bundledir, 'zeromq', 'include', '*.h')): shutil.copyfile(header, pjoin(bundledincludedir, basename(header))) shutil.copyfile(header, pjoin(self.build_lib, bundledincludedir, basename(header))) # update other extensions, with bundled settings self.config['libzmq_extension'] = True self.init_settings_from_config() self.save_config('config', self.config) def fallback_on_bundled(self): """Couldn't build, fallback after waiting a while""" line() warn('\n'.join([ "Couldn't find an acceptable libzmq on the system.", "", "If you expected pyzmq to link against an installed libzmq, please check to make sure:", "", " * You have a C compiler installed", " * A development version of Python is installed (including headers)", " * A development version of ZMQ >= %s is installed (including headers)" % v_str(min_good_zmq), " * If ZMQ is not in a default location, supply the argument --zmq=", " * If you did recently install ZMQ to a default location,", " try rebuilding the ld cache with `sudo ldconfig`", " or specify zmq's location with `--zmq=/usr/local`", "", ])) info('\n'.join([ "You can skip all this detection/waiting nonsense if you know", "you want pyzmq to bundle libzmq as an extension by passing:", "", " `--zmq=bundled`", "", "I will now try to build libzmq as a Python extension", "unless you interrupt me (^C) in the next 10 seconds...", "", ])) for i in range(10,0,-1): sys.stdout.write('\r%2i...' % i) sys.stdout.flush() time.sleep(1) info("") return self.bundle_libzmq_extension() def test_build(self, prefix, settings): """do a test build ob libzmq""" self.create_tempdir() settings = settings.copy() if self.bundle_libzmq_dylib and not sys.platform.startswith('win'): # rpath slightly differently here, because libzmq not in .. 
but ../zmq: settings['library_dirs'] = ['zmq'] if sys.platform == 'darwin': pass # unused rpath args for OS X: # settings['extra_link_args'] = ['-Wl,-rpath','-Wl,$ORIGIN/../zmq'] else: settings['runtime_library_dirs'] = [ os.path.abspath(pjoin('.', 'zmq')) ] line() info("Configure: Autodetecting ZMQ settings...") info(" Custom ZMQ dir: %s" % prefix) try: detected = detect_zmq(self.tempdir, compiler=self.compiler_type, **settings) finally: self.erase_tempdir() info(" ZMQ version detected: %s" % v_str(detected['vers'])) return detected def finish_run(self): self.save_config('config', self.config) line() def run(self): cfg = self.config if cfg['libzmq_extension']: self.bundle_libzmq_extension() self.finish_run() return # When cross-compiling and zmq is given explicitly, we can't testbuild # (as we can't testrun the binary), we assume things are alright. if cfg['skip_check_zmq'] or self.cross_compiling: warn("Skipping zmq version check") self.finish_run() return zmq_prefix = cfg['zmq_prefix'] # There is no available default on Windows, so start with fallback unless # zmq was given explicitly, or libzmq extension was explicitly prohibited. if sys.platform.startswith("win") and \ not cfg['no_libzmq_extension'] and \ not zmq_prefix: self.fallback_on_bundled() self.finish_run() return if zmq_prefix and self.bundle_libzmq_dylib and not sys.platform.startswith('win'): copy_and_patch_libzmq(zmq_prefix, libzmq_name+lib_ext) # first try with given config or defaults try: self.check_zmq_version() except LibZMQVersionError as e: info("\nBad libzmq version: %s\n" % e) except Exception as e: # print the error as distutils would if we let it raise: info("\nerror: %s\n" % e) else: self.finish_run() return # try fallback on /usr/local on *ix if no prefix is given if not zmq_prefix and not sys.platform.startswith('win'): info("Failed with default libzmq, trying again with /usr/local") time.sleep(1) zmq_prefix = cfg['zmq_prefix'] = '/usr/local' self.init_settings_from_config() try: self.check_zmq_version() except LibZMQVersionError as e: info("\nBad libzmq version: %s\n" % e) except Exception as e: # print the error as distutils would if we let it raise: info("\nerror: %s\n" % e) else: # if we get here the second run succeeded, so we need to update compiler # settings for the extensions with /usr/local prefix self.finish_run() return # finally, fallback on bundled if cfg['no_libzmq_extension']: fatal("Falling back on bundled libzmq," " but config has explicitly prohibited building the libzmq extension." 
) self.fallback_on_bundled() self.finish_run() class FetchCommand(Command): """Fetch libzmq sources, that's it.""" description = "Fetch libzmq sources into bundled/zeromq" user_options = [ ] def initialize_options(self): pass def finalize_options(self): pass def run(self): # fetch sources for libzmq extension: bundledir = "bundled" if os.path.exists(bundledir): info("Scrubbing directory: %s" % bundledir) shutil.rmtree(bundledir) if not os.path.exists(bundledir): os.makedirs(bundledir) fetch_libzmq(bundledir) for tarball in glob(pjoin(bundledir, '*.tar.gz')): os.remove(tarball) class TestCommand(Command): """Custom distutils command to run the test suite.""" description = "Test PyZMQ (must have been built inplace: `setup.py build_ext --inplace`)" user_options = [ ] def initialize_options(self): self._dir = os.getcwd() def finalize_options(self): pass def run(self): """Run the test suite with py.test""" # crude check for inplace build: try: import zmq except ImportError: print_exc() fatal('\n '.join(["Could not import zmq!", "You must build pyzmq with 'python setup.py build_ext --inplace' for 'python setup.py test' to work.", "If you did build pyzmq in-place, then this is a real error."])) sys.exit(1) info("Testing pyzmq-%s with libzmq-%s" % (zmq.pyzmq_version(), zmq.zmq_version())) p = Popen([sys.executable, '-m', 'pytest', '-v', os.path.join('zmq', 'tests')]) p.wait() sys.exit(p.returncode) class GitRevisionCommand(Command): """find the current git revision and add it to zmq.sugar.version.__revision__""" description = "Store current git revision in version.py" user_options = [ ] def initialize_options(self): self.version_py = pjoin('zmq','sugar','version.py') def run(self): try: p = Popen('git log -1'.split(), stdin=PIPE, stdout=PIPE, stderr=PIPE) except IOError: warn("No git found, skipping git revision") return if p.wait(): warn("checking git branch failed") info(p.stderr.read()) return line = p.stdout.readline().decode().strip() if not line.startswith('commit'): warn("bad commit line: %r" % line) return rev = line.split()[-1] # now that we have the git revision, we can apply it to version.py with open(self.version_py) as f: lines = f.readlines() for i,line in enumerate(lines): if line.startswith('__revision__'): lines[i] = "__revision__ = '%s'\n"%rev break with open(self.version_py, 'w') as f: f.writelines(lines) def finalize_options(self): pass class CleanCommand(Command): """Custom distutils command to clean the .so and .pyc files.""" user_options = [('all', 'a', "remove all build output, not just temporary by-products") ] boolean_options = ['all'] def initialize_options(self): self.all = None def finalize_options(self): pass def run(self): _clean_me = [] _clean_trees = [] for d in ('build', 'dist', 'conf'): if os.path.exists(d): _clean_trees.append(d) for root, dirs, files in os.walk('buildutils'): if any(root.startswith(pre) for pre in _clean_trees): continue for f in files: if os.path.splitext(f)[-1] == '.pyc': _clean_me.append(pjoin(root, f)) if '__pycache__' in dirs: _clean_trees.append(pjoin(root, '__pycache__')) for root, dirs, files in os.walk('zmq'): if any(root.startswith(pre) for pre in _clean_trees): continue for f in files: if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o', '.pyd', '.json'): _clean_me.append(pjoin(root, f)) for d in dirs: if d == '__pycache__': _clean_trees.append(pjoin(root, d)) # remove generated cython files if self.all: for root, dirs, files in os.walk(pjoin('zmq', 'backend', 'cython')): if os.path.splitext(f)[-1] == '.c': 
_clean_me.append(pjoin(root, f)) bundled = glob(pjoin('zmq', 'libzmq*')) _clean_me.extend([ b for b in bundled if b not in _clean_me ]) bundled_headers = glob(pjoin('zmq', 'include', '*.h')) _clean_me.extend([ h for h in bundled_headers if h not in _clean_me]) for clean_me in _clean_me: print("removing %s" % clean_me) try: os.unlink(clean_me) except Exception as e: print(e, file=sys.stderr) for clean_tree in _clean_trees: print("removing %s/" % clean_tree) try: shutil.rmtree(clean_tree) except Exception as e: print(e, file=sys.stderr) class CheckSDist(sdist): """Custom sdist that ensures Cython has compiled all pyx files to c.""" def initialize_options(self): sdist.initialize_options(self) self._pyxfiles = [] for root, dirs, files in os.walk('zmq'): for f in files: if f.endswith('.pyx'): self._pyxfiles.append(pjoin(root, f)) def run(self): self.run_command('fetch_libzmq') if 'cython' in cmdclass: self.run_command('cython') else: for pyxfile in self._pyxfiles: cfile = pyxfile[:-3]+'c' msg = "C-source file '%s' not found."%(cfile)+\ " Run 'setup.py cython' before sdist." assert os.path.isfile(cfile), msg sdist.run(self) class CheckingBuildExt(build_ext): """Subclass build_ext to get clearer report if Cython is necessary.""" def check_cython_extensions(self, extensions): for ext in extensions: for src in ext.sources: if not os.path.exists(src): fatal("""Cython-generated file '%s' not found. Cython >= %s is required to compile pyzmq from a development branch. Please install Cython or download a release package of pyzmq. """ % (src, min_cython_version)) def build_extensions(self): self.check_cython_extensions(self.extensions) self.check_extensions_list(self.extensions) if self.compiler.compiler_type == 'mingw32': customize_mingw(self.compiler) for ext in self.extensions: self.build_extension(ext) def build_extension(self, ext): build_ext.build_extension(self, ext) ext_path = self.get_ext_fullpath(ext.name) patch_lib_paths(ext_path, self.compiler.library_dirs) def run(self): # check version, to prevent confusing undefined constant errors self.distribution.run_command('configure') build_ext.run(self) class ConstantsCommand(Command): """Rebuild templated files for constants To be run after adding new constants to `utils/constant_names`. 
""" user_options = [] def initialize_options(self): return def finalize_options(self): pass def run(self): from buildutils.constants import render_constants render_constants() #----------------------------------------------------------------------------- # Extensions #----------------------------------------------------------------------------- cmdclass = {'test':TestCommand, 'clean':CleanCommand, 'revision':GitRevisionCommand, 'configure': Configure, 'fetch_libzmq': FetchCommand, 'sdist': CheckSDist, 'constants': ConstantsCommand, } def makename(path, ext): return os.path.abspath(pjoin('zmq', *path)) + ext pxd = lambda *path: makename(path, '.pxd') pxi = lambda *path: makename(path, '.pxi') pyx = lambda *path: makename(path, '.pyx') dotc = lambda *path: makename(path, '.c') libzmq = pxd('backend', 'cython', 'libzmq') buffers = pxd('utils', 'buffers') message = pxd('backend', 'cython', 'message') context = pxd('backend', 'cython', 'context') socket = pxd('backend', 'cython', 'socket') checkrc = pxd('backend', 'cython', 'checkrc') monqueue = pxd('devices', 'monitoredqueue') submodules = { 'backend.cython' : {'constants': [libzmq, pxi('backend', 'cython', 'constants')], 'error':[libzmq, checkrc], '_poll':[libzmq, socket, context, checkrc], 'utils':[libzmq, checkrc], 'context':[context, libzmq, checkrc], 'message':[libzmq, buffers, message, checkrc], 'socket':[context, message, socket, libzmq, buffers, checkrc], '_device':[libzmq, socket, context, checkrc], '_version':[libzmq], }, 'devices' : { 'monitoredqueue':[buffers, libzmq, monqueue, socket, context, checkrc], }, } min_cython_version = '0.20' try: import Cython if V(Cython.__version__) < V(min_cython_version): raise ImportError("Cython >= %s required for cython build, found %s" % ( min_cython_version, Cython.__version__)) from Cython.Distutils import build_ext as build_ext_c from Cython.Distutils import Extension cython = True except Exception: cython = False suffix = '.c' cmdclass['build_ext'] = CheckingBuildExt class MissingCython(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): try: import Cython except ImportError: warn("Cython is missing") else: cv = getattr(Cython, "__version__", None) if cv is None or V(cv) < V(min_cython_version): warn( "Cython >= %s is required for compiling Cython sources, " "found: %s" % (min_cython_version, cv or Cython) ) cmdclass['cython'] = MissingCython else: suffix = '.pyx' class CythonCommand(build_ext_c): """Custom distutils command subclassed from Cython.Distutils.build_ext to compile pyx->c, and stop there. 
All this does is override the C-compile method build_extension() with a no-op.""" description = "Compile Cython sources to C" def build_extension(self, ext): pass class zbuild_ext(build_ext_c): def build_extensions(self): if self.compiler.compiler_type == 'mingw32': customize_mingw(self.compiler) return build_ext_c.build_extensions(self) def build_extension(self, ext): build_ext_c.build_extension(self, ext) ext_path = self.get_ext_fullpath(ext.name) patch_lib_paths(ext_path, self.compiler.library_dirs) def run(self): self.distribution.run_command('configure') return build_ext_c.run(self) cmdclass['cython'] = CythonCommand cmdclass['build_ext'] = zbuild_ext extensions = [] ext_include_dirs = [pjoin('zmq', sub) for sub in ('utils',)] ext_kwargs = { 'include_dirs': ext_include_dirs, } if cython: # set binding so that compiled methods can be inspected ext_kwargs['cython_directives'] = {'binding': True} for submod, packages in submodules.items(): for pkg in sorted(packages): sources = [pjoin('zmq', submod.replace('.', os.path.sep), pkg+suffix)] ext = Extension( 'zmq.%s.%s'%(submod, pkg), sources = sources, **ext_kwargs ) extensions.append(ext) if pypy: # add dummy extension, to ensure build_ext runs dummy_ext = Extension('dummy', sources=[]) extensions = [dummy_ext] bld_ext = cmdclass['build_ext'] class pypy_build_ext(bld_ext): """hack to build pypy extension only after building bundled libzmq otherwise it will fail when libzmq is bundled. """ def build_extensions(self): self.extensions.remove(dummy_ext) bld_ext.build_extensions(self) # build ffi extension after bundled libzmq, # because it may depend on linking it if self.inplace: sys.path.insert(0, '') else: sys.path.insert(0, self.build_lib) try: from zmq.backend.cffi import ffi except ImportError as e: warn("Couldn't get CFFI extension: %s" % e) else: ext = ffi.verifier.get_extension() if not ext.name.startswith('zmq.'): ext.name = 'zmq.backend.cffi.' + ext.name self.extensions.append(ext) self.build_extension(ext) finally: sys.path.pop(0) # How many build_ext subclasses is this? 5? Gross. cmdclass['build_ext'] = pypy_build_ext package_data = {'zmq': ['*.pxd', '*' + lib_ext], 'zmq.backend.cython': ['*.pxd', '*.pxi'], 'zmq.backend.cffi': ['*.h', '*.c'], 'zmq.devices': ['*.pxd'], 'zmq.utils': ['*.pxd', '*.h', '*.json'], } def extract_version(): """extract pyzmq version from sugar/version.py, so it's not multiply defined""" with open(pjoin('zmq', 'sugar', 'version.py')) as f: while True: line = f.readline() if line.startswith('VERSION'): lines = [] while line and not line.startswith('def'): lines.append(line) line = f.readline() break ns = {} exec(''.join(lines), ns) return ns['__version__'] def find_packages(): """adapted from IPython's setupbase.find_packages()""" packages = [] for dir,subdirs,files in os.walk('zmq'): package = dir.replace(os.path.sep, '.') if '__init__.py' not in files: # not a package continue if sys.version_info < (3,3) and 'asyncio' in package and 'sdist' not in sys.argv: # Don't install asyncio packages on old Python # avoids issues with tools like compileall, pytest, etc. # that get confused by presence of Python 3-only sources, # even when they are never imported. continue packages.append(package) return packages #----------------------------------------------------------------------------- # Main setup #----------------------------------------------------------------------------- long_desc = \ """ PyZMQ is the official Python binding for the ZeroMQ Messaging Library (http://www.zeromq.org). 
See `the docs `_ for more info. """ setup_args = dict( name = "pyzmq", version = extract_version(), packages = find_packages(), ext_modules = extensions, package_data = package_data, author = "Brian E. Granger, Min Ragan-Kelley", author_email = "zeromq-dev@lists.zeromq.org", url = 'https://pyzmq.readthedocs.org', description = "Python bindings for 0MQ", long_description = long_desc, license = "LGPL+BSD", cmdclass = cmdclass, classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)', 'License :: OSI Approved :: BSD License', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Topic :: System :: Networking', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], ) if 'setuptools' in sys.modules: setup_args['zip_safe'] = False if pypy: setup_args['install_requires'] = [ 'py', 'cffi', ] setup(**setup_args) pyzmq-16.0.2/setupegg.py000066400000000000000000000005321301503633700151560ustar00rootroot00000000000000#!/usr/bin/env python """Wrapper to run setup.py using setuptools.""" import os, sys import warnings warnings.warn("setupegg.py is deprecated. Don't use it anymore, it's a bit silly.") # now, import setuptools and call the actual setup import setuptools try: execfile('setup.py') except NameError: exec( open('setup.py','rb').read() ) pyzmq-16.0.2/test-requirements.txt000066400000000000000000000004331301503633700172220ustar00rootroot00000000000000gevent; python_version == '2.7' and platform_python_implementation != "PyPy" pytest; python_version != '3.2' pytest < 3; python_version == '3.2' unittest2; python_version < '3' tornado; python_version != '3.2' tornado < 4.4 ; python_version == '3.2' aiohttp; python_version >= '3.4' pyzmq-16.0.2/tools/000077500000000000000000000000001301503633700141215ustar00rootroot00000000000000pyzmq-16.0.2/tools/release_windows.bat000066400000000000000000000030771301503633700200120ustar00rootroot00000000000000@echo off REM build a pyzmq release on Windows REM 32+64b eggs on Python 27, and wheels on 27, 34, 35 REM that's 10 bdists REM requires Windows SDK 7.0 (for py2) and 7.1 (for py3), and VS2015C for py3.5 REM and Python installed in the locations: C:\Python34 (32b) and C:\Python34_64 (64b) REM after running, upload with `twine upload dist/*` REM run with cmd /k $PWD/tools/release_windows.bat setlocal EnableDelayedExpansion set SDKS=C:\Program Files\Microsoft SDKs\Windows set SDK7=%SDKS%\v7.0 set SDK71=%SDKS%\v7.1 set PYROOT=C:\ set DISTUTILS_USE_SDK=1 for %%p in (35, 34, 27) do ( if "%%p"=="27" ( set SDK=%SDK7% set cmd=build bdist_egg bdist_wheel --zmq=bundled ) else ( set SDK=%SDK71% set cmd=build bdist_wheel --zmq=bundled ) for %%b in (64, 32) do ( if "%%b"=="64" ( set SUFFIX=_64 set ARCH=/x64 set VCARCH=amd64 ) else ( set SUFFIX= set ARCH=/x86 set VCARCH= ) set PY=%PYROOT%\Python%%p!SUFFIX!\Python echo !PY! !SDK! !PY! -m ensurepip !PY! -m pip install --upgrade setuptools pip wheel if !errorlevel! neq 0 exit /b !errorlevel! 
if "%%p"=="35" ( rem no SDK for 3.5, but force static-linking with DISTUTILS_USE_SDK=1 anyway rem to avoid missing MSVCP140.dll @call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" !VCARCH! ) else ( @call "!SDK!\Bin\SetEnv.cmd" /release !ARCH! if !errorlevel! neq 0 exit /b !errorlevel! ) @echo on !PY! setup.py !cmd! @echo off if !errorlevel! neq 0 exit !errorlevel! ) ) exit pyzmq-16.0.2/tools/run_with_env.cmd000066400000000000000000000066561301503633700173320ustar00rootroot00000000000000:: Copied FROM https://github.com/ogrisel/python-appveyor-demo/blob/master/appveyor/run_with_env.cmd :: To build extensions for 64 bit Python 3, we need to configure environment :: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of: :: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1) :: :: To build extensions for 64 bit Python 2, we need to configure environment :: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of: :: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0) :: :: 32 bit builds, and 64-bit builds for 3.5 and beyond, do not require specific :: environment configurations. :: :: Note: this script needs to be run with the /E:ON and /V:ON flags for the :: cmd interpreter, at least for (SDK v7.0) :: :: More details at: :: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows :: http://stackoverflow.com/a/13751649/163740 :: :: Author: Olivier Grisel :: License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/ :: :: Notes about batch files for Python people: :: :: Quotes in values are literally part of the values: :: SET FOO="bar" :: FOO is now five characters long: " b a r " :: If you don't want quotes, don't include them on the right-hand side. :: :: The CALL lines at the end of this file look redundant, but if you move them :: outside of the IF clauses, they do not run properly in the SET_SDK_64==Y :: case, I don't know why. @ECHO OFF SET COMMAND_TO_RUN=%* SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows SET WIN_WDK=c:\Program Files (x86)\Windows Kits\10\Include\wdf :: Extract the major and minor versions, and allow for the minor version to be :: more than 9. This requires the version number to have two dots in it. SET MAJOR_PYTHON_VERSION=%PYTHON_VERSION:~0,1% IF "%PYTHON_VERSION:~3,1%" == "." ( SET MINOR_PYTHON_VERSION=%PYTHON_VERSION:~2,1% ) ELSE ( SET MINOR_PYTHON_VERSION=%PYTHON_VERSION:~2,2% ) :: Based on the Python version, determine what SDK version to use, and whether :: to set the SDK for 64-bit. 
IF %MAJOR_PYTHON_VERSION% == 2 ( SET WINDOWS_SDK_VERSION="v7.0" SET SET_SDK_64=Y ) ELSE ( IF %MAJOR_PYTHON_VERSION% == 3 ( SET WINDOWS_SDK_VERSION="v7.1" IF %MINOR_PYTHON_VERSION% LEQ 4 ( SET SET_SDK_64=Y ) ELSE ( SET SET_SDK_64=N SET PYZMQ_BUNDLE_CRT=1 IF EXIST "%WIN_WDK%" ( :: See: https://connect.microsoft.com/VisualStudio/feedback/details/1610302/ REN "%WIN_WDK%" 0wdf ) ) ) ELSE ( ECHO Unsupported Python version: "%MAJOR_PYTHON_VERSION%" EXIT 1 ) ) IF %PYTHON_ARCH% == 64 ( IF %SET_SDK_64% == Y ( ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %MAJOR_PYTHON_VERSION% on a 64 bit architecture SET DISTUTILS_USE_SDK=1 SET MSSdk=1 "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION% "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release ECHO Executing: %COMMAND_TO_RUN% call %COMMAND_TO_RUN% || EXIT 1 ) ELSE ( ECHO Using default MSVC build environment for 64 bit architecture ECHO Executing: %COMMAND_TO_RUN% call %COMMAND_TO_RUN% || EXIT 1 ) ) ELSE ( ECHO Using default MSVC build environment for 32 bit architecture ECHO Executing: %COMMAND_TO_RUN% call %COMMAND_TO_RUN% || EXIT 1 )pyzmq-16.0.2/tools/tasks.py000066400000000000000000000200151301503633700156160ustar00rootroot00000000000000#!/usr/bin/env python """ invoke script for releasing pyzmq usage: invoke release 14.3.1 """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from __future__ import print_function import glob import os import pipes import re import shutil import sys from contextlib import contextmanager from invoke import task, run as invoke_run import requests pjoin = os.path.join repo = "git@github.com:zeromq/pyzmq" _framework_py = lambda xy: "/Library/Frameworks/Python.framework/Versions/{0}/bin/python{0}".format(xy) py_exes = { '2.7' : _framework_py('2.7'), '3.4' : _framework_py('3.4'), '3.5' : _framework_py('3.5'), 'pypy': "/usr/local/bin/pypy", # FIXME: pypy3 can have releases when they support Python >= 3.3 # 'pypy3': "/usr/local/bin/pypy3", } egg_pys = {} # no more eggs! 
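# A usage sketch for this invoke script (the version number below is an
# arbitrary example; the ``--upload`` flag form is the one the release task
# itself prints for the appveyor step):
#
#     invoke sdist 16.0.2
#     invoke release 16.0.2 --upload
#
# For reference, _framework_py('3.5') above expands to
# /Library/Frameworks/Python.framework/Versions/3.5/bin/python3.5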
tmp = "/tmp" env_root = os.path.join(tmp, 'envs') repo_root = pjoin(tmp, 'pyzmq-release') sdist_root = pjoin(tmp, 'pyzmq-sdist') def _py(py): return py_exes[py] def run(cmd, **kwargs): """wrapper around invoke.run that accepts a Popen list""" if isinstance(cmd, list): cmd = " ".join(pipes.quote(s) for s in cmd) kwargs.setdefault('echo', True) return invoke_run(cmd, **kwargs) @contextmanager def cd(path): """Context manager for temporary CWD""" cwd = os.getcwd() os.chdir(path) try: yield finally: os.chdir(cwd) @task def clone_repo(reset=False): """Clone the repo""" if os.path.exists(repo_root) and reset: shutil.rmtree(repo_root) if os.path.exists(repo_root): with cd(repo_root): run("git pull") else: run("git clone %s %s" % (repo, repo_root)) @task def patch_version(vs): """Patch zmq/sugar/version.py for the current release""" major, minor, patch = vs_to_tup(vs) version_py = pjoin(repo_root, 'zmq', 'sugar', 'version.py') print("patching %s with %s" % (version_py, vs)) # read version.py, minus VERSION_ constants with open(version_py) as f: pre_lines = [] post_lines = [] lines = pre_lines for line in f: if line.startswith("VERSION_"): lines = post_lines else: lines.append(line) # write new version.py with given VERSION_ constants with open(version_py, 'w') as f: for line in pre_lines: f.write(line) f.write('VERSION_MAJOR = %s\n' % major) f.write('VERSION_MINOR = %s\n' % minor) f.write('VERSION_PATCH = %s\n' % patch) f.write('VERSION_EXTRA = ""\n') for line in post_lines: f.write(line) @task def tag(vs, push=False): """Make the tag (don't push)""" patch_version(vs) with cd(repo_root): run('git commit -a -m "release {}"'.format(vs)) run('git tag -a -m "release {0}" v{0}'.format(vs)) if push: run('git push') run('git push --tags') def make_env(py_exe, *packages): """Make a virtualenv Assumes `which python` has the `virtualenv` package """ py_exe = py_exes.get(py_exe, py_exe) if not os.path.exists(env_root): os.makedirs(env_root) env = os.path.join(env_root, os.path.basename(py_exe)) py = pjoin(env, 'bin', 'python') # new env if not os.path.exists(py): run('virtualenv {} -p {}'.format( pipes.quote(env), pipes.quote(py_exe), )) py = pjoin(env, 'bin', 'python') run([py, '-V']) install(py, 'pip', 'setuptools') install(py, *packages) return py def build_sdist(py, upload=False): """Build sdists Returns the path to the tarball """ with cd(repo_root): cmd = [py, 'setup.py', 'sdist'] run(cmd) if upload: run(['twine', 'upload', 'dist/*']) return glob.glob(pjoin(repo_root, 'dist', '*.tar.gz'))[0] @task def sdist(vs, upload=False): clone_repo() tag(vs, push=upload) py = make_env('3.5', 'cython', 'twine') tarball = build_sdist(py, upload=upload) return untar(tarball) def install(py, *packages): packages run([py, '-m', 'pip', 'install', '--upgrade'] + list(packages)) def vs_to_tup(vs): """version string to tuple""" return re.findall(r'\d+', vs) def tup_to_vs(tup): """tuple to version string""" return '.'.join(tup) def untar(tarball): if os.path.exists(sdist_root): shutil.rmtree(sdist_root) os.makedirs(sdist_root) with cd(sdist_root): run(['tar', '-xzf', tarball]) return glob.glob(pjoin(sdist_root, '*'))[0] def bdist(py, wheel=True, egg=False): py = make_env(py, 'wheel') cmd = [py, 'setup.py'] if wheel: cmd.append('bdist_wheel') if egg: cmd.append('bdist_egg') cmd.append('--zmq=bundled') run(cmd) @task def manylinux(vs, upload=False): """Build manylinux wheels with Matthew Brett's manylinux-builds""" manylinux = '/tmp/manylinux-builds' if not os.path.exists(manylinux): with cd('/tmp'): run("git clone 
--recursive https://github.com/minrk/manylinux-builds -b pyzmq") else: with cd(manylinux): run("git pull") run("git submodule update") base_cmd = "docker run --rm -e PYZMQ_VERSIONS='{vs}' -e PYTHON_VERSIONS='{pys}' -v $PWD:/io".format( vs=vs, pys='2.7 3.4 3.5' ) with cd(manylinux): run(base_cmd + " quay.io/pypa/manylinux1_x86_64 /io/build_pyzmqs.sh") run(base_cmd + " quay.io/pypa/manylinux1_i686 linux32 /io/build_pyzmqs.sh") if upload: py = make_env('3.5', 'twine') run(['twine', 'upload', os.path.join(manylinux, 'wheelhouse', '*')]) @task def release(vs, upload=False): """Release pyzmq""" # Ensure all our Pythons exist before we start: for v, path in py_exes.items(): if not os.path.exists(path): raise ValueError("Need %s at %s" % (v, path)) # start from scrach with clone and envs clone_repo(reset=True) if os.path.exists(env_root): shutil.rmtree(env_root) path = sdist(vs, upload=upload) with cd(path): for v in py_exes: bdist(v, wheel=True, egg=(v in egg_pys)) if upload: py = make_env('3.5', 'twine') run(['twine', 'upload', 'dist/*']) manylinux(vs, upload=upload) if upload: print("When AppVeyor finished building, upload artifacts with:") print(" invoke appveyor_artifacts {} --upload".format(vs)) _appveyor_api = 'https://ci.appveyor.com/api' _appveyor_project = 'minrk/pyzmq' def _appveyor_api_request(path): """Make an appveyor API request""" r = requests.get('{}/{}'.format(_appveyor_api, path), headers={ # 'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json', } ) r.raise_for_status() return r.json() @task def appveyor_artifacts(vs, dest='win-dist', upload=False): """Download appveyor artifacts If --upload is given, upload to PyPI """ if not os.path.exists(dest): os.makedirs(dest) build = _appveyor_api_request('projects/{}/branch/v{}'.format(_appveyor_project, vs)) jobs = build['build']['jobs'] artifact_urls = [] for job in jobs: artifacts = _appveyor_api_request('buildjobs/{}/artifacts'.format(job['jobId'])) artifact_urls.extend('{}/buildjobs/{}/artifacts/{}'.format( _appveyor_api, job['jobId'], artifact['fileName'] ) for artifact in artifacts) for url in artifact_urls: print("Downloading {} to {}".format(url, dest)) fname = url.rsplit('/', 1)[-1] r = requests.get(url, stream=True) r.raise_for_status() with open(os.path.join(dest, fname), 'wb') as f: for chunk in r.iter_content(1024): f.write(chunk) if upload: py = make_env('3.5', 'twine') run(['twine', 'upload', '{}/*'.format(dest)]) else: print("You can now upload these wheels with: ") print(" twine upload {}/*".format(dest)) pyzmq-16.0.2/zmq/000077500000000000000000000000001301503633700135705ustar00rootroot00000000000000pyzmq-16.0.2/zmq/__init__.py000066400000000000000000000034731301503633700157100ustar00rootroot00000000000000"""Python bindings for 0MQ.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. # load bundled libzmq, if there is one: def _load_libzmq(): """load bundled libzmq if there is one""" import sys, ctypes, platform dlopen = hasattr(sys, 'getdlopenflags') # unix-only if dlopen: dlflags = sys.getdlopenflags() sys.setdlopenflags(ctypes.RTLD_GLOBAL | dlflags) try: from . 
import libzmq except ImportError: pass else: # store libzmq as zmq._libzmq for backward-compat globals()['_libzmq'] = libzmq if platform.python_implementation().lower() == 'pypy': # pypy needs explicit CDLL load for some reason, # otherwise symbols won't be globally available ctypes.CDLL(libzmq.__file__, ctypes.RTLD_GLOBAL) finally: if dlopen: sys.setdlopenflags(dlflags) _load_libzmq() # zmq top-level imports from zmq import backend from zmq.backend import * from zmq import sugar from zmq.sugar import * def get_includes(): """Return a list of directories to include for linking against pyzmq with cython.""" from os.path import join, dirname, abspath, pardir, exists base = dirname(__file__) parent = abspath(join(base, pardir)) includes = [ parent ] + [ join(parent, base, subdir) for subdir in ('utils',) ] if exists(join(parent, base, 'include')): includes.append(join(parent, base, 'include')) return includes def get_library_dirs(): """Return a list of directories used to link against pyzmq's bundled libzmq.""" from os.path import join, dirname, abspath, pardir base = dirname(__file__) parent = abspath(join(base, pardir)) return [ join(parent, base) ] __all__ = ['get_includes'] + sugar.__all__ + backend.__all__ pyzmq-16.0.2/zmq/asyncio/000077500000000000000000000000001301503633700152355ustar00rootroot00000000000000pyzmq-16.0.2/zmq/asyncio/__init__.py000066400000000000000000000240311301503633700173460ustar00rootroot00000000000000"""AsyncIO support for zmq Requires asyncio and Python 3. """ # Copyright (c) PyZMQ Developers. # Distributed under the terms of the Modified BSD License. # Derived from Python 3.5.1 selectors._BaseSelectorImpl, used under PSF License from collections import Mapping import zmq as _zmq from zmq.eventloop import future as _future # TODO: support trollius for Legacy Python? (probably not) import asyncio from asyncio import SelectorEventLoop, Future try: import selectors except ImportError: from asyncio import selectors # py33 _aio2zmq_map = { selectors.EVENT_READ: _zmq.POLLIN, selectors.EVENT_WRITE: _zmq.POLLOUT, } _AIO_EVENTS = 0 for aio_evt in _aio2zmq_map: _AIO_EVENTS |= aio_evt def _aio2zmq(aio_evt): """Turn AsyncIO event mask into ZMQ event mask""" z_evt = 0 for aio_mask, z_mask in _aio2zmq_map.items(): if aio_mask & aio_evt: z_evt |= z_mask return z_evt def _zmq2aio(z_evt): """Turn ZMQ event mask into AsyncIO event mask""" aio_evt = 0 for aio_mask, z_mask in _aio2zmq_map.items(): if z_mask & z_evt: aio_evt |= aio_mask return aio_evt class _AsyncIO(object): _Future = Future _WRITE = selectors.EVENT_WRITE _READ = selectors.EVENT_READ def _default_loop(self): return asyncio.get_event_loop() def _fileobj_to_fd(fileobj): """Return a file descriptor from a file object. 
Parameters: fileobj -- file object or file descriptor Returns: corresponding file descriptor Raises: ValueError if the object is invalid """ if isinstance(fileobj, int): fd = fileobj else: try: fd = int(fileobj.fileno()) except (AttributeError, TypeError, ValueError): raise ValueError("Invalid file object: " "{!r}".format(fileobj)) from None if fd < 0: raise ValueError("Invalid file descriptor: {}".format(fd)) return fd class _SelectorMapping(Mapping): """Mapping of file objects to selector keys.""" def __init__(self, selector): self._selector = selector def __len__(self): return len(self._selector._fd_to_key) def __getitem__(self, fileobj): try: fd = self._selector._fileobj_lookup(fileobj) return self._selector._fd_to_key[fd] except KeyError: raise KeyError("{!r} is not registered".format(fileobj)) from None def __iter__(self): return iter(self._selector._fd_to_key) class ZMQSelector(selectors.BaseSelector): """zmq_poll-based selector for asyncio""" def __init__(self): super().__init__() # this maps file descriptors to keys self._fd_to_key = {} # read-only mapping returned by get_map() self._map = _SelectorMapping(self) self._zmq_poller = _zmq.Poller() def _fileobj_lookup(self, fileobj): """Return a zmq socket or a file descriptor from a file object. This wraps _fileobj_to_fd() to do an exhaustive search in case the object is invalid but we still have it in our map. This is used by unregister() so we can unregister an object that was previously registered even if it is closed. It is also used by _SelectorMapping. """ if isinstance(fileobj, _zmq.Socket): return fileobj else: try: return _fileobj_to_fd(fileobj) except ValueError: # Do an exhaustive search. for key in self._fd_to_key.values(): if key.fileobj is fileobj: return key.fd # Raise ValueError after all. raise def register(self, fileobj, events, data=None): """Register a file object. Parameters: fileobj -- zmq socket, file object or file descriptor events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) data -- attached data Returns: SelectorKey instance Raises: ValueError if events is invalid KeyError if fileobj is already registered OSError if fileobj is closed or otherwise is unacceptable to the underlying system call (if a system call is made) Note: OSError may or may not be raised """ if (not events) or (events & ~(selectors.EVENT_READ | selectors.EVENT_WRITE)): raise ValueError("Invalid events: {!r}".format(events)) key = selectors.SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) if key.fd in self._fd_to_key: raise KeyError("{!r} (FD {}) is already registered" .format(fileobj, key.fd)) self._fd_to_key[key.fd] = key self._zmq_poller.register(key.fd, _aio2zmq(events)) return key def unregister(self, fileobj): """Unregister a file object. 
Parameters: fileobj -- zmq socket, file object or file descriptor Returns: SelectorKey instance Raises: KeyError if fileobj is not registered Note: If fileobj is registered but has since been closed this does *not* raise OSError (even if the wrapped syscall does) """ try: key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) except KeyError: raise KeyError("{!r} is not registered".format(fileobj)) from None self._zmq_poller.unregister(key.fd) return key def modify(self, fileobj, events, data=None): try: key = self._fd_to_key[self._fileobj_lookup(fileobj)] except KeyError: raise KeyError("{!r} is not registered".format(fileobj)) from None if events != key.events: self.unregister(fileobj) key = self.register(fileobj, events, data) elif data != key.data: # Use a shortcut to update the data. key = key._replace(data=data) self._fd_to_key[key.fd] = key return key def select(self, timeout=None): """Perform the actual selection, until some monitored file objects are ready or a timeout expires. Parameters: timeout -- if timeout > 0, this specifies the maximum wait time, in seconds if timeout <= 0, the select() call won't block, and will report the currently ready file objects if timeout is None, select() will block until a monitored file object becomes ready Returns: list of (key, events) for ready file objects `events` is a bitwise mask of EVENT_READ|EVENT_WRITE """ if timeout is not None: if timeout < 0: timeout = 0 else: timeout = 1e3 * timeout fd_event_list = self._zmq_poller.poll(timeout) ready = [] for fd, event in fd_event_list: key = self._key_from_fd(fd) if key: events = _zmq2aio(event) ready.append((key, events)) return ready def close(self): """Close the selector. This must be called to make sure that any underlying resource is freed. """ self._fd_to_key.clear() self._map = None self._zmq_poller = None def get_map(self): return self._map def _key_from_fd(self, fd): """Return the key associated to a given file descriptor. 
Parameters: fd -- file descriptor Returns: corresponding key, or None if not found """ try: return self._fd_to_key[fd] except KeyError: return None class Poller(_AsyncIO, _future._AsyncPoller): """Poller returning asyncio.Future for poll results.""" def _watch_raw_socket(self, loop, socket, evt, f): """Schedule callback for a raw socket""" if evt & self._READ: loop.add_reader(socket, lambda *args: f()) if evt & self._WRITE: loop.add_writer(socket, lambda *args: f()) def _unwatch_raw_sockets(self, loop, *sockets): """Unschedule callback for a raw socket""" for socket in sockets: loop.remove_reader(socket) loop.remove_writer(socket) class Socket(_AsyncIO, _future._AsyncSocket): """Socket returning asyncio Futures for send/recv/poll methods.""" _poller_class = Poller def _add_io_state(self, state): """Add io_state to poller.""" if not self._state & state: self._state = self._state | state if state & self._READ: self.io_loop.add_reader(self, self._handle_recv) if state & self._WRITE: self.io_loop.add_writer(self, self._handle_send) def _drop_io_state(self, state): """Stop poller from watching an io_state.""" if self._state & state: self._state = self._state & (~state) if state & self._READ: self.io_loop.remove_reader(self) if state & self._WRITE: self.io_loop.remove_writer(self) def _init_io_state(self): """initialize the ioloop event handler""" pass def _clear_io_state(self): """clear any ioloop event handler called once at close """ self._drop_io_state(self._state) class Context(_zmq.Context): """Context for creating asyncio-compatible Sockets""" _socket_class = Socket class ZMQEventLoop(SelectorEventLoop): """AsyncIO eventloop using zmq_poll""" def __init__(self, selector=None): if selector is None: selector = ZMQSelector() return super(ZMQEventLoop, self).__init__(selector) _loop = None def install(): """Install and return the global ZMQEventLoop registers the loop with asyncio.set_event_loop """ global _loop if _loop is None: _loop = ZMQEventLoop() asyncio.set_event_loop(_loop) return _loop __all__ = [ 'Context', 'Socket', 'Poller', 'ZMQEventLoop', 'install', ] pyzmq-16.0.2/zmq/auth/000077500000000000000000000000001301503633700145315ustar00rootroot00000000000000pyzmq-16.0.2/zmq/auth/__init__.py000066400000000000000000000005101301503633700166360ustar00rootroot00000000000000"""Utilities for ZAP authentication. To run authentication in a background thread, see :mod:`zmq.auth.thread`. For integration with the tornado eventloop, see :mod:`zmq.auth.ioloop`. For integration with the asyncio event loop, see :mod:`zmq.auth.asyncio`. .. versionadded:: 14.1 """ from .base import * from .certs import * pyzmq-16.0.2/zmq/auth/asyncio/000077500000000000000000000000001301503633700161765ustar00rootroot00000000000000pyzmq-16.0.2/zmq/auth/asyncio/__init__.py000066400000000000000000000025411301503633700203110ustar00rootroot00000000000000"""ZAP Authenticator integrated with the asyncio IO loop. .. versionadded:: 15.2 """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
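# A minimal usage sketch (illustration only; assumes an asyncio event loop is
# already running and that '/path/to/public_keys' is replaced with a real
# directory of client public-key certificates):
#
#     from zmq.auth.asyncio import AsyncioAuthenticator
#     auth = AsyncioAuthenticator()          # uses zmq.Context.instance()
#     auth.start()
#     auth.allow('127.0.0.1')
#     auth.configure_curve(domain='*', location='/path/to/public_keys')
#     ...                                    # run CURVE-enabled server sockets
#     auth.stop()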
import asyncio import zmq from zmq.asyncio import Poller from ..base import Authenticator class AsyncioAuthenticator(Authenticator): """ZAP authentication for use in the asyncio IO loop""" def __init__(self, context=None, loop=None): super().__init__(context) self.loop = loop or asyncio.get_event_loop() self.__poller = None self.__task = None @asyncio.coroutine def __handle_zap(self): while True: events = yield from self.__poller.poll() if self.zap_socket in dict(events): msg = yield from self.zap_socket.recv_multipart() self.handle_zap_message(msg) def start(self): """Start ZAP authentication""" super().start() self.__poller = Poller() self.__poller.register(self.zap_socket, zmq.POLLIN) self.__task = asyncio.async(self.__handle_zap()) def stop(self): """Stop ZAP authentication""" if self.__task: self.__task.cancel() if self.__poller: self.__poller.unregister(self.zap_socket) self.__poller = None super().stop() __all__ = ['AsyncioAuthenticator'] pyzmq-16.0.2/zmq/auth/base.py000066400000000000000000000265361301503633700160310ustar00rootroot00000000000000"""Base implementation of 0MQ authentication.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import logging import zmq from zmq.utils import z85 from zmq.utils.strtypes import bytes, unicode, b, u from zmq.error import _check_version from .certs import load_certificates CURVE_ALLOW_ANY = '*' VERSION = b'1.0' class Authenticator(object): """Implementation of ZAP authentication for zmq connections. Note: - libzmq provides four levels of security: default NULL (which the Authenticator does not see), and authenticated NULL, PLAIN, CURVE, and GSSAPI, which the Authenticator can see. - until you add policies, all incoming NULL connections are allowed. (classic ZeroMQ behavior), and all PLAIN and CURVE connections are denied. - GSSAPI requires no configuration. """ def __init__(self, context=None, encoding='utf-8', log=None): _check_version((4,0), "security") self.context = context or zmq.Context.instance() self.encoding = encoding self.allow_any = False self.zap_socket = None self.whitelist = set() self.blacklist = set() # passwords is a dict keyed by domain and contains values # of dicts with username:password pairs. self.passwords = {} # certs is dict keyed by domain and contains values # of dicts keyed by the public keys from the specified location. self.certs = {} self.log = log or logging.getLogger('zmq.auth') def start(self): """Create and bind the ZAP socket""" self.zap_socket = self.context.socket(zmq.REP) self.zap_socket.linger = 1 self.zap_socket.bind("inproc://zeromq.zap.01") self.log.debug("Starting") def stop(self): """Close the ZAP socket""" if self.zap_socket: self.zap_socket.close() self.zap_socket = None def allow(self, *addresses): """Allow (whitelist) IP address(es). Connections from addresses not in the whitelist will be rejected. - For NULL, all clients from this address will be accepted. - For real auth setups, they will be allowed to continue with authentication. whitelist is mutually exclusive with blacklist. """ if self.blacklist: raise ValueError("Only use a whitelist or a blacklist, not both") self.log.debug("Allowing %s", ','.join(addresses)) self.whitelist.update(addresses) def deny(self, *addresses): """Deny (blacklist) IP address(es). Addresses not in the blacklist will be allowed to continue with authentication. Blacklist is mutually exclusive with whitelist. 
""" if self.whitelist: raise ValueError("Only use a whitelist or a blacklist, not both") self.log.debug("Denying %s", ','.join(addresses)) self.blacklist.update(addresses) def configure_plain(self, domain='*', passwords=None): """Configure PLAIN authentication for a given domain. PLAIN authentication uses a plain-text password file. To cover all domains, use "*". You can modify the password file at any time; it is reloaded automatically. """ if passwords: self.passwords[domain] = passwords self.log.debug("Configure plain: %s", domain) def configure_curve(self, domain='*', location=None): """Configure CURVE authentication for a given domain. CURVE authentication uses a directory that holds all public client certificates, i.e. their public keys. To cover all domains, use "*". You can add and remove certificates in that directory at any time. To allow all client keys without checking, specify CURVE_ALLOW_ANY for the location. """ # If location is CURVE_ALLOW_ANY then allow all clients. Otherwise # treat location as a directory that holds the certificates. self.log.debug("Configure curve: %s[%s]", domain, location) if location == CURVE_ALLOW_ANY: self.allow_any = True else: self.allow_any = False try: self.certs[domain] = load_certificates(location) except Exception as e: self.log.error("Failed to load CURVE certs from %s: %s", location, e) def configure_gssapi(self, domain='*', location=None): """Configure GSSAPI authentication Currently this is a no-op because there is nothing to configure with GSSAPI. """ pass def handle_zap_message(self, msg): """Perform ZAP authentication""" if len(msg) < 6: self.log.error("Invalid ZAP message, not enough frames: %r", msg) if len(msg) < 2: self.log.error("Not enough information to reply") else: self._send_zap_reply(msg[1], b"400", b"Not enough frames") return version, request_id, domain, address, identity, mechanism = msg[:6] credentials = msg[6:] domain = u(domain, self.encoding, 'replace') address = u(address, self.encoding, 'replace') if (version != VERSION): self.log.error("Invalid ZAP version: %r", msg) self._send_zap_reply(request_id, b"400", b"Invalid version") return self.log.debug("version: %r, request_id: %r, domain: %r," " address: %r, identity: %r, mechanism: %r", version, request_id, domain, address, identity, mechanism, ) # Is address is explicitly whitelisted or blacklisted? 
allowed = False denied = False reason = b"NO ACCESS" if self.whitelist: if address in self.whitelist: allowed = True self.log.debug("PASSED (whitelist) address=%s", address) else: denied = True reason = b"Address not in whitelist" self.log.debug("DENIED (not in whitelist) address=%s", address) elif self.blacklist: if address in self.blacklist: denied = True reason = b"Address is blacklisted" self.log.debug("DENIED (blacklist) address=%s", address) else: allowed = True self.log.debug("PASSED (not in blacklist) address=%s", address) # Perform authentication mechanism-specific checks if necessary username = u("user") if not denied: if mechanism == b'NULL' and not allowed: # For NULL, we allow if the address wasn't blacklisted self.log.debug("ALLOWED (NULL)") allowed = True elif mechanism == b'PLAIN': # For PLAIN, even a whitelisted address must authenticate if len(credentials) != 2: self.log.error("Invalid PLAIN credentials: %r", credentials) self._send_zap_reply(request_id, b"400", b"Invalid credentials") return username, password = [ u(c, self.encoding, 'replace') for c in credentials ] allowed, reason = self._authenticate_plain(domain, username, password) elif mechanism == b'CURVE': # For CURVE, even a whitelisted address must authenticate if len(credentials) != 1: self.log.error("Invalid CURVE credentials: %r", credentials) self._send_zap_reply(request_id, b"400", b"Invalid credentials") return key = credentials[0] allowed, reason = self._authenticate_curve(domain, key) elif mechanism == b'GSSAPI': if len(credentials) != 1: self.log.error("Invalid GSSAPI credentials: %r", credentials) self._send_zap_reply(request_id, b"400", b"Invalid credentials") return principal = u(credentials[0], 'replace') allowed, reason = self._authenticate_gssapi(domain, principal) if allowed: self._send_zap_reply(request_id, b"200", b"OK", username) else: self._send_zap_reply(request_id, b"400", reason) def _authenticate_plain(self, domain, username, password): """PLAIN ZAP authentication""" allowed = False reason = b"" if self.passwords: # If no domain is not specified then use the default domain if not domain: domain = '*' if domain in self.passwords: if username in self.passwords[domain]: if password == self.passwords[domain][username]: allowed = True else: reason = b"Invalid password" else: reason = b"Invalid username" else: reason = b"Invalid domain" if allowed: self.log.debug("ALLOWED (PLAIN) domain=%s username=%s password=%s", domain, username, password, ) else: self.log.debug("DENIED %s", reason) else: reason = b"No passwords defined" self.log.debug("DENIED (PLAIN) %s", reason) return allowed, reason def _authenticate_curve(self, domain, client_key): """CURVE ZAP authentication""" allowed = False reason = b"" if self.allow_any: allowed = True reason = b"OK" self.log.debug("ALLOWED (CURVE allow any client)") else: # If no explicit domain is specified then use the default domain if not domain: domain = '*' if domain in self.certs: # The certs dict stores keys in z85 format, convert binary key to z85 bytes z85_client_key = z85.encode(client_key) if self.certs[domain].get(z85_client_key): allowed = True reason = b"OK" else: reason = b"Unknown key" status = "ALLOWED" if allowed else "DENIED" self.log.debug("%s (CURVE) domain=%s client_key=%s", status, domain, z85_client_key, ) else: reason = b"Unknown domain" return allowed, reason def _authenticate_gssapi(self, domain, principal): """Nothing to do for GSSAPI, which has already been handled by an external service.""" self.log.debug("ALLOWED (GSSAPI) domain=%s 
principal=%s", domain, principal) return True, b'OK' def _send_zap_reply(self, request_id, status_code, status_text, user_id='user'): """Send a ZAP reply to finish the authentication.""" user_id = user_id if status_code == b'200' else b'' if isinstance(user_id, unicode): user_id = user_id.encode(self.encoding, 'replace') metadata = b'' # not currently used self.log.debug("ZAP reply code=%s text=%s", status_code, status_text) reply = [VERSION, request_id, status_code, status_text, user_id, metadata] self.zap_socket.send_multipart(reply) __all__ = ['Authenticator', 'CURVE_ALLOW_ANY'] pyzmq-16.0.2/zmq/auth/certs.py000066400000000000000000000101231301503633700162200ustar00rootroot00000000000000"""0MQ authentication related functions and classes.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import datetime import glob import io import os import zmq from zmq.utils.strtypes import bytes, unicode, b, u _cert_secret_banner = u("""# **** Generated on {0} by pyzmq **** # ZeroMQ CURVE **Secret** Certificate # DO NOT PROVIDE THIS FILE TO OTHER USERS nor change its permissions. """) _cert_public_banner = u("""# **** Generated on {0} by pyzmq **** # ZeroMQ CURVE Public Certificate # Exchange securely, or use a secure mechanism to verify the contents # of this file after exchange. Store public certificates in your home # directory, in the .curve subdirectory. """) def _write_key_file(key_filename, banner, public_key, secret_key=None, metadata=None, encoding='utf-8'): """Create a certificate file""" if isinstance(public_key, bytes): public_key = public_key.decode(encoding) if isinstance(secret_key, bytes): secret_key = secret_key.decode(encoding) with io.open(key_filename, 'w', encoding='utf8') as f: f.write(banner.format(datetime.datetime.now())) f.write(u('metadata\n')) if metadata: for k, v in metadata.items(): if isinstance(k, bytes): k = k.decode(encoding) if isinstance(v, bytes): v = v.decode(encoding) f.write(u(" {0} = {1}\n").format(k, v)) f.write(u('curve\n')) f.write(u(" public-key = \"{0}\"\n").format(public_key)) if secret_key: f.write(u(" secret-key = \"{0}\"\n").format(secret_key)) def create_certificates(key_dir, name, metadata=None): """Create zmq certificates. Returns the file paths to the public and secret certificate files. """ public_key, secret_key = zmq.curve_keypair() base_filename = os.path.join(key_dir, name) secret_key_file = "{0}.key_secret".format(base_filename) public_key_file = "{0}.key".format(base_filename) now = datetime.datetime.now() _write_key_file(public_key_file, _cert_public_banner.format(now), public_key) _write_key_file(secret_key_file, _cert_secret_banner.format(now), public_key, secret_key=secret_key, metadata=metadata) return public_key_file, secret_key_file def load_certificate(filename): """Load public and secret key from a zmq certificate. Returns (public_key, secret_key) If the certificate file only contains the public key, secret_key will be None. If there is no public key found in the file, ValueError will be raised. 
""" public_key = None secret_key = None if not os.path.exists(filename): raise IOError("Invalid certificate file: {0}".format(filename)) with open(filename, 'rb') as f: for line in f: line = line.strip() if line.startswith(b'#'): continue if line.startswith(b'public-key'): public_key = line.split(b"=", 1)[1].strip(b' \t\'"') if line.startswith(b'secret-key'): secret_key = line.split(b"=", 1)[1].strip(b' \t\'"') if public_key and secret_key: break if public_key is None: raise ValueError("No public key found in %s" % filename) return public_key, secret_key def load_certificates(directory='.'): """Load public keys from all certificates in a directory""" certs = {} if not os.path.isdir(directory): raise IOError("Invalid certificate directory: {0}".format(directory)) # Follow czmq pattern of public keys stored in *.key files. glob_string = os.path.join(directory, "*.key") cert_files = glob.glob(glob_string) for cert_file in cert_files: public_key, _ = load_certificate(cert_file) if public_key: certs[public_key] = True return certs __all__ = ['create_certificates', 'load_certificate', 'load_certificates'] pyzmq-16.0.2/zmq/auth/ioloop.py000066400000000000000000000021001301503633700163750ustar00rootroot00000000000000"""ZAP Authenticator integrated with the tornado IOLoop. .. versionadded:: 14.1 """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from zmq.eventloop import ioloop, zmqstream from .base import Authenticator class IOLoopAuthenticator(Authenticator): """ZAP authentication for use in the tornado IOLoop""" def __init__(self, context=None, encoding='utf-8', log=None, io_loop=None): super(IOLoopAuthenticator, self).__init__(context, encoding, log) self.zap_stream = None self.io_loop = io_loop or ioloop.IOLoop.instance() def start(self): """Start ZAP authentication""" super(IOLoopAuthenticator, self).start() self.zap_stream = zmqstream.ZMQStream(self.zap_socket, self.io_loop) self.zap_stream.on_recv(self.handle_zap_message) def stop(self): """Stop ZAP authentication""" if self.zap_stream: self.zap_stream.close() self.zap_stream = None super(IOLoopAuthenticator, self).stop() __all__ = ['IOLoopAuthenticator'] pyzmq-16.0.2/zmq/auth/thread.py000066400000000000000000000145411301503633700163570ustar00rootroot00000000000000"""ZAP Authenticator in a Python Thread. .. versionadded:: 14.1 """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import time import logging from threading import Thread, Event import zmq from zmq.utils import jsonapi from zmq.utils.strtypes import bytes, unicode, b, u import sys from .base import Authenticator class AuthenticationThread(Thread): """A Thread for running a zmq Authenticator This is run in the background by ThreadedAuthenticator """ def __init__(self, context, endpoint, encoding='utf-8', log=None, authenticator=None): super(AuthenticationThread, self).__init__() self.context = context or zmq.Context.instance() self.encoding = encoding self.log = log = log or logging.getLogger('zmq.auth') self.started = Event() self.authenticator = authenticator or Authenticator(context, encoding=encoding, log=log) # create a socket to communicate back to main thread. 
self.pipe = context.socket(zmq.PAIR) self.pipe.linger = 1 self.pipe.connect(endpoint) def run(self): """Start the Authentication Agent thread task""" self.authenticator.start() self.started.set() zap = self.authenticator.zap_socket poller = zmq.Poller() poller.register(self.pipe, zmq.POLLIN) poller.register(zap, zmq.POLLIN) while True: try: socks = dict(poller.poll()) except zmq.ZMQError: break # interrupted if self.pipe in socks and socks[self.pipe] == zmq.POLLIN: terminate = self._handle_pipe() if terminate: break if zap in socks and socks[zap] == zmq.POLLIN: self._handle_zap() self.pipe.close() self.authenticator.stop() def _handle_zap(self): """ Handle a message from the ZAP socket. """ msg = self.authenticator.zap_socket.recv_multipart() if not msg: return self.authenticator.handle_zap_message(msg) def _handle_pipe(self): """ Handle a message from front-end API. """ terminate = False # Get the whole message off the pipe in one go msg = self.pipe.recv_multipart() if msg is None: terminate = True return terminate command = msg[0] self.log.debug("auth received API command %r", command) if command == b'ALLOW': addresses = [u(m, self.encoding) for m in msg[1:]] try: self.authenticator.allow(*addresses) except Exception as e: self.log.exception("Failed to allow %s", addresses) elif command == b'DENY': addresses = [u(m, self.encoding) for m in msg[1:]] try: self.authenticator.deny(*addresses) except Exception as e: self.log.exception("Failed to deny %s", addresses) elif command == b'PLAIN': domain = u(msg[1], self.encoding) json_passwords = msg[2] self.authenticator.configure_plain(domain, jsonapi.loads(json_passwords)) elif command == b'CURVE': # For now we don't do anything with domains domain = u(msg[1], self.encoding) # If location is CURVE_ALLOW_ANY, allow all clients. Otherwise # treat location as a directory that holds the certificates. location = u(msg[2], self.encoding) self.authenticator.configure_curve(domain, location) elif command == b'TERMINATE': terminate = True else: self.log.error("Invalid auth command from API: %r", command) return terminate def _inherit_docstrings(cls): """inherit docstrings from Authenticator, so we don't duplicate them""" for name, method in cls.__dict__.items(): if name.startswith('_'): continue upstream_method = getattr(Authenticator, name, None) if not method.__doc__: method.__doc__ = upstream_method.__doc__ return cls @_inherit_docstrings class ThreadAuthenticator(object): """Run ZAP authentication in a background thread""" def __init__(self, context=None, encoding='utf-8', log=None): self.context = context or zmq.Context.instance() self.log = log self.encoding = encoding self.pipe = None self.pipe_endpoint = "inproc://{0}.inproc".format(id(self)) self.thread = None def allow(self, *addresses): self.pipe.send_multipart([b'ALLOW'] + [b(a, self.encoding) for a in addresses]) def deny(self, *addresses): self.pipe.send_multipart([b'DENY'] + [b(a, self.encoding) for a in addresses]) def configure_plain(self, domain='*', passwords=None): self.pipe.send_multipart([b'PLAIN', b(domain, self.encoding), jsonapi.dumps(passwords or {})]) def configure_curve(self, domain='*', location=''): domain = b(domain, self.encoding) location = b(location, self.encoding) self.pipe.send_multipart([b'CURVE', domain, location]) def start(self): """Start the authentication thread""" # create a socket to communicate with auth thread. 
self.pipe = self.context.socket(zmq.PAIR) self.pipe.linger = 1 self.pipe.bind(self.pipe_endpoint) self.thread = AuthenticationThread(self.context, self.pipe_endpoint, encoding=self.encoding, log=self.log) self.thread.start() # Event.wait:Changed in version 2.7: Previously, the method always returned None. if sys.version_info < (2,7): self.thread.started.wait(timeout=10) else: if not self.thread.started.wait(timeout=10): raise RuntimeError("Authenticator thread failed to start") def stop(self): """Stop the authentication thread""" if self.pipe: self.pipe.send(b'TERMINATE') if self.is_alive(): self.thread.join() self.thread = None self.pipe.close() self.pipe = None def is_alive(self): """Is the ZAP thread currently running?""" if self.thread and self.thread.is_alive(): return True return False def __del__(self): self.stop() __all__ = ['ThreadAuthenticator'] pyzmq-16.0.2/zmq/backend/000077500000000000000000000000001301503633700151575ustar00rootroot00000000000000pyzmq-16.0.2/zmq/backend/__init__.py000066400000000000000000000024201301503633700172660ustar00rootroot00000000000000"""Import basic exposure of libzmq C API as a backend""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import os import platform import sys from .select import public_api, select_backend if 'PYZMQ_BACKEND' in os.environ: backend = os.environ['PYZMQ_BACKEND'] if backend in ('cython', 'cffi'): backend = 'zmq.backend.%s' % backend _ns = select_backend(backend) else: # default to cython, fallback to cffi # (reverse on PyPy) if platform.python_implementation() == 'PyPy': first, second = ('zmq.backend.cffi', 'zmq.backend.cython') else: first, second = ('zmq.backend.cython', 'zmq.backend.cffi') try: _ns = select_backend(first) except Exception: exc_info = sys.exc_info() exc = exc_info[1] try: _ns = select_backend(second) except ImportError: # prevent 'During handling of the above exception...' on py3 # can't use `raise ... from` on Python 2 if hasattr(exc, '__cause__'): exc.__cause__ = None # raise the *first* error, not the fallback from zmq.utils.sixcerpt import reraise reraise(*exc_info) globals().update(_ns) __all__ = public_api pyzmq-16.0.2/zmq/backend/cffi/000077500000000000000000000000001301503633700160665ustar00rootroot00000000000000pyzmq-16.0.2/zmq/backend/cffi/__init__.py000066400000000000000000000011501301503633700201740ustar00rootroot00000000000000"""CFFI backend (for PyPY)""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
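# The backend is normally selected automatically (Cython by default, CFFI on
# PyPy) by zmq/backend/__init__.py above.  A sketch of forcing this CFFI
# backend via the environment variable that module checks -- it must be set
# before zmq is first imported:
#
#     import os
#     os.environ['PYZMQ_BACKEND'] = 'cffi'   # or 'cython'
#     import zmq
#     print(zmq.zmq_version_info())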
from zmq.backend.cffi import (constants, error, message, context, socket, _poll, devices, utils) __all__ = [] for submod in (constants, error, message, context, socket, _poll, devices, utils): __all__.extend(submod.__all__) from .constants import * from .error import * from .message import * from .context import * from .socket import * from .devices import * from ._poll import * from ._cffi import zmq_version_info, ffi from .utils import * pyzmq-16.0.2/zmq/backend/cffi/_cdefs.h000066400000000000000000000037071301503633700174710ustar00rootroot00000000000000void zmq_version(int *major, int *minor, int *patch); void* zmq_socket(void *context, int type); int zmq_close(void *socket); int zmq_bind(void *socket, const char *endpoint); int zmq_connect(void *socket, const char *endpoint); int zmq_errno(void); const char * zmq_strerror(int errnum); int zmq_device(int device, void *frontend, void *backend); int zmq_unbind(void *socket, const char *endpoint); int zmq_disconnect(void *socket, const char *endpoint); void* zmq_ctx_new(); int zmq_ctx_destroy(void *context); int zmq_ctx_get(void *context, int opt); int zmq_ctx_set(void *context, int opt, int optval); int zmq_proxy(void *frontend, void *backend, void *capture); int zmq_socket_monitor(void *socket, const char *addr, int events); int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key); int zmq_has (const char *capability); typedef struct { ...; } zmq_msg_t; typedef ... zmq_free_fn; int zmq_msg_init(zmq_msg_t *msg); int zmq_msg_init_size(zmq_msg_t *msg, size_t size); int zmq_msg_init_data(zmq_msg_t *msg, void *data, size_t size, zmq_free_fn *ffn, void *hint); size_t zmq_msg_size(zmq_msg_t *msg); void *zmq_msg_data(zmq_msg_t *msg); int zmq_msg_close(zmq_msg_t *msg); int zmq_msg_send(zmq_msg_t *msg, void *socket, int flags); int zmq_msg_recv(zmq_msg_t *msg, void *socket, int flags); int zmq_getsockopt(void *socket, int option_name, void *option_value, size_t *option_len); int zmq_setsockopt(void *socket, int option_name, const void *option_value, size_t option_len); typedef struct { void *socket; int fd; short events; short revents; } zmq_pollitem_t; int zmq_poll(zmq_pollitem_t *items, int nitems, long timeout); // miscellany void * memcpy(void *restrict s1, const void *restrict s2, size_t n); int get_ipc_path_max_len(void); pyzmq-16.0.2/zmq/backend/cffi/_cffi.py000066400000000000000000000074521301503633700175160ustar00rootroot00000000000000# coding: utf-8 """The main CFFI wrapping of libzmq""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import json import os from os.path import dirname, join from cffi import FFI from zmq.utils.constant_names import all_names, no_prefix base_zmq_version = (3,2,2) def load_compiler_config(): """load pyzmq compiler arguments""" import zmq zmq_dir = dirname(zmq.__file__) zmq_parent = dirname(zmq_dir) fname = join(zmq_dir, 'utils', 'compiler.json') if os.path.exists(fname): with open(fname) as f: cfg = json.load(f) else: cfg = {} cfg.setdefault("include_dirs", []) cfg.setdefault("library_dirs", []) cfg.setdefault("runtime_library_dirs", []) cfg.setdefault("libraries", ["zmq"]) # cast to str, because cffi can't handle unicode paths (?!) 
cfg['libraries'] = [str(lib) for lib in cfg['libraries']] for key in ("include_dirs", "library_dirs", "runtime_library_dirs"): # interpret paths relative to parent of zmq (like source tree) abs_paths = [] for p in cfg[key]: if p.startswith('zmq'): p = join(zmq_parent, p) abs_paths.append(str(p)) cfg[key] = abs_paths return cfg def zmq_version_info(): """Get libzmq version as tuple of ints""" major = ffi.new('int*') minor = ffi.new('int*') patch = ffi.new('int*') C.zmq_version(major, minor, patch) return (int(major[0]), int(minor[0]), int(patch[0])) cfg = load_compiler_config() ffi = FFI() def _make_defines(names): _names = [] for name in names: define_line = "#define %s ..." % (name) _names.append(define_line) return "\n".join(_names) c_constant_names = ['PYZMQ_DRAFT_API'] for name in all_names: if no_prefix(name): c_constant_names.append(name) else: c_constant_names.append("ZMQ_" + name) # load ffi definitions here = os.path.dirname(__file__) with open(os.path.join(here, '_cdefs.h')) as f: _cdefs = f.read() with open(os.path.join(here, '_verify.c')) as f: _verify = f.read() ffi.cdef(_cdefs) ffi.cdef(_make_defines(c_constant_names)) try: C = ffi.verify(_verify, modulename='_cffi_ext', libraries=cfg['libraries'], include_dirs=cfg['include_dirs'], library_dirs=cfg['library_dirs'], runtime_library_dirs=cfg['runtime_library_dirs'], ) _version_info = zmq_version_info() except Exception as e: raise ImportError("PyZMQ CFFI backend couldn't find zeromq: %s\n" "Please check that you have zeromq headers and libraries." % e) if _version_info < (3,2,2): raise ImportError("PyZMQ CFFI backend requires zeromq >= 3.2.2," " but found %i.%i.%i" % _version_info ) nsp = new_sizet_pointer = lambda length: ffi.new('size_t*', length) new_uint64_pointer = lambda: (ffi.new('uint64_t*'), nsp(ffi.sizeof('uint64_t'))) new_int64_pointer = lambda: (ffi.new('int64_t*'), nsp(ffi.sizeof('int64_t'))) new_int_pointer = lambda: (ffi.new('int*'), nsp(ffi.sizeof('int'))) new_binary_data = lambda length: (ffi.new('char[%d]' % (length)), nsp(ffi.sizeof('char') * length)) value_uint64_pointer = lambda val : (ffi.new('uint64_t*', val), ffi.sizeof('uint64_t')) value_int64_pointer = lambda val: (ffi.new('int64_t*', val), ffi.sizeof('int64_t')) value_int_pointer = lambda val: (ffi.new('int*', val), ffi.sizeof('int')) value_binary_data = lambda val, length: (ffi.new('char[%d]' % (length + 1), val), ffi.sizeof('char') * length) IPC_PATH_MAX_LEN = C.get_ipc_path_max_len() pyzmq-16.0.2/zmq/backend/cffi/_poll.py000066400000000000000000000036561301503633700175570ustar00rootroot00000000000000# coding: utf-8 """zmq poll function""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
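# zmq_poll below takes a list of (socket-or-fd, flags) pairs plus a timeout in
# milliseconds and returns (socket, revents) pairs.  A sketch of the
# equivalent public-API usage (illustration only; the endpoint is an arbitrary
# example):
#
#     import zmq
#     ctx = zmq.Context()
#     sock = ctx.socket(zmq.PULL)
#     sock.bind("tcp://127.0.0.1:5555")
#     poller = zmq.Poller()
#     poller.register(sock, zmq.POLLIN)
#     events = dict(poller.poll(timeout=100))   # {socket: event_mask} after <= 100 ms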
from ._cffi import C, ffi from .utils import _retry_sys_call def _make_zmq_pollitem(socket, flags): zmq_socket = socket._zmq_socket zmq_pollitem = ffi.new('zmq_pollitem_t*') zmq_pollitem.socket = zmq_socket zmq_pollitem.fd = 0 zmq_pollitem.events = flags zmq_pollitem.revents = 0 return zmq_pollitem[0] def _make_zmq_pollitem_fromfd(socket_fd, flags): zmq_pollitem = ffi.new('zmq_pollitem_t*') zmq_pollitem.socket = ffi.NULL zmq_pollitem.fd = socket_fd zmq_pollitem.events = flags zmq_pollitem.revents = 0 return zmq_pollitem[0] def zmq_poll(sockets, timeout): cffi_pollitem_list = [] low_level_to_socket_obj = {} from zmq import Socket for item in sockets: if isinstance(item[0], Socket): low_level_to_socket_obj[item[0]._zmq_socket] = item cffi_pollitem_list.append(_make_zmq_pollitem(item[0], item[1])) else: if not isinstance(item[0], int): # not an FD, get it from fileno() item = (item[0].fileno(), item[1]) low_level_to_socket_obj[item[0]] = item cffi_pollitem_list.append(_make_zmq_pollitem_fromfd(item[0], item[1])) items = ffi.new('zmq_pollitem_t[]', cffi_pollitem_list) list_length = ffi.cast('int', len(cffi_pollitem_list)) c_timeout = ffi.cast('long', timeout) _retry_sys_call(C.zmq_poll, items, list_length, c_timeout) result = [] for index in range(len(items)): if items[index].revents > 0: if not items[index].socket == ffi.NULL: result.append((low_level_to_socket_obj[items[index].socket][0], items[index].revents)) else: result.append((items[index].fd, items[index].revents)) return result __all__ = ['zmq_poll'] pyzmq-16.0.2/zmq/backend/cffi/_verify.c000066400000000000000000000003201301503633700176700ustar00rootroot00000000000000#include #include #include #include #include "zmq_compat.h" int get_ipc_path_max_len(void) { struct sockaddr_un *dummy; return sizeof(dummy->sun_path) - 1; } pyzmq-16.0.2/zmq/backend/cffi/constants.py000066400000000000000000000005451301503633700204600ustar00rootroot00000000000000# coding: utf-8 """zmq constants""" from ._cffi import C, c_constant_names from zmq.utils.constant_names import all_names g = globals() for cname in c_constant_names: if cname.startswith("ZMQ_"): name = cname[4:] else: name = cname g[name] = getattr(C, cname) DRAFT_API = C.PYZMQ_DRAFT_API __all__ = ['DRAFT_API'] + all_names pyzmq-16.0.2/zmq/backend/cffi/context.py000066400000000000000000000050241301503633700201250ustar00rootroot00000000000000# coding: utf-8 """zmq Context class""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
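#
# --- Illustrative sketch (editorial addition, not part of upstream pyzmq) ---
# A minimal example of exercising this CFFI Context wrapper, including the
# "shadow" path that wraps an existing libzmq context address.  The option
# value (MAX_SOCKETS = 512) is an arbitrary choice for illustration.
#
#     import zmq
#     from zmq.backend.cffi.context import Context
#
#     ctx = Context(io_threads=2)              # owns a fresh zmq_ctx_new() handle
#     ctx.set(zmq.MAX_SOCKETS, 512)            # forwarded to zmq_ctx_set()
#     assert ctx.get(zmq.MAX_SOCKETS) == 512   # read back via zmq_ctx_get()
#
#     # A shadow Context wraps the same underlying C context instead of
#     # creating a new one; `underlying` exposes the handle's address as an int.
#     shadow = Context(shadow=ctx.underlying)
#     assert shadow.underlying == ctx.underlying
#
#     ctx.term()                               # zmq_ctx_destroy() on the handle
#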
import weakref from ._cffi import C, ffi from .constants import EINVAL, IO_THREADS, LINGER from zmq.error import ZMQError, InterruptedSystemCall, _check_rc class Context(object): _zmq_ctx = None _iothreads = None _closed = None _sockets = None _shadow = False def __init__(self, io_threads=1, shadow=None): if shadow: self._zmq_ctx = ffi.cast("void *", shadow) self._shadow = True else: self._shadow = False if not io_threads >= 0: raise ZMQError(EINVAL) self._zmq_ctx = C.zmq_ctx_new() if self._zmq_ctx == ffi.NULL: raise ZMQError(C.zmq_errno()) if not shadow: C.zmq_ctx_set(self._zmq_ctx, IO_THREADS, io_threads) self._closed = False self._sockets = set() @property def underlying(self): """The address of the underlying libzmq context""" return int(ffi.cast('size_t', self._zmq_ctx)) @property def closed(self): return self._closed def _add_socket(self, socket): ref = weakref.ref(socket) self._sockets.add(ref) return ref def _rm_socket(self, ref): if ref in self._sockets: self._sockets.remove(ref) def set(self, option, value): """set a context option see zmq_ctx_set """ rc = C.zmq_ctx_set(self._zmq_ctx, option, value) _check_rc(rc) def get(self, option): """get context option see zmq_ctx_get """ rc = C.zmq_ctx_get(self._zmq_ctx, option) _check_rc(rc) return rc def term(self): if self.closed: return rc = C.zmq_ctx_destroy(self._zmq_ctx) try: _check_rc(rc) except InterruptedSystemCall: # ignore interrupted term # see PEP 475 notes about close & EINTR for why pass self._zmq_ctx = None self._closed = True def destroy(self, linger=None): if self.closed: return sockets = self._sockets self._sockets = set() for s in sockets: s = s() if s and not s.closed: if linger is not None: s.setsockopt(LINGER, linger) s.close() self.term() __all__ = ['Context'] pyzmq-16.0.2/zmq/backend/cffi/devices.py000066400000000000000000000011041301503633700200560ustar00rootroot00000000000000# coding: utf-8 """zmq device functions""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from ._cffi import C, ffi from .socket import Socket from .utils import _retry_sys_call def device(device_type, frontend, backend): return proxy(frontend, backend) def proxy(frontend, backend, capture=None): if isinstance(capture, Socket): capture = capture._zmq_socket else: capture = ffi.NULL _retry_sys_call(C.zmq_proxy, frontend._zmq_socket, backend._zmq_socket, capture) __all__ = ['device', 'proxy'] pyzmq-16.0.2/zmq/backend/cffi/error.py000066400000000000000000000005411301503633700175710ustar00rootroot00000000000000"""zmq error functions""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from ._cffi import C, ffi def strerror(errno): s = ffi.string(C.zmq_strerror(errno)) if not isinstance(s, str): # py3 s = s.decode() return s zmq_errno = C.zmq_errno __all__ = ['strerror', 'zmq_errno'] pyzmq-16.0.2/zmq/backend/cffi/message.py000066400000000000000000000025771301503633700200770ustar00rootroot00000000000000"""Dummy Frame object""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
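#
# --- Illustrative sketch (editorial addition, not part of upstream pyzmq) ---
# A minimal example of how this pure-Python Frame behaves; the payloads are
# arbitrary values chosen for illustration.
#
#     from zmq.backend.cffi.message import Frame
#
#     f = Frame(b"hello")
#     assert f.bytes == b"hello"        # raw payload
#     assert len(f) == 5                # length in bytes
#     assert f == b"hello"              # __eq__ compares payload bytes
#     mv = f.buffer                     # memoryview over the payload
#
#     tracked = Frame(b"payload", track=True)   # attaches a zmq.MessageTracker
#     # Frame(u"text") raises TypeError: only bytes/buffer-like data is allowed.
#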
from ._cffi import ffi, C import zmq from zmq.utils.strtypes import unicode try: view = memoryview except NameError: view = buffer _content = lambda x: x.tobytes() if type(x) == memoryview else x class Frame(object): _data = None tracker = None closed = False more = False buffer = None def __init__(self, data, track=False): try: view(data) except TypeError: raise self._data = data if isinstance(data, unicode): raise TypeError("Unicode objects not allowed. Only: str/bytes, " + "buffer interfaces.") self.more = False self.tracker = None self.closed = False if track: self.tracker = zmq.MessageTracker() self.buffer = view(self.bytes) @property def bytes(self): data = _content(self._data) return data def __len__(self): return len(self.bytes) def __eq__(self, other): return self.bytes == _content(other) def __str__(self): if str is unicode: return self.bytes.decode() else: return self.bytes @property def done(self): return True Message = Frame __all__ = ['Frame', 'Message'] pyzmq-16.0.2/zmq/backend/cffi/socket.py000066400000000000000000000175001301503633700177330ustar00rootroot00000000000000# coding: utf-8 """zmq Socket class""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import errno as errno_mod from ._cffi import (C, ffi, new_uint64_pointer, new_int64_pointer, new_int_pointer, new_binary_data, value_uint64_pointer, value_int64_pointer, value_int_pointer, value_binary_data, IPC_PATH_MAX_LEN) from .message import Frame from .constants import RCVMORE from .utils import _retry_sys_call import zmq from zmq.error import ZMQError, _check_rc, _check_version from zmq.utils.strtypes import unicode def new_pointer_from_opt(option, length=0): from zmq.sugar.constants import ( int64_sockopts, bytes_sockopts, ) if option in int64_sockopts: return new_int64_pointer() elif option in bytes_sockopts: return new_binary_data(length) else: # default return new_int_pointer() def value_from_opt_pointer(option, opt_pointer, length=0): from zmq.sugar.constants import ( int64_sockopts, bytes_sockopts, ) if option in int64_sockopts: return int(opt_pointer[0]) elif option in bytes_sockopts: return ffi.buffer(opt_pointer, length)[:] else: return int(opt_pointer[0]) def initialize_opt_pointer(option, value, length=0): from zmq.sugar.constants import ( int64_sockopts, bytes_sockopts, ) if option in int64_sockopts: return value_int64_pointer(value) elif option in bytes_sockopts: return value_binary_data(value, length) else: return value_int_pointer(value) class Socket(object): context = None socket_type = None _zmq_socket = None _closed = None _ref = None _shadow = False def __init__(self, context=None, socket_type=None, shadow=None): self.context = context if shadow is not None: self._zmq_socket = ffi.cast("void *", shadow) self._shadow = True else: self._shadow = False self._zmq_socket = C.zmq_socket(context._zmq_ctx, socket_type) if self._zmq_socket == ffi.NULL: raise ZMQError() self._closed = False if context: self._ref = context._add_socket(self) @property def underlying(self): """The address of the underlying libzmq socket""" return int(ffi.cast('size_t', self._zmq_socket)) @property def closed(self): return self._closed def close(self, linger=None): rc = 0 if not self._closed and hasattr(self, '_zmq_socket'): if self._zmq_socket is not None: if linger is not None: self.set(zmq.LINGER, linger) rc = C.zmq_close(self._zmq_socket) self._closed = True if self.context: self.context._rm_socket(self._ref) return rc def bind(self, address): if isinstance(address, unicode): 
address = address.encode('utf8') rc = C.zmq_bind(self._zmq_socket, address) if rc < 0: if IPC_PATH_MAX_LEN and C.zmq_errno() == errno_mod.ENAMETOOLONG: # py3compat: address is bytes, but msg wants str if str is unicode: address = address.decode('utf-8', 'replace') path = address.split('://', 1)[-1] msg = ('ipc path "{0}" is longer than {1} ' 'characters (sizeof(sockaddr_un.sun_path)).' .format(path, IPC_PATH_MAX_LEN)) raise ZMQError(C.zmq_errno(), msg=msg) else: _check_rc(rc) def unbind(self, address): _check_version((3,2), "unbind") if isinstance(address, unicode): address = address.encode('utf8') rc = C.zmq_unbind(self._zmq_socket, address) _check_rc(rc) def connect(self, address): if isinstance(address, unicode): address = address.encode('utf8') rc = C.zmq_connect(self._zmq_socket, address) _check_rc(rc) def disconnect(self, address): _check_version((3,2), "disconnect") if isinstance(address, unicode): address = address.encode('utf8') rc = C.zmq_disconnect(self._zmq_socket, address) _check_rc(rc) def set(self, option, value): length = None if isinstance(value, unicode): raise TypeError("unicode not allowed, use bytes") if isinstance(value, bytes): if option not in zmq.constants.bytes_sockopts: raise TypeError("not a bytes sockopt: %s" % option) length = len(value) c_data = initialize_opt_pointer(option, value, length) c_value_pointer = c_data[0] c_sizet = c_data[1] _retry_sys_call(C.zmq_setsockopt, self._zmq_socket, option, ffi.cast('void*', c_value_pointer), c_sizet) def get(self, option): c_data = new_pointer_from_opt(option, length=255) c_value_pointer = c_data[0] c_sizet_pointer = c_data[1] _retry_sys_call(C.zmq_getsockopt, self._zmq_socket, option, c_value_pointer, c_sizet_pointer) sz = c_sizet_pointer[0] v = value_from_opt_pointer(option, c_value_pointer, sz) if option != zmq.IDENTITY and option in zmq.constants.bytes_sockopts and v.endswith(b'\0'): v = v[:-1] return v def send(self, message, flags=0, copy=False, track=False): if isinstance(message, unicode): raise TypeError("Message must be in bytes, not an unicode Object") if isinstance(message, Frame): message = message.bytes zmq_msg = ffi.new('zmq_msg_t*') c_message = ffi.new('char[]', message) rc = C.zmq_msg_init_size(zmq_msg, len(message)) _check_rc(rc) C.memcpy(C.zmq_msg_data(zmq_msg), c_message, len(message)) _retry_sys_call(C.zmq_msg_send, zmq_msg, self._zmq_socket, flags) rc2 = C.zmq_msg_close(zmq_msg) _check_rc(rc2) if track: return zmq.MessageTracker() def recv(self, flags=0, copy=True, track=False): zmq_msg = ffi.new('zmq_msg_t*') C.zmq_msg_init(zmq_msg) try: _retry_sys_call(C.zmq_msg_recv, zmq_msg, self._zmq_socket, flags) except Exception: C.zmq_msg_close(zmq_msg) raise _buffer = ffi.buffer(C.zmq_msg_data(zmq_msg), C.zmq_msg_size(zmq_msg)) value = _buffer[:] rc = C.zmq_msg_close(zmq_msg) _check_rc(rc) frame = Frame(value, track=track) frame.more = self.getsockopt(RCVMORE) if copy: return frame.bytes else: return frame def monitor(self, addr, events=-1): """s.monitor(addr, flags) Start publishing socket events on inproc. See libzmq docs for zmq_monitor for details. Note: requires libzmq >= 3.2 Parameters ---------- addr : str The inproc url used for monitoring. Passing None as the addr will cause an existing socket monitor to be deregistered. events : int [default: zmq.EVENT_ALL] The zmq event bitmask for which events will be sent to the monitor. 
""" _check_version((3,2), "monitor") if events < 0: events = zmq.EVENT_ALL if addr is None: addr = ffi.NULL if isinstance(addr, unicode): addr = addr.encode('utf8') rc = C.zmq_socket_monitor(self._zmq_socket, addr, events) __all__ = ['Socket', 'IPC_PATH_MAX_LEN'] pyzmq-16.0.2/zmq/backend/cffi/utils.py000066400000000000000000000027171301503633700176070ustar00rootroot00000000000000# coding: utf-8 """miscellaneous zmq_utils wrapping""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from errno import EINTR from ._cffi import ffi, C from zmq.error import ZMQError, InterruptedSystemCall, _check_rc, _check_version from zmq.utils.strtypes import unicode def has(capability): """Check for zmq capability by name (e.g. 'ipc', 'curve') .. versionadded:: libzmq-4.1 .. versionadded:: 14.1 """ _check_version((4,1), 'zmq.has') if isinstance(capability, unicode): capability = capability.encode('utf8') return bool(C.zmq_has(capability)) def curve_keypair(): """generate a Z85 keypair for use with zmq.CURVE security Requires libzmq (≥ 4.0) to have been built with CURVE support. Returns ------- (public, secret) : two bytestrings The public and private keypair as 40 byte z85-encoded bytestrings. """ _check_version((3,2), "monitor") public = ffi.new('char[64]') private = ffi.new('char[64]') rc = C.zmq_curve_keypair(public, private) _check_rc(rc) return ffi.buffer(public)[:40], ffi.buffer(private)[:40] def _retry_sys_call(f, *args, **kwargs): """make a call, retrying if interrupted with EINTR""" while True: rc = f(*args) try: _check_rc(rc) except InterruptedSystemCall: continue else: break __all__ = ['has', 'curve_keypair'] pyzmq-16.0.2/zmq/backend/cython/000077500000000000000000000000001301503633700164635ustar00rootroot00000000000000pyzmq-16.0.2/zmq/backend/cython/__init__.py000066400000000000000000000011631301503633700205750ustar00rootroot00000000000000"""Python bindings for core 0MQ objects.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Lesser GNU Public License (LGPL). from . import (constants, error, message, context, socket, utils, _poll, _version, _device ) __all__ = [] for submod in (constants, error, message, context, socket, utils, _poll, _version, _device): __all__.extend(submod.__all__) from .constants import * from .error import * from .message import * from .context import * from .socket import * from ._poll import * from .utils import * from ._device import * from ._version import * pyzmq-16.0.2/zmq/backend/cython/_device.pyx000066400000000000000000000060771301503633700206350ustar00rootroot00000000000000"""Python binding for 0MQ device function.""" # # Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . 
# #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from .libzmq cimport zmq_device, zmq_proxy, ZMQ_VERSION_MAJOR from .socket cimport Socket as cSocket from .checkrc cimport _check_rc from zmq.error import InterruptedSystemCall #----------------------------------------------------------------------------- # Basic device API #----------------------------------------------------------------------------- def device(int device_type, cSocket frontend, cSocket backend=None): """device(device_type, frontend, backend) Start a zeromq device. .. deprecated:: libzmq-3.2 Use zmq.proxy Parameters ---------- device_type : (QUEUE, FORWARDER, STREAMER) The type of device to start. frontend : Socket The Socket instance for the incoming traffic. backend : Socket The Socket instance for the outbound traffic. """ if ZMQ_VERSION_MAJOR >= 3: return proxy(frontend, backend) cdef int rc = 0 while True: with nogil: rc = zmq_device(device_type, frontend.handle, backend.handle) try: _check_rc(rc) except InterruptedSystemCall: continue else: break return rc def proxy(cSocket frontend, cSocket backend, cSocket capture=None): """proxy(frontend, backend, capture) Start a zeromq proxy (replacement for device). .. versionadded:: libzmq-3.2 .. versionadded:: 13.0 Parameters ---------- frontend : Socket The Socket instance for the incoming traffic. backend : Socket The Socket instance for the outbound traffic. capture : Socket (optional) The Socket instance for capturing traffic. """ cdef int rc = 0 cdef void* capture_handle if isinstance(capture, cSocket): capture_handle = capture.handle else: capture_handle = NULL while True: with nogil: rc = zmq_proxy(frontend.handle, backend.handle, capture_handle) try: _check_rc(rc) except InterruptedSystemCall: continue else: break return rc __all__ = ['device', 'proxy'] pyzmq-16.0.2/zmq/backend/cython/_poll.pyx000066400000000000000000000112631301503633700203350ustar00rootroot00000000000000"""0MQ polling related functions and classes.""" # # Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . 
# #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from libc.stdlib cimport free, malloc from .libzmq cimport zmq_pollitem_t, ZMQ_VERSION_MAJOR from .libzmq cimport zmq_poll as zmq_poll_c from socket cimport Socket import sys from .checkrc cimport _check_rc from zmq.error import InterruptedSystemCall #----------------------------------------------------------------------------- # Polling related methods #----------------------------------------------------------------------------- # version-independent typecheck for int/long if sys.version_info[0] >= 3: int_t = int else: int_t = (int,long) def zmq_poll(sockets, long timeout=-1): """zmq_poll(sockets, timeout=-1) Poll a set of 0MQ sockets, native file descs. or sockets. Parameters ---------- sockets : list of tuples of (socket, flags) Each element of this list is a two-tuple containing a socket and a flags. The socket may be a 0MQ socket or any object with a ``fileno()`` method. The flags can be zmq.POLLIN (for detecting for incoming messages), zmq.POLLOUT (for detecting that send is OK) or zmq.POLLIN|zmq.POLLOUT for detecting both. timeout : int The number of milliseconds to poll for. Negative means no timeout. """ cdef int rc, i cdef zmq_pollitem_t *pollitems = NULL cdef int nsockets = len(sockets) cdef Socket current_socket if nsockets == 0: return [] pollitems = malloc(nsockets*sizeof(zmq_pollitem_t)) if pollitems == NULL: raise MemoryError("Could not allocate poll items") if ZMQ_VERSION_MAJOR < 3: # timeout is us in 2.x, ms in 3.x # expected input is ms (matches 3.x) timeout = 1000*timeout for i in range(nsockets): s, events = sockets[i] if isinstance(s, Socket): pollitems[i].socket = (s).handle pollitems[i].fd = 0 pollitems[i].events = events pollitems[i].revents = 0 elif isinstance(s, int_t): pollitems[i].socket = NULL pollitems[i].fd = s pollitems[i].events = events pollitems[i].revents = 0 elif hasattr(s, 'fileno'): try: fileno = int(s.fileno()) except: free(pollitems) raise ValueError('fileno() must return a valid integer fd') else: pollitems[i].socket = NULL pollitems[i].fd = fileno pollitems[i].events = events pollitems[i].revents = 0 else: free(pollitems) raise TypeError( "Socket must be a 0MQ socket, an integer fd or have " "a fileno() method: %r" % s ) try: while True: with nogil: rc = zmq_poll_c(pollitems, nsockets, timeout) try: _check_rc(rc) except InterruptedSystemCall: continue else: break except Exception: free(pollitems) raise results = [] for i in range(nsockets): revents = pollitems[i].revents # for compatibility with select.poll: # - only return sockets with non-zero status # - return the fd for plain sockets if revents > 0: if pollitems[i].socket != NULL: s = sockets[i][0] else: s = pollitems[i].fd results.append((s, revents)) free(pollitems) return results #----------------------------------------------------------------------------- # Symbols to export #----------------------------------------------------------------------------- __all__ = [ 'zmq_poll' ] pyzmq-16.0.2/zmq/backend/cython/_version.pyx000066400000000000000000000026521301503633700210560ustar00rootroot00000000000000"""PyZMQ and 0MQ version functions.""" # # Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley # # This file is part of pyzmq. 
# # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . # #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from .libzmq cimport _zmq_version #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- def zmq_version_info(): """zmq_version_info() Return the version of ZeroMQ itself as a 3-tuple of ints. """ cdef int major, minor, patch _zmq_version(&major, &minor, &patch) return (major, minor, patch) __all__ = ['zmq_version_info'] pyzmq-16.0.2/zmq/backend/cython/checkrc.pxd000066400000000000000000000015761301503633700206130ustar00rootroot00000000000000from libc.errno cimport EINTR, EAGAIN from cpython cimport PyErr_CheckSignals from zmq.backend.cython.libzmq cimport zmq_errno, ZMQ_ETERM cdef inline int _check_rc(int rc) except -1: """internal utility for checking zmq return condition and raising the appropriate Exception class """ cdef int errno = zmq_errno() PyErr_CheckSignals() if rc == -1: # if rc < -1, it's a bug in libzmq. Should we warn? if errno == EINTR: from zmq.error import InterruptedSystemCall raise InterruptedSystemCall(errno) elif errno == EAGAIN: from zmq.error import Again raise Again(errno) elif errno == ZMQ_ETERM: from zmq.error import ContextTerminated raise ContextTerminated(errno) else: from zmq.error import ZMQError raise ZMQError(errno) return 0 pyzmq-16.0.2/zmq/backend/cython/constant_enums.pxi000066400000000000000000000115261301503633700222520ustar00rootroot00000000000000cdef extern from "zmq.h" nogil: enum: PYZMQ_DRAFT_API enum: ZMQ_VERSION enum: ZMQ_VERSION_MAJOR enum: ZMQ_VERSION_MINOR enum: ZMQ_VERSION_PATCH enum: ZMQ_NOBLOCK enum: ZMQ_DONTWAIT enum: ZMQ_POLLIN enum: ZMQ_POLLOUT enum: ZMQ_POLLERR enum: ZMQ_POLLPRI enum: ZMQ_SNDMORE enum: ZMQ_STREAMER enum: ZMQ_FORWARDER enum: ZMQ_QUEUE enum: ZMQ_IO_THREADS_DFLT enum: ZMQ_MAX_SOCKETS_DFLT enum: ZMQ_POLLITEMS_DFLT enum: ZMQ_THREAD_PRIORITY_DFLT enum: ZMQ_THREAD_SCHED_POLICY_DFLT enum: ZMQ_PAIR enum: ZMQ_PUB enum: ZMQ_SUB enum: ZMQ_REQ enum: ZMQ_REP enum: ZMQ_DEALER enum: ZMQ_ROUTER enum: ZMQ_XREQ enum: ZMQ_XREP enum: ZMQ_PULL enum: ZMQ_PUSH enum: ZMQ_XPUB enum: ZMQ_XSUB enum: ZMQ_UPSTREAM enum: ZMQ_DOWNSTREAM enum: ZMQ_STREAM enum: ZMQ_SERVER enum: ZMQ_CLIENT enum: ZMQ_RADIO enum: ZMQ_DISH enum: ZMQ_GATHER enum: ZMQ_SCATTER enum: ZMQ_DGRAM enum: ZMQ_EVENT_CONNECTED enum: ZMQ_EVENT_CONNECT_DELAYED enum: ZMQ_EVENT_CONNECT_RETRIED enum: ZMQ_EVENT_LISTENING enum: ZMQ_EVENT_BIND_FAILED enum: ZMQ_EVENT_ACCEPTED enum: ZMQ_EVENT_ACCEPT_FAILED enum: ZMQ_EVENT_CLOSED enum: ZMQ_EVENT_CLOSE_FAILED enum: ZMQ_EVENT_DISCONNECTED enum: ZMQ_EVENT_ALL enum: ZMQ_EVENT_MONITOR_STOPPED enum: ZMQ_NULL enum: ZMQ_PLAIN enum: ZMQ_CURVE enum: ZMQ_GSSAPI enum: ZMQ_EAGAIN "EAGAIN" enum: ZMQ_EINVAL "EINVAL" enum: ZMQ_EFAULT "EFAULT" enum: ZMQ_ENOMEM "ENOMEM" enum: ZMQ_ENODEV 
"ENODEV" enum: ZMQ_EMSGSIZE "EMSGSIZE" enum: ZMQ_EAFNOSUPPORT "EAFNOSUPPORT" enum: ZMQ_ENETUNREACH "ENETUNREACH" enum: ZMQ_ECONNABORTED "ECONNABORTED" enum: ZMQ_ECONNRESET "ECONNRESET" enum: ZMQ_ENOTCONN "ENOTCONN" enum: ZMQ_ETIMEDOUT "ETIMEDOUT" enum: ZMQ_EHOSTUNREACH "EHOSTUNREACH" enum: ZMQ_ENETRESET "ENETRESET" enum: ZMQ_HAUSNUMERO enum: ZMQ_ENOTSUP "ENOTSUP" enum: ZMQ_EPROTONOSUPPORT "EPROTONOSUPPORT" enum: ZMQ_ENOBUFS "ENOBUFS" enum: ZMQ_ENETDOWN "ENETDOWN" enum: ZMQ_EADDRINUSE "EADDRINUSE" enum: ZMQ_EADDRNOTAVAIL "EADDRNOTAVAIL" enum: ZMQ_ECONNREFUSED "ECONNREFUSED" enum: ZMQ_EINPROGRESS "EINPROGRESS" enum: ZMQ_ENOTSOCK "ENOTSOCK" enum: ZMQ_EFSM "EFSM" enum: ZMQ_ENOCOMPATPROTO "ENOCOMPATPROTO" enum: ZMQ_ETERM "ETERM" enum: ZMQ_EMTHREAD "EMTHREAD" enum: ZMQ_IO_THREADS enum: ZMQ_MAX_SOCKETS enum: ZMQ_SOCKET_LIMIT enum: ZMQ_THREAD_PRIORITY enum: ZMQ_THREAD_SCHED_POLICY enum: ZMQ_BLOCKY enum: ZMQ_IDENTITY enum: ZMQ_SUBSCRIBE enum: ZMQ_UNSUBSCRIBE enum: ZMQ_LAST_ENDPOINT enum: ZMQ_TCP_ACCEPT_FILTER enum: ZMQ_PLAIN_USERNAME enum: ZMQ_PLAIN_PASSWORD enum: ZMQ_CURVE_PUBLICKEY enum: ZMQ_CURVE_SECRETKEY enum: ZMQ_CURVE_SERVERKEY enum: ZMQ_ZAP_DOMAIN enum: ZMQ_CONNECT_RID enum: ZMQ_GSSAPI_PRINCIPAL enum: ZMQ_GSSAPI_SERVICE_PRINCIPAL enum: ZMQ_SOCKS_PROXY enum: ZMQ_XPUB_WELCOME_MSG enum: ZMQ_FD enum: ZMQ_RECONNECT_IVL_MAX enum: ZMQ_SNDTIMEO enum: ZMQ_RCVTIMEO enum: ZMQ_SNDHWM enum: ZMQ_RCVHWM enum: ZMQ_MULTICAST_HOPS enum: ZMQ_IPV4ONLY enum: ZMQ_ROUTER_BEHAVIOR enum: ZMQ_TCP_KEEPALIVE enum: ZMQ_TCP_KEEPALIVE_CNT enum: ZMQ_TCP_KEEPALIVE_IDLE enum: ZMQ_TCP_KEEPALIVE_INTVL enum: ZMQ_DELAY_ATTACH_ON_CONNECT enum: ZMQ_XPUB_VERBOSE enum: ZMQ_EVENTS enum: ZMQ_TYPE enum: ZMQ_LINGER enum: ZMQ_RECONNECT_IVL enum: ZMQ_BACKLOG enum: ZMQ_ROUTER_MANDATORY enum: ZMQ_FAIL_UNROUTABLE enum: ZMQ_ROUTER_RAW enum: ZMQ_IMMEDIATE enum: ZMQ_IPV6 enum: ZMQ_MECHANISM enum: ZMQ_PLAIN_SERVER enum: ZMQ_CURVE_SERVER enum: ZMQ_PROBE_ROUTER enum: ZMQ_REQ_RELAXED enum: ZMQ_REQ_CORRELATE enum: ZMQ_CONFLATE enum: ZMQ_ROUTER_HANDOVER enum: ZMQ_TOS enum: ZMQ_IPC_FILTER_PID enum: ZMQ_IPC_FILTER_UID enum: ZMQ_IPC_FILTER_GID enum: ZMQ_GSSAPI_SERVER enum: ZMQ_GSSAPI_PLAINTEXT enum: ZMQ_HANDSHAKE_IVL enum: ZMQ_XPUB_NODROP enum: ZMQ_XPUB_MANUAL enum: ZMQ_STREAM_NOTIFY enum: ZMQ_INVERT_MATCHING enum: ZMQ_XPUB_VERBOSER enum: ZMQ_HEARTBEAT_IVL enum: ZMQ_HEARTBEAT_TTL enum: ZMQ_HEARTBEAT_TIMEOUT enum: ZMQ_CONNECT_TIMEOUT enum: ZMQ_TCP_MAXRT enum: ZMQ_THREAD_SAFE enum: ZMQ_MULTICAST_MAXTPDU enum: ZMQ_VMCI_CONNECT_TIMEOUT enum: ZMQ_USE_FD enum: ZMQ_AFFINITY enum: ZMQ_MAXMSGSIZE enum: ZMQ_HWM enum: ZMQ_SWAP enum: ZMQ_MCAST_LOOP enum: ZMQ_RECOVERY_IVL_MSEC enum: ZMQ_VMCI_BUFFER_SIZE enum: ZMQ_VMCI_BUFFER_MIN_SIZE enum: ZMQ_VMCI_BUFFER_MAX_SIZE enum: ZMQ_RATE enum: ZMQ_RECOVERY_IVL enum: ZMQ_SNDBUF enum: ZMQ_RCVBUF enum: ZMQ_RCVMORE enum: ZMQ_MORE enum: ZMQ_SRCFD enum: ZMQ_SHARED pyzmq-16.0.2/zmq/backend/cython/constants.pxi000066400000000000000000000206241301503633700212250ustar00rootroot00000000000000#----------------------------------------------------------------------------- # Python module level constants #----------------------------------------------------------------------------- DRAFT_API = PYZMQ_DRAFT_API VERSION = ZMQ_VERSION VERSION_MAJOR = ZMQ_VERSION_MAJOR VERSION_MINOR = ZMQ_VERSION_MINOR VERSION_PATCH = ZMQ_VERSION_PATCH NOBLOCK = ZMQ_NOBLOCK DONTWAIT = ZMQ_DONTWAIT POLLIN = ZMQ_POLLIN POLLOUT = ZMQ_POLLOUT POLLERR = ZMQ_POLLERR POLLPRI = ZMQ_POLLPRI SNDMORE = ZMQ_SNDMORE STREAMER = ZMQ_STREAMER FORWARDER = ZMQ_FORWARDER QUEUE = 
ZMQ_QUEUE IO_THREADS_DFLT = ZMQ_IO_THREADS_DFLT MAX_SOCKETS_DFLT = ZMQ_MAX_SOCKETS_DFLT POLLITEMS_DFLT = ZMQ_POLLITEMS_DFLT THREAD_PRIORITY_DFLT = ZMQ_THREAD_PRIORITY_DFLT THREAD_SCHED_POLICY_DFLT = ZMQ_THREAD_SCHED_POLICY_DFLT PAIR = ZMQ_PAIR PUB = ZMQ_PUB SUB = ZMQ_SUB REQ = ZMQ_REQ REP = ZMQ_REP DEALER = ZMQ_DEALER ROUTER = ZMQ_ROUTER XREQ = ZMQ_XREQ XREP = ZMQ_XREP PULL = ZMQ_PULL PUSH = ZMQ_PUSH XPUB = ZMQ_XPUB XSUB = ZMQ_XSUB UPSTREAM = ZMQ_UPSTREAM DOWNSTREAM = ZMQ_DOWNSTREAM STREAM = ZMQ_STREAM SERVER = ZMQ_SERVER CLIENT = ZMQ_CLIENT RADIO = ZMQ_RADIO DISH = ZMQ_DISH GATHER = ZMQ_GATHER SCATTER = ZMQ_SCATTER DGRAM = ZMQ_DGRAM EVENT_CONNECTED = ZMQ_EVENT_CONNECTED EVENT_CONNECT_DELAYED = ZMQ_EVENT_CONNECT_DELAYED EVENT_CONNECT_RETRIED = ZMQ_EVENT_CONNECT_RETRIED EVENT_LISTENING = ZMQ_EVENT_LISTENING EVENT_BIND_FAILED = ZMQ_EVENT_BIND_FAILED EVENT_ACCEPTED = ZMQ_EVENT_ACCEPTED EVENT_ACCEPT_FAILED = ZMQ_EVENT_ACCEPT_FAILED EVENT_CLOSED = ZMQ_EVENT_CLOSED EVENT_CLOSE_FAILED = ZMQ_EVENT_CLOSE_FAILED EVENT_DISCONNECTED = ZMQ_EVENT_DISCONNECTED EVENT_ALL = ZMQ_EVENT_ALL EVENT_MONITOR_STOPPED = ZMQ_EVENT_MONITOR_STOPPED globals()['NULL'] = ZMQ_NULL PLAIN = ZMQ_PLAIN CURVE = ZMQ_CURVE GSSAPI = ZMQ_GSSAPI EAGAIN = ZMQ_EAGAIN EINVAL = ZMQ_EINVAL EFAULT = ZMQ_EFAULT ENOMEM = ZMQ_ENOMEM ENODEV = ZMQ_ENODEV EMSGSIZE = ZMQ_EMSGSIZE EAFNOSUPPORT = ZMQ_EAFNOSUPPORT ENETUNREACH = ZMQ_ENETUNREACH ECONNABORTED = ZMQ_ECONNABORTED ECONNRESET = ZMQ_ECONNRESET ENOTCONN = ZMQ_ENOTCONN ETIMEDOUT = ZMQ_ETIMEDOUT EHOSTUNREACH = ZMQ_EHOSTUNREACH ENETRESET = ZMQ_ENETRESET HAUSNUMERO = ZMQ_HAUSNUMERO ENOTSUP = ZMQ_ENOTSUP EPROTONOSUPPORT = ZMQ_EPROTONOSUPPORT ENOBUFS = ZMQ_ENOBUFS ENETDOWN = ZMQ_ENETDOWN EADDRINUSE = ZMQ_EADDRINUSE EADDRNOTAVAIL = ZMQ_EADDRNOTAVAIL ECONNREFUSED = ZMQ_ECONNREFUSED EINPROGRESS = ZMQ_EINPROGRESS ENOTSOCK = ZMQ_ENOTSOCK EFSM = ZMQ_EFSM ENOCOMPATPROTO = ZMQ_ENOCOMPATPROTO ETERM = ZMQ_ETERM EMTHREAD = ZMQ_EMTHREAD IO_THREADS = ZMQ_IO_THREADS MAX_SOCKETS = ZMQ_MAX_SOCKETS SOCKET_LIMIT = ZMQ_SOCKET_LIMIT THREAD_PRIORITY = ZMQ_THREAD_PRIORITY THREAD_SCHED_POLICY = ZMQ_THREAD_SCHED_POLICY BLOCKY = ZMQ_BLOCKY IDENTITY = ZMQ_IDENTITY SUBSCRIBE = ZMQ_SUBSCRIBE UNSUBSCRIBE = ZMQ_UNSUBSCRIBE LAST_ENDPOINT = ZMQ_LAST_ENDPOINT TCP_ACCEPT_FILTER = ZMQ_TCP_ACCEPT_FILTER PLAIN_USERNAME = ZMQ_PLAIN_USERNAME PLAIN_PASSWORD = ZMQ_PLAIN_PASSWORD CURVE_PUBLICKEY = ZMQ_CURVE_PUBLICKEY CURVE_SECRETKEY = ZMQ_CURVE_SECRETKEY CURVE_SERVERKEY = ZMQ_CURVE_SERVERKEY ZAP_DOMAIN = ZMQ_ZAP_DOMAIN CONNECT_RID = ZMQ_CONNECT_RID GSSAPI_PRINCIPAL = ZMQ_GSSAPI_PRINCIPAL GSSAPI_SERVICE_PRINCIPAL = ZMQ_GSSAPI_SERVICE_PRINCIPAL SOCKS_PROXY = ZMQ_SOCKS_PROXY XPUB_WELCOME_MSG = ZMQ_XPUB_WELCOME_MSG FD = ZMQ_FD RECONNECT_IVL_MAX = ZMQ_RECONNECT_IVL_MAX SNDTIMEO = ZMQ_SNDTIMEO RCVTIMEO = ZMQ_RCVTIMEO SNDHWM = ZMQ_SNDHWM RCVHWM = ZMQ_RCVHWM MULTICAST_HOPS = ZMQ_MULTICAST_HOPS IPV4ONLY = ZMQ_IPV4ONLY ROUTER_BEHAVIOR = ZMQ_ROUTER_BEHAVIOR TCP_KEEPALIVE = ZMQ_TCP_KEEPALIVE TCP_KEEPALIVE_CNT = ZMQ_TCP_KEEPALIVE_CNT TCP_KEEPALIVE_IDLE = ZMQ_TCP_KEEPALIVE_IDLE TCP_KEEPALIVE_INTVL = ZMQ_TCP_KEEPALIVE_INTVL DELAY_ATTACH_ON_CONNECT = ZMQ_DELAY_ATTACH_ON_CONNECT XPUB_VERBOSE = ZMQ_XPUB_VERBOSE EVENTS = ZMQ_EVENTS TYPE = ZMQ_TYPE LINGER = ZMQ_LINGER RECONNECT_IVL = ZMQ_RECONNECT_IVL BACKLOG = ZMQ_BACKLOG ROUTER_MANDATORY = ZMQ_ROUTER_MANDATORY FAIL_UNROUTABLE = ZMQ_FAIL_UNROUTABLE ROUTER_RAW = ZMQ_ROUTER_RAW IMMEDIATE = ZMQ_IMMEDIATE IPV6 = ZMQ_IPV6 MECHANISM = ZMQ_MECHANISM PLAIN_SERVER = ZMQ_PLAIN_SERVER CURVE_SERVER = ZMQ_CURVE_SERVER 
PROBE_ROUTER = ZMQ_PROBE_ROUTER REQ_RELAXED = ZMQ_REQ_RELAXED REQ_CORRELATE = ZMQ_REQ_CORRELATE CONFLATE = ZMQ_CONFLATE ROUTER_HANDOVER = ZMQ_ROUTER_HANDOVER TOS = ZMQ_TOS IPC_FILTER_PID = ZMQ_IPC_FILTER_PID IPC_FILTER_UID = ZMQ_IPC_FILTER_UID IPC_FILTER_GID = ZMQ_IPC_FILTER_GID GSSAPI_SERVER = ZMQ_GSSAPI_SERVER GSSAPI_PLAINTEXT = ZMQ_GSSAPI_PLAINTEXT HANDSHAKE_IVL = ZMQ_HANDSHAKE_IVL XPUB_NODROP = ZMQ_XPUB_NODROP XPUB_MANUAL = ZMQ_XPUB_MANUAL STREAM_NOTIFY = ZMQ_STREAM_NOTIFY INVERT_MATCHING = ZMQ_INVERT_MATCHING XPUB_VERBOSER = ZMQ_XPUB_VERBOSER HEARTBEAT_IVL = ZMQ_HEARTBEAT_IVL HEARTBEAT_TTL = ZMQ_HEARTBEAT_TTL HEARTBEAT_TIMEOUT = ZMQ_HEARTBEAT_TIMEOUT CONNECT_TIMEOUT = ZMQ_CONNECT_TIMEOUT TCP_MAXRT = ZMQ_TCP_MAXRT THREAD_SAFE = ZMQ_THREAD_SAFE MULTICAST_MAXTPDU = ZMQ_MULTICAST_MAXTPDU VMCI_CONNECT_TIMEOUT = ZMQ_VMCI_CONNECT_TIMEOUT USE_FD = ZMQ_USE_FD AFFINITY = ZMQ_AFFINITY MAXMSGSIZE = ZMQ_MAXMSGSIZE HWM = ZMQ_HWM SWAP = ZMQ_SWAP MCAST_LOOP = ZMQ_MCAST_LOOP RECOVERY_IVL_MSEC = ZMQ_RECOVERY_IVL_MSEC VMCI_BUFFER_SIZE = ZMQ_VMCI_BUFFER_SIZE VMCI_BUFFER_MIN_SIZE = ZMQ_VMCI_BUFFER_MIN_SIZE VMCI_BUFFER_MAX_SIZE = ZMQ_VMCI_BUFFER_MAX_SIZE RATE = ZMQ_RATE RECOVERY_IVL = ZMQ_RECOVERY_IVL SNDBUF = ZMQ_SNDBUF RCVBUF = ZMQ_RCVBUF RCVMORE = ZMQ_RCVMORE MORE = ZMQ_MORE SRCFD = ZMQ_SRCFD SHARED = ZMQ_SHARED #----------------------------------------------------------------------------- # Symbols to export #----------------------------------------------------------------------------- __all__ = [ "DRAFT_API", "VERSION", "VERSION_MAJOR", "VERSION_MINOR", "VERSION_PATCH", "NOBLOCK", "DONTWAIT", "POLLIN", "POLLOUT", "POLLERR", "POLLPRI", "SNDMORE", "STREAMER", "FORWARDER", "QUEUE", "IO_THREADS_DFLT", "MAX_SOCKETS_DFLT", "POLLITEMS_DFLT", "THREAD_PRIORITY_DFLT", "THREAD_SCHED_POLICY_DFLT", "PAIR", "PUB", "SUB", "REQ", "REP", "DEALER", "ROUTER", "XREQ", "XREP", "PULL", "PUSH", "XPUB", "XSUB", "UPSTREAM", "DOWNSTREAM", "STREAM", "SERVER", "CLIENT", "RADIO", "DISH", "GATHER", "SCATTER", "DGRAM", "EVENT_CONNECTED", "EVENT_CONNECT_DELAYED", "EVENT_CONNECT_RETRIED", "EVENT_LISTENING", "EVENT_BIND_FAILED", "EVENT_ACCEPTED", "EVENT_ACCEPT_FAILED", "EVENT_CLOSED", "EVENT_CLOSE_FAILED", "EVENT_DISCONNECTED", "EVENT_ALL", "EVENT_MONITOR_STOPPED", "NULL", "PLAIN", "CURVE", "GSSAPI", "EAGAIN", "EINVAL", "EFAULT", "ENOMEM", "ENODEV", "EMSGSIZE", "EAFNOSUPPORT", "ENETUNREACH", "ECONNABORTED", "ECONNRESET", "ENOTCONN", "ETIMEDOUT", "EHOSTUNREACH", "ENETRESET", "HAUSNUMERO", "ENOTSUP", "EPROTONOSUPPORT", "ENOBUFS", "ENETDOWN", "EADDRINUSE", "EADDRNOTAVAIL", "ECONNREFUSED", "EINPROGRESS", "ENOTSOCK", "EFSM", "ENOCOMPATPROTO", "ETERM", "EMTHREAD", "IO_THREADS", "MAX_SOCKETS", "SOCKET_LIMIT", "THREAD_PRIORITY", "THREAD_SCHED_POLICY", "BLOCKY", "IDENTITY", "SUBSCRIBE", "UNSUBSCRIBE", "LAST_ENDPOINT", "TCP_ACCEPT_FILTER", "PLAIN_USERNAME", "PLAIN_PASSWORD", "CURVE_PUBLICKEY", "CURVE_SECRETKEY", "CURVE_SERVERKEY", "ZAP_DOMAIN", "CONNECT_RID", "GSSAPI_PRINCIPAL", "GSSAPI_SERVICE_PRINCIPAL", "SOCKS_PROXY", "XPUB_WELCOME_MSG", "FD", "RECONNECT_IVL_MAX", "SNDTIMEO", "RCVTIMEO", "SNDHWM", "RCVHWM", "MULTICAST_HOPS", "IPV4ONLY", "ROUTER_BEHAVIOR", "TCP_KEEPALIVE", "TCP_KEEPALIVE_CNT", "TCP_KEEPALIVE_IDLE", "TCP_KEEPALIVE_INTVL", "DELAY_ATTACH_ON_CONNECT", "XPUB_VERBOSE", "EVENTS", "TYPE", "LINGER", "RECONNECT_IVL", "BACKLOG", "ROUTER_MANDATORY", "FAIL_UNROUTABLE", "ROUTER_RAW", "IMMEDIATE", "IPV6", "MECHANISM", "PLAIN_SERVER", "CURVE_SERVER", "PROBE_ROUTER", "REQ_RELAXED", "REQ_CORRELATE", "CONFLATE", "ROUTER_HANDOVER", "TOS", 
"IPC_FILTER_PID", "IPC_FILTER_UID", "IPC_FILTER_GID", "GSSAPI_SERVER", "GSSAPI_PLAINTEXT", "HANDSHAKE_IVL", "XPUB_NODROP", "XPUB_MANUAL", "STREAM_NOTIFY", "INVERT_MATCHING", "XPUB_VERBOSER", "HEARTBEAT_IVL", "HEARTBEAT_TTL", "HEARTBEAT_TIMEOUT", "CONNECT_TIMEOUT", "TCP_MAXRT", "THREAD_SAFE", "MULTICAST_MAXTPDU", "VMCI_CONNECT_TIMEOUT", "USE_FD", "AFFINITY", "MAXMSGSIZE", "HWM", "SWAP", "MCAST_LOOP", "RECOVERY_IVL_MSEC", "VMCI_BUFFER_SIZE", "VMCI_BUFFER_MIN_SIZE", "VMCI_BUFFER_MAX_SIZE", "RATE", "RECOVERY_IVL", "SNDBUF", "RCVBUF", "RCVMORE", "MORE", "SRCFD", "SHARED", ] pyzmq-16.0.2/zmq/backend/cython/constants.pyx000066400000000000000000000022631301503633700212440ustar00rootroot00000000000000"""0MQ Constants.""" # # Copyright (c) 2010 Brian E. Granger & Min Ragan-Kelley # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . # #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from .libzmq cimport * #----------------------------------------------------------------------------- # Python module level constants #----------------------------------------------------------------------------- include "constants.pxi" pyzmq-16.0.2/zmq/backend/cython/context.pxd000066400000000000000000000033621301503633700206700ustar00rootroot00000000000000"""0MQ Context class declaration.""" # # Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . # #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- cdef class Context: cdef object __weakref__ # enable weakref cdef void *handle # The C handle for the underlying zmq object. cdef bint _shadow # whether the Context is a shadow wrapper of another cdef void **_sockets # A C-array containg socket handles cdef size_t _n_sockets # the number of sockets cdef size_t _max_sockets # the size of the _sockets array cdef int _pid # the pid of the process which created me (for fork safety) cdef public bint closed # bool property for a closed context. 
cdef inline int _term(self) # helpers for events on _sockets in Socket.__cinit__()/close() cdef inline void _add_socket(self, void* handle) cdef inline void _remove_socket(self, void* handle) pyzmq-16.0.2/zmq/backend/cython/context.pyx000066400000000000000000000161701301503633700207160ustar00rootroot00000000000000"""0MQ Context class.""" # coding: utf-8 # Copyright (c) PyZMQ Developers. # Distributed under the terms of the Lesser GNU Public License (LGPL). from libc.stdlib cimport free, malloc, realloc from .libzmq cimport * cdef extern from "getpid_compat.h": int getpid() from zmq.error import ZMQError, InterruptedSystemCall from .checkrc cimport _check_rc _instance = None cdef class Context: """Context(io_threads=1) Manage the lifecycle of a 0MQ context. Parameters ---------- io_threads : int The number of IO threads. """ # no-op for the signature def __init__(self, io_threads=1, shadow=0): pass def __cinit__(self, int io_threads=1, size_t shadow=0, **kwargs): self.handle = NULL self._sockets = NULL if shadow: self.handle = shadow self._shadow = True else: self._shadow = False if ZMQ_VERSION_MAJOR >= 3: self.handle = zmq_ctx_new() else: self.handle = zmq_init(io_threads) if self.handle == NULL: raise ZMQError() cdef int rc = 0 if ZMQ_VERSION_MAJOR >= 3 and not self._shadow: rc = zmq_ctx_set(self.handle, ZMQ_IO_THREADS, io_threads) _check_rc(rc) self.closed = False self._n_sockets = 0 self._max_sockets = 32 self._sockets = malloc(self._max_sockets*sizeof(void *)) if self._sockets == NULL: raise MemoryError("Could not allocate _sockets array") self._pid = getpid() def __dealloc__(self): """don't touch members in dealloc, just cleanup allocations""" cdef int rc if self._sockets != NULL: free(self._sockets) self._sockets = NULL self._n_sockets = 0 # we can't call object methods in dealloc as it # might already be partially deleted if not self._shadow: self._term() cdef inline void _add_socket(self, void* handle): """Add a socket handle to be closed when Context terminates. This is to be called in the Socket constructor. """ if self._n_sockets >= self._max_sockets: self._max_sockets *= 2 self._sockets = realloc(self._sockets, self._max_sockets*sizeof(void *)) if self._sockets == NULL: raise MemoryError("Could not reallocate _sockets array") self._sockets[self._n_sockets] = handle self._n_sockets += 1 cdef inline void _remove_socket(self, void* handle): """Remove a socket from the collected handles. This should be called by Socket.close, to prevent trying to close a socket a second time. """ cdef bint found = False for idx in range(self._n_sockets): if self._sockets[idx] == handle: found=True break if found: self._n_sockets -= 1 if self._n_sockets: # move last handle to closed socket's index self._sockets[idx] = self._sockets[self._n_sockets] @property def underlying(self): """The address of the underlying libzmq context""" return self.handle cdef inline int _term(self): cdef int rc=0 if self.handle != NULL and not self.closed and getpid() == self._pid: with nogil: rc = zmq_ctx_destroy(self.handle) self.handle = NULL return rc def term(self): """ctx.term() Close or terminate the context. This can be called to close the context by hand. If this is not called, the context will automatically be closed when it is garbage collected. """ cdef int rc=0 rc = self._term() try: _check_rc(rc) except InterruptedSystemCall: # ignore interrupted term # see PEP 475 notes about close & EINTR for why pass self.closed = True def set(self, int option, optval): """ctx.set(option, optval) Set a context option. 
See the 0MQ API documentation for zmq_ctx_set for details on specific options. .. versionadded:: libzmq-3.2 .. versionadded:: 13.0 Parameters ---------- option : int The option to set. Available values will depend on your version of libzmq. Examples include:: zmq.IO_THREADS, zmq.MAX_SOCKETS optval : int The value of the option to set. """ cdef int optval_int_c cdef int rc cdef char* optval_c if self.closed: raise RuntimeError("Context has been destroyed") if not isinstance(optval, int): raise TypeError('expected int, got: %r' % optval) optval_int_c = optval rc = zmq_ctx_set(self.handle, option, optval_int_c) _check_rc(rc) def get(self, int option): """ctx.get(option) Get the value of a context option. See the 0MQ API documentation for zmq_ctx_get for details on specific options. .. versionadded:: libzmq-3.2 .. versionadded:: 13.0 Parameters ---------- option : int The option to get. Available values will depend on your version of libzmq. Examples include:: zmq.IO_THREADS, zmq.MAX_SOCKETS Returns ------- optval : int The value of the option as an integer. """ cdef int optval_int_c cdef size_t sz cdef int rc if self.closed: raise RuntimeError("Context has been destroyed") rc = zmq_ctx_get(self.handle, option) _check_rc(rc) return rc def destroy(self, linger=None): """ctx.destroy(linger=None) Close all sockets associated with this context, and then terminate the context. If linger is specified, the LINGER sockopt of the sockets will be set prior to closing. .. warning:: destroy involves calling ``zmq_close()``, which is **NOT** threadsafe. If there are active sockets in other threads, this must not be called. """ cdef int linger_c cdef bint setlinger=False if linger is not None: linger_c = linger setlinger=True if self.handle != NULL and not self.closed and self._n_sockets: while self._n_sockets: if setlinger: zmq_setsockopt(self._sockets[0], ZMQ_LINGER, &linger_c, sizeof(int)) rc = zmq_close(self._sockets[0]) if rc < 0 and zmq_errno() != ZMQ_ENOTSOCK: raise ZMQError() self._n_sockets -= 1 self._sockets[0] = self._sockets[self._n_sockets] self.term() __all__ = ['Context'] pyzmq-16.0.2/zmq/backend/cython/error.pyx000066400000000000000000000033301301503633700203550ustar00rootroot00000000000000"""0MQ Error classes and functions.""" # # Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . # #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # allow const char* cdef extern from *: ctypedef char* const_char_ptr "const char*" from .libzmq cimport zmq_strerror, zmq_errno as zmq_errno_c from zmq.utils.strtypes import bytes def strerror(int errno): """strerror(errno) Return the error string given the error number. 
""" cdef const_char_ptr str_e # char * will be a bytes object: str_e = zmq_strerror(errno) if str is bytes: # Python 2: str is bytes, so we already have the right type return str_e else: # Python 3: decode bytes to unicode str return str_e.decode() def zmq_errno(): """zmq_errno() Return the integer errno of the most recent zmq error. """ return zmq_errno_c() __all__ = ['strerror', 'zmq_errno'] pyzmq-16.0.2/zmq/backend/cython/libzmq.pxd000066400000000000000000000073561301503633700205110ustar00rootroot00000000000000"""All the C imports for 0MQ""" # # Copyright (c) 2010 Brian E. Granger & Min Ragan-Kelley # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . # #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Import the C header files #----------------------------------------------------------------------------- cdef extern from *: ctypedef void* const_void_ptr "const void *" ctypedef char* const_char_ptr "const char *" cdef extern from "zmq_compat.h": ctypedef signed long long int64_t "pyzmq_int64_t" include "constant_enums.pxi" cdef extern from "zmq.h" nogil: void _zmq_version "zmq_version"(int *major, int *minor, int *patch) ctypedef int fd_t "ZMQ_FD_T" enum: errno char *zmq_strerror (int errnum) int zmq_errno() void *zmq_ctx_new () int zmq_ctx_destroy (void *context) int zmq_ctx_set (void *context, int option, int optval) int zmq_ctx_get (void *context, int option) void *zmq_init (int io_threads) int zmq_term (void *context) # blackbox def for zmq_msg_t ctypedef void * zmq_msg_t "zmq_msg_t" ctypedef void zmq_free_fn(void *data, void *hint) int zmq_msg_init (zmq_msg_t *msg) int zmq_msg_init_size (zmq_msg_t *msg, size_t size) int zmq_msg_init_data (zmq_msg_t *msg, void *data, size_t size, zmq_free_fn *ffn, void *hint) int zmq_msg_send (zmq_msg_t *msg, void *s, int flags) int zmq_msg_recv (zmq_msg_t *msg, void *s, int flags) int zmq_msg_close (zmq_msg_t *msg) int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src) int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src) void *zmq_msg_data (zmq_msg_t *msg) size_t zmq_msg_size (zmq_msg_t *msg) int zmq_msg_more (zmq_msg_t *msg) int zmq_msg_get (zmq_msg_t *msg, int option) int zmq_msg_set (zmq_msg_t *msg, int option, int optval) const_char_ptr zmq_msg_gets (zmq_msg_t *msg, const_char_ptr property) int zmq_has (const_char_ptr capability) void *zmq_socket (void *context, int type) int zmq_close (void *s) int zmq_setsockopt (void *s, int option, void *optval, size_t optvallen) int zmq_getsockopt (void *s, int option, void *optval, size_t *optvallen) int zmq_bind (void *s, char *addr) int zmq_connect (void *s, char *addr) int zmq_unbind (void *s, char *addr) int zmq_disconnect (void *s, char *addr) int zmq_socket_monitor (void *s, char *addr, int flags) # send/recv int zmq_sendbuf (void 
*s, const_void_ptr buf, size_t n, int flags) int zmq_recvbuf (void *s, void *buf, size_t n, int flags) ctypedef struct zmq_pollitem_t: void *socket int fd short events short revents int zmq_poll (zmq_pollitem_t *items, int nitems, long timeout) int zmq_device (int device_, void *insocket_, void *outsocket_) int zmq_proxy (void *frontend, void *backend, void *capture) int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key) pyzmq-16.0.2/zmq/backend/cython/message.pxd000066400000000000000000000045361301503633700206340ustar00rootroot00000000000000"""0MQ Message related class declarations.""" # # Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . # #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from cpython cimport PyBytes_FromStringAndSize from zmq.backend.cython.libzmq cimport zmq_msg_t, zmq_msg_data, zmq_msg_size #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- cdef class MessageTracker(object): cdef set events # Message Event objects to track. cdef set peers # Other Message or MessageTracker objects. cdef class Frame: cdef zmq_msg_t zmq_msg cdef object _data # The actual message data as a Python object. cdef object _buffer # A Python Buffer/View of the message contents cdef object _bytes # A bytes/str copy of the message. cdef bint _failed_init # Flag to handle failed zmq_msg_init cdef public object tracker_event # Event for use with zmq_free_fn. cdef public object tracker # MessageTracker object. cdef public bint more # whether RCVMORE was set cdef Frame fast_copy(self) # Create shallow copy of Message object. cdef object _getbuffer(self) # Construct self._buffer. cdef inline object copy_zmq_msg_bytes(zmq_msg_t *zmq_msg): """ Copy the data from a zmq_msg_t """ cdef char *data_c = NULL cdef Py_ssize_t data_len_c data_c = zmq_msg_data(zmq_msg) data_len_c = zmq_msg_size(zmq_msg) return PyBytes_FromStringAndSize(data_c, data_len_c) pyzmq-16.0.2/zmq/backend/cython/message.pyx000066400000000000000000000303341301503633700206540ustar00rootroot00000000000000"""0MQ Message related classes.""" # # Copyright (c) 2013 Brian E. Granger & Min Ragan-Kelley # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. 
# # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . # #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # get version-independent aliases: cdef extern from "pyversion_compat.h": pass from cpython cimport Py_DECREF, Py_INCREF from zmq.utils.buffers cimport asbuffer_r, viewfromobject_r cdef extern from "Python.h": ctypedef int Py_ssize_t from .libzmq cimport * from libc.stdio cimport fprintf, stderr as cstderr from libc.stdlib cimport malloc, free from libc.string cimport memcpy import time try: # below 3.3 from threading import _Event as Event except (ImportError, AttributeError): # python throws ImportError, cython throws AttributeError from threading import Event import zmq from zmq.error import _check_version from .checkrc cimport _check_rc from zmq.utils.strtypes import bytes,unicode,basestring #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- ctypedef struct zhint: void *ctx size_t id cdef void free_python_msg(void *data, void *vhint) nogil: """A pure-C function for DECREF'ing Python-owned message data. Sends a message on a PUSH socket The hint is a `zhint` struct with two values: ctx (void *): pointer to the Garbage Collector's context id (size_t): the id to be used to construct a zmq_msg_t that should be sent on a PUSH socket, signaling the Garbage Collector to remove its reference to the object. - A PUSH socket is created in the context, - it is connected to the garbage collector inproc channel, - it sends the gc message - the PUSH socket is closed When the Garbage Collector's PULL socket receives the message, it deletes its reference to the object, allowing Python to free the memory. """ cdef void *push cdef zmq_msg_t msg cdef zhint *hint = vhint if hint != NULL: zmq_msg_init_size(&msg, sizeof(size_t)) memcpy(zmq_msg_data(&msg), &hint.id, sizeof(size_t)) push = zmq_socket(hint.ctx, ZMQ_PUSH) if push == NULL: # this will happen if the context has been terminated return rc = zmq_connect(push, "inproc://pyzmq.gc.01") if rc < 0: fprintf(cstderr, "pyzmq-gc connect failed: %s\n", zmq_strerror(zmq_errno())) return rc = zmq_msg_send(&msg, push, 0) if rc < 0: fprintf(cstderr, "pyzmq-gc send failed: %s\n", zmq_strerror(zmq_errno())) zmq_msg_close(&msg) zmq_close(push) free(hint) gc = None cdef class Frame: """Frame(data=None, track=False) A zmq message Frame class for non-copy send/recvs. This class is only needed if you want to do non-copying send and recvs. When you pass a string to this class, like ``Frame(s)``, the ref-count of `s` is increased by two: once because the Frame saves `s` as an instance attribute and another because a ZMQ message is created that points to the buffer of `s`. This second ref-count increase makes sure that `s` lives until all messages that use it have been sent. Once 0MQ sends all the messages and it doesn't need the buffer of s, 0MQ will call ``Py_DECREF(s)``. Parameters ---------- data : object, optional any object that provides the buffer interface will be used to construct the 0MQ message data. track : bool [default: False] whether a MessageTracker_ should be created to track this object. Tracking a message has a cost at creation, because it creates a threadsafe Event object. 
""" def __cinit__(self, object data=None, track=False, **kwargs): cdef int rc cdef char *data_c = NULL cdef Py_ssize_t data_len_c=0 cdef zhint *hint # init more as False self.more = False # Save the data object in case the user wants the the data as a str. self._data = data self._failed_init = True # bool switch for dealloc self._buffer = None # buffer view of data self._bytes = None # bytes copy of data # Event and MessageTracker for monitoring when zmq is done with data: if track: evt = Event() self.tracker_event = evt self.tracker = zmq.MessageTracker(evt) else: self.tracker_event = None self.tracker = None if isinstance(data, unicode): raise TypeError("Unicode objects not allowed. Only: str/bytes, buffer interfaces.") if data is None: rc = zmq_msg_init(&self.zmq_msg) _check_rc(rc) self._failed_init = False return else: asbuffer_r(data, &data_c, &data_len_c) # create the hint for zmq_free_fn # two pointers: the gc context and a message to be sent to the gc PULL socket # allows libzmq to signal to Python when it is done with Python-owned memory. global gc if gc is None: from zmq.utils.garbage import gc hint = malloc(sizeof(zhint)) hint.id = gc.store(data, self.tracker_event) hint.ctx = gc._context.underlying rc = zmq_msg_init_data( &self.zmq_msg, data_c, data_len_c, free_python_msg, hint ) if rc != 0: free(hint) _check_rc(rc) self._failed_init = False def __init__(self, object data=None, track=False): """Enforce signature""" pass def __dealloc__(self): cdef int rc if self._failed_init: return # This simply decreases the 0MQ ref-count of zmq_msg. with nogil: rc = zmq_msg_close(&self.zmq_msg) _check_rc(rc) # buffer interface code adapted from petsc4py by Lisandro Dalcin, a BSD project def __getbuffer__(self, Py_buffer* buffer, int flags): # new-style (memoryview) buffer interface buffer.buf = zmq_msg_data(&self.zmq_msg) buffer.len = zmq_msg_size(&self.zmq_msg) buffer.obj = self buffer.readonly = 1 buffer.format = "B" buffer.ndim = 1 buffer.shape = &(buffer.len) buffer.strides = NULL buffer.suboffsets = NULL buffer.itemsize = 1 buffer.internal = NULL def __getsegcount__(self, Py_ssize_t *lenp): # required for getreadbuffer if lenp != NULL: lenp[0] = zmq_msg_size(&self.zmq_msg) return 1 def __getreadbuffer__(self, Py_ssize_t idx, void **p): # old-style (buffer) interface cdef char *data_c = NULL cdef Py_ssize_t data_len_c if idx != 0: raise SystemError("accessing non-existent buffer segment") # read-only, because we don't want to allow # editing of the message in-place data_c = zmq_msg_data(&self.zmq_msg) data_len_c = zmq_msg_size(&self.zmq_msg) if p != NULL: p[0] = data_c return data_len_c # end buffer interface def __copy__(self): """Create a shallow copy of the message. This does not copy the contents of the Frame, just the pointer. This will increment the 0MQ ref count of the message, but not the ref count of the Python object. That is only done once when the Python is first turned into a 0MQ message. """ return self.fast_copy() cdef Frame fast_copy(self): """Fast, cdef'd version of shallow copy of the Frame.""" cdef Frame new_msg new_msg = Frame() # This does not copy the contents, but just increases the ref-count # of the zmq_msg by one. zmq_msg_copy(&new_msg.zmq_msg, &self.zmq_msg) # Copy the ref to data so the copy won't create a copy when str is # called. 
if self._data is not None: new_msg._data = self._data if self._buffer is not None: new_msg._buffer = self._buffer if self._bytes is not None: new_msg._bytes = self._bytes # Frame copies share the tracker and tracker_event new_msg.tracker_event = self.tracker_event new_msg.tracker = self.tracker return new_msg def __len__(self): """Return the length of the message in bytes.""" cdef size_t sz sz = zmq_msg_size(&self.zmq_msg) return sz # return zmq_msg_size(&self.zmq_msg) def __str__(self): """Return the str form of the message.""" if isinstance(self._data, bytes): b = self._data else: b = self.bytes if str is unicode: return b.decode() else: return b cdef inline object _getbuffer(self): """Create a Python buffer/view of the message data. This will be called only once, the first time the `buffer` property is accessed. Subsequent calls use a cached copy. """ if self._data is None: return viewfromobject_r(self) else: return viewfromobject_r(self._data) @property def buffer(self): """A read-only buffer view of the message contents.""" if self._buffer is None: self._buffer = self._getbuffer() return self._buffer @property def bytes(self): """The message content as a Python bytes object. The first time this property is accessed, a copy of the message contents is made. From then on that same copy of the message is returned. """ if self._bytes is None: self._bytes = copy_zmq_msg_bytes(&self.zmq_msg) return self._bytes def set(self, int option, int value): """Frame.set(option, value) Set a Frame option. See the 0MQ API documentation for zmq_msg_set for details on specific options. .. versionadded:: libzmq-3.2 .. versionadded:: 13.0 """ cdef int rc = zmq_msg_set(&self.zmq_msg, option, value) _check_rc(rc) def get(self, option): """Frame.get(option) Get a Frame option or property. See the 0MQ API documentation for zmq_msg_get and zmq_msg_gets for details on specific options. .. versionadded:: libzmq-3.2 .. versionadded:: 13.0 .. versionchanged:: 14.3 add support for zmq_msg_gets (requires libzmq-4.1) """ cdef int rc = 0 cdef char *property_c = NULL cdef Py_ssize_t property_len_c = 0 # zmq_msg_get if isinstance(option, int): rc = zmq_msg_get(&self.zmq_msg, option) _check_rc(rc) return rc # zmq_msg_gets _check_version((4,1), "get string properties") if isinstance(option, unicode): option = option.encode('utf8') if not isinstance(option, bytes): raise TypeError("expected str, got: %r" % option) property_c = option cdef const char *result = zmq_msg_gets(&self.zmq_msg, property_c) if result == NULL: _check_rc(-1) return result.decode('utf8') # legacy Message name Message = Frame __all__ = ['Frame', 'Message'] pyzmq-16.0.2/zmq/backend/cython/rebuffer.pyx000066400000000000000000000067451301503633700210410ustar00rootroot00000000000000""" Utility for changing itemsize of memoryviews, and getting numpy arrays from byte-arrays that should be interpreted with a different itemsize. Authors ------- * MinRK """ #----------------------------------------------------------------------------- # Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley # # This file is part of pyzmq # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. 
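# --- Editorial sketch (not part of the pyzmq sources) -----------------------
# Illustrates the Frame.buffer / Frame.bytes properties and the shallow
# __copy__ defined in message.pyx above. No sockets are involved; the payload
# is an illustrative assumption.
import copy
import zmq

f = zmq.Frame(b"hello")
view = f.buffer                 # zero-copy view (a memoryview on Python 3)
assert bytes(view) == b"hello"
assert f.bytes == b"hello"      # first access copies the message contents...
assert f.bytes is f.bytes       # ...later accesses reuse that same copy
g = copy.copy(f)                # bumps the zmq_msg ref-count; shares data
assert g.bytes == f.bytes
assert len(g) == len(f) == 5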
#----------------------------------------------------------------------------- from libc.stdlib cimport malloc from zmq.utils.buffers cimport * cdef inline object _rebuffer(object obj, char * format, int itemsize): """clobber the format & itemsize of a 1-D This is the Python 3 model, but will work on Python >= 2.6. Currently, we use it only on >= 3.0. """ cdef Py_buffer view cdef int flags = PyBUF_SIMPLE cdef int mode = 0 # cdef Py_ssize_t *shape, *strides, *suboffsets mode = check_buffer(obj) if mode == 0: raise TypeError("%r does not provide a buffer interface."%obj) if mode == 3: flags = PyBUF_ANY_CONTIGUOUS if format: flags |= PyBUF_FORMAT PyObject_GetBuffer(obj, &view, flags) assert view.ndim <= 1, "Can only reinterpret 1-D memoryviews" assert view.len % itemsize == 0, "Buffer of length %i not divisible into items of size %i"%(view.len, itemsize) # hack the format view.ndim = 1 view.format = format view.itemsize = itemsize view.strides = malloc(sizeof(Py_ssize_t)) view.strides[0] = itemsize view.shape = malloc(sizeof(Py_ssize_t)) view.shape[0] = view.len/itemsize view.suboffsets = malloc(sizeof(Py_ssize_t)) view.suboffsets[0] = 0 # for debug: make buffer writable, for zero-copy testing # view.readonly = 0 return PyMemoryView_FromBuffer(&view) else: raise TypeError("This funciton is only for new-style buffer objects.") def rebuffer(obj, format, itemsize): """Change the itemsize of a memoryview. Only for 1D contiguous buffers. """ return _rebuffer(obj, format, itemsize) def array_from_buffer(view, dtype, shape): """Get a numpy array from a memoryview, regardless of the itemsize of the original memoryview. This is important, because pyzmq does not send memoryview shape data over the wire, so we need to change the memoryview itemsize before calling asarray. """ import numpy A = numpy.array([],dtype=dtype) ref = viewfromobject(A,0) fmt = ref.format.encode() buf = viewfromobject(view, 0) buf = _rebuffer(view, fmt, ref.itemsize) return numpy.asarray(buf, dtype=dtype).reshape(shape) def print_view_info(obj): """simple utility for printing info on a new-style buffer object""" cdef Py_buffer view cdef int flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT cdef int mode = 0 mode = check_buffer(obj) if mode == 0: raise TypeError("%r does not provide a buffer interface."%obj) if mode == 3: PyObject_GetBuffer(obj, &view, flags) print view.buf, view.len, view.format, view.ndim, if view.ndim: if view.shape: print view.shape[0], if view.strides: print view.strides[0], if view.suboffsets: print view.suboffsets[0], print pyzmq-16.0.2/zmq/backend/cython/socket.pxd000066400000000000000000000036341301503633700204760ustar00rootroot00000000000000"""0MQ Socket class declaration.""" # # Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . 
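# --- Editorial sketch (not part of the pyzmq sources) -----------------------
# Illustrates the problem rebuffer.pyx / array_from_buffer address: pyzmq only
# ships raw bytes, so an array's dtype and shape must travel separately and be
# reapplied on the receiving side. numpy, the PAIR sockets and the inproc
# endpoint are illustrative assumptions.
import numpy
import zmq

ctx = zmq.Context()
a = ctx.socket(zmq.PAIR)
b = ctx.socket(zmq.PAIR)
a.bind("inproc://array-demo")
b.connect("inproc://array-demo")

A = numpy.arange(12, dtype=numpy.float64).reshape(3, 4)
a.send_string(str(A.dtype), zmq.SNDMORE)   # metadata frames first...
a.send_string("3,4", zmq.SNDMORE)
a.send(A, copy=False)                      # ...then the raw buffer

dtype = numpy.dtype(b.recv_string())
shape = tuple(int(i) for i in b.recv_string().split(","))
frame = b.recv(copy=False)                 # a Frame exposing the raw bytes
B = numpy.frombuffer(frame, dtype=dtype).reshape(shape)
assert (A == B).all()
ctx.destroy()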
# #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from context cimport Context #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- cdef class Socket: cdef object __weakref__ # enable weakref cdef void *handle # The C handle for the underlying zmq object. cdef bint _shadow # whether the Socket is a shadow wrapper of another # Hold on to a reference to the context to make sure it is not garbage # collected until the socket it done with it. cdef public Context context # The zmq Context object that owns this. cdef public bint _closed # bool property for a closed socket. cdef int _pid # the pid of the process which created me (for fork safety) # cpdef methods for direct-cython access: cpdef object send(self, object data, int flags=*, copy=*, track=*) cpdef object recv(self, int flags=*, copy=*, track=*) pyzmq-16.0.2/zmq/backend/cython/socket.pyx000066400000000000000000000565221301503633700205270ustar00rootroot00000000000000"""0MQ Socket class.""" # # Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . 
# #----------------------------------------------------------------------------- # Cython Imports #----------------------------------------------------------------------------- # get version-independent aliases: cdef extern from "pyversion_compat.h": pass from libc.errno cimport ENAMETOOLONG from libc.string cimport memcpy from cpython cimport PyBytes_FromStringAndSize from cpython cimport PyBytes_AsString, PyBytes_Size from cpython cimport Py_DECREF, Py_INCREF from zmq.utils.buffers cimport asbuffer_r, viewfromobject_r from .libzmq cimport * from message cimport Frame, copy_zmq_msg_bytes from context cimport Context cdef extern from "Python.h": ctypedef int Py_ssize_t cdef extern from "ipcmaxlen.h": int get_ipc_path_max_len() cdef extern from "getpid_compat.h": int getpid() #----------------------------------------------------------------------------- # Python Imports #----------------------------------------------------------------------------- import copy as copy_mod import time import sys import random import struct import codecs try: import cPickle pickle = cPickle except: cPickle = None import pickle import zmq from zmq.backend.cython import constants from .constants import * from .checkrc cimport _check_rc from zmq.error import ZMQError, ZMQBindError, InterruptedSystemCall, _check_version from zmq.utils.strtypes import bytes,unicode,basestring #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- IPC_PATH_MAX_LEN = get_ipc_path_max_len() # inline some small socket submethods: # true methods frequently cannot be inlined, acc. Cython docs cdef inline _check_closed(Socket s): """raise ENOTSUP if socket is closed Does not do a deep check """ if s._closed: raise ZMQError(ENOTSOCK) cdef inline _check_closed_deep(Socket s): """thorough check of whether the socket has been closed, even if by another entity (e.g. ctx.destroy). Only used by the `closed` property. returns True if closed, False otherwise """ cdef int rc cdef int errno cdef int stype cdef size_t sz=sizeof(int) if s._closed: return True else: rc = zmq_getsockopt(s.handle, ZMQ_TYPE, &stype, &sz) if rc < 0 and zmq_errno() == ENOTSOCK: s._closed = True return True else: _check_rc(rc) return False cdef inline Frame _recv_frame(void *handle, int flags=0, track=False): """Receive a message in a non-copying manner and return a Frame.""" cdef int rc msg = zmq.Frame(track=track) cdef Frame cmsg = msg while True: with nogil: rc = zmq_msg_recv(&cmsg.zmq_msg, handle, flags) try: _check_rc(rc) except InterruptedSystemCall: continue else: break return msg cdef inline object _recv_copy(void *handle, int flags=0): """Receive a message and return a copy""" cdef zmq_msg_t zmq_msg rc = zmq_msg_init (&zmq_msg) _check_rc(rc) while True: with nogil: rc = zmq_msg_recv(&zmq_msg, handle, flags) try: _check_rc(rc) except InterruptedSystemCall: continue except Exception: zmq_msg_close(&zmq_msg) # ensure msg is closed on failure raise else: break msg_bytes = copy_zmq_msg_bytes(&zmq_msg) zmq_msg_close(&zmq_msg) return msg_bytes cdef inline object _send_frame(void *handle, Frame msg, int flags=0): """Send a Frame on this socket in a non-copy manner.""" cdef int rc cdef Frame msg_copy # Always copy so the original message isn't garbage collected. # This doesn't do a real copy, just a reference. 
msg_copy = msg.fast_copy() while True: with nogil: rc = zmq_msg_send(&msg_copy.zmq_msg, handle, flags) try: _check_rc(rc) except InterruptedSystemCall: continue else: break return msg.tracker cdef inline object _send_copy(void *handle, object msg, int flags=0): """Send a message on this socket by copying its content.""" cdef int rc cdef zmq_msg_t data cdef char *msg_c cdef Py_ssize_t msg_c_len=0 # copy to c array: asbuffer_r(msg, &msg_c, &msg_c_len) # Copy the msg before sending. This avoids any complications with # the GIL, etc. # If zmq_msg_init_* fails we must not call zmq_msg_close (Bus Error) rc = zmq_msg_init_size(&data, msg_c_len) _check_rc(rc) while True: with nogil: memcpy(zmq_msg_data(&data), msg_c, zmq_msg_size(&data)) rc = zmq_msg_send(&data, handle, flags) try: _check_rc(rc) except InterruptedSystemCall: continue except Exception: zmq_msg_close(&data) # close the unused msg raise # raise original exception else: rc = zmq_msg_close(&data) _check_rc(rc) break cdef inline object _getsockopt(void *handle, int option, void *optval, size_t *sz): """getsockopt, retrying interrupted calls checks rc, raising ZMQError on failure. """ cdef int rc=0 while True: rc = zmq_getsockopt(handle, option, optval, sz) try: _check_rc(rc) except InterruptedSystemCall: continue else: break cdef inline object _setsockopt(void *handle, int option, void *optval, size_t sz): """setsockopt, retrying interrupted calls checks rc, raising ZMQError on failure. """ cdef int rc=0 while True: rc = zmq_setsockopt(handle, option, optval, sz) try: _check_rc(rc) except InterruptedSystemCall: continue else: break cdef class Socket: """Socket(context, socket_type) A 0MQ socket. These objects will generally be constructed via the socket() method of a Context object. Note: 0MQ Sockets are *not* threadsafe. **DO NOT** share them across threads. Parameters ---------- context : Context The 0MQ Context this Socket belongs to. socket_type : int The socket type, which can be any of the 0MQ socket types: REQ, REP, PUB, SUB, PAIR, DEALER, ROUTER, PULL, PUSH, XPUB, XSUB. See Also -------- .Context.socket : method for creating a socket bound to a Context. """ # no-op for the signature def __init__(self, context=None, socket_type=-1, shadow=0): pass def __cinit__(self, Context context=None, int socket_type=-1, size_t shadow=0, *args, **kwargs): cdef Py_ssize_t c_handle self.handle = NULL self.context = context if shadow: self._shadow = True self.handle = shadow else: if context is None: raise TypeError("context must be specified") if socket_type < 0: raise TypeError("socket_type must be specified") self._shadow = False self.handle = zmq_socket(context.handle, socket_type) if self.handle == NULL: raise ZMQError() self._closed = False self._pid = getpid() if context: context._add_socket(self.handle) def __dealloc__(self): """remove from context's list But be careful that context might not exist if called during gc """ if self.handle != NULL and not self._shadow and getpid() == self._pid: # during gc, self.context might be NULL if self.context and not self.context.closed: self.context._remove_socket(self.handle) @property def underlying(self): """The address of the underlying libzmq socket""" return self.handle @property def closed(self): return _check_closed_deep(self) def close(self, linger=None): """s.close(linger=None) Close the socket. If linger is specified, LINGER sockopt will be set prior to closing. This can be called to close the socket by hand. 
If this is not called, the socket will automatically be closed when it is garbage collected. """ cdef int rc=0 cdef int linger_c cdef bint setlinger=False if linger is not None: linger_c = linger setlinger=True if self.handle != NULL and not self._closed and getpid() == self._pid: if setlinger: zmq_setsockopt(self.handle, ZMQ_LINGER, &linger_c, sizeof(int)) rc = zmq_close(self.handle) if rc < 0 and zmq_errno() != ENOTSOCK: # ignore ENOTSOCK (closed by Context) _check_rc(rc) self._closed = True # during gc, self.context might be NULL if self.context: self.context._remove_socket(self.handle) self.handle = NULL def set(self, int option, optval): """s.set(option, optval) Set socket options. See the 0MQ API documentation for details on specific options. Parameters ---------- option : int The option to set. Available values will depend on your version of libzmq. Examples include:: zmq.SUBSCRIBE, UNSUBSCRIBE, IDENTITY, HWM, LINGER, FD optval : int or bytes The value of the option to set. Notes ----- .. warning:: All options other than zmq.SUBSCRIBE, zmq.UNSUBSCRIBE and zmq.LINGER only take effect for subsequent socket bind/connects. """ cdef int64_t optval_int64_c cdef int optval_int_c cdef char* optval_c cdef Py_ssize_t sz _check_closed(self) if isinstance(optval, unicode): raise TypeError("unicode not allowed, use setsockopt_string") if option in zmq.constants.bytes_sockopts: if not isinstance(optval, bytes): raise TypeError('expected bytes, got: %r' % optval) optval_c = PyBytes_AsString(optval) sz = PyBytes_Size(optval) _setsockopt(self.handle, option, optval_c, sz) elif option in zmq.constants.int64_sockopts: if not isinstance(optval, int): raise TypeError('expected int, got: %r' % optval) optval_int64_c = optval _setsockopt(self.handle, option, &optval_int64_c, sizeof(int64_t)) else: # default is to assume int, which is what most new sockopts will be # this lets pyzmq work with newer libzmq which may add constants # pyzmq has not yet added, rather than artificially raising. Invalid # sockopts will still raise just the same, but it will be libzmq doing # the raising. if not isinstance(optval, int): raise TypeError('expected int, got: %r' % optval) optval_int_c = optval _setsockopt(self.handle, option, &optval_int_c, sizeof(int)) def get(self, int option): """s.get(option) Get the value of a socket option. See the 0MQ API documentation for details on specific options. Parameters ---------- option : int The option to get. Available values will depend on your version of libzmq. Examples include:: zmq.IDENTITY, HWM, LINGER, FD, EVENTS Returns ------- optval : int or bytes The value of the option as a bytestring or int. 
""" cdef int64_t optval_int64_c cdef int optval_int_c cdef fd_t optval_fd_c cdef char identity_str_c [255] cdef size_t sz cdef int rc _check_closed(self) if option in zmq.constants.bytes_sockopts: sz = 255 _getsockopt(self.handle, option, identity_str_c, &sz) # strip null-terminated strings *except* identity if option != ZMQ_IDENTITY and sz > 0 and (identity_str_c)[sz-1] == b'\0': sz -= 1 result = PyBytes_FromStringAndSize(identity_str_c, sz) elif option in zmq.constants.int64_sockopts: sz = sizeof(int64_t) _getsockopt(self.handle, option, &optval_int64_c, &sz) result = optval_int64_c elif option in zmq.constants.fd_sockopts: sz = sizeof(fd_t) _getsockopt(self.handle, option, &optval_fd_c, &sz) result = optval_fd_c else: # default is to assume int, which is what most new sockopts will be # this lets pyzmq work with newer libzmq which may add constants # pyzmq has not yet added, rather than artificially raising. Invalid # sockopts will still raise just the same, but it will be libzmq doing # the raising. sz = sizeof(int) _getsockopt(self.handle, option, &optval_int_c, &sz) result = optval_int_c return result def bind(self, addr): """s.bind(addr) Bind the socket to an address. This causes the socket to listen on a network port. Sockets on the other side of this connection will use ``Socket.connect(addr)`` to connect to this socket. Parameters ---------- addr : str The address string. This has the form 'protocol://interface:port', for example 'tcp://127.0.0.1:5555'. Protocols supported include tcp, udp, pgm, epgm, inproc and ipc. If the address is unicode, it is encoded to utf-8 first. """ cdef int rc cdef char* c_addr _check_closed(self) if isinstance(addr, unicode): addr = addr.encode('utf-8') if not isinstance(addr, bytes): raise TypeError('expected str, got: %r' % addr) c_addr = addr rc = zmq_bind(self.handle, c_addr) if rc != 0: if IPC_PATH_MAX_LEN and zmq_errno() == ENAMETOOLONG: # py3compat: addr is bytes, but msg wants str if str is unicode: addr = addr.decode('utf-8', 'replace') path = addr.split('://', 1)[-1] msg = ('ipc path "{0}" is longer than {1} ' 'characters (sizeof(sockaddr_un.sun_path)). ' 'zmq.IPC_PATH_MAX_LEN constant can be used ' 'to check addr length (if it is defined).' .format(path, IPC_PATH_MAX_LEN)) raise ZMQError(msg=msg) while True: try: _check_rc(rc) except InterruptedSystemCall: rc = zmq_bind(self.handle, c_addr) continue else: break def connect(self, addr): """s.connect(addr) Connect to a remote 0MQ socket. Parameters ---------- addr : str The address string. This has the form 'protocol://interface:port', for example 'tcp://127.0.0.1:5555'. Protocols supported are tcp, upd, pgm, inproc and ipc. If the address is unicode, it is encoded to utf-8 first. """ cdef int rc cdef char* c_addr _check_closed(self) if isinstance(addr, unicode): addr = addr.encode('utf-8') if not isinstance(addr, bytes): raise TypeError('expected str, got: %r' % addr) c_addr = addr while True: try: rc = zmq_connect(self.handle, c_addr) _check_rc(rc) except InterruptedSystemCall: # retry syscall continue else: break def unbind(self, addr): """s.unbind(addr) Unbind from an address (undoes a call to bind). .. versionadded:: libzmq-3.2 .. versionadded:: 13.0 Parameters ---------- addr : str The address string. This has the form 'protocol://interface:port', for example 'tcp://127.0.0.1:5555'. Protocols supported are tcp, upd, pgm, inproc and ipc. If the address is unicode, it is encoded to utf-8 first. 
""" cdef int rc cdef char* c_addr _check_version((3,2), "unbind") _check_closed(self) if isinstance(addr, unicode): addr = addr.encode('utf-8') if not isinstance(addr, bytes): raise TypeError('expected str, got: %r' % addr) c_addr = addr rc = zmq_unbind(self.handle, c_addr) if rc != 0: raise ZMQError() def disconnect(self, addr): """s.disconnect(addr) Disconnect from a remote 0MQ socket (undoes a call to connect). .. versionadded:: libzmq-3.2 .. versionadded:: 13.0 Parameters ---------- addr : str The address string. This has the form 'protocol://interface:port', for example 'tcp://127.0.0.1:5555'. Protocols supported are tcp, upd, pgm, inproc and ipc. If the address is unicode, it is encoded to utf-8 first. """ cdef int rc cdef char* c_addr _check_version((3,2), "disconnect") _check_closed(self) if isinstance(addr, unicode): addr = addr.encode('utf-8') if not isinstance(addr, bytes): raise TypeError('expected str, got: %r' % addr) c_addr = addr rc = zmq_disconnect(self.handle, c_addr) if rc != 0: raise ZMQError() def monitor(self, addr, int events=ZMQ_EVENT_ALL): """s.monitor(addr, flags) Start publishing socket events on inproc. See libzmq docs for zmq_monitor for details. While this function is available from libzmq 3.2, pyzmq cannot parse monitor messages from libzmq prior to 4.0. .. versionadded: libzmq-3.2 .. versionadded: 14.0 Parameters ---------- addr : str The inproc url used for monitoring. Passing None as the addr will cause an existing socket monitor to be deregistered. events : int [default: zmq.EVENT_ALL] The zmq event bitmask for which events will be sent to the monitor. """ cdef int rc, c_flags cdef char* c_addr = NULL _check_version((3,2), "monitor") if addr is not None: if isinstance(addr, unicode): addr = addr.encode('utf-8') if not isinstance(addr, bytes): raise TypeError('expected str, got: %r' % addr) c_addr = addr c_flags = events rc = zmq_socket_monitor(self.handle, c_addr, c_flags) _check_rc(rc) #------------------------------------------------------------------------- # Sending and receiving messages #------------------------------------------------------------------------- cpdef object send(self, object data, int flags=0, copy=True, track=False): """s.send(data, flags=0, copy=True, track=False) Send a message on this socket. This queues the message to be sent by the IO thread at a later time. Parameters ---------- data : object, str, Frame The content of the message. flags : int Any supported flag: NOBLOCK, SNDMORE. copy : bool Should the message be sent in a copying or non-copying manner. track : bool Should the message be tracked for notification that ZMQ has finished with it? (ignored if copy=True) Returns ------- None : if `copy` or not track None if message was sent, raises an exception otherwise. MessageTracker : if track and not copy a MessageTracker object, whose `pending` property will be True until the send is completed. Raises ------ TypeError If a unicode object is passed ValueError If `track=True`, but an untracked Frame is passed. ZMQError If the send does not succeed for any reason. 
""" _check_closed(self) if isinstance(data, unicode): raise TypeError("unicode not allowed, use send_string") if copy: # msg.bytes never returns the input data object # it is always a copy, but always the same copy if isinstance(data, Frame): data = data.buffer return _send_copy(self.handle, data, flags) else: if isinstance(data, Frame): if track and not data.tracker: raise ValueError('Not a tracked message') msg = data else: msg = Frame(data, track=track) return _send_frame(self.handle, msg, flags) cpdef object recv(self, int flags=0, copy=True, track=False): """s.recv(flags=0, copy=True, track=False) Receive a message. Parameters ---------- flags : int Any supported flag: NOBLOCK. If NOBLOCK is set, this method will raise a ZMQError with EAGAIN if a message is not ready. If NOBLOCK is not set, then this method will block until a message arrives. copy : bool Should the message be received in a copying or non-copying manner? If False a Frame object is returned, if True a string copy of message is returned. track : bool Should the message be tracked for notification that ZMQ has finished with it? (ignored if copy=True) Returns ------- msg : bytes, Frame The received message frame. If `copy` is False, then it will be a Frame, otherwise it will be bytes. Raises ------ ZMQError for any of the reasons zmq_msg_recv might fail. """ _check_closed(self) if copy: return _recv_copy(self.handle, flags) else: frame = _recv_frame(self.handle, flags, track) frame.more = self.getsockopt(zmq.RCVMORE) return frame __all__ = ['Socket', 'IPC_PATH_MAX_LEN'] pyzmq-16.0.2/zmq/backend/cython/utils.pyx000066400000000000000000000036441301503633700203740ustar00rootroot00000000000000"""0MQ utils.""" # # Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . # from .libzmq cimport ( zmq_curve_keypair, zmq_has, const_char_ptr, ) from zmq.error import ZMQError, _check_rc, _check_version from zmq.utils.strtypes import unicode def has(capability): """Check for zmq capability by name (e.g. 'ipc', 'curve') .. versionadded:: libzmq-4.1 .. versionadded:: 14.1 """ _check_version((4,1), 'zmq.has') cdef bytes ccap if isinstance(capability, unicode): capability = capability.encode('utf8') ccap = capability return bool(zmq_has(ccap)) def curve_keypair(): """generate a Z85 keypair for use with zmq.CURVE security Requires libzmq (≥ 4.0) to have been built with CURVE support. .. versionadded:: libzmq-4.0 .. versionadded:: 14.0 Returns ------- (public, secret) : two bytestrings The public and private keypair as 40 byte z85-encoded bytestrings. 
""" cdef int rc cdef char[64] public_key cdef char[64] secret_key _check_version((4,0), "curve_keypair") rc = zmq_curve_keypair (public_key, secret_key) _check_rc(rc) return public_key, secret_key __all__ = ['has', 'curve_keypair'] pyzmq-16.0.2/zmq/backend/select.py000066400000000000000000000015561301503633700170170ustar00rootroot00000000000000"""Import basic exposure of libzmq C API as a backend""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. public_api = [ 'Context', 'Socket', 'Frame', 'Message', 'device', 'proxy', 'zmq_poll', 'strerror', 'zmq_errno', 'has', 'curve_keypair', 'constants', 'zmq_version_info', 'IPC_PATH_MAX_LEN', ] def select_backend(name): """Select the pyzmq backend""" try: mod = __import__(name, fromlist=public_api) except ImportError: raise except Exception as e: import sys from zmq.utils.sixcerpt import reraise exc_info = sys.exc_info() reraise(ImportError, ImportError("Importing %s failed with %s" % (name, e)), exc_info[2]) ns = {} for key in public_api: ns[key] = getattr(mod, key) return ns pyzmq-16.0.2/zmq/decorators.py000066400000000000000000000121721301503633700163120ustar00rootroot00000000000000"""Decorators for running functions with context/sockets. .. versionadded:: 15.3 Like using Contexts and Sockets as context managers, but with decorator syntax. Context and sockets are closed at the end of the function. For example:: from zmq.decorators import context, socket @context() @socket(zmq.PUSH) def work(ctx, push): ... """ # Copyright (c) PyZMQ Developers. # Distributed under the terms of the Modified BSD License. __all__ = ( 'context', 'socket', ) from functools import wraps import zmq from zmq.utils.strtypes import basestring class _Decorator(object): '''The mini decorator factory''' def __init__(self, target=None): self._target = target def __call__(self, *dec_args, **dec_kwargs): ''' The main logic of decorator Here is how those arguments works:: @out_decorator(*dec_args, *dec_kwargs) def func(*wrap_args, **wrap_kwargs): ... And in the ``wrapper``, we simply create ``self.target`` instance via ``with``:: target = self.get_target(*args, **kwargs) with target(*dec_args, **dec_kwargs) as obj: ... ''' kw_name, dec_args, dec_kwargs = self.process_decorator_args(*dec_args, **dec_kwargs) def decorator(func): @wraps(func) def wrapper(*args, **kwargs): target = self.get_target(*args, **kwargs) with target(*dec_args, **dec_kwargs) as obj: # insert our object into args if kw_name and kw_name not in kwargs: kwargs[kw_name] = obj elif kw_name and kw_name in kwargs: raise TypeError( "{0}() got multiple values for" " argument '{1}'".format( func.__name__, kw_name)) else: args = args + (obj,) return func(*args, **kwargs) return wrapper return decorator def get_target(self, *args, **kwargs): """Return the target function Allows modifying args/kwargs to be passed. """ return self._target def process_decorator_args(self, *args, **kwargs): """Process args passed to the decorator. args not consumed by the decorator will be passed to the target factory (Context/Socket constructor). 
""" kw_name = None if isinstance(kwargs.get('name'), basestring): kw_name = kwargs.pop('name') elif len(args) >= 1 and isinstance(args[0], basestring): kw_name = args[0] args = args[1:] return kw_name, args, kwargs class _ContextDecorator(_Decorator): """Decorator subclass for Contexts""" def __init__(self): super(_ContextDecorator, self).__init__(zmq.Context) class _SocketDecorator(_Decorator): """Decorator subclass for sockets Gets the context from other args. """ def process_decorator_args(self, *args, **kwargs): """Also grab context_name out of kwargs""" kw_name, args, kwargs = super(_SocketDecorator, self).process_decorator_args(*args, **kwargs) self.context_name = kwargs.pop('context_name', 'context') return kw_name, args, kwargs def get_target(self, *args, **kwargs): """Get context, based on call-time args""" context = self._get_context(*args, **kwargs) return context.socket def _get_context(self, *args, **kwargs): ''' Find the ``zmq.Context`` from ``args`` and ``kwargs`` at call time. First, if there is an keyword argument named ``context`` and it is a ``zmq.Context`` instance , we will take it. Second, we check all the ``args``, take the first ``zmq.Context`` instance. Finally, we will provide default Context -- ``zmq.Context.instance`` :return: a ``zmq.Context`` instance ''' if self.context_name in kwargs: ctx = kwargs[self.context_name] if isinstance(ctx, zmq.Context): return ctx for arg in args: if isinstance(arg, zmq.Context): return arg # not specified by any decorator return zmq.Context.instance() def context(*args, **kwargs): '''Decorator for adding a Context to a function. Usage:: @context() def foo(ctx): ... .. versionadded:: 15.3 :param str name: the keyword argument passed to decorated function ''' return _ContextDecorator()(*args, **kwargs) def socket(*args, **kwargs): '''Decorator for adding a socket to a function. Usage:: @socket(zmq.PUSH) def foo(push): ... .. versionadded:: 15.3 :param str name: the keyword argument passed to decorated function :param str context_name: the keyword only argument to identify context object ''' return _SocketDecorator()(*args, **kwargs) pyzmq-16.0.2/zmq/devices/000077500000000000000000000000001301503633700152125ustar00rootroot00000000000000pyzmq-16.0.2/zmq/devices/__init__.py000066400000000000000000000011001301503633700173130ustar00rootroot00000000000000"""0MQ Device classes for running in background threads or processes.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from zmq import device from zmq.devices import basedevice, proxydevice, monitoredqueue, monitoredqueuedevice from zmq.devices.basedevice import * from zmq.devices.proxydevice import * from zmq.devices.monitoredqueue import * from zmq.devices.monitoredqueuedevice import * __all__ = ['device'] for submod in (basedevice, proxydevice, monitoredqueue, monitoredqueuedevice): __all__.extend(submod.__all__) pyzmq-16.0.2/zmq/devices/basedevice.py000066400000000000000000000156741301503633700176730ustar00rootroot00000000000000"""Classes for running 0MQ Devices in the background.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import time from threading import Thread from multiprocessing import Process from zmq import device, QUEUE, Context, ETERM, ZMQError class Device: """A 0MQ Device to be run in the background. 
You do not pass Socket instances to this, but rather Socket types:: Device(device_type, in_socket_type, out_socket_type) For instance:: dev = Device(zmq.QUEUE, zmq.DEALER, zmq.ROUTER) Similar to zmq.device, but socket types instead of sockets themselves are passed, and the sockets are created in the work thread, to avoid issues with thread safety. As a result, additional bind_{in|out} and connect_{in|out} methods and setsockopt_{in|out} allow users to specify connections for the sockets. Parameters ---------- device_type : int The 0MQ Device type {in|out}_type : int zmq socket types, to be passed later to context.socket(). e.g. zmq.PUB, zmq.SUB, zmq.REQ. If out_type is < 0, then in_socket is used for both in_socket and out_socket. Methods ------- bind_{in_out}(iface) passthrough for ``{in|out}_socket.bind(iface)``, to be called in the thread connect_{in_out}(iface) passthrough for ``{in|out}_socket.connect(iface)``, to be called in the thread setsockopt_{in_out}(opt,value) passthrough for ``{in|out}_socket.setsockopt(opt, value)``, to be called in the thread Attributes ---------- daemon : int sets whether the thread should be run as a daemon Default is true, because if it is false, the thread will not exit unless it is killed context_factory : callable (class attribute) Function for creating the Context. This will be Context.instance in ThreadDevices, and Context in ProcessDevices. The only reason it is not instance() in ProcessDevices is that there may be a stale Context instance already initialized, and the forked environment should *never* try to use it. """ context_factory = Context.instance """Callable that returns a context. Typically either Context.instance or Context, depending on whether the device should share the global instance or not. """ def __init__(self, device_type=QUEUE, in_type=None, out_type=None): self.device_type = device_type if in_type is None: raise TypeError("in_type must be specified") if out_type is None: raise TypeError("out_type must be specified") self.in_type = in_type self.out_type = out_type self._in_binds = [] self._in_connects = [] self._in_sockopts = [] self._out_binds = [] self._out_connects = [] self._out_sockopts = [] self.daemon = True self.done = False def bind_in(self, addr): """Enqueue ZMQ address for binding on in_socket. See zmq.Socket.bind for details. """ self._in_binds.append(addr) def connect_in(self, addr): """Enqueue ZMQ address for connecting on in_socket. See zmq.Socket.connect for details. """ self._in_connects.append(addr) def setsockopt_in(self, opt, value): """Enqueue setsockopt(opt, value) for in_socket See zmq.Socket.setsockopt for details. """ self._in_sockopts.append((opt, value)) def bind_out(self, addr): """Enqueue ZMQ address for binding on out_socket. See zmq.Socket.bind for details. """ self._out_binds.append(addr) def connect_out(self, addr): """Enqueue ZMQ address for connecting on out_socket. See zmq.Socket.connect for details. """ self._out_connects.append(addr) def setsockopt_out(self, opt, value): """Enqueue setsockopt(opt, value) for out_socket See zmq.Socket.setsockopt for details. 
""" self._out_sockopts.append((opt, value)) def _setup_sockets(self): ctx = self.context_factory() self._context = ctx # create the sockets ins = ctx.socket(self.in_type) if self.out_type < 0: outs = ins else: outs = ctx.socket(self.out_type) # set sockopts (must be done first, in case of zmq.IDENTITY) for opt,value in self._in_sockopts: ins.setsockopt(opt, value) for opt,value in self._out_sockopts: outs.setsockopt(opt, value) for iface in self._in_binds: ins.bind(iface) for iface in self._out_binds: outs.bind(iface) for iface in self._in_connects: ins.connect(iface) for iface in self._out_connects: outs.connect(iface) return ins,outs def run_device(self): """The runner method. Do not call me directly, instead call ``self.start()``, just like a Thread. """ ins,outs = self._setup_sockets() device(self.device_type, ins, outs) def run(self): """wrap run_device in try/catch ETERM""" try: self.run_device() except ZMQError as e: if e.errno == ETERM: # silence TERM errors, because this should be a clean shutdown pass else: raise finally: self.done = True def start(self): """Start the device. Override me in subclass for other launchers.""" return self.run() def join(self,timeout=None): """wait for me to finish, like Thread.join. Reimplemented appropriately by subclasses.""" tic = time.time() toc = tic while not self.done and not (timeout is not None and toc-tic > timeout): time.sleep(.001) toc = time.time() class BackgroundDevice(Device): """Base class for launching Devices in background processes and threads.""" launcher=None _launch_class=None def start(self): self.launcher = self._launch_class(target=self.run) self.launcher.daemon = self.daemon return self.launcher.start() def join(self, timeout=None): return self.launcher.join(timeout=timeout) class ThreadDevice(BackgroundDevice): """A Device that will be run in a background Thread. See Device for details. """ _launch_class=Thread class ProcessDevice(BackgroundDevice): """A Device that will be run in a background Process. See Device for details. """ _launch_class=Process context_factory = Context """Callable that returns a context. Typically either Context.instance or Context, depending on whether the device should share the global instance or not. """ __all__ = ['Device', 'ThreadDevice', 'ProcessDevice'] pyzmq-16.0.2/zmq/devices/monitoredqueue.pxd000066400000000000000000000143611301503633700210010ustar00rootroot00000000000000"""MonitoredQueue class declarations. Authors ------- * MinRK * Brian Granger """ # # Copyright (c) 2010 Min Ragan-Kelley, Brian Granger # # This file is part of pyzmq, but is derived and adapted from zmq_queue.cpp # originally from libzmq-2.1.6, used under LGPLv3 # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see . 
# #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from zmq.backend.cython.libzmq cimport * #----------------------------------------------------------------------------- # MonitoredQueue C functions #----------------------------------------------------------------------------- cdef inline int _relay(void *insocket_, void *outsocket_, void *sidesocket_, zmq_msg_t msg, zmq_msg_t side_msg, zmq_msg_t id_msg, bint swap_ids) nogil: cdef int rc cdef int64_t flag_2 cdef int flag_3 cdef int flags cdef bint more cdef size_t flagsz cdef void * flag_ptr if ZMQ_VERSION_MAJOR < 3: flagsz = sizeof (int64_t) flag_ptr = &flag_2 else: flagsz = sizeof (int) flag_ptr = &flag_3 if swap_ids:# both router, must send second identity first # recv two ids into msg, id_msg rc = zmq_msg_recv(&msg, insocket_, 0) if rc < 0: return rc rc = zmq_msg_recv(&id_msg, insocket_, 0) if rc < 0: return rc # send second id (id_msg) first #!!!! always send a copy before the original !!!! rc = zmq_msg_copy(&side_msg, &id_msg) if rc < 0: return rc rc = zmq_msg_send(&side_msg, outsocket_, ZMQ_SNDMORE) if rc < 0: return rc rc = zmq_msg_send(&id_msg, sidesocket_, ZMQ_SNDMORE) if rc < 0: return rc # send first id (msg) second rc = zmq_msg_copy(&side_msg, &msg) if rc < 0: return rc rc = zmq_msg_send(&side_msg, outsocket_, ZMQ_SNDMORE) if rc < 0: return rc rc = zmq_msg_send(&msg, sidesocket_, ZMQ_SNDMORE) if rc < 0: return rc while (True): rc = zmq_msg_recv(&msg, insocket_, 0) if rc < 0: return rc # assert (rc == 0) rc = zmq_getsockopt (insocket_, ZMQ_RCVMORE, flag_ptr, &flagsz) if rc < 0: return rc flags = 0 if ZMQ_VERSION_MAJOR < 3: if flag_2: flags |= ZMQ_SNDMORE else: if flag_3: flags |= ZMQ_SNDMORE # LABEL has been removed: # rc = zmq_getsockopt (insocket_, ZMQ_RCVLABEL, flag_ptr, &flagsz) # if flag_3: # flags |= ZMQ_SNDLABEL # assert (rc == 0) rc = zmq_msg_copy(&side_msg, &msg) if rc < 0: return rc if flags: rc = zmq_msg_send(&side_msg, outsocket_, flags) if rc < 0: return rc # only SNDMORE for side-socket rc = zmq_msg_send(&msg, sidesocket_, ZMQ_SNDMORE) if rc < 0: return rc else: rc = zmq_msg_send(&side_msg, outsocket_, 0) if rc < 0: return rc rc = zmq_msg_send(&msg, sidesocket_, 0) if rc < 0: return rc break return rc # the MonitoredQueue C function, adapted from zmq::queue.cpp : cdef inline int c_monitored_queue (void *insocket_, void *outsocket_, void *sidesocket_, zmq_msg_t *in_msg_ptr, zmq_msg_t *out_msg_ptr, int swap_ids) nogil: """The actual C function for a monitored queue device. See ``monitored_queue()`` for details. """ cdef zmq_msg_t msg cdef int rc = zmq_msg_init (&msg) cdef zmq_msg_t id_msg rc = zmq_msg_init (&id_msg) if rc < 0: return rc cdef zmq_msg_t side_msg rc = zmq_msg_init (&side_msg) if rc < 0: return rc cdef zmq_pollitem_t items [2] items [0].socket = insocket_ items [0].fd = 0 items [0].events = ZMQ_POLLIN items [0].revents = 0 items [1].socket = outsocket_ items [1].fd = 0 items [1].events = ZMQ_POLLIN items [1].revents = 0 # I don't think sidesocket should be polled? # items [2].socket = sidesocket_ # items [2].fd = 0 # items [2].events = ZMQ_POLLIN # items [2].revents = 0 while (True): # // Wait while there are either requests or replies to process. rc = zmq_poll (&items [0], 2, -1) if rc < 0: return rc # // The algorithm below asumes ratio of request and replies processed # // under full load to be 1:1. 
Although processing requests replies # // first is tempting it is suspectible to DoS attacks (overloading # // the system with unsolicited replies). # # // Process a request. if (items [0].revents & ZMQ_POLLIN): # send in_prefix to side socket rc = zmq_msg_copy(&side_msg, in_msg_ptr) if rc < 0: return rc rc = zmq_msg_send(&side_msg, sidesocket_, ZMQ_SNDMORE) if rc < 0: return rc # relay the rest of the message rc = _relay(insocket_, outsocket_, sidesocket_, msg, side_msg, id_msg, swap_ids) if rc < 0: return rc if (items [1].revents & ZMQ_POLLIN): # send out_prefix to side socket rc = zmq_msg_copy(&side_msg, out_msg_ptr) if rc < 0: return rc rc = zmq_msg_send(&side_msg, sidesocket_, ZMQ_SNDMORE) if rc < 0: return rc # relay the rest of the message rc = _relay(outsocket_, insocket_, sidesocket_, msg, side_msg, id_msg, swap_ids) if rc < 0: return rc return rc pyzmq-16.0.2/zmq/devices/monitoredqueue.py000066400000000000000000000020211301503633700206240ustar00rootroot00000000000000"""pure Python monitored_queue function For use when Cython extension is unavailable (PyPy). Authors ------- * MinRK """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import zmq def _relay(ins, outs, sides, prefix, swap_ids): msg = ins.recv_multipart() if swap_ids: msg[:2] = msg[:2][::-1] outs.send_multipart(msg) sides.send_multipart([prefix] + msg) def monitored_queue(in_socket, out_socket, mon_socket, in_prefix=b'in', out_prefix=b'out'): swap_ids = in_socket.type == zmq.ROUTER and out_socket.type == zmq.ROUTER poller = zmq.Poller() poller.register(in_socket, zmq.POLLIN) poller.register(out_socket, zmq.POLLIN) while True: events = dict(poller.poll()) if in_socket in events: _relay(in_socket, out_socket, mon_socket, in_prefix, swap_ids) if out_socket in events: _relay(out_socket, in_socket, mon_socket, out_prefix, swap_ids) __all__ = ['monitored_queue'] pyzmq-16.0.2/zmq/devices/monitoredqueue.pyx000066400000000000000000000070011301503633700210170ustar00rootroot00000000000000"""MonitoredQueue classes and functions. Authors ------- * MinRK * Brian Granger """ #----------------------------------------------------------------------------- # Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley # # This file is part of pyzmq # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- cdef extern from "Python.h": ctypedef int Py_ssize_t from libc.string cimport memcpy from zmq.utils.buffers cimport asbuffer_r from zmq.backend.cython.libzmq cimport * from zmq.backend.cython.socket cimport Socket from zmq.backend.cython.checkrc cimport _check_rc from zmq import ROUTER, ZMQError from zmq.error import InterruptedSystemCall #----------------------------------------------------------------------------- # MonitoredQueue functions #----------------------------------------------------------------------------- def monitored_queue(Socket in_socket, Socket out_socket, Socket mon_socket, bytes in_prefix=b'in', bytes out_prefix=b'out'): """monitored_queue(in_socket, out_socket, mon_socket, in_prefix=b'in', out_prefix=b'out') Start a monitored queue device. A monitored queue is very similar to the zmq.proxy device (monitored queue came first). 
Differences from zmq.proxy: - monitored_queue supports both in and out being ROUTER sockets (via swapping IDENTITY prefixes). - monitor messages are prefixed, making in and out messages distinguishable. Parameters ---------- in_socket : Socket One of the sockets to the Queue. Its messages will be prefixed with 'in'. out_socket : Socket One of the sockets to the Queue. Its messages will be prefixed with 'out'. The only difference between in/out socket is this prefix. mon_socket : Socket This socket sends out every message received by each of the others with an in/out prefix specifying which one it was. in_prefix : str Prefix added to broadcast messages from in_socket. out_prefix : str Prefix added to broadcast messages from out_socket. """ cdef void *ins=in_socket.handle cdef void *outs=out_socket.handle cdef void *mons=mon_socket.handle cdef zmq_msg_t in_msg cdef zmq_msg_t out_msg cdef bint swap_ids cdef char *msg_c = NULL cdef Py_ssize_t msg_c_len cdef int rc # force swap_ids if both ROUTERs swap_ids = (in_socket.type == ROUTER and out_socket.type == ROUTER) # build zmq_msg objects from str prefixes asbuffer_r(in_prefix, &msg_c, &msg_c_len) rc = zmq_msg_init_size(&in_msg, msg_c_len) _check_rc(rc) memcpy(zmq_msg_data(&in_msg), msg_c, zmq_msg_size(&in_msg)) asbuffer_r(out_prefix, &msg_c, &msg_c_len) rc = zmq_msg_init_size(&out_msg, msg_c_len) _check_rc(rc) while True: with nogil: memcpy(zmq_msg_data(&out_msg), msg_c, zmq_msg_size(&out_msg)) rc = c_monitored_queue(ins, outs, mons, &in_msg, &out_msg, swap_ids) try: _check_rc(rc) except InterruptedSystemCall: continue else: break return rc __all__ = ['monitored_queue'] pyzmq-16.0.2/zmq/devices/monitoredqueuedevice.py000066400000000000000000000037121301503633700220140ustar00rootroot00000000000000"""MonitoredQueue classes and functions.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from zmq import ZMQError, PUB from zmq.devices.proxydevice import ProxyBase, Proxy, ThreadProxy, ProcessProxy from zmq.devices.monitoredqueue import monitored_queue class MonitoredQueueBase(ProxyBase): """Base class for overriding methods.""" _in_prefix = b'' _out_prefix = b'' def __init__(self, in_type, out_type, mon_type=PUB, in_prefix=b'in', out_prefix=b'out'): ProxyBase.__init__(self, in_type=in_type, out_type=out_type, mon_type=mon_type) self._in_prefix = in_prefix self._out_prefix = out_prefix def run_device(self): ins,outs,mons = self._setup_sockets() monitored_queue(ins, outs, mons, self._in_prefix, self._out_prefix) class MonitoredQueue(MonitoredQueueBase, Proxy): """Class for running monitored_queue in the background. See zmq.devices.Device for most of the spec. MonitoredQueue differs from Proxy, only in that it adds a ``prefix`` to messages sent on the monitor socket, with a different prefix for each direction. MQ also supports ROUTER on both sides, which zmq.proxy does not. If a message arrives on `in_sock`, it will be prefixed with `in_prefix` on the monitor socket. If it arrives on out_sock, it will be prefixed with `out_prefix`. A PUB socket is the most logical choice for the mon_socket, but it is not required. """ pass class ThreadMonitoredQueue(MonitoredQueueBase, ThreadProxy): """Run zmq.monitored_queue in a background thread. See MonitoredQueue and Proxy for details. """ pass class ProcessMonitoredQueue(MonitoredQueueBase, ProcessProxy): """Run zmq.monitored_queue in a background thread. See MonitoredQueue and Proxy for details. 
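# --- Editorial sketch (not part of the pyzmq sources) -----------------------
# Illustrates the MonitoredQueue device described above, run in a thread:
# every message relayed between frontend and backend is also published on the
# monitor socket with a b'in' or b'out' prefix. The endpoints are illustrative
# assumptions.
import zmq
from zmq.devices import ThreadMonitoredQueue

mq = ThreadMonitoredQueue(zmq.ROUTER, zmq.DEALER, zmq.PUB,
                          in_prefix=b'in', out_prefix=b'out')
mq.bind_in("tcp://127.0.0.1:5561")    # frontend
mq.bind_out("tcp://127.0.0.1:5562")   # backend
mq.bind_mon("tcp://127.0.0.1:5563")   # SUB sockets connect here to observe
mq.setsockopt_mon(zmq.LINGER, 0)
mq.start()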
""" __all__ = [ 'MonitoredQueue', 'ThreadMonitoredQueue', 'ProcessMonitoredQueue' ] pyzmq-16.0.2/zmq/devices/proxydevice.py000066400000000000000000000047031301503633700201310ustar00rootroot00000000000000"""Proxy classes and functions.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import zmq from zmq.devices.basedevice import Device, ThreadDevice, ProcessDevice class ProxyBase(object): """Base class for overriding methods.""" def __init__(self, in_type, out_type, mon_type=zmq.PUB): Device.__init__(self, in_type=in_type, out_type=out_type) self.mon_type = mon_type self._mon_binds = [] self._mon_connects = [] self._mon_sockopts = [] def bind_mon(self, addr): """Enqueue ZMQ address for binding on mon_socket. See zmq.Socket.bind for details. """ self._mon_binds.append(addr) def connect_mon(self, addr): """Enqueue ZMQ address for connecting on mon_socket. See zmq.Socket.bind for details. """ self._mon_connects.append(addr) def setsockopt_mon(self, opt, value): """Enqueue setsockopt(opt, value) for mon_socket See zmq.Socket.setsockopt for details. """ self._mon_sockopts.append((opt, value)) def _setup_sockets(self): ins,outs = Device._setup_sockets(self) ctx = self._context mons = ctx.socket(self.mon_type) # set sockopts (must be done first, in case of zmq.IDENTITY) for opt,value in self._mon_sockopts: mons.setsockopt(opt, value) for iface in self._mon_binds: mons.bind(iface) for iface in self._mon_connects: mons.connect(iface) return ins,outs,mons def run_device(self): ins,outs,mons = self._setup_sockets() zmq.proxy(ins, outs, mons) class Proxy(ProxyBase, Device): """Threadsafe Proxy object. See zmq.devices.Device for most of the spec. This subclass adds a _mon version of each _{in|out} method, for configuring the monitor socket. A Proxy is a 3-socket ZMQ Device that functions just like a QUEUE, except each message is also sent out on the monitor socket. A PUB socket is the most logical choice for the mon_socket, but it is not required. """ pass class ThreadProxy(ProxyBase, ThreadDevice): """Proxy in a Thread. See Proxy for more.""" pass class ProcessProxy(ProxyBase, ProcessDevice): """Proxy in a Process. See Proxy for more.""" pass __all__ = [ 'Proxy', 'ThreadProxy', 'ProcessProxy', ] pyzmq-16.0.2/zmq/error.py000066400000000000000000000122241301503633700152740ustar00rootroot00000000000000"""0MQ Error classes and functions.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from errno import EINTR class ZMQBaseError(Exception): """Base exception class for 0MQ errors in Python.""" pass class ZMQError(ZMQBaseError): """Wrap an errno style error. Parameters ---------- errno : int The ZMQ errno or None. If None, then ``zmq_errno()`` is called and used. msg : string Description of the error or None. """ errno = None def __init__(self, errno=None, msg=None): """Wrap an errno style error. Parameters ---------- errno : int The ZMQ errno or None. If None, then ``zmq_errno()`` is called and used. msg : string Description of the error or None. """ from zmq.backend import strerror, zmq_errno if errno is None: errno = zmq_errno() if isinstance(errno, int): self.errno = errno if msg is None: self.strerror = strerror(errno) else: self.strerror = msg else: if msg is None: self.strerror = str(errno) else: self.strerror = msg # flush signals, because there could be a SIGINT # waiting to pounce, resulting in uncaught exceptions. 
# Doing this here means getting SIGINT during a blocking # libzmq call will raise a *catchable* KeyboardInterrupt # PyErr_CheckSignals() def __str__(self): return self.strerror def __repr__(self): return "%s('%s')" % (self.__class__.__name__, str(self)) class ZMQBindError(ZMQBaseError): """An error for ``Socket.bind_to_random_port()``. See Also -------- .Socket.bind_to_random_port """ pass class NotDone(ZMQBaseError): """Raised when timeout is reached while waiting for 0MQ to finish with a Message See Also -------- .MessageTracker.wait : object for tracking when ZeroMQ is done """ pass class ContextTerminated(ZMQError): """Wrapper for zmq.ETERM .. versionadded:: 13.0 """ def __init__(self, errno='ignored', msg='ignored'): from zmq import ETERM super(ContextTerminated, self).__init__(ETERM) class Again(ZMQError): """Wrapper for zmq.EAGAIN .. versionadded:: 13.0 """ def __init__(self, errno='ignored', msg='ignored'): from zmq import EAGAIN super(Again, self).__init__(EAGAIN) try: InterruptedError except NameError: InterruptedError = OSError class InterruptedSystemCall(ZMQError, InterruptedError): """Wrapper for EINTR This exception should be caught internally in pyzmq to retry system calls, and not propagate to the user. .. versionadded:: 14.7 """ def __init__(self, errno='ignored', msg='ignored'): super(InterruptedSystemCall, self).__init__(EINTR) def __str__(self): s = super(InterruptedSystemCall, self).__str__() return s + ": This call should have been retried. Please report this to pyzmq." def _check_rc(rc, errno=None): """internal utility for checking zmq return condition and raising the appropriate Exception class """ if rc == -1: if errno is None: from zmq.backend import zmq_errno errno = zmq_errno() from zmq import EAGAIN, ETERM if errno == EINTR: raise InterruptedSystemCall(errno) elif errno == EAGAIN: raise Again(errno) elif errno == ETERM: raise ContextTerminated(errno) else: raise ZMQError(errno) _zmq_version_info = None _zmq_version = None class ZMQVersionError(NotImplementedError): """Raised when a feature is not provided by the linked version of libzmq. .. versionadded:: 14.2 """ min_version = None def __init__(self, min_version, msg='Feature'): global _zmq_version if _zmq_version is None: from zmq import zmq_version _zmq_version = zmq_version() self.msg = msg self.min_version = min_version self.version = _zmq_version def __repr__(self): return "ZMQVersionError('%s')" % str(self) def __str__(self): return "%s requires libzmq >= %s, have %s" % (self.msg, self.min_version, self.version) def _check_version(min_version_info, msg='Feature'): """Check for libzmq raises ZMQVersionError if current zmq version is not at least min_version min_version_info is a tuple of integers, and will be compared against zmq.zmq_version_info(). 
""" global _zmq_version_info if _zmq_version_info is None: from zmq import zmq_version_info _zmq_version_info = zmq_version_info() if _zmq_version_info < min_version_info: min_version = '.'.join(str(v) for v in min_version_info) raise ZMQVersionError(min_version, msg) __all__ = [ 'ZMQBaseError', 'ZMQBindError', 'ZMQError', 'NotDone', 'ContextTerminated', 'InterruptedSystemCall', 'Again', 'ZMQVersionError', ] pyzmq-16.0.2/zmq/eventloop/000077500000000000000000000000001301503633700156035ustar00rootroot00000000000000pyzmq-16.0.2/zmq/eventloop/__init__.py000066400000000000000000000001521301503633700177120ustar00rootroot00000000000000"""A Tornado based event loop for PyZMQ.""" from zmq.eventloop.ioloop import IOLoop __all__ = ['IOLoop']pyzmq-16.0.2/zmq/eventloop/future.py000066400000000000000000000364461301503633700175040ustar00rootroot00000000000000"""Future-returning APIs for coroutines.""" # Copyright (c) PyZMQ Developers. # Distributed under the terms of the Modified BSD License. from collections import namedtuple from itertools import chain from zmq import POLLOUT, POLLIN try: from tornado.concurrent import Future except ImportError: from .minitornado.concurrent import Future class CancelledError(Exception): pass class _TornadoFuture(Future): """Subclass Tornado Future, reinstating cancellation.""" def cancel(self): if self.done(): return False self.set_exception(CancelledError()) return True def cancelled(self): return self.done() and isinstance(self.exception(), CancelledError) import zmq as _zmq from zmq.eventloop.ioloop import IOLoop _FutureEvent = namedtuple('_FutureEvent', ('future', 'kind', 'kwargs', 'msg')) # mixins for tornado/asyncio compatibility class _AsyncTornado(object): _Future = _TornadoFuture _READ = IOLoop.READ _WRITE = IOLoop.WRITE def _default_loop(self): return IOLoop.current() class _AsyncPoller(_zmq.Poller): """Poller that returns a Future on poll, instead of blocking.""" def poll(self, timeout=-1): """Return a Future for a poll event""" future = self._Future() if timeout == 0: try: result = super(_AsyncPoller, self).poll(0) except Exception as e: future.set_exception(e) else: future.set_result(result) return future loop = self._default_loop() # register Future to be called as soon as any event is available on any socket watcher = self._Future() # watch raw sockets: raw_sockets = [] def wake_raw(*args): if not watcher.done(): watcher.set_result(None) watcher.add_done_callback(lambda f: self._unwatch_raw_sockets(loop, *raw_sockets)) for socket, mask in self.sockets: if isinstance(socket, _AsyncSocket): if mask & _zmq.POLLIN: socket._add_recv_event('poll', future=watcher) if mask & _zmq.POLLOUT: socket._add_send_event('poll', future=watcher) else: raw_sockets.append(socket) evt = 0 if mask & _zmq.POLLIN: evt |= self._READ if mask & _zmq.POLLOUT: evt |= self._WRITE self._watch_raw_socket(loop, socket, evt, wake_raw) def on_poll_ready(f): if future.done(): return if watcher.exception(): future.set_exception(watcher.exception()) else: try: result = super(_AsyncPoller, self).poll(0) except Exception as e: future.set_exception(e) else: future.set_result(result) watcher.add_done_callback(on_poll_ready) if timeout is not None and timeout > 0: # schedule cancel to fire on poll timeout, if any def trigger_timeout(): if not watcher.done(): watcher.set_result(None) timeout_handle = loop.call_later( 1e-3 * timeout, trigger_timeout ) def cancel_timeout(f): if hasattr(timeout_handle, 'cancel'): timeout_handle.cancel() else: loop.remove_timeout(timeout_handle) 
future.add_done_callback(cancel_timeout) def cancel_watcher(f): if not watcher.done(): watcher.cancel() future.add_done_callback(cancel_watcher) return future class Poller(_AsyncTornado, _AsyncPoller): def _watch_raw_socket(self, loop, socket, evt, f): """Schedule callback for a raw socket""" loop.add_handler(socket, lambda *args: f(), evt) def _unwatch_raw_sockets(self, loop, *sockets): """Unschedule callback for a raw socket""" for socket in sockets: loop.remove_handler(socket) class _AsyncSocket(_zmq.Socket): _recv_futures = None _send_futures = None _state = 0 _shadow_sock = None _poller_class = Poller io_loop = None def __init__(self, context, socket_type, io_loop=None): super(_AsyncSocket, self).__init__(context, socket_type) self.io_loop = io_loop or self._default_loop() self._recv_futures = [] self._send_futures = [] self._state = 0 self._shadow_sock = _zmq.Socket.shadow(self.underlying) self._init_io_state() def close(self, linger=None): if not self.closed: for event in chain(self._recv_futures, self._send_futures): if not event.future.done(): event.future.cancel() self._clear_io_state() super(_AsyncSocket, self).close(linger=linger) close.__doc__ = _zmq.Socket.close.__doc__ def recv_multipart(self, flags=0, copy=True, track=False): """Receive a complete multipart zmq message. Returns a Future whose result will be a multipart message. """ return self._add_recv_event('recv_multipart', dict(flags=flags, copy=copy, track=track) ) def recv(self, flags=0, copy=True, track=False): """Receive a single zmq frame. Returns a Future, whose result will be the received frame. Recommend using recv_multipart instead. """ return self._add_recv_event('recv', dict(flags=flags, copy=copy, track=track) ) def send_multipart(self, msg, flags=0, copy=True, track=False): """Send a complete multipart zmq message. Returns a Future that resolves when sending is complete. """ return self._add_send_event('send_multipart', msg=msg, kwargs=dict(flags=flags, copy=copy, track=track), ) def send(self, msg, flags=0, copy=True, track=False): """Send a single zmq frame. Returns a Future that resolves when sending is complete. Recommend using send_multipart instead. """ return self._add_send_event('send', msg=msg, kwargs=dict(flags=flags, copy=copy, track=track), ) def _deserialize(self, recvd, load): """Deserialize with Futures""" f = self._Future() def _chain(_): """Chain result through serialization to recvd""" if f.done(): return if recvd.exception(): f.set_exception(recvd.exception()) else: buf = recvd.result() try: loaded = load(buf) except Exception as e: f.set_exception(e) else: f.set_result(loaded) recvd.add_done_callback(_chain) def _chain_cancel(_): """Chain cancellation from f to recvd""" if recvd.done(): return if f.cancelled(): recvd.cancel() f.add_done_callback(_chain_cancel) return f def poll(self, timeout=None, flags=_zmq.POLLIN): """poll the socket for events returns a Future for the poll results. 
""" if self.closed: raise _zmq.ZMQError(_zmq.ENOTSUP) p = self._poller_class() p.register(self, flags) f = p.poll(timeout) future = self._Future() def unwrap_result(f): if future.done(): return if f.exception(): future.set_exception(f.exception()) else: evts = dict(f.result()) future.set_result(evts.get(self, 0)) f.add_done_callback(unwrap_result) return future def _add_timeout(self, future, timeout): """Add a timeout for a send or recv Future""" def future_timeout(): if future.done(): # future already resolved, do nothing return # pop the entry from _recv_futures for f_idx, (f, kind, kwargs, _) in enumerate(self._recv_futures): if f == future: self._recv_futures.pop(f_idx) break # pop the entry from _send_futures for f_idx, (f, kind, kwargs, _) in enumerate(self._send_futures): if f == future: self._send_futures.pop(f_idx) break # raise EAGAIN future.set_exception(_zmq.Again()) self._call_later(timeout, future_timeout) def _call_later(self, delay, callback): """Schedule a function to be called later Override for different IOLoop implementations Tornado and asyncio happen to both have ioloop.call_later with the same signature. """ self.io_loop.call_later(delay, callback) def _add_recv_event(self, kind, kwargs=None, future=None): """Add a recv event, returning the corresponding Future""" f = future or self._Future() if kind.startswith('recv') and kwargs.get('flags', 0) & _zmq.DONTWAIT: # short-circuit non-blocking calls recv = getattr(self._shadow_sock, kind) try: r = recv(**kwargs) except Exception as e: f.set_exception(e) else: f.set_result(r) return f # we add it to the list of futures before we add the timeout as the # timeout will remove the future from recv_futures to avoid leaks self._recv_futures.append( _FutureEvent(f, kind, kwargs, msg=None) ) if hasattr(_zmq, 'RCVTIMEO'): timeout_ms = self._shadow_sock.rcvtimeo if timeout_ms >= 0: self._add_timeout(f, timeout_ms * 1e-3) if self.events & POLLIN: # recv immediately, if we can self._handle_recv() if self._recv_futures: self._add_io_state(self._READ) return f def _add_send_event(self, kind, msg=None, kwargs=None, future=None): """Add a send event, returning the corresponding Future""" f = future or self._Future() if kind.startswith('send') and kwargs.get('flags', 0) & _zmq.DONTWAIT: # short-circuit non-blocking calls send = getattr(self._shadow_sock, kind) try: r = send(msg, **kwargs) except Exception as e: f.set_exception(e) else: f.set_result(r) return f # we add it to the list of futures before we add the timeout as the # timeout will remove the future from recv_futures to avoid leaks self._send_futures.append( _FutureEvent(f, kind, kwargs=kwargs, msg=msg) ) if hasattr(_zmq, 'SNDTIMEO'): timeout_ms = self._shadow_sock.sndtimeo if timeout_ms >= 0: self._add_timeout(f, timeout_ms * 1e-3) if self.events & POLLOUT: # send immediately if we can self._handle_send() if self._send_futures: self._add_io_state(self._WRITE) return f def _handle_recv(self): """Handle recv events""" if not self._shadow_sock.events & POLLIN: # event triggered, but state may have been changed between trigger and callback return f = None while self._recv_futures: f, kind, kwargs, _ = self._recv_futures.pop(0) # skip any cancelled futures if f.done(): f = None else: break if not self._recv_futures: self._drop_io_state(self._READ) if f is None: return if kind == 'poll': # on poll event, just signal ready, nothing else. 
f.set_result(None) return elif kind == 'recv_multipart': recv = self._shadow_sock.recv_multipart elif kind == 'recv': recv = self._shadow_sock.recv else: raise ValueError("Unhandled recv event type: %r" % kind) kwargs['flags'] |= _zmq.DONTWAIT try: result = recv(**kwargs) except Exception as e: f.set_exception(e) else: f.set_result(result) def _handle_send(self): if not self._shadow_sock.events & POLLOUT: # event triggered, but state may have been changed between trigger and callback return f = None while self._send_futures: f, kind, kwargs, msg = self._send_futures.pop(0) # skip any cancelled futures if f.done(): f = None else: break if not self._send_futures: self._drop_io_state(self._WRITE) if f is None: return if kind == 'poll': # on poll event, just signal ready, nothing else. f.set_result(None) return elif kind == 'send_multipart': send = self._shadow_sock.send_multipart elif kind == 'send': send = self._shadow_sock.send else: raise ValueError("Unhandled send event type: %r" % kind) kwargs['flags'] |= _zmq.DONTWAIT try: result = send(msg, **kwargs) except Exception as e: f.set_exception(e) else: f.set_result(result) # event masking from ZMQStream def _handle_events(self, fd, events): """Dispatch IO events to _handle_recv, etc.""" if events & self._READ: self._handle_recv() if events & self._WRITE: self._handle_send() def _add_io_state(self, state): """Add io_state to poller.""" if not self._state & state: self._state = self._state | state self._update_handler(self._state) def _drop_io_state(self, state): """Stop poller from watching an io_state.""" if self._state & state: self._state = self._state & (~state) self._update_handler(self._state) def _update_handler(self, state): """Update IOLoop handler with state.""" self._state = state self.io_loop.update_handler(self, state) def _init_io_state(self): """initialize the ioloop event handler""" self.io_loop.add_handler(self, self._handle_events, self._state) def _clear_io_state(self): """unregister the ioloop event handler called once during close """ self.io_loop.remove_handler(self) class Socket(_AsyncTornado, _AsyncSocket): pass class Context(_zmq.Context): io_loop = None @staticmethod def _socket_class(self, socket_type): return Socket(self, socket_type, io_loop=self.io_loop) def __init__(self, *args, **kwargs): io_loop = kwargs.pop('io_loop', None) super(Context, self).__init__(*args, **kwargs) self.io_loop = io_loop or IOLoop.current() pyzmq-16.0.2/zmq/eventloop/ioloop.py000066400000000000000000000155721301503633700174700ustar00rootroot00000000000000# coding: utf-8 """tornado IOLoop API with zmq compatibility If you have tornado ≥ 3.0, this is a subclass of tornado's IOLoop, otherwise we ship a minimal subset of tornado in zmq.eventloop.minitornado. The minimal shipped version of tornado's IOLoop does not include support for concurrent futures - this will only be available if you have tornado ≥ 3.0. """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
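# ---------------------------------------------------------------------------
# Editorial sketch (not part of pyzmq): typical use of the Future-returning
# Socket/Context defined above in zmq/eventloop/future.py from a tornado
# coroutine.  The coroutine name and endpoint URL are hypothetical, and
# tornado >= 3.0 is assumed (see minitornado/concurrent.py).  Kept as a
# comment block so this module's __future__ import stays first.
#
#     from tornado import gen
#     import zmq
#     from zmq.eventloop.future import Context
#
#     ctx = Context()
#
#     @gen.coroutine
#     def echo_once(url='tcp://127.0.0.1:5555'):
#         sock = ctx.socket(zmq.REP)
#         sock.bind(url)
#         msg = yield sock.recv_multipart()   # recv returns a Future
#         yield sock.send_multipart(msg)      # send also returns a Future
#         sock.close()
#
# Each send/recv call resolves its Future on the shared IOLoop, so a plain
# ``yield`` suspends the coroutine until the socket is actually ready.
# ---------------------------------------------------------------------------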
from __future__ import absolute_import, division, with_statement import os import time import warnings from zmq import ( Poller, POLLIN, POLLOUT, POLLERR, ZMQError, ETERM, ) try: import tornado tornado_version = tornado.version_info except (ImportError, AttributeError): tornado_version = () try: # tornado ≥ 3 from tornado.ioloop import PollIOLoop, PeriodicCallback from tornado.log import gen_log except ImportError: from .minitornado.ioloop import PollIOLoop, PeriodicCallback from .minitornado.log import gen_log class DelayedCallback(PeriodicCallback): """Schedules the given callback to be called once. The callback is called once, after callback_time milliseconds. `start` must be called after the DelayedCallback is created. The timeout is calculated from when `start` is called. """ def __init__(self, callback, callback_time, io_loop=None): # PeriodicCallback require callback_time to be positive warnings.warn("""DelayedCallback is deprecated. Use loop.add_timeout instead.""", DeprecationWarning) callback_time = max(callback_time, 1e-3) super(DelayedCallback, self).__init__(callback, callback_time, io_loop) def start(self): """Starts the timer.""" self._running = True self._firstrun = True self._next_timeout = time.time() + self.callback_time / 1000.0 self.io_loop.add_timeout(self._next_timeout, self._run) def _run(self): if not self._running: return self._running = False try: self.callback() except Exception: gen_log.error("Error in delayed callback", exc_info=True) class ZMQPoller(object): """A poller that can be used in the tornado IOLoop. This simply wraps a regular zmq.Poller, scaling the timeout by 1000, so that it is in seconds rather than milliseconds. """ def __init__(self): self._poller = Poller() @staticmethod def _map_events(events): """translate IOLoop.READ/WRITE/ERROR event masks into zmq.POLLIN/OUT/ERR""" z_events = 0 if events & IOLoop.READ: z_events |= POLLIN if events & IOLoop.WRITE: z_events |= POLLOUT if events & IOLoop.ERROR: z_events |= POLLERR return z_events @staticmethod def _remap_events(z_events): """translate zmq.POLLIN/OUT/ERR event masks into IOLoop.READ/WRITE/ERROR""" events = 0 if z_events & POLLIN: events |= IOLoop.READ if z_events & POLLOUT: events |= IOLoop.WRITE if z_events & POLLERR: events |= IOLoop.ERROR return events def register(self, fd, events): return self._poller.register(fd, self._map_events(events)) def modify(self, fd, events): return self._poller.modify(fd, self._map_events(events)) def unregister(self, fd): return self._poller.unregister(fd) def poll(self, timeout): """poll in seconds rather than milliseconds. Event masks will be IOLoop.READ/WRITE/ERROR """ z_events = self._poller.poll(1000*timeout) return [ (fd,self._remap_events(evt)) for (fd,evt) in z_events ] def close(self): pass class ZMQIOLoop(PollIOLoop): """ZMQ subclass of tornado's IOLoop Minor modifications, so that .current/.instance return self """ _zmq_impl = ZMQPoller def initialize(self, impl=None, **kwargs): impl = self._zmq_impl() if impl is None else impl super(ZMQIOLoop, self).initialize(impl=impl, **kwargs) @classmethod def instance(cls, *args, **kwargs): """Returns a global `IOLoop` instance. Most applications have a single, global `IOLoop` running on the main thread. Use this method to get this instance from another thread. To get the current thread's `IOLoop`, use `current()`. 
""" # install ZMQIOLoop as the active IOLoop implementation # when using tornado 3 if tornado_version >= (3,): PollIOLoop.configure(cls) loop = PollIOLoop.instance(*args, **kwargs) if not isinstance(loop, cls): warnings.warn("IOLoop.current expected instance of %r, got %r" % (cls, loop), RuntimeWarning, stacklevel=2, ) return loop @classmethod def current(cls, *args, **kwargs): """Returns the current thread’s IOLoop. """ # install ZMQIOLoop as the active IOLoop implementation # when using tornado 3 if tornado_version >= (3,): PollIOLoop.configure(cls) loop = PollIOLoop.current(*args, **kwargs) if not isinstance(loop, cls): warnings.warn("IOLoop.current expected instance of %r, got %r" % (cls, loop), RuntimeWarning, stacklevel=2, ) return loop def start(self): try: super(ZMQIOLoop, self).start() except ZMQError as e: if e.errno == ETERM: # quietly return on ETERM pass else: raise if (3, 0) <= tornado_version < (3, 1): def backport_close(self, all_fds=False): """backport IOLoop.close to 3.0 from 3.1 (supports fd.close() method)""" from zmq.eventloop.minitornado.ioloop import PollIOLoop as mini_loop return mini_loop.close.__get__(self)(all_fds) ZMQIOLoop.close = backport_close # public API name IOLoop = ZMQIOLoop def install(): """set the tornado IOLoop instance with the pyzmq IOLoop. After calling this function, tornado's IOLoop.instance() and pyzmq's IOLoop.instance() will return the same object. An assertion error will be raised if tornado's IOLoop has been initialized prior to calling this function. """ from tornado import ioloop # check if tornado's IOLoop is already initialized to something other # than the pyzmq IOLoop instance: assert (not ioloop.IOLoop.initialized()) or \ ioloop.IOLoop.instance() is IOLoop.instance(), "tornado IOLoop already initialized" if tornado_version >= (3,): # tornado 3 has an official API for registering new defaults, yay! ioloop.IOLoop.configure(ZMQIOLoop) else: # we have to set the global instance explicitly ioloop.IOLoop._instance = IOLoop.instance() pyzmq-16.0.2/zmq/eventloop/minitornado/000077500000000000000000000000001301503633700201265ustar00rootroot00000000000000pyzmq-16.0.2/zmq/eventloop/minitornado/__init__.py000066400000000000000000000000001301503633700222250ustar00rootroot00000000000000pyzmq-16.0.2/zmq/eventloop/minitornado/concurrent.py000066400000000000000000000007131301503633700226630ustar00rootroot00000000000000"""pyzmq does not ship tornado's futures, this just raises informative NotImplementedErrors to avoid having to change too much code. """ class NotImplementedFuture(object): def __init__(self, *args, **kwargs): raise NotImplementedError("pyzmq does not ship tornado's Futures, " "install tornado >= 3.0 for future support." ) Future = TracebackFuture = NotImplementedFuture def is_future(x): return isinstance(x, Future) pyzmq-16.0.2/zmq/eventloop/minitornado/ioloop.py000066400000000000000000001206071301503633700220070ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""An I/O event loop for non-blocking sockets. Typical applications will use a single `IOLoop` object, in the `IOLoop.instance` singleton. The `IOLoop.start` method should usually be called at the end of the ``main()`` function. Atypical applications may use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest` case. In addition to I/O events, the `IOLoop` can also schedule time-based events. `IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`. """ from __future__ import absolute_import, division, print_function, with_statement import datetime import errno import functools import heapq import itertools import logging import numbers import os import select import sys import threading import time import traceback import math from .concurrent import TracebackFuture, is_future from .log import app_log, gen_log from . import stack_context from .util import Configurable, errno_from_exception, timedelta_to_seconds try: import signal except ImportError: signal = None try: import thread # py2 except ImportError: import _thread as thread # py3 from .platform.auto import set_close_exec, Waker _POLL_TIMEOUT = 3600.0 class TimeoutError(Exception): pass class IOLoop(Configurable): """A level-triggered I/O loop. We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they are available, or else we fall back on select(). If you are implementing a system that needs to handle thousands of simultaneous connections, you should use a system that supports either ``epoll`` or ``kqueue``. Example usage for a simple TCP server: .. testcode:: import errno import functools import tornado.ioloop import socket def connection_ready(sock, fd, events): while True: try: connection, address = sock.accept() except socket.error as e: if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN): raise return connection.setblocking(0) handle_connection(connection, address) if __name__ == '__main__': sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setblocking(0) sock.bind(("", port)) sock.listen(128) io_loop = tornado.ioloop.IOLoop.current() callback = functools.partial(connection_ready, sock) io_loop.add_handler(sock.fileno(), callback, io_loop.READ) io_loop.start() .. testoutput:: :hide: By default, a newly-constructed `IOLoop` becomes the thread's current `IOLoop`, unless there already is a current `IOLoop`. This behavior can be controlled with the ``make_current`` argument to the `IOLoop` constructor: if ``make_current=True``, the new `IOLoop` will always try to become current and it raises an error if there is already a current instance. If ``make_current=False``, the new `IOLoop` will not try to become current. .. versionchanged:: 4.2 Added the ``make_current`` keyword argument to the `IOLoop` constructor. """ # Constants from the epoll module _EPOLLIN = 0x001 _EPOLLPRI = 0x002 _EPOLLOUT = 0x004 _EPOLLERR = 0x008 _EPOLLHUP = 0x010 _EPOLLRDHUP = 0x2000 _EPOLLONESHOT = (1 << 30) _EPOLLET = (1 << 31) # Our events map exactly to the epoll events NONE = 0 READ = _EPOLLIN WRITE = _EPOLLOUT ERROR = _EPOLLERR | _EPOLLHUP # Global lock for creating global IOLoop instance _instance_lock = threading.Lock() _current = threading.local() @staticmethod def instance(): """Returns a global `IOLoop` instance. Most applications have a single, global `IOLoop` running on the main thread. Use this method to get this instance from another thread. In most other cases, it is better to use `current()` to get the current thread's `IOLoop`. 
""" if not hasattr(IOLoop, "_instance"): with IOLoop._instance_lock: if not hasattr(IOLoop, "_instance"): # New instance after double check IOLoop._instance = IOLoop() return IOLoop._instance @staticmethod def initialized(): """Returns true if the singleton instance has been created.""" return hasattr(IOLoop, "_instance") def install(self): """Installs this `IOLoop` object as the singleton instance. This is normally not necessary as `instance()` will create an `IOLoop` on demand, but you may want to call `install` to use a custom subclass of `IOLoop`. """ assert not IOLoop.initialized() IOLoop._instance = self @staticmethod def clear_instance(): """Clear the global `IOLoop` instance. .. versionadded:: 4.0 """ if hasattr(IOLoop, "_instance"): del IOLoop._instance @staticmethod def current(instance=True): """Returns the current thread's `IOLoop`. If an `IOLoop` is currently running or has been marked as current by `make_current`, returns that instance. If there is no current `IOLoop`, returns `IOLoop.instance()` (i.e. the main thread's `IOLoop`, creating one if necessary) if ``instance`` is true. In general you should use `IOLoop.current` as the default when constructing an asynchronous object, and use `IOLoop.instance` when you mean to communicate to the main thread from a different one. .. versionchanged:: 4.1 Added ``instance`` argument to control the fallback to `IOLoop.instance()`. """ current = getattr(IOLoop._current, "instance", None) if current is None and instance: return IOLoop.instance() return current def make_current(self): """Makes this the `IOLoop` for the current thread. An `IOLoop` automatically becomes current for its thread when it is started, but it is sometimes useful to call `make_current` explicitly before starting the `IOLoop`, so that code run at startup time can find the right instance. .. versionchanged:: 4.1 An `IOLoop` created while there is no current `IOLoop` will automatically become current. """ IOLoop._current.instance = self @staticmethod def clear_current(): IOLoop._current.instance = None @classmethod def configurable_base(cls): return IOLoop @classmethod def configurable_default(cls): # this is the only patch to IOLoop: from zmq.eventloop.ioloop import ZMQIOLoop return ZMQIOLoop if hasattr(select, "epoll"): from tornado.platform.epoll import EPollIOLoop return EPollIOLoop if hasattr(select, "kqueue"): # Python 2.6+ on BSD or Mac from tornado.platform.kqueue import KQueueIOLoop return KQueueIOLoop from tornado.platform.select import SelectIOLoop return SelectIOLoop def initialize(self, make_current=None): if make_current is None: if IOLoop.current(instance=False) is None: self.make_current() elif make_current: if IOLoop.current(instance=False) is not None: raise RuntimeError("current IOLoop already exists") self.make_current() def close(self, all_fds=False): """Closes the `IOLoop`, freeing any resources used. If ``all_fds`` is true, all file descriptors registered on the IOLoop will be closed (not just the ones created by the `IOLoop` itself). Many applications will only use a single `IOLoop` that runs for the entire lifetime of the process. In that case closing the `IOLoop` is not necessary since everything will be cleaned up when the process exits. `IOLoop.close` is provided mainly for scenarios such as unit tests, which create and destroy a large number of ``IOLoops``. An `IOLoop` must be completely stopped before it can be closed. 
This means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must be allowed to return before attempting to call `IOLoop.close()`. Therefore the call to `close` will usually appear just after the call to `start` rather than near the call to `stop`. .. versionchanged:: 3.1 If the `IOLoop` implementation supports non-integer objects for "file descriptors", those objects will have their ``close`` method when ``all_fds`` is true. """ raise NotImplementedError() def add_handler(self, fd, handler, events): """Registers the given handler to receive the given events for ``fd``. The ``fd`` argument may either be an integer file descriptor or a file-like object with a ``fileno()`` method (and optionally a ``close()`` method, which may be called when the `IOLoop` is shut down). The ``events`` argument is a bitwise or of the constants ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``. When an event occurs, ``handler(fd, events)`` will be run. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ raise NotImplementedError() def update_handler(self, fd, events): """Changes the events we listen for ``fd``. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ raise NotImplementedError() def remove_handler(self, fd): """Stop listening for events on ``fd``. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ raise NotImplementedError() def set_blocking_signal_threshold(self, seconds, action): """Sends a signal if the `IOLoop` is blocked for more than ``s`` seconds. Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy platform. The action parameter is a Python signal handler. Read the documentation for the `signal` module for more information. If ``action`` is None, the process will be killed if it is blocked for too long. """ raise NotImplementedError() def set_blocking_log_threshold(self, seconds): """Logs a stack trace if the `IOLoop` is blocked for more than ``s`` seconds. Equivalent to ``set_blocking_signal_threshold(seconds, self.log_stack)`` """ self.set_blocking_signal_threshold(seconds, self.log_stack) def log_stack(self, signal, frame): """Signal handler to log the stack trace of the current thread. For use with `set_blocking_signal_threshold`. """ gen_log.warning('IOLoop blocked for %f seconds in\n%s', self._blocking_signal_threshold, ''.join(traceback.format_stack(frame))) def start(self): """Starts the I/O loop. The loop will run until one of the callbacks calls `stop()`, which will make the loop stop after the current event iteration completes. """ raise NotImplementedError() def _setup_logging(self): """The IOLoop catches and logs exceptions, so it's important that log output be visible. However, python's default behavior for non-root loggers (prior to python 3.2) is to print an unhelpful "no handlers could be found" message rather than the actual log entry, so we must explicitly configure logging if we've made it this far without anything. This method should be called from start() in subclasses. """ if not any([logging.getLogger().handlers, logging.getLogger('tornado').handlers, logging.getLogger('tornado.application').handlers]): logging.basicConfig() def stop(self): """Stop the I/O loop. If the event loop is not currently running, the next call to `start()` will return immediately. 
To use asynchronous methods from otherwise-synchronous code (such as unit tests), you can start and stop the event loop like this:: ioloop = IOLoop() async_method(ioloop=ioloop, callback=ioloop.stop) ioloop.start() ``ioloop.start()`` will return after ``async_method`` has run its callback, whether that callback was invoked before or after ``ioloop.start``. Note that even after `stop` has been called, the `IOLoop` is not completely stopped until `IOLoop.start` has also returned. Some work that was scheduled before the call to `stop` may still be run before the `IOLoop` shuts down. """ raise NotImplementedError() def run_sync(self, func, timeout=None): """Starts the `IOLoop`, runs the given function, and stops the loop. The function must return either a yieldable object or ``None``. If the function returns a yieldable object, the `IOLoop` will run until the yieldable is resolved (and `run_sync()` will return the yieldable's result). If it raises an exception, the `IOLoop` will stop and the exception will be re-raised to the caller. The keyword-only argument ``timeout`` may be used to set a maximum duration for the function. If the timeout expires, a `TimeoutError` is raised. This method is useful in conjunction with `tornado.gen.coroutine` to allow asynchronous calls in a ``main()`` function:: @gen.coroutine def main(): # do stuff... if __name__ == '__main__': IOLoop.current().run_sync(main) .. versionchanged:: 4.3 Returning a non-``None``, non-yieldable value is now an error. """ future_cell = [None] def run(): try: result = func() if result is not None: from tornado.gen import convert_yielded result = convert_yielded(result) except Exception: future_cell[0] = TracebackFuture() future_cell[0].set_exc_info(sys.exc_info()) else: if is_future(result): future_cell[0] = result else: future_cell[0] = TracebackFuture() future_cell[0].set_result(result) self.add_future(future_cell[0], lambda future: self.stop()) self.add_callback(run) if timeout is not None: timeout_handle = self.add_timeout(self.time() + timeout, self.stop) self.start() if timeout is not None: self.remove_timeout(timeout_handle) if not future_cell[0].done(): raise TimeoutError('Operation timed out after %s seconds' % timeout) return future_cell[0].result() def time(self): """Returns the current time according to the `IOLoop`'s clock. The return value is a floating-point number relative to an unspecified time in the past. By default, the `IOLoop`'s time function is `time.time`. However, it may be configured to use e.g. `time.monotonic` instead. Calls to `add_timeout` that pass a number instead of a `datetime.timedelta` should use this function to compute the appropriate time, so they can work no matter what time function is chosen. """ return time.time() def add_timeout(self, deadline, callback, *args, **kwargs): """Runs the ``callback`` at the time ``deadline`` from the I/O loop. Returns an opaque handle that may be passed to `remove_timeout` to cancel. ``deadline`` may be a number denoting a time (on the same scale as `IOLoop.time`, normally `time.time`), or a `datetime.timedelta` object for a deadline relative to the current time. Since Tornado 4.0, `call_later` is a more convenient alternative for the relative case since it does not require a timedelta object. Note that it is not safe to call `add_timeout` from other threads. Instead, you must use `add_callback` to transfer control to the `IOLoop`'s thread, and then call `add_timeout` from there. 
Subclasses of IOLoop must implement either `add_timeout` or `call_at`; the default implementations of each will call the other. `call_at` is usually easier to implement, but subclasses that wish to maintain compatibility with Tornado versions prior to 4.0 must use `add_timeout` instead. .. versionchanged:: 4.0 Now passes through ``*args`` and ``**kwargs`` to the callback. """ if isinstance(deadline, numbers.Real): return self.call_at(deadline, callback, *args, **kwargs) elif isinstance(deadline, datetime.timedelta): return self.call_at(self.time() + timedelta_to_seconds(deadline), callback, *args, **kwargs) else: raise TypeError("Unsupported deadline %r" % deadline) def call_later(self, delay, callback, *args, **kwargs): """Runs the ``callback`` after ``delay`` seconds have passed. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0 """ return self.call_at(self.time() + delay, callback, *args, **kwargs) def call_at(self, when, callback, *args, **kwargs): """Runs the ``callback`` at the absolute time designated by ``when``. ``when`` must be a number using the same reference point as `IOLoop.time`. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0 """ return self.add_timeout(when, callback, *args, **kwargs) def remove_timeout(self, timeout): """Cancels a pending timeout. The argument is a handle as returned by `add_timeout`. It is safe to call `remove_timeout` even if the callback has already been run. """ raise NotImplementedError() def add_callback(self, callback, *args, **kwargs): """Calls the given callback on the next I/O loop iteration. It is safe to call this method from any thread at any time, except from a signal handler. Note that this is the **only** method in `IOLoop` that makes this thread-safety guarantee; all other interaction with the `IOLoop` must be done from that `IOLoop`'s thread. `add_callback()` may be used to transfer control from other threads to the `IOLoop`'s thread. To add a callback from a signal handler, see `add_callback_from_signal`. """ raise NotImplementedError() def add_callback_from_signal(self, callback, *args, **kwargs): """Calls the given callback on the next I/O loop iteration. Safe for use from a Python signal handler; should not be used otherwise. Callbacks added with this method will be run without any `.stack_context`, to avoid picking up the context of the function that was interrupted by the signal. """ raise NotImplementedError() def spawn_callback(self, callback, *args, **kwargs): """Calls the given callback on the next IOLoop iteration. Unlike all other callback-related methods on IOLoop, ``spawn_callback`` does not associate the callback with its caller's ``stack_context``, so it is suitable for fire-and-forget callbacks that should not interfere with the caller. .. versionadded:: 4.0 """ with stack_context.NullContext(): self.add_callback(callback, *args, **kwargs) def add_future(self, future, callback): """Schedules a callback on the ``IOLoop`` when the given `.Future` is finished. The callback is invoked with one argument, the `.Future`. 
""" assert is_future(future) callback = stack_context.wrap(callback) future.add_done_callback( lambda future: self.add_callback(callback, future)) def _run_callback(self, callback): """Runs a callback with error handling. For use in subclasses. """ try: ret = callback() if ret is not None: from tornado import gen # Functions that return Futures typically swallow all # exceptions and store them in the Future. If a Future # makes it out to the IOLoop, ensure its exception (if any) # gets logged too. try: ret = gen.convert_yielded(ret) except gen.BadYieldError: # It's not unusual for add_callback to be used with # methods returning a non-None and non-yieldable # result, which should just be ignored. pass else: self.add_future(ret, lambda f: f.result()) except Exception: self.handle_callback_exception(callback) def handle_callback_exception(self, callback): """This method is called whenever a callback run by the `IOLoop` throws an exception. By default simply logs the exception as an error. Subclasses may override this method to customize reporting of exceptions. The exception itself is not passed explicitly, but is available in `sys.exc_info`. """ app_log.error("Exception in callback %r", callback, exc_info=True) def split_fd(self, fd): """Returns an (fd, obj) pair from an ``fd`` parameter. We accept both raw file descriptors and file-like objects as input to `add_handler` and related methods. When a file-like object is passed, we must retain the object itself so we can close it correctly when the `IOLoop` shuts down, but the poller interfaces favor file descriptors (they will accept file-like objects and call ``fileno()`` for you, but they always return the descriptor itself). This method is provided for use by `IOLoop` subclasses and should not generally be used by application code. .. versionadded:: 4.0 """ try: return fd.fileno(), fd except AttributeError: return fd, fd def close_fd(self, fd): """Utility method to close an ``fd``. If ``fd`` is a file-like object, we close it directly; otherwise we use `os.close`. This method is provided for use by `IOLoop` subclasses (in implementations of ``IOLoop.close(all_fds=True)`` and should not generally be used by application code. .. versionadded:: 4.0 """ try: try: fd.close() except AttributeError: os.close(fd) except OSError: pass class PollIOLoop(IOLoop): """Base class for IOLoops built around a select-like function. For concrete implementations, see `tornado.platform.epoll.EPollIOLoop` (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or `tornado.platform.select.SelectIOLoop` (all platforms). 
""" def initialize(self, impl, time_func=None, **kwargs): super(PollIOLoop, self).initialize(**kwargs) self._impl = impl if hasattr(self._impl, 'fileno'): set_close_exec(self._impl.fileno()) self.time_func = time_func or time.time self._handlers = {} self._events = {} self._callbacks = [] self._callback_lock = threading.Lock() self._timeouts = [] self._cancellations = 0 self._running = False self._stopped = False self._closing = False self._thread_ident = None self._blocking_signal_threshold = None self._timeout_counter = itertools.count() # Create a pipe that we send bogus data to when we want to wake # the I/O loop when it is idle self._waker = Waker() self.add_handler(self._waker.fileno(), lambda fd, events: self._waker.consume(), self.READ) def close(self, all_fds=False): with self._callback_lock: self._closing = True self.remove_handler(self._waker.fileno()) if all_fds: for fd, handler in self._handlers.values(): self.close_fd(fd) self._waker.close() self._impl.close() self._callbacks = None self._timeouts = None def add_handler(self, fd, handler, events): fd, obj = self.split_fd(fd) self._handlers[fd] = (obj, stack_context.wrap(handler)) self._impl.register(fd, events | self.ERROR) def update_handler(self, fd, events): fd, obj = self.split_fd(fd) self._impl.modify(fd, events | self.ERROR) def remove_handler(self, fd): fd, obj = self.split_fd(fd) self._handlers.pop(fd, None) self._events.pop(fd, None) try: self._impl.unregister(fd) except Exception: gen_log.debug("Error deleting fd from IOLoop", exc_info=True) def set_blocking_signal_threshold(self, seconds, action): if not hasattr(signal, "setitimer"): gen_log.error("set_blocking_signal_threshold requires a signal module " "with the setitimer method") return self._blocking_signal_threshold = seconds if seconds is not None: signal.signal(signal.SIGALRM, action if action is not None else signal.SIG_DFL) def start(self): if self._running: raise RuntimeError("IOLoop is already running") self._setup_logging() if self._stopped: self._stopped = False return old_current = getattr(IOLoop._current, "instance", None) IOLoop._current.instance = self self._thread_ident = thread.get_ident() self._running = True # signal.set_wakeup_fd closes a race condition in event loops: # a signal may arrive at the beginning of select/poll/etc # before it goes into its interruptible sleep, so the signal # will be consumed without waking the select. The solution is # for the (C, synchronous) signal handler to write to a pipe, # which will then be seen by select. # # In python's signal handling semantics, this only matters on the # main thread (fortunately, set_wakeup_fd only works on the main # thread and will raise a ValueError otherwise). # # If someone has already set a wakeup fd, we don't want to # disturb it. This is an issue for twisted, which does its # SIGCHLD processing in response to its own wakeup fd being # written to. As long as the wakeup fd is registered on the IOLoop, # the loop will still wake up and everything should work. old_wakeup_fd = None if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix': # requires python 2.6+, unix. set_wakeup_fd exists but crashes # the python process on windows. try: old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno()) if old_wakeup_fd != -1: # Already set, restore previous value. This is a little racy, # but there's no clean get_wakeup_fd and in real use the # IOLoop is just started once at the beginning. 
signal.set_wakeup_fd(old_wakeup_fd) old_wakeup_fd = None except ValueError: # Non-main thread, or the previous value of wakeup_fd # is no longer valid. old_wakeup_fd = None try: while True: # Prevent IO event starvation by delaying new callbacks # to the next iteration of the event loop. with self._callback_lock: callbacks = self._callbacks self._callbacks = [] # Add any timeouts that have come due to the callback list. # Do not run anything until we have determined which ones # are ready, so timeouts that call add_timeout cannot # schedule anything in this iteration. due_timeouts = [] if self._timeouts: now = self.time() while self._timeouts: if self._timeouts[0].callback is None: # The timeout was cancelled. Note that the # cancellation check is repeated below for timeouts # that are cancelled by another timeout or callback. heapq.heappop(self._timeouts) self._cancellations -= 1 elif self._timeouts[0].deadline <= now: due_timeouts.append(heapq.heappop(self._timeouts)) else: break if (self._cancellations > 512 and self._cancellations > (len(self._timeouts) >> 1)): # Clean up the timeout queue when it gets large and it's # more than half cancellations. self._cancellations = 0 self._timeouts = [x for x in self._timeouts if x.callback is not None] heapq.heapify(self._timeouts) for callback in callbacks: self._run_callback(callback) for timeout in due_timeouts: if timeout.callback is not None: self._run_callback(timeout.callback) # Closures may be holding on to a lot of memory, so allow # them to be freed before we go into our poll wait. callbacks = callback = due_timeouts = timeout = None if self._callbacks: # If any callbacks or timeouts called add_callback, # we don't want to wait in poll() before we run them. poll_timeout = 0.0 elif self._timeouts: # If there are any timeouts, schedule the first one. # Use self.time() instead of 'now' to account for time # spent running callbacks. poll_timeout = self._timeouts[0].deadline - self.time() poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT)) else: # No timeouts and no callbacks, so use the default. poll_timeout = _POLL_TIMEOUT if not self._running: break if self._blocking_signal_threshold is not None: # clear alarm so it doesn't fire while poll is waiting for # events. signal.setitimer(signal.ITIMER_REAL, 0, 0) try: event_pairs = self._impl.poll(poll_timeout) except Exception as e: # Depending on python version and IOLoop implementation, # different exception types may be thrown and there are # two ways EINTR might be signaled: # * e.errno == errno.EINTR # * e.args is like (errno.EINTR, 'Interrupted system call') if errno_from_exception(e) == errno.EINTR: continue else: raise if self._blocking_signal_threshold is not None: signal.setitimer(signal.ITIMER_REAL, self._blocking_signal_threshold, 0) # Pop one fd at a time from the set of pending fds and run # its handler. 
Since that handler may perform actions on # other file descriptors, there may be reentrant calls to # this IOLoop that update self._events self._events.update(event_pairs) while self._events: fd, events = self._events.popitem() try: fd_obj, handler_func = self._handlers[fd] handler_func(fd_obj, events) except (OSError, IOError) as e: if errno_from_exception(e) == errno.EPIPE: # Happens when the client closes the connection pass else: self.handle_callback_exception(self._handlers.get(fd)) except Exception: self.handle_callback_exception(self._handlers.get(fd)) fd_obj = handler_func = None finally: # reset the stopped flag so another start/stop pair can be issued self._stopped = False if self._blocking_signal_threshold is not None: signal.setitimer(signal.ITIMER_REAL, 0, 0) IOLoop._current.instance = old_current if old_wakeup_fd is not None: signal.set_wakeup_fd(old_wakeup_fd) def stop(self): self._running = False self._stopped = True self._waker.wake() def time(self): return self.time_func() def call_at(self, deadline, callback, *args, **kwargs): timeout = _Timeout( deadline, functools.partial(stack_context.wrap(callback), *args, **kwargs), self) heapq.heappush(self._timeouts, timeout) return timeout def remove_timeout(self, timeout): # Removing from a heap is complicated, so just leave the defunct # timeout object in the queue (see discussion in # http://docs.python.org/library/heapq.html). # If this turns out to be a problem, we could add a garbage # collection pass whenever there are too many dead timeouts. timeout.callback = None self._cancellations += 1 def add_callback(self, callback, *args, **kwargs): if thread.get_ident() != self._thread_ident: # If we're not on the IOLoop's thread, we need to synchronize # with other threads, or waking logic will induce a race. with self._callback_lock: if self._closing: return list_empty = not self._callbacks self._callbacks.append(functools.partial( stack_context.wrap(callback), *args, **kwargs)) if list_empty: # If we're not in the IOLoop's thread, and we added the # first callback to an empty list, we may need to wake it # up (it may wake up on its own, but an occasional extra # wake is harmless). Waking up a polling IOLoop is # relatively expensive, so we try to avoid it when we can. self._waker.wake() else: if self._closing: return # If we're on the IOLoop's thread, we don't need the lock, # since we don't need to wake anyone, just add the # callback. Blindly insert into self._callbacks. This is # safe even from signal handlers because the GIL makes # list.append atomic. One subtlety is that if the signal # is interrupting another thread holding the # _callback_lock block in IOLoop.start, we may modify # either the old or new version of self._callbacks, but # either way will work. 
self._callbacks.append(functools.partial( stack_context.wrap(callback), *args, **kwargs)) def add_callback_from_signal(self, callback, *args, **kwargs): with stack_context.NullContext(): self.add_callback(callback, *args, **kwargs) class _Timeout(object): """An IOLoop timeout, a UNIX timestamp and a callback""" # Reduce memory overhead when there are lots of pending callbacks __slots__ = ['deadline', 'callback', 'tiebreaker'] def __init__(self, deadline, callback, io_loop): if not isinstance(deadline, numbers.Real): raise TypeError("Unsupported deadline %r" % deadline) self.deadline = deadline self.callback = callback self.tiebreaker = next(io_loop._timeout_counter) # Comparison methods to sort by deadline, with object id as a tiebreaker # to guarantee a consistent ordering. The heapq module uses __le__ # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons # use __lt__). def __lt__(self, other): return ((self.deadline, self.tiebreaker) < (other.deadline, other.tiebreaker)) def __le__(self, other): return ((self.deadline, self.tiebreaker) <= (other.deadline, other.tiebreaker)) class PeriodicCallback(object): """Schedules the given callback to be called periodically. The callback is called every ``callback_time`` milliseconds. Note that the timeout is given in milliseconds, while most other time-related functions in Tornado use seconds. If the callback runs for longer than ``callback_time`` milliseconds, subsequent invocations will be skipped to get back on schedule. `start` must be called after the `PeriodicCallback` is created. .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. """ def __init__(self, callback, callback_time, io_loop=None): self.callback = callback if callback_time <= 0: raise ValueError("Periodic callback must have a positive callback_time") self.callback_time = callback_time self.io_loop = io_loop or IOLoop.current() self._running = False self._timeout = None def start(self): """Starts the timer.""" self._running = True self._next_timeout = self.io_loop.time() self._schedule_next() def stop(self): """Stops the timer.""" self._running = False if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = None def is_running(self): """Return True if this `.PeriodicCallback` has been started. .. 
versionadded:: 4.1 """ return self._running def _run(self): if not self._running: return try: return self.callback() except Exception: self.io_loop.handle_callback_exception(self.callback) finally: self._schedule_next() def _schedule_next(self): if self._running: current_time = self.io_loop.time() if self._next_timeout <= current_time: callback_time_sec = self.callback_time / 1000.0 self._next_timeout += (math.floor((current_time - self._next_timeout) / callback_time_sec) + 1) * callback_time_sec self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run) pyzmq-16.0.2/zmq/eventloop/minitornado/log.py000066400000000000000000000002651301503633700212640ustar00rootroot00000000000000"""minimal subset of tornado.log for zmq.eventloop.minitornado""" import logging app_log = logging.getLogger("tornado.application") gen_log = logging.getLogger("tornado.general") pyzmq-16.0.2/zmq/eventloop/minitornado/platform/000077500000000000000000000000001301503633700217525ustar00rootroot00000000000000pyzmq-16.0.2/zmq/eventloop/minitornado/platform/__init__.py000066400000000000000000000000001301503633700240510ustar00rootroot00000000000000pyzmq-16.0.2/zmq/eventloop/minitornado/platform/auto.py000066400000000000000000000026201301503633700232740ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of platform-specific functionality. For each function or class described in `tornado.platform.interface`, the appropriate platform-specific implementation exists in this module. Most code that needs access to this functionality should do e.g.:: from tornado.platform.auto import set_close_exec """ from __future__ import absolute_import, division, print_function, with_statement import os if os.name == 'nt': from .common import Waker from .windows import set_close_exec else: from .posix import set_close_exec, Waker try: # monotime monkey-patches the time module to have a monotonic function # in versions of python before 3.3. import monotime except ImportError: pass try: from time import monotonic as monotonic_time except ImportError: monotonic_time = None pyzmq-16.0.2/zmq/eventloop/minitornado/platform/common.py000066400000000000000000000063771301503633700236310ustar00rootroot00000000000000"""Lowest-common-denominator implementations of platform functionality.""" from __future__ import absolute_import, division, print_function, with_statement import errno import socket from . import interface class Waker(interface.Waker): """Create an OS independent asynchronous pipe. For use on platforms that don't have os.pipe() (or where pipes cannot be passed to select()), but do have sockets. This includes Windows and Jython. """ def __init__(self): # Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py self.writer = socket.socket() # Disable buffering -- pulling the trigger sends 1 byte, # and we want that sent immediately, to wake up ASAP. 
self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) count = 0 while 1: count += 1 # Bind to a local port; for efficiency, let the OS pick # a free port for us. # Unfortunately, stress tests showed that we may not # be able to connect to that port ("Address already in # use") despite that the OS picked it. This appears # to be a race bug in the Windows socket implementation. # So we loop until a connect() succeeds (almost always # on the first try). See the long thread at # http://mail.zope.org/pipermail/zope/2005-July/160433.html # for hideous details. a = socket.socket() a.bind(("127.0.0.1", 0)) a.listen(1) connect_address = a.getsockname() # assigned (host, port) pair try: self.writer.connect(connect_address) break # success except socket.error as detail: if (not hasattr(errno, 'WSAEADDRINUSE') or detail[0] != errno.WSAEADDRINUSE): # "Address already in use" is the only error # I've seen on two WinXP Pro SP2 boxes, under # Pythons 2.3.5 and 2.4.1. raise # (10048, 'Address already in use') # assert count <= 2 # never triggered in Tim's tests if count >= 10: # I've never seen it go above 2 a.close() self.writer.close() raise socket.error("Cannot bind trigger!") # Close `a` and try again. Note: I originally put a short # sleep() here, but it didn't appear to help or hurt. a.close() self.reader, addr = a.accept() self.reader.setblocking(0) self.writer.setblocking(0) a.close() self.reader_fd = self.reader.fileno() def fileno(self): return self.reader.fileno() def write_fileno(self): return self.writer.fileno() def wake(self): try: self.writer.send(b"x") except (IOError, socket.error): pass def consume(self): try: while True: result = self.reader.recv(1024) if not result: break except (IOError, socket.error): pass def close(self): self.reader.close() self.writer.close() pyzmq-16.0.2/zmq/eventloop/minitornado/platform/interface.py000066400000000000000000000043041301503633700242650ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Interfaces for platform-specific functionality. This module exists primarily for documentation purposes and as base classes for other tornado.platform modules. Most code should import the appropriate implementation from `tornado.platform.auto`. """ from __future__ import absolute_import, division, print_function, with_statement def set_close_exec(fd): """Sets the close-on-exec bit (``FD_CLOEXEC``)for a file descriptor.""" raise NotImplementedError() class Waker(object): """A socket-like object that can wake another thread from ``select()``. The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to its ``select`` (or ``epoll`` or ``kqueue``) calls. When another thread wants to wake up the loop, it calls `wake`. Once it has woken up, it will call `consume` to do any necessary per-wake cleanup. When the ``IOLoop`` is closed, it closes its waker too. """ def fileno(self): """Returns the read file descriptor for this waker. 
Must be suitable for use with ``select()`` or equivalent on the local platform. """ raise NotImplementedError() def write_fileno(self): """Returns the write file descriptor for this waker.""" raise NotImplementedError() def wake(self): """Triggers activity on the waker's file descriptor.""" raise NotImplementedError() def consume(self): """Called after the listen has woken up to do any necessary cleanup.""" raise NotImplementedError() def close(self): """Closes the waker's file descriptor(s).""" raise NotImplementedError() pyzmq-16.0.2/zmq/eventloop/minitornado/platform/posix.py000066400000000000000000000034641301503633700234750ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Posix implementations of platform-specific functionality.""" from __future__ import absolute_import, division, print_function, with_statement import fcntl import os from . import interface def set_close_exec(fd): flags = fcntl.fcntl(fd, fcntl.F_GETFD) fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) def _set_nonblocking(fd): flags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) class Waker(interface.Waker): def __init__(self): r, w = os.pipe() _set_nonblocking(r) _set_nonblocking(w) set_close_exec(r) set_close_exec(w) self.reader = os.fdopen(r, "rb", 0) self.writer = os.fdopen(w, "wb", 0) def fileno(self): return self.reader.fileno() def write_fileno(self): return self.writer.fileno() def wake(self): try: self.writer.write(b"x") except IOError: pass def consume(self): try: while True: result = self.reader.read() if not result: break except IOError: pass def close(self): self.reader.close() self.writer.close() pyzmq-16.0.2/zmq/eventloop/minitornado/platform/windows.py000066400000000000000000000012511301503633700240150ustar00rootroot00000000000000# NOTE: win32 support is currently experimental, and not recommended # for production use. from __future__ import absolute_import, division, print_function, with_statement import ctypes import ctypes.wintypes # See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD) SetHandleInformation.restype = ctypes.wintypes.BOOL HANDLE_FLAG_INHERIT = 0x00000001 def set_close_exec(fd): success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0) if not success: raise ctypes.GetLastError() pyzmq-16.0.2/zmq/eventloop/minitornado/stack_context.py000066400000000000000000000315571301503633700233640ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2010 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
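# The Waker implementations above are variants of the classic "self-pipe"
# trick. A standalone POSIX sketch of the same idea, using only the stdlib
# (no pyzmq internals assumed):
import os, fcntl, select

r, w = os.pipe()
for fd in (r, w):
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

os.write(w, b"x")                            # wake(): poke the write end
ready, _, _ = select.select([r], [], [], 0)
assert r in ready                            # the select()-ing loop wakes up
try:
    while os.read(r, 1024):                  # consume(): drain the pipe
        pass
except OSError:
    pass                                     # nothing left to read (EAGAIN)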
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """`StackContext` allows applications to maintain threadlocal-like state that follows execution as it moves to other execution contexts. The motivating examples are to eliminate the need for explicit ``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to allow some additional context to be kept for logging. This is slightly magic, but it's an extension of the idea that an exception handler is a kind of stack-local state and when that stack is suspended and resumed in a new context that state needs to be preserved. `StackContext` shifts the burden of restoring that state from each call site (e.g. wrapping each `.AsyncHTTPClient` callback in ``async_callback``) to the mechanisms that transfer control from one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`, thread pools, etc). Example usage:: @contextlib.contextmanager def die_on_error(): try: yield except Exception: logging.error("exception in asynchronous operation",exc_info=True) sys.exit(1) with StackContext(die_on_error): # Any exception thrown here *or in callback and its descendants* # will cause the process to exit instead of spinning endlessly # in the ioloop. http_client.fetch(url, callback) ioloop.start() Most applications shouldn't have to work with `StackContext` directly. Here are a few rules of thumb for when it's necessary: * If you're writing an asynchronous library that doesn't rely on a stack_context-aware library like `tornado.ioloop` or `tornado.iostream` (for example, if you're writing a thread pool), use `.stack_context.wrap()` before any asynchronous operations to capture the stack context from where the operation was started. * If you're writing an asynchronous library that has some shared resources (such as a connection pool), create those shared resources within a ``with stack_context.NullContext():`` block. This will prevent ``StackContexts`` from leaking from one request to another. * If you want to write something like an exception handler that will persist across asynchronous calls, create a new `StackContext` (or `ExceptionStackContext`), and make your asynchronous calls in a ``with`` block that references your `StackContext`. """ from __future__ import absolute_import, division, print_function, with_statement import sys import threading from .util import raise_exc_info class StackContextInconsistentError(Exception): pass class _State(threading.local): def __init__(self): self.contexts = (tuple(), None) _state = _State() class StackContext(object): """Establishes the given context as a StackContext that will be transferred. Note that the parameter is a callable that returns a context manager, not the context itself. That is, where for a non-transferable context manager you would say:: with my_context(): StackContext takes the function itself rather than its result:: with StackContext(my_context): The result of ``with StackContext() as cb:`` is a deactivation callback. 
Run this callback when the StackContext is no longer needed to ensure that it is not propagated any further (note that deactivating a context does not affect any instances of that context that are currently pending). This is an advanced feature and not necessary in most applications. """ def __init__(self, context_factory): self.context_factory = context_factory self.contexts = [] self.active = True def _deactivate(self): self.active = False # StackContext protocol def enter(self): context = self.context_factory() self.contexts.append(context) context.__enter__() def exit(self, type, value, traceback): context = self.contexts.pop() context.__exit__(type, value, traceback) # Note that some of this code is duplicated in ExceptionStackContext # below. ExceptionStackContext is more common and doesn't need # the full generality of this class. def __enter__(self): self.old_contexts = _state.contexts self.new_contexts = (self.old_contexts[0] + (self,), self) _state.contexts = self.new_contexts try: self.enter() except: _state.contexts = self.old_contexts raise return self._deactivate def __exit__(self, type, value, traceback): try: self.exit(type, value, traceback) finally: final_contexts = _state.contexts _state.contexts = self.old_contexts # Generator coroutines and with-statements with non-local # effects interact badly. Check here for signs of # the stack getting out of sync. # Note that this check comes after restoring _state.context # so that if it fails things are left in a (relatively) # consistent state. if final_contexts is not self.new_contexts: raise StackContextInconsistentError( 'stack_context inconsistency (may be caused by yield ' 'within a "with StackContext" block)') # Break up a reference to itself to allow for faster GC on CPython. self.new_contexts = None class ExceptionStackContext(object): """Specialization of StackContext for exception handling. The supplied ``exception_handler`` function will be called in the event of an uncaught exception in this context. The semantics are similar to a try/finally clause, and intended use cases are to log an error, close a socket, or similar cleanup actions. The ``exc_info`` triple ``(type, value, traceback)`` will be passed to the exception_handler function. If the exception handler returns true, the exception will be consumed and will not be propagated to other exception handlers. """ def __init__(self, exception_handler): self.exception_handler = exception_handler self.active = True def _deactivate(self): self.active = False def exit(self, type, value, traceback): if type is not None: return self.exception_handler(type, value, traceback) def __enter__(self): self.old_contexts = _state.contexts self.new_contexts = (self.old_contexts[0], self) _state.contexts = self.new_contexts return self._deactivate def __exit__(self, type, value, traceback): try: if type is not None: return self.exception_handler(type, value, traceback) finally: final_contexts = _state.contexts _state.contexts = self.old_contexts if final_contexts is not self.new_contexts: raise StackContextInconsistentError( 'stack_context inconsistency (may be caused by yield ' 'within a "with StackContext" block)') # Break up a reference to itself to allow for faster GC on CPython. self.new_contexts = None class NullContext(object): """Resets the `StackContext`. Useful when creating a shared resource on demand (e.g. an `.AsyncHTTPClient`) where the stack that caused the creating is not relevant to future operations. 
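# A small usage sketch of the ExceptionStackContext defined above: the handler
# installed here is still consulted when a wrapped callback raises *after* the
# with-block has exited.
from zmq.eventloop.minitornado import stack_context

def on_error(typ, value, tb):
    print("caught:", value)
    return True                    # True: exception handled, do not propagate

with stack_context.ExceptionStackContext(on_error):
    cb = stack_context.wrap(lambda: 1 / 0)

cb()   # prints "caught: ..." instead of raising ZeroDivisionError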
""" def __enter__(self): self.old_contexts = _state.contexts _state.contexts = (tuple(), None) def __exit__(self, type, value, traceback): _state.contexts = self.old_contexts def _remove_deactivated(contexts): """Remove deactivated handlers from the chain""" # Clean ctx handlers stack_contexts = tuple([h for h in contexts[0] if h.active]) # Find new head head = contexts[1] while head is not None and not head.active: head = head.old_contexts[1] # Process chain ctx = head while ctx is not None: parent = ctx.old_contexts[1] while parent is not None: if parent.active: break ctx.old_contexts = parent.old_contexts parent = parent.old_contexts[1] ctx = parent return (stack_contexts, head) def wrap(fn): """Returns a callable object that will restore the current `StackContext` when executed. Use this whenever saving a callback to be executed later in a different execution context (either in a different thread or asynchronously in the same thread). """ # Check if function is already wrapped if fn is None or hasattr(fn, '_wrapped'): return fn # Capture current stack head # TODO: Any other better way to store contexts and update them in wrapped function? cap_contexts = [_state.contexts] if not cap_contexts[0][0] and not cap_contexts[0][1]: # Fast path when there are no active contexts. def null_wrapper(*args, **kwargs): try: current_state = _state.contexts _state.contexts = cap_contexts[0] return fn(*args, **kwargs) finally: _state.contexts = current_state null_wrapper._wrapped = True return null_wrapper def wrapped(*args, **kwargs): ret = None try: # Capture old state current_state = _state.contexts # Remove deactivated items cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0]) # Force new state _state.contexts = contexts # Current exception exc = (None, None, None) top = None # Apply stack contexts last_ctx = 0 stack = contexts[0] # Apply state for n in stack: try: n.enter() last_ctx += 1 except: # Exception happened. Record exception info and store top-most handler exc = sys.exc_info() top = n.old_contexts[1] # Execute callback if no exception happened while restoring state if top is None: try: ret = fn(*args, **kwargs) except: exc = sys.exc_info() top = contexts[1] # If there was exception, try to handle it by going through the exception chain if top is not None: exc = _handle_exception(top, exc) else: # Otherwise take shorter path and run stack contexts in reverse order while last_ctx > 0: last_ctx -= 1 c = stack[last_ctx] try: c.exit(*exc) except: exc = sys.exc_info() top = c.old_contexts[1] break else: top = None # If if exception happened while unrolling, take longer exception handler path if top is not None: exc = _handle_exception(top, exc) # If exception was not handled, raise it if exc != (None, None, None): raise_exc_info(exc) finally: _state.contexts = current_state return ret wrapped._wrapped = True return wrapped def _handle_exception(tail, exc): while tail is not None: try: if tail.exit(*exc): exc = (None, None, None) except: exc = sys.exc_info() tail = tail.old_contexts[1] return exc def run_with_stack_context(context, func): """Run a coroutine ``func`` in the given `StackContext`. It is not safe to have a ``yield`` statement within a ``with StackContext`` block, so it is difficult to use stack context with `.gen.coroutine`. This helper function runs the function in the correct context while keeping the ``yield`` and ``with`` statements syntactically separate. 
Example:: @gen.coroutine def incorrect(): with StackContext(ctx): # ERROR: this will raise StackContextInconsistentError yield other_coroutine() @gen.coroutine def correct(): yield run_with_stack_context(StackContext(ctx), other_coroutine) .. versionadded:: 3.1 """ with context: return func() pyzmq-16.0.2/zmq/eventloop/minitornado/util.py000066400000000000000000000164121301503633700214610ustar00rootroot00000000000000"""Miscellaneous utility functions and classes. This module is used internally by Tornado. It is not necessarily expected that the functions and classes defined here will be useful to other applications, but they are documented here in case they are. The one public-facing part of this module is the `Configurable` class and its `~Configurable.configure` method, which becomes a part of the interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`, and `.Resolver`. """ from __future__ import absolute_import, division, print_function, with_statement import sys # Fake unicode literal support: Python 3.2 doesn't have the u'' marker for # literal strings, and alternative solutions like "from __future__ import # unicode_literals" have other problems (see PEP 414). u() can be applied # to ascii strings that include \u escapes (but they must not contain # literal non-ascii characters). if not isinstance(b'', type('')): def u(s): return s unicode_type = str basestring_type = str else: def u(s): return s.decode('unicode_escape') # These names don't exist in py3, so use noqa comments to disable # warnings in flake8. unicode_type = unicode # noqa basestring_type = basestring # noqa def import_object(name): """Imports an object by name. import_object('x') is equivalent to 'import x'. import_object('x.y.z') is equivalent to 'from x.y import z'. >>> import tornado.escape >>> import_object('tornado.escape') is tornado.escape True >>> import_object('tornado.escape.utf8') is tornado.escape.utf8 True >>> import_object('tornado') is tornado True >>> import_object('tornado.missing_module') Traceback (most recent call last): ... ImportError: No module named missing_module """ if isinstance(name, unicode_type) and str is not unicode_type: # On python 2 a byte string is required. name = name.encode('utf-8') if name.count('.') == 0: return __import__(name, None, None) parts = name.split('.') obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0) try: return getattr(obj, parts[-1]) except AttributeError: raise ImportError("No module named %s" % parts[-1]) # Deprecated alias that was used before we dropped py25 support. # Left here in case anyone outside Tornado is using it. bytes_type = bytes if sys.version_info > (3,): exec(""" def raise_exc_info(exc_info): raise exc_info[1].with_traceback(exc_info[2]) def exec_in(code, glob, loc=None): if isinstance(code, str): code = compile(code, '', 'exec', dont_inherit=True) exec(code, glob, loc) """) else: exec(""" def raise_exc_info(exc_info): raise exc_info[0], exc_info[1], exc_info[2] def exec_in(code, glob, loc=None): if isinstance(code, basestring): # exec(string) inherits the caller's future imports; compile # the string first to prevent that. code = compile(code, '', 'exec', dont_inherit=True) exec code in glob, loc """) def errno_from_exception(e): """Provides the errno from an Exception object. There are cases that the errno attribute was not set so we pull the errno out of the args but if someone instantiates an Exception without any args you will get a tuple error. 
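# A quick sketch of import_object(), defined above, which resolves dotted
# names at runtime:
from zmq.eventloop.minitornado.util import import_object

json_mod = import_object('json')        # like "import json"
dumps = import_object('json.dumps')     # like "from json import dumps"
assert dumps is json_mod.dumps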
So this function abstracts all that behavior to give you a safe way to get the errno. """ if hasattr(e, 'errno'): return e.errno elif e.args: return e.args[0] else: return None class Configurable(object): """Base class for configurable interfaces. A configurable interface is an (abstract) class whose constructor acts as a factory function for one of its implementation subclasses. The implementation subclass as well as optional keyword arguments to its initializer can be set globally at runtime with `configure`. By using the constructor as the factory method, the interface looks like a normal class, `isinstance` works as usual, etc. This pattern is most useful when the choice of implementation is likely to be a global decision (e.g. when `~select.epoll` is available, always use it instead of `~select.select`), or when a previously-monolithic class has been split into specialized subclasses. Configurable subclasses must define the class methods `configurable_base` and `configurable_default`, and use the instance method `initialize` instead of ``__init__``. """ __impl_class = None __impl_kwargs = None def __new__(cls, *args, **kwargs): base = cls.configurable_base() init_kwargs = {} if cls is base: impl = cls.configured_class() if base.__impl_kwargs: init_kwargs.update(base.__impl_kwargs) else: impl = cls init_kwargs.update(kwargs) instance = super(Configurable, cls).__new__(impl) # initialize vs __init__ chosen for compatibility with AsyncHTTPClient # singleton magic. If we get rid of that we can switch to __init__ # here too. instance.initialize(*args, **init_kwargs) return instance @classmethod def configurable_base(cls): """Returns the base class of a configurable hierarchy. This will normally return the class in which it is defined. (which is *not* necessarily the same as the cls classmethod parameter). """ raise NotImplementedError() @classmethod def configurable_default(cls): """Returns the implementation class to be used if none is configured.""" raise NotImplementedError() def initialize(self): """Initialize a `Configurable` subclass instance. Configurable classes should use `initialize` instead of ``__init__``. .. versionchanged:: 4.2 Now accepts positional arguments in addition to keyword arguments. """ @classmethod def configure(cls, impl, **kwargs): """Sets the class to use when the base class is instantiated. Keyword arguments will be saved and added to the arguments passed to the constructor. This can be used to set global defaults for some parameters. 
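# A sketch of the Configurable factory pattern described above, using
# throwaway classes (Transport, TCPTransport, MockTransport are illustrative
# names only, not part of pyzmq):
from zmq.eventloop.minitornado.util import Configurable

class Transport(Configurable):
    @classmethod
    def configurable_base(cls):
        return Transport

    @classmethod
    def configurable_default(cls):
        return TCPTransport

    def initialize(self, **kwargs):
        self.options = kwargs

class TCPTransport(Transport):
    pass

class MockTransport(Transport):
    pass

assert isinstance(Transport(), TCPTransport)    # default implementation is used
Transport.configure(MockTransport, timeout=5)   # swap the implementation globally
t = Transport()
assert isinstance(t, MockTransport) and t.options['timeout'] == 5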
""" base = cls.configurable_base() if isinstance(impl, (unicode_type, bytes)): impl = import_object(impl) if impl is not None and not issubclass(impl, cls): raise ValueError("Invalid subclass of %s" % cls) base.__impl_class = impl base.__impl_kwargs = kwargs @classmethod def configured_class(cls): """Returns the currently configured class.""" base = cls.configurable_base() if cls.__impl_class is None: base.__impl_class = cls.configurable_default() return base.__impl_class @classmethod def _save_configuration(cls): base = cls.configurable_base() return (base.__impl_class, base.__impl_kwargs) @classmethod def _restore_configuration(cls, saved): base = cls.configurable_base() base.__impl_class = saved[0] base.__impl_kwargs = saved[1] def timedelta_to_seconds(td): """Equivalent to td.total_seconds() (introduced in python 2.7).""" return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) pyzmq-16.0.2/zmq/eventloop/zmqstream.py000066400000000000000000000450411301503633700202040ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A utility class to send to and recv from a non-blocking socket.""" from __future__ import with_statement import sys import zmq from zmq.utils import jsonapi try: import cPickle as pickle except ImportError: import pickle from .ioloop import IOLoop try: # gen_log will only import from >= 3.0 from tornado.log import gen_log from tornado import stack_context except ImportError: from .minitornado.log import gen_log from .minitornado import stack_context try: from queue import Queue except ImportError: from Queue import Queue from zmq.utils.strtypes import bytes, unicode, basestring try: callable except NameError: callable = lambda obj: hasattr(obj, '__call__') class ZMQStream(object): """A utility class to register callbacks when a zmq socket sends and receives For use with zmq.eventloop.ioloop There are three main methods Methods: * **on_recv(callback, copy=True):** register a callback to be run every time the socket has something to receive * **on_send(callback):** register a callback to be run every time you call send * **send(self, msg, flags=0, copy=False, callback=None):** perform a send that will trigger the callback if callback is passed, on_send is also called. There are also send_multipart(), send_json(), send_pyobj() Three other methods for deactivating the callbacks: * **stop_on_recv():** turn off the recv callback * **stop_on_send():** turn off the send callback which simply call ``on_(None)``. The entire socket interface, excluding direct recv methods, is also provided, primarily through direct-linking the methods. e.g. 
>>> stream.bind is stream.socket.bind True """ socket = None io_loop = None poller = None _send_queue = None _recv_callback = None _send_callback = None _close_callback = None _state = 0 _flushed = False _recv_copy = False def __init__(self, socket, io_loop=None): self.socket = socket self.io_loop = io_loop or IOLoop.instance() self.poller = zmq.Poller() self._send_queue = Queue() self._recv_callback = None self._send_callback = None self._close_callback = None self._recv_copy = False self._flushed = False self._state = self.io_loop.ERROR self._init_io_state() # shortcircuit some socket methods self.bind = self.socket.bind self.bind_to_random_port = self.socket.bind_to_random_port self.connect = self.socket.connect self.setsockopt = self.socket.setsockopt self.getsockopt = self.socket.getsockopt self.setsockopt_string = self.socket.setsockopt_string self.getsockopt_string = self.socket.getsockopt_string self.setsockopt_unicode = self.socket.setsockopt_unicode self.getsockopt_unicode = self.socket.getsockopt_unicode def stop_on_recv(self): """Disable callback and automatic receiving.""" return self.on_recv(None) def stop_on_send(self): """Disable callback on sending.""" return self.on_send(None) def stop_on_err(self): """DEPRECATED, does nothing""" gen_log.warn("on_err does nothing, and will be removed") def on_err(self, callback): """DEPRECATED, does nothing""" gen_log.warn("on_err does nothing, and will be removed") def on_recv(self, callback, copy=True): """Register a callback for when a message is ready to recv. There can be only one callback registered at a time, so each call to `on_recv` replaces previously registered callbacks. on_recv(None) disables recv event polling. Use on_recv_stream(callback) instead, to register a callback that will receive both this ZMQStream and the message, instead of just the message. Parameters ---------- callback : callable callback must take exactly one argument, which will be a list, as returned by socket.recv_multipart() if callback is None, recv callbacks are disabled. copy : bool copy is passed directly to recv, so if copy is False, callback will receive Message objects. If copy is True, then callback will receive bytes/str objects. Returns : None """ self._check_closed() assert callback is None or callable(callback) self._recv_callback = stack_context.wrap(callback) self._recv_copy = copy if callback is None: self._drop_io_state(self.io_loop.READ) else: self._add_io_state(self.io_loop.READ) def on_recv_stream(self, callback, copy=True): """Same as on_recv, but callback will get this stream as first argument callback must take exactly two arguments, as it will be called as:: callback(stream, msg) Useful when a single callback should be used with multiple streams. """ if callback is None: self.stop_on_recv() else: self.on_recv(lambda msg: callback(self, msg), copy=copy) def on_send(self, callback): """Register a callback to be called on each send There will be two arguments:: callback(msg, status) * `msg` will be the list of sendable objects that was just sent * `status` will be the return result of socket.send_multipart(msg) - MessageTracker or None. Non-copying sends return a MessageTracker object whose `done` attribute will be True when the send is complete. This allows users to track when an object is safe to write to again. The second argument will always be None if copy=True on the send. Use on_send_stream(callback) to register a callback that will be passed this ZMQStream as the first argument, in addition to the other two. 
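# A minimal sketch of the on_recv() API described above: echo every multipart
# message a ROUTER socket receives back to its sender (the endpoint and port
# are arbitrary).
import zmq
from zmq.eventloop import ioloop, zmqstream

loop = ioloop.IOLoop.instance()
ctx = zmq.Context.instance()
sock = ctx.socket(zmq.ROUTER)
sock.bind("tcp://127.0.0.1:5555")
stream = zmqstream.ZMQStream(sock, loop)

def echo(msg):
    # msg is the list returned by recv_multipart(): [identity, ..., payload]
    stream.send_multipart(msg)

stream.on_recv(echo)
loop.start()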
on_send(None) disables recv event polling. Parameters ---------- callback : callable callback must take exactly two arguments, which will be the message being sent (always a list), and the return result of socket.send_multipart(msg) - MessageTracker or None. if callback is None, send callbacks are disabled. """ self._check_closed() assert callback is None or callable(callback) self._send_callback = stack_context.wrap(callback) def on_send_stream(self, callback): """Same as on_send, but callback will get this stream as first argument Callback will be passed three arguments:: callback(stream, msg, status) Useful when a single callback should be used with multiple streams. """ if callback is None: self.stop_on_send() else: self.on_send(lambda msg, status: callback(self, msg, status)) def send(self, msg, flags=0, copy=True, track=False, callback=None): """Send a message, optionally also register a new callback for sends. See zmq.socket.send for details. """ return self.send_multipart([msg], flags=flags, copy=copy, track=track, callback=callback) def send_multipart(self, msg, flags=0, copy=True, track=False, callback=None): """Send a multipart message, optionally also register a new callback for sends. See zmq.socket.send_multipart for details. """ kwargs = dict(flags=flags, copy=copy, track=track) self._send_queue.put((msg, kwargs)) callback = callback or self._send_callback if callback is not None: self.on_send(callback) else: # noop callback self.on_send(lambda *args: None) self._add_io_state(self.io_loop.WRITE) def send_string(self, u, flags=0, encoding='utf-8', callback=None): """Send a unicode message with an encoding. See zmq.socket.send_unicode for details. """ if not isinstance(u, basestring): raise TypeError("unicode/str objects only") return self.send(u.encode(encoding), flags=flags, callback=callback) send_unicode = send_string def send_json(self, obj, flags=0, callback=None): """Send json-serialized version of an object. See zmq.socket.send_json for details. """ if jsonapi is None: raise ImportError('jsonlib{1,2}, json or simplejson library is required.') else: msg = jsonapi.dumps(obj) return self.send(msg, flags=flags, callback=callback) def send_pyobj(self, obj, flags=0, protocol=-1, callback=None): """Send a Python object as a message using pickle to serialize. See zmq.socket.send_json for details. """ msg = pickle.dumps(obj, protocol) return self.send(msg, flags, callback=callback) def _finish_flush(self): """callback for unsetting _flushed flag.""" self._flushed = False def flush(self, flag=zmq.POLLIN|zmq.POLLOUT, limit=None): """Flush pending messages. This method safely handles all pending incoming and/or outgoing messages, bypassing the inner loop, passing them to the registered callbacks. A limit can be specified, to prevent blocking under high load. flush will return the first time ANY of these conditions are met: * No more events matching the flag are pending. * the total number of events handled reaches the limit. Note that if ``flag|POLLIN != 0``, recv events will be flushed even if no callback is registered, unlike normal IOLoop operation. This allows flush to be used to remove *and ignore* incoming messages. Parameters ---------- flag : int, default=POLLIN|POLLOUT 0MQ poll flags. If flag|POLLIN, recv events will be flushed. If flag|POLLOUT, send events will be flushed. Both flags can be set at once, which is the default. limit : None or int, optional The maximum number of messages to send or receive. Both send and recv count against this limit. 
Returns ------- int : count of events handled (both send and recv) """ self._check_closed() # unset self._flushed, so callbacks will execute, in case flush has # already been called this iteration already_flushed = self._flushed self._flushed = False # initialize counters count = 0 def update_flag(): """Update the poll flag, to prevent registering POLLOUT events if we don't have pending sends.""" return flag & zmq.POLLIN | (self.sending() and flag & zmq.POLLOUT) flag = update_flag() if not flag: # nothing to do return 0 self.poller.register(self.socket, flag) events = self.poller.poll(0) while events and (not limit or count < limit): s,event = events[0] if event & zmq.POLLIN: # receiving self._handle_recv() count += 1 if self.socket is None: # break if socket was closed during callback break if event & zmq.POLLOUT and self.sending(): self._handle_send() count += 1 if self.socket is None: # break if socket was closed during callback break flag = update_flag() if flag: self.poller.register(self.socket, flag) events = self.poller.poll(0) else: events = [] if count: # only bypass loop if we actually flushed something # skip send/recv callbacks this iteration self._flushed = True # reregister them at the end of the loop if not already_flushed: # don't need to do it again self.io_loop.add_callback(self._finish_flush) elif already_flushed: self._flushed = True # update ioloop poll state, which may have changed self._rebuild_io_state() return count def set_close_callback(self, callback): """Call the given callback when the stream is closed.""" self._close_callback = stack_context.wrap(callback) def close(self, linger=None): """Close this stream.""" if self.socket is not None: self.io_loop.remove_handler(self.socket) self.socket.close(linger) self.socket = None if self._close_callback: self._run_callback(self._close_callback) def receiving(self): """Returns True if we are currently receiving from the stream.""" return self._recv_callback is not None def sending(self): """Returns True if we are currently sending to the stream.""" return not self._send_queue.empty() def closed(self): return self.socket is None def _run_callback(self, callback, *args, **kwargs): """Wrap running callbacks in try/except to allow us to close our socket.""" try: # Use a NullContext to ensure that all StackContexts are run # inside our blanket exception handler rather than outside. with stack_context.NullContext(): callback(*args, **kwargs) except: gen_log.error("Uncaught exception, closing connection.", exc_info=True) # Close the socket on an uncaught exception from a user callback # (It would eventually get closed when the socket object is # gc'd, but we don't want to rely on gc happening before we # run out of file descriptors) self.close() # Re-raise the exception so that IOLoop.handle_callback_exception # can see it and log the error raise def _handle_events(self, fd, events): """This method is the actual handler for IOLoop, that gets called whenever an event on my socket is posted. 
It dispatches to _handle_recv, etc.""" # print "handling events" if not self.socket: gen_log.warning("Got events for closed stream %s", fd) return try: # dispatch events: if events & IOLoop.ERROR: gen_log.error("got POLLERR event on ZMQStream, which doesn't make sense") return if events & IOLoop.READ: self._handle_recv() if not self.socket: return if events & IOLoop.WRITE: self._handle_send() if not self.socket: return # rebuild the poll state self._rebuild_io_state() except: gen_log.error("Uncaught exception, closing connection.", exc_info=True) self.close() raise def _handle_recv(self): """Handle a recv event.""" if self._flushed: return try: msg = self.socket.recv_multipart(zmq.NOBLOCK, copy=self._recv_copy) except zmq.ZMQError as e: if e.errno == zmq.EAGAIN: # state changed since poll event pass else: gen_log.error("RECV Error: %s"%zmq.strerror(e.errno)) else: if self._recv_callback: callback = self._recv_callback # self._recv_callback = None self._run_callback(callback, msg) # self.update_state() def _handle_send(self): """Handle a send event.""" if self._flushed: return if not self.sending(): gen_log.error("Shouldn't have handled a send event") return msg, kwargs = self._send_queue.get() try: status = self.socket.send_multipart(msg, **kwargs) except zmq.ZMQError as e: gen_log.error("SEND Error: %s", e) status = e if self._send_callback: callback = self._send_callback self._run_callback(callback, msg, status) # self.update_state() def _check_closed(self): if not self.socket: raise IOError("Stream is closed") def _rebuild_io_state(self): """rebuild io state based on self.sending() and receiving()""" if self.socket is None: return state = self.io_loop.ERROR if self.receiving(): state |= self.io_loop.READ if self.sending(): state |= self.io_loop.WRITE if state != self._state: self._state = state self._update_handler(state) def _add_io_state(self, state): """Add io_state to poller.""" if not self._state & state: self._state = self._state | state self._update_handler(self._state) def _drop_io_state(self, state): """Stop poller from watching an io_state.""" if self._state & state: self._state = self._state & (~state) self._update_handler(self._state) def _update_handler(self, state): """Update IOLoop handler with state.""" if self.socket is None: return self.io_loop.update_handler(self.socket, state) def _init_io_state(self): """initialize the ioloop event handler""" with stack_context.NullContext(): self.io_loop.add_handler(self.socket, self._handle_events, self._state) pyzmq-16.0.2/zmq/green/000077500000000000000000000000001301503633700146705ustar00rootroot00000000000000pyzmq-16.0.2/zmq/green/__init__.py000066400000000000000000000021351301503633700170020ustar00rootroot00000000000000# -*- coding: utf-8 -*- #----------------------------------------------------------------------------- # Copyright (C) 2011-2012 Travis Cline # # This file is part of pyzmq # It is adapted from upstream project zeromq_gevent under the New BSD License # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- """zmq.green - gevent compatibility with zeromq. Usage ----- Instead of importing zmq directly, do so in the following manner: .. import zmq.green as zmq Any calls that would have blocked the current thread will now only block the current green thread. 
This compatibility is accomplished by ensuring the nonblocking flag is set before any blocking operation and the ØMQ file descriptor is polled internally to trigger needed events. """ from zmq import * from zmq.green.core import _Context, _Socket from zmq.green.poll import _Poller Context = _Context Socket = _Socket Poller = _Poller from zmq.green.device import device pyzmq-16.0.2/zmq/green/core.py000066400000000000000000000243701301503633700162000ustar00rootroot00000000000000#----------------------------------------------------------------------------- # Copyright (C) 2011-2012 Travis Cline # # This file is part of pyzmq # It is adapted from upstream project zeromq_gevent under the New BSD License # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- """This module wraps the :class:`Socket` and :class:`Context` found in :mod:`pyzmq ` to be non blocking """ from __future__ import print_function import sys import time import warnings import zmq from zmq import Context as _original_Context from zmq import Socket as _original_Socket from .poll import _Poller import gevent from gevent.event import AsyncResult from gevent.hub import get_hub if hasattr(zmq, 'RCVTIMEO'): TIMEOS = (zmq.RCVTIMEO, zmq.SNDTIMEO) else: TIMEOS = () def _stop(evt): """simple wrapper for stopping an Event, allowing for method rename in gevent 1.0""" try: evt.stop() except AttributeError as e: # gevent<1.0 compat evt.cancel() class _Socket(_original_Socket): """Green version of :class:`zmq.Socket` The following methods are overridden: * send * recv To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised. The `__state_changed` method is triggered when the zmq.FD for the socket is marked as readable and triggers the necessary read and write events (which are waited for in the recv and send methods). Some double underscore prefixes are used to minimize pollution of :class:`zmq.Socket`'s namespace. 
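# A minimal sketch of the greened Socket in action: two greenlets doing a
# blocking REQ/REP exchange over inproc without blocking each other
# (requires gevent; the endpoint name is arbitrary).
import gevent
import zmq.green as zmq

ctx = zmq.Context.instance()
rep = ctx.socket(zmq.REP)
rep.bind("inproc://green-demo")
req = ctx.socket(zmq.REQ)
req.connect("inproc://green-demo")

def server():
    rep.send(rep.recv())       # recv() only parks this greenlet

def client():
    req.send(b"ping")
    print(req.recv())          # -> b'ping'

gevent.joinall([gevent.spawn(server), gevent.spawn(client)])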
""" __in_send_multipart = False __in_recv_multipart = False __writable = None __readable = None _state_event = None _gevent_bug_timeout = 11.6 # timeout for not trusting gevent _debug_gevent = False # turn on if you think gevent is missing events _poller_class = _Poller def __init__(self, *a, **kw): super(_Socket, self).__init__(*a, **kw) self.__in_send_multipart = False self.__in_recv_multipart = False self.__setup_events() def __del__(self): self.close() def close(self, linger=None): super(_Socket, self).close(linger) self.__cleanup_events() def __cleanup_events(self): # close the _state_event event, keeps the number of active file descriptors down if getattr(self, '_state_event', None): _stop(self._state_event) self._state_event = None # if the socket has entered a close state resume any waiting greenlets self.__writable.set() self.__readable.set() def __setup_events(self): self.__readable = AsyncResult() self.__writable = AsyncResult() self.__readable.set() self.__writable.set() try: self._state_event = get_hub().loop.io(self.getsockopt(zmq.FD), 1) # read state watcher self._state_event.start(self.__state_changed) except AttributeError: # for gevent<1.0 compatibility from gevent.core import read_event self._state_event = read_event(self.getsockopt(zmq.FD), self.__state_changed, persist=True) def __state_changed(self, event=None, _evtype=None): if self.closed: self.__cleanup_events() return try: # avoid triggering __state_changed from inside __state_changed events = super(_Socket, self).getsockopt(zmq.EVENTS) except zmq.ZMQError as exc: self.__writable.set_exception(exc) self.__readable.set_exception(exc) else: if events & zmq.POLLOUT: self.__writable.set() if events & zmq.POLLIN: self.__readable.set() def _wait_write(self): assert self.__writable.ready(), "Only one greenlet can be waiting on this event" self.__writable = AsyncResult() # timeout is because libzmq cannot be trusted to properly signal a new send event: # this is effectively a maximum poll interval of 1s tic = time.time() dt = self._gevent_bug_timeout if dt: timeout = gevent.Timeout(seconds=dt) else: timeout = None try: if timeout: timeout.start() self.__writable.get(block=True) except gevent.Timeout as t: if t is not timeout: raise toc = time.time() # gevent bug: get can raise timeout even on clean return # don't display zmq bug warning for gevent bug (this is getting ridiculous) if self._debug_gevent and timeout and toc-tic > dt and \ self.getsockopt(zmq.EVENTS) & zmq.POLLOUT: print("BUG: gevent may have missed a libzmq send event on %i!" % self.FD, file=sys.stderr) finally: if timeout: timeout.cancel() self.__writable.set() def _wait_read(self): assert self.__readable.ready(), "Only one greenlet can be waiting on this event" self.__readable = AsyncResult() # timeout is because libzmq cannot always be trusted to play nice with libevent. # I can only confirm that this actually happens for send, but lets be symmetrical # with our dirty hacks. 
# this is effectively a maximum poll interval of 1s tic = time.time() dt = self._gevent_bug_timeout if dt: timeout = gevent.Timeout(seconds=dt) else: timeout = None try: if timeout: timeout.start() self.__readable.get(block=True) except gevent.Timeout as t: if t is not timeout: raise toc = time.time() # gevent bug: get can raise timeout even on clean return # don't display zmq bug warning for gevent bug (this is getting ridiculous) if self._debug_gevent and timeout and toc-tic > dt and \ self.getsockopt(zmq.EVENTS) & zmq.POLLIN: print("BUG: gevent may have missed a libzmq recv event on %i!" % self.FD, file=sys.stderr) finally: if timeout: timeout.cancel() self.__readable.set() def send(self, data, flags=0, copy=True, track=False): """send, which will only block current greenlet state_changed always fires exactly once (success or fail) at the end of this method. """ # if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised if flags & zmq.NOBLOCK: try: msg = super(_Socket, self).send(data, flags, copy, track) finally: if not self.__in_send_multipart: self.__state_changed() return msg # ensure the zmq.NOBLOCK flag is part of flags flags |= zmq.NOBLOCK while True: # Attempt to complete this operation indefinitely, blocking the current greenlet try: # attempt the actual call msg = super(_Socket, self).send(data, flags, copy, track) except zmq.ZMQError as e: # if the raised ZMQError is not EAGAIN, reraise if e.errno != zmq.EAGAIN: if not self.__in_send_multipart: self.__state_changed() raise else: if not self.__in_send_multipart: self.__state_changed() return msg # defer to the event loop until we're notified the socket is writable self._wait_write() def recv(self, flags=0, copy=True, track=False): """recv, which will only block current greenlet state_changed always fires exactly once (success or fail) at the end of this method. """ if flags & zmq.NOBLOCK: try: msg = super(_Socket, self).recv(flags, copy, track) finally: if not self.__in_recv_multipart: self.__state_changed() return msg flags |= zmq.NOBLOCK while True: try: msg = super(_Socket, self).recv(flags, copy, track) except zmq.ZMQError as e: if e.errno != zmq.EAGAIN: if not self.__in_recv_multipart: self.__state_changed() raise else: if not self.__in_recv_multipart: self.__state_changed() return msg self._wait_read() def send_multipart(self, *args, **kwargs): """wrap send_multipart to prevent state_changed on each partial send""" self.__in_send_multipart = True try: msg = super(_Socket, self).send_multipart(*args, **kwargs) finally: self.__in_send_multipart = False self.__state_changed() return msg def recv_multipart(self, *args, **kwargs): """wrap recv_multipart to prevent state_changed on each partial recv""" self.__in_recv_multipart = True try: msg = super(_Socket, self).recv_multipart(*args, **kwargs) finally: self.__in_recv_multipart = False self.__state_changed() return msg def get(self, opt): """trigger state_changed on getsockopt(EVENTS)""" if opt in TIMEOS: warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning) optval = super(_Socket, self).get(opt) if opt == zmq.EVENTS: self.__state_changed() return optval def set(self, opt, val): """set socket option""" if opt in TIMEOS: warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning) return super(_Socket, self).set(opt, val) class _Context(_original_Context): """Replacement for :class:`zmq.Context` Ensures that the greened Socket above is used in calls to `socket`. 
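# The two timeout/debug attributes used above are plain class-level knobs; a
# sketch of adjusting them globally if you suspect gevent is missing libzmq
# FD events (values here are arbitrary):
import zmq.green as zmq

zmq.Socket._debug_gevent = True        # warn when the fallback timeout fired
zmq.Socket._gevent_bug_timeout = 2.0   # re-check EVENTS at least every 2 s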
""" _socket_class = _Socket pyzmq-16.0.2/zmq/green/device.py000066400000000000000000000016661301503633700165120ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import zmq from zmq.green import Poller def device(device_type, isocket, osocket): """Start a zeromq device (gevent-compatible). Unlike the true zmq.device, this does not release the GIL. Parameters ---------- device_type : (QUEUE, FORWARDER, STREAMER) The type of device to start (ignored). isocket : Socket The Socket instance for the incoming traffic. osocket : Socket The Socket instance for the outbound traffic. """ p = Poller() if osocket == -1: osocket = isocket p.register(isocket, zmq.POLLIN) p.register(osocket, zmq.POLLIN) while True: events = dict(p.poll()) if isocket in events: osocket.send_multipart(isocket.recv_multipart()) if osocket in events: isocket.send_multipart(osocket.recv_multipart()) pyzmq-16.0.2/zmq/green/eventloop/000077500000000000000000000000001301503633700167035ustar00rootroot00000000000000pyzmq-16.0.2/zmq/green/eventloop/__init__.py000066400000000000000000000001031301503633700210060ustar00rootroot00000000000000from zmq.green.eventloop.ioloop import IOLoop __all__ = ['IOLoop']pyzmq-16.0.2/zmq/green/eventloop/ioloop.py000066400000000000000000000005631301503633700205620ustar00rootroot00000000000000from zmq.eventloop.ioloop import * from zmq.green import Poller RealIOLoop = IOLoop RealZMQPoller = ZMQPoller class ZMQPoller(RealZMQPoller): """gevent-compatible version of ioloop.ZMQPoller""" def __init__(self): self._poller = Poller() class IOLoop(RealIOLoop): """gevent-and-zmq-aware tornado IOLoop implementation""" _zmq_impl = ZMQPoller pyzmq-16.0.2/zmq/green/eventloop/zmqstream.py000066400000000000000000000004641301503633700213040ustar00rootroot00000000000000from zmq.eventloop.zmqstream import * from zmq.green.eventloop.ioloop import IOLoop RealZMQStream = ZMQStream class ZMQStream(RealZMQStream): def __init__(self, socket, io_loop=None): io_loop = io_loop or IOLoop.instance() super(ZMQStream, self).__init__(socket, io_loop=io_loop) pyzmq-16.0.2/zmq/green/poll.py000066400000000000000000000055661301503633700162240ustar00rootroot00000000000000import zmq import gevent from gevent import select from zmq import Poller as _original_Poller class _Poller(_original_Poller): """Replacement for :class:`zmq.Poller` Ensures that the greened Poller below is used in calls to :meth:`zmq.Poller.poll`. """ _gevent_bug_timeout = 1.33 # minimum poll interval, for working around gevent bug def _get_descriptors(self): """Returns three elements tuple with socket descriptors ready for gevent.select.select """ rlist = [] wlist = [] xlist = [] for socket, flags in self.sockets: if isinstance(socket, zmq.Socket): rlist.append(socket.getsockopt(zmq.FD)) continue elif isinstance(socket, int): fd = socket elif hasattr(socket, 'fileno'): try: fd = int(socket.fileno()) except: raise ValueError('fileno() must return an valid integer fd') else: raise TypeError('Socket must be a 0MQ socket, an integer fd ' 'or have a fileno() method: %r' % socket) if flags & zmq.POLLIN: rlist.append(fd) if flags & zmq.POLLOUT: wlist.append(fd) if flags & zmq.POLLERR: xlist.append(fd) return (rlist, wlist, xlist) def poll(self, timeout=-1): """Overridden method to ensure that the green version of Poller is used. 
Behaves the same as :meth:`zmq.core.Poller.poll` """ if timeout is None: timeout = -1 if timeout < 0: timeout = -1 rlist = None wlist = None xlist = None if timeout > 0: tout = gevent.Timeout.start_new(timeout/1000.0) else: tout = None try: # Loop until timeout or events available rlist, wlist, xlist = self._get_descriptors() while True: events = super(_Poller, self).poll(0) if events or timeout == 0: return events # wait for activity on sockets in a green way # set a minimum poll frequency, # because gevent < 1.0 cannot be trusted to catch edge-triggered FD events _bug_timeout = gevent.Timeout.start_new(self._gevent_bug_timeout) try: select.select(rlist, wlist, xlist) except gevent.Timeout as t: if t is not _bug_timeout: raise finally: _bug_timeout.cancel() except gevent.Timeout as t: if t is not tout: raise return [] finally: if timeout > 0: tout.cancel() pyzmq-16.0.2/zmq/log/000077500000000000000000000000001301503633700143515ustar00rootroot00000000000000pyzmq-16.0.2/zmq/log/__init__.py000066400000000000000000000000001301503633700164500ustar00rootroot00000000000000pyzmq-16.0.2/zmq/log/handlers.py000066400000000000000000000106421301503633700165260ustar00rootroot00000000000000"""pyzmq logging handlers. This mainly defines the PUBHandler object for publishing logging messages over a zmq.PUB socket. The PUBHandler can be used with the regular logging module, as in:: >>> import logging >>> handler = PUBHandler('tcp://127.0.0.1:12345') >>> handler.root_topic = 'foo' >>> logger = logging.getLogger('foobar') >>> logger.setLevel(logging.DEBUG) >>> logger.addHandler(handler) After this point, all messages logged by ``logger`` will be published on the PUB socket. Code adapted from StarCluster: http://github.com/jtriley/StarCluster/blob/master/starcluster/logger.py """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import logging from logging import INFO, DEBUG, WARN, ERROR, FATAL import zmq from zmq.utils.strtypes import bytes, unicode, cast_bytes TOPIC_DELIM="::" # delimiter for splitting topics on the receiving end. class PUBHandler(logging.Handler): """A basic logging handler that emits log messages through a PUB socket. Takes a PUB socket already bound to interfaces or an interface to bind to. Example:: sock = context.socket(zmq.PUB) sock.bind('inproc://log') handler = PUBHandler(sock) Or:: handler = PUBHandler('inproc://loc') These are equivalent. 
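# A minimal sketch of the greened Poller above: polling with a timeout parks
# only the current greenlet (endpoint name and timeout are arbitrary).
import zmq.green as zmq

ctx = zmq.Context.instance()
pull = ctx.socket(zmq.PULL)
pull.bind("inproc://jobs")

poller = zmq.Poller()
poller.register(pull, zmq.POLLIN)
events = dict(poller.poll(1000))   # waits up to 1 s without blocking other greenlets
if pull in events:
    print(pull.recv())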
Log messages handled by this handler are broadcast with ZMQ topics ``this.root_topic`` comes first, followed by the log level (DEBUG,INFO,etc.), followed by any additional subtopics specified in the message by: log.debug("subtopic.subsub::the real message") """ root_topic="" socket = None formatters = { logging.DEBUG: logging.Formatter( "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"), logging.INFO: logging.Formatter("%(message)s\n"), logging.WARN: logging.Formatter( "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"), logging.ERROR: logging.Formatter( "%(levelname)s %(filename)s:%(lineno)d - %(message)s - %(exc_info)s\n"), logging.CRITICAL: logging.Formatter( "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n")} def __init__(self, interface_or_socket, context=None): logging.Handler.__init__(self) if isinstance(interface_or_socket, zmq.Socket): self.socket = interface_or_socket self.ctx = self.socket.context else: self.ctx = context or zmq.Context() self.socket = self.ctx.socket(zmq.PUB) self.socket.bind(interface_or_socket) def format(self,record): """Format a record.""" return self.formatters[record.levelno].format(record) def emit(self, record): """Emit a log message on my socket.""" try: topic, record.msg = record.msg.split(TOPIC_DELIM,1) except Exception: topic = "" try: bmsg = cast_bytes(self.format(record)) except Exception: self.handleError(record) return topic_list = [] if self.root_topic: topic_list.append(self.root_topic) topic_list.append(record.levelname) if topic: topic_list.append(topic) btopic = b'.'.join(cast_bytes(t) for t in topic_list) self.socket.send_multipart([btopic, bmsg]) class TopicLogger(logging.Logger): """A simple wrapper that takes an additional argument to log methods. All the regular methods exist, but instead of one msg argument, two arguments: topic, msg are passed. That is:: logger.debug('msg') Would become:: logger.debug('topic.sub', 'msg') """ def log(self, level, topic, msg, *args, **kwargs): """Log 'msg % args' with level and topic. To pass exception information, use the keyword argument exc_info with a True value:: logger.log(level, "zmq.fun", "We have a %s", "mysterious problem", exc_info=1) """ logging.Logger.log(self, level, '%s::%s'%(topic,msg), *args, **kwargs) # Generate the methods of TopicLogger, since they are just adding a # topic prefix to a message. for name in "debug warn warning error critical fatal".split(): meth = getattr(logging.Logger,name) setattr(TopicLogger, name, lambda self, level, topic, msg, *args, **kwargs: meth(self, level, topic+TOPIC_DELIM+msg,*args, **kwargs)) pyzmq-16.0.2/zmq/ssh/000077500000000000000000000000001301503633700143655ustar00rootroot00000000000000pyzmq-16.0.2/zmq/ssh/__init__.py000066400000000000000000000000351301503633700164740ustar00rootroot00000000000000from zmq.ssh.tunnel import * pyzmq-16.0.2/zmq/ssh/forward.py000066400000000000000000000067351301503633700164160ustar00rootroot00000000000000# # This file is adapted from a paramiko demo, and thus licensed under LGPL 2.1. # Original Copyright (C) 2003-2007 Robey Pointer # Edits Copyright (C) 2010 The IPython Team # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. 
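# An end-to-end sketch of the PUBHandler above: the handler binds a PUB
# socket, and a SUB socket subscribed to the root topic receives
# (topic, formatted message). Port and topic names are arbitrary.
import logging
import time
import zmq
from zmq.log.handlers import PUBHandler

ctx = zmq.Context.instance()
handler = PUBHandler("tcp://127.0.0.1:12345", context=ctx)   # binds the PUB side
handler.root_topic = "app"

logger = logging.getLogger("app")
logger.setLevel(logging.INFO)
logger.addHandler(handler)

sub = ctx.socket(zmq.SUB)
sub.connect("tcp://127.0.0.1:12345")
sub.setsockopt(zmq.SUBSCRIBE, b"app")
time.sleep(0.2)                        # give the subscription time to propagate

logger.info("worker.status::hello")
topic, msg = sub.recv_multipart()
# topic == b'app.INFO.worker.status', msg == b'hello\n'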
# # Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA. """ Sample script showing how to do local port forwarding over paramiko. This script connects to the requested SSH server and sets up local port forwarding (the openssh -L option) from a local port through a tunneled connection to a destination reachable from the SSH server machine. """ from __future__ import print_function import logging import select try: # Python 3 import socketserver except ImportError: # Python 2 import SocketServer as socketserver logger = logging.getLogger('ssh') class ForwardServer (socketserver.ThreadingTCPServer): daemon_threads = True allow_reuse_address = True class Handler (socketserver.BaseRequestHandler): def handle(self): try: chan = self.ssh_transport.open_channel('direct-tcpip', (self.chain_host, self.chain_port), self.request.getpeername()) except Exception as e: logger.debug('Incoming request to %s:%d failed: %s' % (self.chain_host, self.chain_port, repr(e))) return if chan is None: logger.debug('Incoming request to %s:%d was rejected by the SSH server.' % (self.chain_host, self.chain_port)) return logger.debug('Connected! Tunnel open %r -> %r -> %r' % (self.request.getpeername(), chan.getpeername(), (self.chain_host, self.chain_port))) while True: r, w, x = select.select([self.request, chan], [], []) if self.request in r: data = self.request.recv(1024) if len(data) == 0: break chan.send(data) if chan in r: data = chan.recv(1024) if len(data) == 0: break self.request.send(data) chan.close() self.request.close() logger.debug('Tunnel closed ') def forward_tunnel(local_port, remote_host, remote_port, transport): # this is a little convoluted, but lets me configure things for the Handler # object. (SocketServer doesn't give Handlers any way to access the outer # server normally.) class SubHander (Handler): chain_host = remote_host chain_port = remote_port ssh_transport = transport ForwardServer(('127.0.0.1', local_port), SubHander).serve_forever() __all__ = ['forward_tunnel'] pyzmq-16.0.2/zmq/ssh/tunnel.py000066400000000000000000000306711301503633700162530ustar00rootroot00000000000000"""Basic ssh tunnel utilities, and convenience functions for tunneling zeromq connections. """ # Copyright (C) 2010-2011 IPython Development Team # Copyright (C) 2011- PyZMQ Developers # # Redistributed from IPython under the terms of the BSD License. 
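# A quick usage sketch from outside this module (the host name below is
# hypothetical; see tunnel_connection and open_tunnel for the full API):
#
#     from zmq import ssh
#
#     if ssh.try_passwordless_ssh('user@gateway.example.com', keyfile=None):
#         # keys are already set up; tunnels can be opened without a prompt
#         ...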
from __future__ import print_function import atexit import os import re import signal import socket import sys import warnings from getpass import getpass, getuser from multiprocessing import Process try: with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) import paramiko SSHException = paramiko.ssh_exception.SSHException except ImportError: paramiko = None class SSHException(Exception): pass else: from .forward import forward_tunnel try: import pexpect except ImportError: pexpect = None from ..utils.strtypes import b def select_random_ports(n): """Select and return n random ports that are available.""" ports = [] sockets = [] for i in range(n): sock = socket.socket() sock.bind(('', 0)) ports.append(sock.getsockname()[1]) sockets.append(sock) for sock in sockets: sock.close() return ports #----------------------------------------------------------------------------- # Check for passwordless login #----------------------------------------------------------------------------- _password_pat = re.compile(b(r'pass(word|phrase):'), re.IGNORECASE) def try_passwordless_ssh(server, keyfile, paramiko=None): """Attempt to make an ssh connection without a password. This is mainly used for requiring password input only once when many tunnels may be connected to the same server. If paramiko is None, the default for the platform is chosen. """ if paramiko is None: paramiko = sys.platform == 'win32' if not paramiko: f = _try_passwordless_openssh else: f = _try_passwordless_paramiko return f(server, keyfile) def _try_passwordless_openssh(server, keyfile): """Try passwordless login with shell ssh command.""" if pexpect is None: raise ImportError("pexpect unavailable, use paramiko") cmd = 'ssh -f '+ server if keyfile: cmd += ' -i ' + keyfile cmd += ' exit' # pop SSH_ASKPASS from env env = os.environ.copy() env.pop('SSH_ASKPASS', None) ssh_newkey = 'Are you sure you want to continue connecting' p = pexpect.spawn(cmd, env=env) while True: try: i = p.expect([ssh_newkey, _password_pat], timeout=.1) if i==0: raise SSHException('The authenticity of the host can\'t be established.') except pexpect.TIMEOUT: continue except pexpect.EOF: return True else: return False def _try_passwordless_paramiko(server, keyfile): """Try passwordless login with paramiko.""" if paramiko is None: msg = "Paramiko unavaliable, " if sys.platform == 'win32': msg += "Paramiko is required for ssh tunneled connections on Windows." else: msg += "use OpenSSH." raise ImportError(msg) username, server, port = _split_server(server) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.WarningPolicy()) try: client.connect(server, port, username=username, key_filename=keyfile, look_for_keys=True) except paramiko.AuthenticationException: return False else: client.close() return True def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60): """Connect a socket to an address via an ssh tunnel. This is a wrapper for socket.connect(addr), when addr is not accessible from the local machine. It simply creates an ssh tunnel using the remaining args, and calls socket.connect('tcp://localhost:lport') where lport is the randomly selected local port of the tunnel. 
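    For example, to connect a REQ socket to a service that is only reachable
    from ``login.example.com`` (a minimal sketch; host names are hypothetical)::

        sock = zmq.Context.instance().socket(zmq.REQ)
        tunnel = tunnel_connection(sock, 'tcp://192.168.1.10:5555',
                                   'user@login.example.com')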
""" new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout) socket.connect(new_url) return tunnel def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60): """Open a tunneled connection from a 0MQ url. For use inside tunnel_connection. Returns ------- (url, tunnel) : (str, object) The 0MQ url that has been forwarded, and the tunnel object """ lport = select_random_ports(1)[0] transport, addr = addr.split('://') ip,rport = addr.split(':') rport = int(rport) if paramiko is None: paramiko = sys.platform == 'win32' if paramiko: tunnelf = paramiko_tunnel else: tunnelf = openssh_tunnel tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout) return 'tcp://127.0.0.1:%i'%lport, tunnel def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60): """Create an ssh tunnel using command-line ssh that connects port lport on this machine to localhost:rport on server. The tunnel will automatically close when not in use, remaining open for a minimum of timeout seconds for an initial connection. This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`, as seen from `server`. keyfile and password may be specified, but ssh config is checked for defaults. Parameters ---------- lport : int local port for connecting to the tunnel from this machine. rport : int port on the remote machine to connect to. server : str The ssh server to connect to. The full ssh server string will be parsed. user@server:port remoteip : str [Default: 127.0.0.1] The remote ip, specifying the destination of the tunnel. Default is localhost, which means that the tunnel would redirect localhost:lport on this machine to localhost:rport on the *server*. keyfile : str; path to public key file This specifies a key to be used in ssh login, default None. Regular default ssh keys will be used without specifying this argument. password : str; Your ssh password to the ssh server. Note that if this is left None, you will be prompted for it if passwordless key based login is unavailable. timeout : int [default: 60] The time (in seconds) after which no activity will result in the tunnel closing. This prevents orphaned tunnels from running forever. 
""" if pexpect is None: raise ImportError("pexpect unavailable, use paramiko_tunnel") ssh="ssh " if keyfile: ssh += "-i " + keyfile if ':' in server: server, port = server.split(':') ssh += " -p %s" % port cmd = "%s -O check %s" % (ssh, server) (output, exitstatus) = pexpect.run(cmd, withexitstatus=True) if not exitstatus: pid = int(output[output.find(b"(pid=")+5:output.find(b")")]) cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % ( ssh, lport, remoteip, rport, server) (output, exitstatus) = pexpect.run(cmd, withexitstatus=True) if not exitstatus: atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1)) return pid cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % ( ssh, lport, remoteip, rport, server, timeout) # pop SSH_ASKPASS from env env = os.environ.copy() env.pop('SSH_ASKPASS', None) ssh_newkey = 'Are you sure you want to continue connecting' tunnel = pexpect.spawn(cmd, env=env) failed = False while True: try: i = tunnel.expect([ssh_newkey, _password_pat], timeout=.1) if i==0: raise SSHException('The authenticity of the host can\'t be established.') except pexpect.TIMEOUT: continue except pexpect.EOF: if tunnel.exitstatus: print(tunnel.exitstatus) print(tunnel.before) print(tunnel.after) raise RuntimeError("tunnel '%s' failed to start"%(cmd)) else: return tunnel.pid else: if failed: print("Password rejected, try again") password=None if password is None: password = getpass("%s's password: "%(server)) tunnel.sendline(password) failed = True def _stop_tunnel(cmd): pexpect.run(cmd) def _split_server(server): if '@' in server: username,server = server.split('@', 1) else: username = getuser() if ':' in server: server, port = server.split(':') port = int(port) else: port = 22 return username, server, port def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60): """launch a tunner with paramiko in a subprocess. This should only be used when shell ssh is unavailable (e.g. Windows). This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`, as seen from `server`. If you are familiar with ssh tunnels, this creates the tunnel: ssh server -L localhost:lport:remoteip:rport keyfile and password may be specified, but ssh config is checked for defaults. Parameters ---------- lport : int local port for connecting to the tunnel from this machine. rport : int port on the remote machine to connect to. server : str The ssh server to connect to. The full ssh server string will be parsed. user@server:port remoteip : str [Default: 127.0.0.1] The remote ip, specifying the destination of the tunnel. Default is localhost, which means that the tunnel would redirect localhost:lport on this machine to localhost:rport on the *server*. keyfile : str; path to public key file This specifies a key to be used in ssh login, default None. Regular default ssh keys will be used without specifying this argument. password : str; Your ssh password to the ssh server. Note that if this is left None, you will be prompted for it if passwordless key based login is unavailable. timeout : int [default: 60] The time (in seconds) after which no activity will result in the tunnel closing. This prevents orphaned tunnels from running forever. 
""" if paramiko is None: raise ImportError("Paramiko not available") if password is None: if not _try_passwordless_paramiko(server, keyfile): password = getpass("%s's password: "%(server)) p = Process(target=_paramiko_tunnel, args=(lport, rport, server, remoteip), kwargs=dict(keyfile=keyfile, password=password)) p.daemon = True p.start() return p def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None): """Function for actually starting a paramiko tunnel, to be passed to multiprocessing.Process(target=this), and not called directly. """ username, server, port = _split_server(server) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.WarningPolicy()) try: client.connect(server, port, username=username, key_filename=keyfile, look_for_keys=True, password=password) # except paramiko.AuthenticationException: # if password is None: # password = getpass("%s@%s's password: "%(username, server)) # client.connect(server, port, username=username, password=password) # else: # raise except Exception as e: print('*** Failed to connect to %s:%d: %r' % (server, port, e)) sys.exit(1) # Don't let SIGINT kill the tunnel subprocess signal.signal(signal.SIGINT, signal.SIG_IGN) try: forward_tunnel(lport, remoteip, rport, client.get_transport()) except KeyboardInterrupt: print('SIGINT: Port forwarding stopped cleanly') sys.exit(0) except Exception as e: print("Port forwarding stopped uncleanly: %s"%e) sys.exit(255) if sys.platform == 'win32': ssh_tunnel = paramiko_tunnel else: ssh_tunnel = openssh_tunnel __all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh'] pyzmq-16.0.2/zmq/sugar/000077500000000000000000000000001301503633700147115ustar00rootroot00000000000000pyzmq-16.0.2/zmq/sugar/__init__.py000066400000000000000000000013421301503633700170220ustar00rootroot00000000000000"""pure-Python sugar wrappers for core 0MQ objects.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from zmq.sugar import ( constants, context, frame, poll, socket, tracker, version ) from zmq import error __all__ = ['constants'] for submod in ( constants, context, error, frame, poll, socket, tracker, version ): __all__.extend(submod.__all__) from zmq.error import * from zmq.sugar.context import * from zmq.sugar.tracker import * from zmq.sugar.socket import * from zmq.sugar.constants import * from zmq.sugar.frame import * from zmq.sugar.poll import * from zmq.sugar.version import * # deprecated: from zmq.sugar.stopwatch import Stopwatch __all__.append('Stopwatch') pyzmq-16.0.2/zmq/sugar/attrsettr.py000066400000000000000000000031311301503633700173150ustar00rootroot00000000000000# coding: utf-8 """Mixin for mapping set/getattr to self.set/get""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from . 
import constants class AttributeSetter(object): def __setattr__(self, key, value): """set zmq options by attribute""" # regular setattr only allowed for class-defined attributes for obj in [self] + self.__class__.mro(): if key in obj.__dict__: object.__setattr__(self, key, value) return upper_key = key.upper() try: opt = getattr(constants, upper_key) except AttributeError: raise AttributeError("%s has no such option: %s" % ( self.__class__.__name__, upper_key) ) else: self._set_attr_opt(upper_key, opt, value) def _set_attr_opt(self, name, opt, value): """override if setattr should do something other than call self.set""" self.set(opt, value) def __getattr__(self, key): """get zmq options by attribute""" upper_key = key.upper() try: opt = getattr(constants, upper_key) except AttributeError: raise AttributeError("%s has no such option: %s" % ( self.__class__.__name__, upper_key) ) else: return self._get_attr_opt(upper_key, opt) def _get_attr_opt(self, name, opt): """override if getattr should do something other than call self.get""" return self.get(opt) __all__ = ['AttributeSetter'] pyzmq-16.0.2/zmq/sugar/constants.py000066400000000000000000000044631301503633700173060ustar00rootroot00000000000000"""0MQ Constants.""" # Copyright (c) PyZMQ Developers. # Distributed under the terms of the Modified BSD License. from zmq.backend import constants from zmq.utils.constant_names import ( base_names, switched_sockopt_names, int_sockopt_names, int64_sockopt_names, bytes_sockopt_names, fd_sockopt_names, ctx_opt_names, msg_opt_names, ) #----------------------------------------------------------------------------- # Python module level constants #----------------------------------------------------------------------------- __all__ = [ 'int_sockopts', 'int64_sockopts', 'bytes_sockopts', 'ctx_opts', 'ctx_opt_names', 'DRAFT_API', ] DRAFT_API = constants.DRAFT_API int_sockopts = set() int64_sockopts = set() bytes_sockopts = set() fd_sockopts = set() ctx_opts = set() msg_opts = set() if constants.VERSION < 30000: int64_sockopt_names.extend(switched_sockopt_names) else: int_sockopt_names.extend(switched_sockopt_names) _UNDEFINED = -9999 def _add_constant(name, container=None): """add a constant to be defined optionally add it to one of the sets for use in get/setopt checkers """ c = getattr(constants, name, _UNDEFINED) if c == _UNDEFINED: return globals()[name] = c __all__.append(name) if container is not None: container.add(c) return c for name in base_names: _add_constant(name) for name in int_sockopt_names: _add_constant(name, int_sockopts) for name in int64_sockopt_names: _add_constant(name, int64_sockopts) for name in bytes_sockopt_names: _add_constant(name, bytes_sockopts) for name in fd_sockopt_names: _add_constant(name, fd_sockopts) for name in ctx_opt_names: _add_constant(name, ctx_opts) for name in msg_opt_names: _add_constant(name, msg_opts) # ensure some aliases are always defined aliases = [ ('DONTWAIT', 'NOBLOCK'), ('XREQ', 'DEALER'), ('XREP', 'ROUTER'), ] for group in aliases: undefined = set() found = None for name in group: value = getattr(constants, name, -1) if value != -1: found = value else: undefined.add(name) if found is not None: for name in undefined: globals()[name] = found __all__.append(name) pyzmq-16.0.2/zmq/sugar/context.py000066400000000000000000000141751301503633700167570ustar00rootroot00000000000000# coding: utf-8 """Python bindings for 0MQ.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
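# A minimal usage sketch -- the Context is a context manager, so leaving the
# ``with`` block calls term() (close sockets first, or term() will block):
#
#     import zmq
#
#     with zmq.Context() as ctx:
#         sock = ctx.socket(zmq.PUSH)
#         ...
#         sock.close()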
import atexit from threading import Lock from zmq.backend import Context as ContextBase from . import constants from .attrsettr import AttributeSetter from .constants import ENOTSUP, ctx_opt_names from .socket import Socket from zmq.error import ZMQError # notice when exiting, to avoid triggering term on exit _exiting = False def _notice_atexit(): global _exiting _exiting = True atexit.register(_notice_atexit) class Context(ContextBase, AttributeSetter): """Create a zmq Context A zmq Context creates sockets via its ``ctx.socket`` method. """ sockopts = None _instance = None _instance_lock = Lock() _shadow = False def __init__(self, io_threads=1, **kwargs): super(Context, self).__init__(io_threads=io_threads, **kwargs) if kwargs.get('shadow', False): self._shadow = True else: self._shadow = False self.sockopts = {} def __del__(self): """deleting a Context should terminate it, without trying non-threadsafe destroy""" if not self._shadow and not _exiting: self.term() def __enter__(self): return self def __exit__(self, *args, **kwargs): self.term() def __copy__(self, memo=None): """Copying a Context creates a shadow copy""" return self.__class__.shadow(self.underlying) __deepcopy__ = __copy__ @classmethod def shadow(cls, address): """Shadow an existing libzmq context address is the integer address of the libzmq context or an FFI pointer to it. .. versionadded:: 14.1 """ from zmq.utils.interop import cast_int_addr address = cast_int_addr(address) return cls(shadow=address) @classmethod def shadow_pyczmq(cls, ctx): """Shadow an existing pyczmq context ctx is the FFI `zctx_t *` pointer .. versionadded:: 14.1 """ from pyczmq import zctx from zmq.utils.interop import cast_int_addr underlying = zctx.underlying(ctx) address = cast_int_addr(underlying) return cls(shadow=address) # static method copied from tornado IOLoop.instance @classmethod def instance(cls, io_threads=1): """Returns a global Context instance. Most single-threaded applications have a single, global Context. Use this method instead of passing around Context instances throughout your code. A common pattern for classes that depend on Contexts is to use a default argument to enable programs with multiple Contexts but not require the argument for simpler applications: class MyClass(object): def __init__(self, context=None): self.context = context or Context.instance() """ if cls._instance is None or cls._instance.closed: with cls._instance_lock: if cls._instance is None or cls._instance.closed: cls._instance = cls(io_threads=io_threads) return cls._instance #------------------------------------------------------------------------- # Hooks for ctxopt completion #------------------------------------------------------------------------- def __dir__(self): keys = dir(self.__class__) for collection in ( ctx_opt_names, ): keys.extend(collection) return keys #------------------------------------------------------------------------- # Creating Sockets #------------------------------------------------------------------------- @property def _socket_class(self): return Socket def socket(self, socket_type, **kwargs): """Create a Socket associated with this Context. Parameters ---------- socket_type : int The socket type, which can be any of the 0MQ socket types: REQ, REP, PUB, SUB, PAIR, DEALER, ROUTER, PULL, PUSH, etc. kwargs: will be passed to the __init__ method of the socket class. 
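        For example (a minimal sketch)::

            ctx = zmq.Context.instance()
            req = ctx.socket(zmq.REQ)
            req.connect('tcp://127.0.0.1:5555')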
""" if self.closed: raise ZMQError(ENOTSUP) s = self._socket_class(self, socket_type, **kwargs) for opt, value in self.sockopts.items(): try: s.setsockopt(opt, value) except ZMQError: # ignore ZMQErrors, which are likely for socket options # that do not apply to a particular socket type, e.g. # SUBSCRIBE for non-SUB sockets. pass return s def setsockopt(self, opt, value): """set default socket options for new sockets created by this Context .. versionadded:: 13.0 """ self.sockopts[opt] = value def getsockopt(self, opt): """get default socket options for new sockets created by this Context .. versionadded:: 13.0 """ return self.sockopts[opt] def _set_attr_opt(self, name, opt, value): """set default sockopts as attributes""" if name in constants.ctx_opt_names: return self.set(opt, value) else: self.sockopts[opt] = value def _get_attr_opt(self, name, opt): """get default sockopts as attributes""" if name in constants.ctx_opt_names: return self.get(opt) else: if opt not in self.sockopts: raise AttributeError(name) else: return self.sockopts[opt] def __delattr__(self, key): """delete default sockopts as attributes""" key = key.upper() try: opt = getattr(constants, key) except AttributeError: raise AttributeError("no such socket option: %s" % key) else: if opt not in self.sockopts: raise AttributeError(key) else: del self.sockopts[opt] __all__ = ['Context'] pyzmq-16.0.2/zmq/sugar/frame.py000066400000000000000000000007141301503633700163570ustar00rootroot00000000000000# coding: utf-8 """0MQ Frame pure Python methods.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from .attrsettr import AttributeSetter from zmq.backend import Frame as FrameBase class Frame(FrameBase, AttributeSetter): def __getitem__(self, key): # map Frame['User-Id'] to Frame.get('User-Id') return self.get(key) # keep deprecated alias Message = Frame __all__ = ['Frame', 'Message']pyzmq-16.0.2/zmq/sugar/poll.py000066400000000000000000000123141301503633700162320ustar00rootroot00000000000000"""0MQ polling related functions and classes.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import zmq from zmq.backend import zmq_poll from .constants import POLLIN, POLLOUT, POLLERR #----------------------------------------------------------------------------- # Polling related methods #----------------------------------------------------------------------------- class Poller(object): """A stateful poll interface that mirrors Python's built-in poll.""" sockets = None _map = {} def __init__(self): self.sockets = [] self._map = {} def __contains__(self, socket): return socket in self._map def register(self, socket, flags=POLLIN|POLLOUT): """p.register(socket, flags=POLLIN|POLLOUT) Register a 0MQ socket or native fd for I/O monitoring. register(s,0) is equivalent to unregister(s). Parameters ---------- socket : zmq.Socket or native socket A zmq.Socket or any Python object having a ``fileno()`` method that returns a valid file descriptor. flags : int The events to watch for. Can be POLLIN, POLLOUT or POLLIN|POLLOUT. If `flags=0`, socket will be unregistered. 
""" if flags: if socket in self._map: idx = self._map[socket] self.sockets[idx] = (socket, flags) else: idx = len(self.sockets) self.sockets.append((socket, flags)) self._map[socket] = idx elif socket in self._map: # uregister sockets registered with no events self.unregister(socket) else: # ignore new sockets with no events pass def modify(self, socket, flags=POLLIN|POLLOUT): """Modify the flags for an already registered 0MQ socket or native fd.""" self.register(socket, flags) def unregister(self, socket): """Remove a 0MQ socket or native fd for I/O monitoring. Parameters ---------- socket : Socket The socket instance to stop polling. """ idx = self._map.pop(socket) self.sockets.pop(idx) # shift indices after deletion for socket, flags in self.sockets[idx:]: self._map[socket] -= 1 def poll(self, timeout=None): """Poll the registered 0MQ or native fds for I/O. Parameters ---------- timeout : float, int The timeout in milliseconds. If None, no `timeout` (infinite). This is in milliseconds to be compatible with ``select.poll()``. Returns ------- events : list of tuples The list of events that are ready to be processed. This is a list of tuples of the form ``(socket, event)``, where the 0MQ Socket or integer fd is the first element, and the poll event mask (POLLIN, POLLOUT) is the second. It is common to call ``events = dict(poller.poll())``, which turns the list of tuples into a mapping of ``socket : event``. """ if timeout is None or timeout < 0: timeout = -1 elif isinstance(timeout, float): timeout = int(timeout) return zmq_poll(self.sockets, timeout=timeout) def select(rlist, wlist, xlist, timeout=None): """select(rlist, wlist, xlist, timeout=None) -> (rlist, wlist, xlist) Return the result of poll as a lists of sockets ready for r/w/exception. This has the same interface as Python's built-in ``select.select()`` function. Parameters ---------- timeout : float, int, optional The timeout in seconds. If None, no timeout (infinite). This is in seconds to be compatible with ``select.select()``. rlist : list of sockets/FDs sockets/FDs to be polled for read events wlist : list of sockets/FDs sockets/FDs to be polled for write events xlist : list of sockets/FDs sockets/FDs to be polled for error events Returns ------- (rlist, wlist, xlist) : tuple of lists of sockets (length 3) Lists correspond to sockets available for read/write/error events respectively. """ if timeout is None: timeout = -1 # Convert from sec -> us for zmq_poll. # zmq_poll accepts 3.x style timeout in ms timeout = int(timeout*1000.0) if timeout < 0: timeout = -1 sockets = [] for s in set(rlist + wlist + xlist): flags = 0 if s in rlist: flags |= POLLIN if s in wlist: flags |= POLLOUT if s in xlist: flags |= POLLERR sockets.append((s, flags)) return_sockets = zmq_poll(sockets, timeout) rlist, wlist, xlist = [], [], [] for s, flags in return_sockets: if flags & POLLIN: rlist.append(s) if flags & POLLOUT: wlist.append(s) if flags & POLLERR: xlist.append(s) return rlist, wlist, xlist #----------------------------------------------------------------------------- # Symbols to export #----------------------------------------------------------------------------- __all__ = [ 'Poller', 'select' ] pyzmq-16.0.2/zmq/sugar/socket.py000066400000000000000000000464131301503633700165630ustar00rootroot00000000000000# coding: utf-8 """0MQ Socket pure Python methods.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
import errno import random import sys import warnings import zmq from zmq.backend import Socket as SocketBase from .poll import Poller from . import constants from .attrsettr import AttributeSetter from zmq.error import ZMQError, ZMQBindError from zmq.utils.strtypes import bytes, unicode, basestring from .constants import ( SNDMORE, ENOTSUP, POLLIN, int64_sockopt_names, int_sockopt_names, bytes_sockopt_names, fd_sockopt_names, ) try: import cPickle pickle = cPickle except: cPickle = None import pickle try: DEFAULT_PROTOCOL = pickle.DEFAULT_PROTOCOL except AttributeError: DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL try: _buffer_type = memoryview except NameError: _buffer_type = buffer class Socket(SocketBase, AttributeSetter): """The ZMQ socket object To create a Socket, first create a Context:: ctx = zmq.Context.instance() then call ``ctx.socket(socket_type)``:: s = ctx.socket(zmq.ROUTER) """ _shadow = False _monitor_socket = None def __init__(self, *a, **kw): super(Socket, self).__init__(*a, **kw) if 'shadow' in kw: self._shadow = True else: self._shadow = False def __del__(self): if not self._shadow: self.close() # socket as context manager: def __enter__(self): """Sockets are context managers .. versionadded:: 14.4 """ return self def __exit__(self, *args, **kwargs): self.close() #------------------------------------------------------------------------- # Socket creation #------------------------------------------------------------------------- def __copy__(self, memo=None): """Copying a Socket creates a shadow copy""" return self.__class__.shadow(self.underlying) __deepcopy__ = __copy__ @classmethod def shadow(cls, address): """Shadow an existing libzmq socket address is the integer address of the libzmq socket or an FFI pointer to it. .. versionadded:: 14.1 """ from zmq.utils.interop import cast_int_addr address = cast_int_addr(address) return cls(shadow=address) #------------------------------------------------------------------------- # Deprecated aliases #------------------------------------------------------------------------- @property def socket_type(self): warnings.warn("Socket.socket_type is deprecated, use Socket.type", DeprecationWarning ) return self.type #------------------------------------------------------------------------- # Hooks for sockopt completion #------------------------------------------------------------------------- def __dir__(self): keys = dir(self.__class__) for collection in ( bytes_sockopt_names, int_sockopt_names, int64_sockopt_names, fd_sockopt_names, ): keys.extend(collection) return keys #------------------------------------------------------------------------- # Getting/Setting options #------------------------------------------------------------------------- setsockopt = SocketBase.set getsockopt = SocketBase.get def __setattr__(self, key, value): """override to allow setting zmq.[UN]SUBSCRIBE even though we have a subscribe method""" _key = key.lower() if _key in ('subscribe', 'unsubscribe'): if isinstance(value, unicode): value = value.encode('utf8') if _key == 'subscribe': self.set(zmq.SUBSCRIBE, value) else: self.set(zmq.UNSUBSCRIBE, value) return super(Socket, self).__setattr__(key, value) def subscribe(self, topic): """Subscribe to a topic Only for SUB sockets. .. versionadded:: 15.3 """ if isinstance(topic, unicode): topic = topic.encode('utf8') self.set(zmq.SUBSCRIBE, topic) def unsubscribe(self, topic): """Unsubscribe from a topic Only for SUB sockets. .. 
versionadded:: 15.3 """ if isinstance(topic, unicode): topic = topic.encode('utf8') self.set(zmq.UNSUBSCRIBE, topic) def set_string(self, option, optval, encoding='utf-8'): """set socket options with a unicode object This is simply a wrapper for setsockopt to protect from encoding ambiguity. See the 0MQ documentation for details on specific options. Parameters ---------- option : int The name of the option to set. Can be any of: SUBSCRIBE, UNSUBSCRIBE, IDENTITY optval : unicode string (unicode on py2, str on py3) The value of the option to set. encoding : str The encoding to be used, default is utf8 """ if not isinstance(optval, unicode): raise TypeError("unicode strings only") return self.set(option, optval.encode(encoding)) setsockopt_unicode = setsockopt_string = set_string def get_string(self, option, encoding='utf-8'): """get the value of a socket option See the 0MQ documentation for details on specific options. Parameters ---------- option : int The option to retrieve. Returns ------- optval : unicode string (unicode on py2, str on py3) The value of the option as a unicode string. """ if option not in constants.bytes_sockopts: raise TypeError("option %i will not return a string to be decoded"%option) return self.getsockopt(option).decode(encoding) getsockopt_unicode = getsockopt_string = get_string def bind_to_random_port(self, addr, min_port=49152, max_port=65536, max_tries=100): """bind this socket to a random port in a range If the port range is unspecified, the system will choose the port. Parameters ---------- addr : str The address string without the port to pass to ``Socket.bind()``. min_port : int, optional The minimum port in the range of ports to try (inclusive). max_port : int, optional The maximum port in the range of ports to try (exclusive). max_tries : int, optional The maximum number of bind attempts to make. Returns ------- port : int The port the socket was bound to. Raises ------ ZMQBindError if `max_tries` reached before successful bind """ if hasattr(constants, 'LAST_ENDPOINT') and min_port == 49152 and max_port == 65536: # if LAST_ENDPOINT is supported, and min_port / max_port weren't specified, # we can bind to port 0 and let the OS do the work self.bind("%s:*" % addr) url = self.last_endpoint.decode('ascii', 'replace') _, port_s = url.rsplit(':', 1) return int(port_s) for i in range(max_tries): try: port = random.randrange(min_port, max_port) self.bind('%s:%s' % (addr, port)) except ZMQError as exception: en = exception.errno if en == zmq.EADDRINUSE: continue elif sys.platform == 'win32' and en == errno.EACCES: continue else: raise else: return port raise ZMQBindError("Could not bind socket to random port.") def get_hwm(self): """get the High Water Mark On libzmq ≥ 3, this gets SNDHWM if available, otherwise RCVHWM """ major = zmq.zmq_version_info()[0] if major >= 3: # return sndhwm, fallback on rcvhwm try: return self.getsockopt(zmq.SNDHWM) except zmq.ZMQError: pass return self.getsockopt(zmq.RCVHWM) else: return self.getsockopt(zmq.HWM) def set_hwm(self, value): """set the High Water Mark On libzmq ≥ 3, this sets both SNDHWM and RCVHWM .. warning:: New values only take effect for subsequent socket bind/connects. 
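        For example, these are equivalent::

            s.set_hwm(1000)
            s.hwm = 1000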
""" major = zmq.zmq_version_info()[0] if major >= 3: raised = None try: self.sndhwm = value except Exception as e: raised = e try: self.rcvhwm = value except Exception as e: raised = e if raised: raise raised else: return self.setsockopt(zmq.HWM, value) hwm = property(get_hwm, set_hwm, """property for High Water Mark Setting hwm sets both SNDHWM and RCVHWM as appropriate. It gets SNDHWM if available, otherwise RCVHWM. """ ) #------------------------------------------------------------------------- # Sending and receiving messages #------------------------------------------------------------------------- def send_multipart(self, msg_parts, flags=0, copy=True, track=False): """send a sequence of buffers as a multipart message The zmq.SNDMORE flag is added to all msg parts before the last. Parameters ---------- msg_parts : iterable A sequence of objects to send as a multipart message. Each element can be any sendable object (Frame, bytes, buffer-providers) flags : int, optional SNDMORE is handled automatically for frames before the last. copy : bool, optional Should the frame(s) be sent in a copying or non-copying manner. track : bool, optional Should the frame(s) be tracked for notification that ZMQ has finished with it (ignored if copy=True). Returns ------- None : if copy or not track MessageTracker : if track and not copy a MessageTracker object, whose `pending` property will be True until the last send is completed. """ # typecheck parts before sending: for i,msg in enumerate(msg_parts): if isinstance(msg, (zmq.Frame, bytes, _buffer_type)): continue try: _buffer_type(msg) except Exception: rmsg = repr(msg) if len(rmsg) > 32: rmsg = rmsg[:32] + '...' raise TypeError( "Frame %i (%s) does not support the buffer interface." % ( i, rmsg, )) for msg in msg_parts[:-1]: self.send(msg, SNDMORE|flags, copy=copy, track=track) # Send the last part without the extra SNDMORE flag. return self.send(msg_parts[-1], flags, copy=copy, track=track) def recv_multipart(self, flags=0, copy=True, track=False): """receive a multipart message as a list of bytes or Frame objects Parameters ---------- flags : int, optional Any supported flag: NOBLOCK. If NOBLOCK is set, this method will raise a ZMQError with EAGAIN if a message is not ready. If NOBLOCK is not set, then this method will block until a message arrives. copy : bool, optional Should the message frame(s) be received in a copying or non-copying manner? If False a Frame object is returned for each part, if True a copy of the bytes is made for each frame. track : bool, optional Should the message frame(s) be tracked for notification that ZMQ has finished with it? (ignored if copy=True) Returns ------- msg_parts : list A list of frames in the multipart message; either Frames or bytes, depending on `copy`. """ parts = [self.recv(flags, copy=copy, track=track)] # have first part already, only loop while more to receive while self.getsockopt(zmq.RCVMORE): part = self.recv(flags, copy=copy, track=track) parts.append(part) return parts def _deserialize(self, recvd, load): """Deserialize a received message Override in subclass (e.g. Futures) if recvd is not the raw bytes. The default implementation expects bytes and returns the deserialized message immediately. 
Parameters ---------- load: callable Callable that deserializes bytes recvd: The object returned by self.recv """ return load(recvd) def send_string(self, u, flags=0, copy=True, encoding='utf-8'): """send a Python unicode string as a message with an encoding 0MQ communicates with raw bytes, so you must encode/decode text (unicode on py2, str on py3) around 0MQ. Parameters ---------- u : Python unicode string (unicode on py2, str on py3) The unicode string to send. flags : int, optional Any valid send flag. encoding : str [default: 'utf-8'] The encoding to be used """ if not isinstance(u, basestring): raise TypeError("unicode/str objects only") return self.send(u.encode(encoding), flags=flags, copy=copy) send_unicode = send_string def recv_string(self, flags=0, encoding='utf-8'): """receive a unicode string, as sent by send_string Parameters ---------- flags : int Any valid recv flag. encoding : str [default: 'utf-8'] The encoding to be used Returns ------- s : unicode string (unicode on py2, str on py3) The Python unicode string that arrives as encoded bytes. """ msg = self.recv(flags=flags) return self._deserialize(msg, lambda buf: buf.decode(encoding)) recv_unicode = recv_string def send_pyobj(self, obj, flags=0, protocol=DEFAULT_PROTOCOL): """send a Python object as a message using pickle to serialize Parameters ---------- obj : Python object The Python object to send. flags : int Any valid send flag. protocol : int The pickle protocol number to use. The default is pickle.DEFAULT_PROTOCOL where defined, and pickle.HIGHEST_PROTOCOL elsewhere. """ msg = pickle.dumps(obj, protocol) return self.send(msg, flags) def recv_pyobj(self, flags=0): """receive a Python object as a message using pickle to serialize Parameters ---------- flags : int Any valid recv flag. Returns ------- obj : Python object The Python object that arrives as a message. """ msg = self.recv(flags) return self._deserialize(msg, pickle.loads) def send_json(self, obj, flags=0, **kwargs): """send a Python object as a message using json to serialize Keyword arguments are passed on to json.dumps Parameters ---------- obj : Python object The Python object to send flags : int Any valid send flag """ from zmq.utils import jsonapi msg = jsonapi.dumps(obj, **kwargs) return self.send(msg, flags) def recv_json(self, flags=0, **kwargs): """receive a Python object as a message using json to serialize Keyword arguments are passed on to json.loads Parameters ---------- flags : int Any valid recv flag. Returns ------- obj : Python object The Python object that arrives as a message. """ from zmq.utils import jsonapi msg = self.recv(flags) return self._deserialize(msg, lambda buf: jsonapi.loads(buf, **kwargs)) _poller_class = Poller def poll(self, timeout=None, flags=POLLIN): """poll the socket for events The default is to poll forever for incoming events. Timeout is in milliseconds, if specified. Parameters ---------- timeout : int [default: None] The timeout (in milliseconds) to wait for an event. If unspecified (or specified None), will wait forever for an event. flags : bitfield (int) [default: POLLIN] The event flags to poll for (any combination of POLLIN|POLLOUT). The default is to check for incoming events (POLLIN). Returns ------- events : bitfield (int) The events that are ready and waiting. Will be 0 if no events were ready by the time timeout was reached. 
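        For example, to wait up to one second for an incoming message
        (a minimal sketch)::

            if sock.poll(1000, zmq.POLLIN):
                msg = sock.recv(zmq.DONTWAIT)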
""" if self.closed: raise ZMQError(ENOTSUP) p = self._poller_class() p.register(self, flags) evts = dict(p.poll(timeout)) # return 0 if no events, otherwise return event bitfield return evts.get(self, 0) def get_monitor_socket(self, events=None, addr=None): """Return a connected PAIR socket ready to receive the event notifications. .. versionadded:: libzmq-4.0 .. versionadded:: 14.0 Parameters ---------- events : bitfield (int) [default: ZMQ_EVENTS_ALL] The bitmask defining which events are wanted. addr : string [default: None] The optional endpoint for the monitoring sockets. Returns ------- socket : (PAIR) The socket is already connected and ready to receive messages. """ # safe-guard, method only available on libzmq >= 4 if zmq.zmq_version_info() < (4,): raise NotImplementedError("get_monitor_socket requires libzmq >= 4, have %s" % zmq.zmq_version()) # if already monitoring, return existing socket if self._monitor_socket: if self._monitor_socket.closed: self._monitor_socket = None else: return self._monitor_socket if addr is None: # create endpoint name from internal fd addr = "inproc://monitor.s-%d" % self.FD if events is None: # use all events events = zmq.EVENT_ALL # attach monitoring socket self.monitor(addr, events) # create new PAIR socket and connect it self._monitor_socket = self.context.socket(zmq.PAIR) self._monitor_socket.connect(addr) return self._monitor_socket def disable_monitor(self): """Shutdown the PAIR socket (created using get_monitor_socket) that is serving socket events. .. versionadded:: 14.4 """ self._monitor_socket = None self.monitor(None, 0) __all__ = ['Socket'] pyzmq-16.0.2/zmq/sugar/stopwatch.py000066400000000000000000000016231301503633700173010ustar00rootroot00000000000000"""Deprecated Stopwatch implementation""" # Copyright (c) PyZMQ Development Team. # Distributed under the terms of the Modified BSD License. class Stopwatch(object): """Deprecated zmq.Stopwatch implementation You can use Python's builtin timers (time.monotonic, etc.). """ def __init__(self): import warnings warnings.warn("zmq.Stopwatch is deprecated. Use stdlib time.monotonic and friends instead", DeprecationWarning, stacklevel=2, ) self._start = 0 import time try: self._monotonic = time.monotonic except AttributeError: self._monotonic = time.time def start(self): """Start the counter""" self._start = self._monotonic() def stop(self): """Return time since start in microseconds""" stop = self._monotonic() return int(1e6 * (stop - self._start)) pyzmq-16.0.2/zmq/sugar/tracker.py000066400000000000000000000071651301503633700167270ustar00rootroot00000000000000"""Tracker for zero-copy messages with 0MQ.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import time try: # below 3.3 from threading import _Event as Event except (ImportError, AttributeError): # python throws ImportError, cython throws AttributeError from threading import Event from zmq.error import NotDone from zmq.backend import Frame class MessageTracker(object): """MessageTracker(*towatch) A class for tracking if 0MQ is done using one or more messages. When you send a 0MQ message, it is not sent immediately. The 0MQ IO thread sends the message at some later time. Often you want to know when 0MQ has actually sent the message though. This is complicated by the fact that a single 0MQ message can be sent multiple times using different sockets. This class allows you to track all of the 0MQ usages of a message. Parameters ---------- towatch : Event, MessageTracker, Message instances. 
This objects to track. This class can track the low-level Events used by the Message class, other MessageTrackers or actual Messages. """ events = None peers = None def __init__(self, *towatch): """MessageTracker(*towatch) Create a message tracker to track a set of mesages. Parameters ---------- *towatch : tuple of Event, MessageTracker, Message instances. This list of objects to track. This class can track the low-level Events used by the Message class, other MessageTrackers or actual Messages. """ self.events = set() self.peers = set() for obj in towatch: if isinstance(obj, Event): self.events.add(obj) elif isinstance(obj, MessageTracker): self.peers.add(obj) elif isinstance(obj, Frame): if not obj.tracker: raise ValueError("Not a tracked message") self.peers.add(obj.tracker) else: raise TypeError("Require Events or Message Frames, not %s"%type(obj)) @property def done(self): """Is 0MQ completely done with the message(s) being tracked?""" for evt in self.events: if not evt.is_set(): return False for pm in self.peers: if not pm.done: return False return True def wait(self, timeout=-1): """mt.wait(timeout=-1) Wait for 0MQ to be done with the message or until `timeout`. Parameters ---------- timeout : float [default: -1, wait forever] Maximum time in (s) to wait before raising NotDone. Returns ------- None if done before `timeout` Raises ------ NotDone if `timeout` reached before I am done. """ tic = time.time() if timeout is False or timeout < 0: remaining = 3600*24*7 # a week else: remaining = timeout done = False for evt in self.events: if remaining < 0: raise NotDone evt.wait(timeout=remaining) if not evt.is_set(): raise NotDone toc = time.time() remaining -= (toc-tic) tic = toc for peer in self.peers: if remaining < 0: raise NotDone peer.wait(timeout=remaining) toc = time.time() remaining -= (toc-tic) tic = toc __all__ = ['MessageTracker']pyzmq-16.0.2/zmq/sugar/version.py000066400000000000000000000023171301503633700167530ustar00rootroot00000000000000"""PyZMQ and 0MQ version functions.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from zmq.backend import zmq_version_info VERSION_MAJOR = 16 VERSION_MINOR = 0 VERSION_PATCH = 2 VERSION_EXTRA = "" __version__ = '%i.%i.%i' % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) if VERSION_EXTRA: __version__ = "%s.%s" % (__version__, VERSION_EXTRA) version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, float('inf')) else: version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) __revision__ = '' def pyzmq_version(): """return the version of pyzmq as a string""" if __revision__: return '@'.join([__version__,__revision__[:6]]) else: return __version__ def pyzmq_version_info(): """return the pyzmq version as a tuple of at least three numbers If pyzmq is a development version, `inf` will be appended after the third integer. """ return version_info def zmq_version(): """return the version of libzmq as a string""" return "%i.%i.%i" % zmq_version_info() __all__ = ['zmq_version', 'zmq_version_info', 'pyzmq_version','pyzmq_version_info', '__version__', '__revision__' ] pyzmq-16.0.2/zmq/tests/000077500000000000000000000000001301503633700147325ustar00rootroot00000000000000pyzmq-16.0.2/zmq/tests/__init__.py000066400000000000000000000140741301503633700170510ustar00rootroot00000000000000# Copyright (c) PyZMQ Developers. # Distributed under the terms of the Modified BSD License. 
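# A test case built on these helpers typically looks like this (hypothetical
# example class, not part of the suite):
#
#     class TestPushPull(BaseZMQTestCase):
#         def test_basic(self):
#             push, pull = self.create_bound_pair(zmq.PUSH, zmq.PULL)
#             push.send(b'hi')
#             self.assertEqual(self.recv(pull), b'hi')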
import sys import time from threading import Thread from unittest import TestCase try: from unittest import SkipTest except ImportError: from unittest2 import SkipTest from pytest import mark import zmq from zmq.utils import jsonapi try: import gevent from zmq import green as gzmq have_gevent = True except ImportError: have_gevent = False PYPY = 'PyPy' in sys.version #----------------------------------------------------------------------------- # skip decorators (directly from unittest) #----------------------------------------------------------------------------- _id = lambda x: x skip_pypy = mark.skipif(PYPY, reason="Doesn't work on PyPy") require_zmq_4 = mark.skipif(zmq.zmq_version_info() < (4,), reason="requires zmq >= 4") #----------------------------------------------------------------------------- # Base test class #----------------------------------------------------------------------------- class BaseZMQTestCase(TestCase): green = False @property def Context(self): if self.green: return gzmq.Context else: return zmq.Context def socket(self, socket_type): s = self.context.socket(socket_type) self.sockets.append(s) return s def setUp(self): super(BaseZMQTestCase, self).setUp() if self.green and not have_gevent: raise SkipTest("requires gevent") self.context = self.Context.instance() self.sockets = [] def tearDown(self): contexts = set([self.context]) while self.sockets: sock = self.sockets.pop() contexts.add(sock.context) # in case additional contexts are created sock.close(0) for ctx in contexts: t = Thread(target=ctx.term) t.daemon = True t.start() t.join(timeout=2) if t.is_alive(): # reset Context.instance, so the failure to term doesn't corrupt subsequent tests zmq.sugar.context.Context._instance = None raise RuntimeError("context could not terminate, open sockets likely remain in test") super(BaseZMQTestCase, self).tearDown() def create_bound_pair(self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'): """Create a bound socket pair using a random port.""" s1 = self.context.socket(type1) s1.setsockopt(zmq.LINGER, 0) port = s1.bind_to_random_port(interface) s2 = self.context.socket(type2) s2.setsockopt(zmq.LINGER, 0) s2.connect('%s:%s' % (interface, port)) self.sockets.extend([s1,s2]) return s1, s2 def ping_pong(self, s1, s2, msg): s1.send(msg) msg2 = s2.recv() s2.send(msg2) msg3 = s1.recv() return msg3 def ping_pong_json(self, s1, s2, o): if jsonapi.jsonmod is None: raise SkipTest("No json library") s1.send_json(o) o2 = s2.recv_json() s2.send_json(o2) o3 = s1.recv_json() return o3 def ping_pong_pyobj(self, s1, s2, o): s1.send_pyobj(o) o2 = s2.recv_pyobj() s2.send_pyobj(o2) o3 = s1.recv_pyobj() return o3 def assertRaisesErrno(self, errno, func, *args, **kwargs): try: func(*args, **kwargs) except zmq.ZMQError as e: self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \ got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno))) else: self.fail("Function did not raise any error") def _select_recv(self, multipart, socket, **kwargs): """call recv[_multipart] in a way that raises if there is nothing to receive""" if zmq.zmq_version_info() >= (3,1,0): # zmq 3.1 has a bug, where poll can return false positives, # so we wait a little bit just in case # See LIBZMQ-280 on JIRA time.sleep(0.1) r,w,x = zmq.select([socket], [], [], timeout=5) assert len(r) > 0, "Should have received a message" kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0) recv = socket.recv_multipart if multipart else socket.recv return recv(**kwargs) def recv(self, socket, **kwargs): 
"""call recv in a way that raises if there is nothing to receive""" return self._select_recv(False, socket, **kwargs) def recv_multipart(self, socket, **kwargs): """call recv_multipart in a way that raises if there is nothing to receive""" return self._select_recv(True, socket, **kwargs) class PollZMQTestCase(BaseZMQTestCase): pass class GreenTest: """Mixin for making green versions of test classes""" green = True def assertRaisesErrno(self, errno, func, *args, **kwargs): if errno == zmq.EAGAIN: raise SkipTest("Skipping because we're green.") try: func(*args, **kwargs) except zmq.ZMQError: e = sys.exc_info()[1] self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \ got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno))) else: self.fail("Function did not raise any error") def tearDown(self): contexts = set([self.context]) while self.sockets: sock = self.sockets.pop() contexts.add(sock.context) # in case additional contexts are created sock.close() try: gevent.joinall([gevent.spawn(ctx.term) for ctx in contexts], timeout=2, raise_error=True) except gevent.Timeout: raise RuntimeError("context could not terminate, open sockets likely remain in test") def skip_green(self): raise SkipTest("Skipping because we are green") def skip_green(f): def skipping_test(self, *args, **kwargs): if self.green: raise SkipTest("Skipping because we are green") else: return f(self, *args, **kwargs) return skipping_test pyzmq-16.0.2/zmq/tests/asyncio/000077500000000000000000000000001301503633700163775ustar00rootroot00000000000000pyzmq-16.0.2/zmq/tests/asyncio/__init__.py000066400000000000000000000000001301503633700204760ustar00rootroot00000000000000pyzmq-16.0.2/zmq/tests/asyncio/_test_asyncio.py000066400000000000000000000255121301503633700216210ustar00rootroot00000000000000"""Test asyncio support""" # Copyright (c) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
import os import sys import pytest from pytest import mark import zmq from zmq.utils.strtypes import u try: import asyncio import zmq.asyncio as zaio from zmq.auth.asyncio import AsyncioAuthenticator except ImportError: if sys.version_info >= (3,4): raise asyncio = None from concurrent.futures import CancelledError from zmq.tests import BaseZMQTestCase, SkipTest from zmq.tests.test_auth import TestThreadAuthentication class TestAsyncIOSocket(BaseZMQTestCase): if asyncio is not None: Context = zaio.Context def setUp(self): if asyncio is None: raise SkipTest() self.loop = zaio.ZMQEventLoop() asyncio.set_event_loop(self.loop) super(TestAsyncIOSocket, self).setUp() def tearDown(self): self.loop.close() super().tearDown() def test_socket_class(self): s = self.context.socket(zmq.PUSH) assert isinstance(s, zaio.Socket) s.close() def test_recv_multipart(self): @asyncio.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = b.recv_multipart() assert not f.done() yield from a.send(b'hi') recvd = yield from f self.assertEqual(recvd, [b'hi']) self.loop.run_until_complete(test()) def test_recv(self): @asyncio.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f1 = b.recv() f2 = b.recv() assert not f1.done() assert not f2.done() yield from a.send_multipart([b'hi', b'there']) recvd = yield from f2 assert f1.done() self.assertEqual(f1.result(), b'hi') self.assertEqual(recvd, b'there') self.loop.run_until_complete(test()) @mark.skipif(not hasattr(zmq, 'RCVTIMEO'), reason="requires RCVTIMEO") def test_recv_timeout(self): @asyncio.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) b.rcvtimeo = 100 f1 = b.recv() b.rcvtimeo = 1000 f2 = b.recv_multipart() with self.assertRaises(zmq.Again): yield from f1 yield from a.send_multipart([b'hi', b'there']) recvd = yield from f2 assert f2.done() self.assertEqual(recvd, [b'hi', b'there']) self.loop.run_until_complete(test()) @mark.skipif(not hasattr(zmq, 'SNDTIMEO'), reason="requires SNDTIMEO") def test_send_timeout(self): @asyncio.coroutine def test(): s = self.socket(zmq.PUSH) s.sndtimeo = 100 with self.assertRaises(zmq.Again): yield from s.send(b'not going anywhere') self.loop.run_until_complete(test()) def test_recv_string(self): @asyncio.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = b.recv_string() assert not f.done() msg = u('πøøπ') yield from a.send_string(msg) recvd = yield from f assert f.done() self.assertEqual(f.result(), msg) self.assertEqual(recvd, msg) self.loop.run_until_complete(test()) def test_recv_json(self): @asyncio.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = b.recv_json() assert not f.done() obj = dict(a=5) yield from a.send_json(obj) recvd = yield from f assert f.done() self.assertEqual(f.result(), obj) self.assertEqual(recvd, obj) self.loop.run_until_complete(test()) def test_recv_json_cancelled(self): @asyncio.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = b.recv_json() assert not f.done() f.cancel() # cycle eventloop to allow cancel events to fire yield from asyncio.sleep(0) obj = dict(a=5) yield from a.send_json(obj) with pytest.raises(CancelledError): recvd = yield from f assert f.done() # give it a chance to incorrectly consume the event events = yield from b.poll(timeout=5) assert events yield from asyncio.sleep(0) # make sure cancelled recv didn't eat up event f = b.recv_json() recvd = yield from asyncio.wait_for(f, timeout=5) assert recvd == obj self.loop.run_until_complete(test()) def 
test_recv_pyobj(self): @asyncio.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = b.recv_pyobj() assert not f.done() obj = dict(a=5) yield from a.send_pyobj(obj) recvd = yield from f assert f.done() self.assertEqual(f.result(), obj) self.assertEqual(recvd, obj) self.loop.run_until_complete(test()) def test_recv_dontwait(self): @asyncio.coroutine def test(): push, pull = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = pull.recv(zmq.DONTWAIT) with self.assertRaises(zmq.Again): yield from f yield from push.send(b'ping') yield from pull.poll() # ensure message will be waiting f = pull.recv(zmq.DONTWAIT) assert f.done() msg = yield from f self.assertEqual(msg, b'ping') self.loop.run_until_complete(test()) def test_recv_cancel(self): @asyncio.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f1 = b.recv() f2 = b.recv_multipart() assert f1.cancel() assert f1.done() assert not f2.done() yield from a.send_multipart([b'hi', b'there']) recvd = yield from f2 assert f1.cancelled() assert f2.done() self.assertEqual(recvd, [b'hi', b'there']) self.loop.run_until_complete(test()) def test_poll(self): @asyncio.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = b.poll(timeout=0) yield from asyncio.sleep(0) self.assertEqual(f.result(), 0) f = b.poll(timeout=1) assert not f.done() evt = yield from f self.assertEqual(evt, 0) f = b.poll(timeout=1000) assert not f.done() yield from a.send_multipart([b'hi', b'there']) evt = yield from f self.assertEqual(evt, zmq.POLLIN) recvd = yield from b.recv_multipart() self.assertEqual(recvd, [b'hi', b'there']) self.loop.run_until_complete(test()) def test_aiohttp(self): try: import aiohttp except ImportError: raise SkipTest("Requires aiohttp") from aiohttp import web zmq.asyncio.install() @asyncio.coroutine def echo(request): print(request.path) return web.Response(body=str(request).encode('utf8')) @asyncio.coroutine def server(loop): app = web.Application(loop=loop) app.router.add_route('GET', '/', echo) srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 8080) print("Server started at http://127.0.0.1:8080") return srv @asyncio.coroutine def client(): push, pull = self.create_bound_pair(zmq.PUSH, zmq.PULL) res = yield from aiohttp.request('GET', 'http://127.0.0.1:8080/') text = yield from res.text() yield from push.send(text.encode('utf8')) rcvd = yield from pull.recv() self.assertEqual(rcvd.decode('utf8'), text) loop = asyncio.get_event_loop() loop.run_until_complete(server(loop)) print("servered") loop.run_until_complete(client()) def test_poll_raw(self): @asyncio.coroutine def test(): p = zaio.Poller() # make a pipe r, w = os.pipe() r = os.fdopen(r, 'rb') w = os.fdopen(w, 'wb') # POLLOUT p.register(r, zmq.POLLIN) p.register(w, zmq.POLLOUT) evts = yield from p.poll(timeout=1) evts = dict(evts) assert r.fileno() not in evts assert w.fileno() in evts assert evts[w.fileno()] == zmq.POLLOUT # POLLIN p.unregister(w) w.write(b'x') w.flush() evts = yield from p.poll(timeout=1000) evts = dict(evts) assert r.fileno() in evts assert evts[r.fileno()] == zmq.POLLIN assert r.read(1) == b'x' r.close() w.close() loop = asyncio.get_event_loop() loop.run_until_complete(test()) class TestAsyncioAuthentication(TestThreadAuthentication): """Test authentication running in a asyncio task""" if asyncio is not None: Context = zaio.Context def shortDescription(self): """Rewrite doc strings from TestThreadAuthentication from 'threaded' to 'asyncio'. 
""" doc = self._testMethodDoc if doc: doc = doc.split("\n")[0].strip() if doc.startswith('threaded auth'): doc = doc.replace('threaded auth', 'asyncio auth') return doc def setUp(self): if asyncio is None: raise SkipTest() self.loop = zaio.ZMQEventLoop() asyncio.set_event_loop(self.loop) super().setUp() def tearDown(self): super().tearDown() self.loop.close() def make_auth(self): return AsyncioAuthenticator(self.context) def can_connect(self, server, client): """Check if client can connect to server using tcp transport""" @asyncio.coroutine def go(): result = False iface = 'tcp://127.0.0.1' port = server.bind_to_random_port(iface) client.connect("%s:%i" % (iface, port)) msg = [b"Hello World"] yield from server.send_multipart(msg) if (yield from client.poll(1000)): rcvd_msg = yield from client.recv_multipart() self.assertEqual(rcvd_msg, msg) result = True return result return self.loop.run_until_complete(go()) pyzmq-16.0.2/zmq/tests/asyncio/test_asyncio.py000066400000000000000000000002121301503633700214500ustar00rootroot00000000000000"""Test asyncio support""" try: from ._test_asyncio import TestAsyncIOSocket, TestAsyncioAuthentication except SyntaxError: pass pyzmq-16.0.2/zmq/tests/test_auth.py000066400000000000000000000375651301503633700173240ustar00rootroot00000000000000# -*- coding: utf8 -*- # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import logging import os import shutil import sys import tempfile import zmq.auth from zmq.auth.ioloop import IOLoopAuthenticator from zmq.auth.thread import ThreadAuthenticator from zmq.eventloop import ioloop, zmqstream from zmq.tests import (BaseZMQTestCase, SkipTest) class BaseAuthTestCase(BaseZMQTestCase): def setUp(self): if zmq.zmq_version_info() < (4,0): raise SkipTest("security is new in libzmq 4.0") try: zmq.curve_keypair() except zmq.ZMQError: raise SkipTest("security requires libzmq to have curve support") super(BaseAuthTestCase, self).setUp() # enable debug logging while we run tests logging.getLogger('zmq.auth').setLevel(logging.DEBUG) self.auth = self.make_auth() self.auth.start() self.base_dir, self.public_keys_dir, self.secret_keys_dir = self.create_certs() def make_auth(self): raise NotImplementedError() def tearDown(self): if self.auth: self.auth.stop() self.auth = None self.remove_certs(self.base_dir) super(BaseAuthTestCase, self).tearDown() def create_certs(self): """Create CURVE certificates for a test""" # Create temporary CURVE keypairs for this test run. We create all keys in a # temp directory and then move them into the appropriate private or public # directory. 
base_dir = tempfile.mkdtemp() keys_dir = os.path.join(base_dir, 'certificates') public_keys_dir = os.path.join(base_dir, 'public_keys') secret_keys_dir = os.path.join(base_dir, 'private_keys') os.mkdir(keys_dir) os.mkdir(public_keys_dir) os.mkdir(secret_keys_dir) server_public_file, server_secret_file = zmq.auth.create_certificates(keys_dir, "server") client_public_file, client_secret_file = zmq.auth.create_certificates(keys_dir, "client") for key_file in os.listdir(keys_dir): if key_file.endswith(".key"): shutil.move(os.path.join(keys_dir, key_file), os.path.join(public_keys_dir, '.')) for key_file in os.listdir(keys_dir): if key_file.endswith(".key_secret"): shutil.move(os.path.join(keys_dir, key_file), os.path.join(secret_keys_dir, '.')) return (base_dir, public_keys_dir, secret_keys_dir) def remove_certs(self, base_dir): """Remove certificates for a test""" shutil.rmtree(base_dir) def load_certs(self, secret_keys_dir): """Return server and client certificate keys""" server_secret_file = os.path.join(secret_keys_dir, "server.key_secret") client_secret_file = os.path.join(secret_keys_dir, "client.key_secret") server_public, server_secret = zmq.auth.load_certificate(server_secret_file) client_public, client_secret = zmq.auth.load_certificate(client_secret_file) return server_public, server_secret, client_public, client_secret class TestThreadAuthentication(BaseAuthTestCase): """Test authentication running in a thread""" def make_auth(self): return ThreadAuthenticator(self.context) def can_connect(self, server, client): """Check if client can connect to server using tcp transport""" result = False iface = 'tcp://127.0.0.1' port = server.bind_to_random_port(iface) client.connect("%s:%i" % (iface, port)) msg = [b"Hello World"] if server.poll(1000, zmq.POLLOUT): server.send_multipart(msg) if client.poll(1000): rcvd_msg = client.recv_multipart() self.assertEqual(rcvd_msg, msg) result = True return result def test_null(self): """threaded auth - NULL""" # A default NULL connection should always succeed, and not # go through our authentication infrastructure at all. self.auth.stop() self.auth = None # use a new context, so ZAP isn't inherited self.context = self.Context() server = self.socket(zmq.PUSH) client = self.socket(zmq.PULL) self.assertTrue(self.can_connect(server, client)) # By setting a domain we switch on authentication for NULL sockets, # though no policies are configured yet. The client connection # should still be allowed. server = self.socket(zmq.PUSH) server.zap_domain = b'global' client = self.socket(zmq.PULL) self.assertTrue(self.can_connect(server, client)) def test_blacklist(self): """threaded auth - Blacklist""" # Blacklist 127.0.0.1, connection should fail self.auth.deny('127.0.0.1') server = self.socket(zmq.PUSH) # By setting a domain we switch on authentication for NULL sockets, # though no policies are configured yet. server.zap_domain = b'global' client = self.socket(zmq.PULL) self.assertFalse(self.can_connect(server, client)) def test_whitelist(self): """threaded auth - Whitelist""" # Whitelist 127.0.0.1, connection should pass" self.auth.allow('127.0.0.1') server = self.socket(zmq.PUSH) # By setting a domain we switch on authentication for NULL sockets, # though no policies are configured yet. 
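# The allow() call above whitelists 127.0.0.1, so the ZAP handler should
# accept this connection once the domain is set below.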
server.zap_domain = b'global' client = self.socket(zmq.PULL) self.assertTrue(self.can_connect(server, client)) def test_plain(self): """threaded auth - PLAIN""" # Try PLAIN authentication - without configuring server, connection should fail server = self.socket(zmq.PUSH) server.plain_server = True client = self.socket(zmq.PULL) client.plain_username = b'admin' client.plain_password = b'Password' self.assertFalse(self.can_connect(server, client)) # Try PLAIN authentication - with server configured, connection should pass server = self.socket(zmq.PUSH) server.plain_server = True client = self.socket(zmq.PULL) client.plain_username = b'admin' client.plain_password = b'Password' self.auth.configure_plain(domain='*', passwords={'admin': 'Password'}) self.assertTrue(self.can_connect(server, client)) # Try PLAIN authentication - with bogus credentials, connection should fail server = self.socket(zmq.PUSH) server.plain_server = True client = self.socket(zmq.PULL) client.plain_username = b'admin' client.plain_password = b'Bogus' self.assertFalse(self.can_connect(server, client)) # Remove authenticator and check that a normal connection works self.auth.stop() self.auth = None server = self.socket(zmq.PUSH) client = self.socket(zmq.PULL) self.assertTrue(self.can_connect(server, client)) client.close() server.close() def test_curve(self): """threaded auth - CURVE""" self.auth.allow('127.0.0.1') certs = self.load_certs(self.secret_keys_dir) server_public, server_secret, client_public, client_secret = certs #Try CURVE authentication - without configuring server, connection should fail server = self.socket(zmq.PUSH) server.curve_publickey = server_public server.curve_secretkey = server_secret server.curve_server = True client = self.socket(zmq.PULL) client.curve_publickey = client_public client.curve_secretkey = client_secret client.curve_serverkey = server_public self.assertFalse(self.can_connect(server, client)) #Try CURVE authentication - with server configured to CURVE_ALLOW_ANY, connection should pass self.auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) server = self.socket(zmq.PUSH) server.curve_publickey = server_public server.curve_secretkey = server_secret server.curve_server = True client = self.socket(zmq.PULL) client.curve_publickey = client_public client.curve_secretkey = client_secret client.curve_serverkey = server_public self.assertTrue(self.can_connect(server, client)) # Try CURVE authentication - with server configured, connection should pass self.auth.configure_curve(domain='*', location=self.public_keys_dir) server = self.socket(zmq.PUSH) server.curve_publickey = server_public server.curve_secretkey = server_secret server.curve_server = True client = self.socket(zmq.PULL) client.curve_publickey = client_public client.curve_secretkey = client_secret client.curve_serverkey = server_public self.assertTrue(self.can_connect(server, client)) # Remove authenticator and check that a normal connection works self.auth.stop() self.auth = None # Try connecting using NULL and no authentication enabled, connection should pass server = self.socket(zmq.PUSH) client = self.socket(zmq.PULL) self.assertTrue(self.can_connect(server, client)) def with_ioloop(method, expect_success=True): """decorator for running tests with an IOLoop""" def test_method(self): r = method(self) loop = self.io_loop if expect_success: self.pullstream.on_recv(self.on_message_succeed) else: self.pullstream.on_recv(self.on_message_fail) loop.call_later(1, self.attempt_connection) loop.call_later(1.2, 
self.send_msg) if expect_success: loop.call_later(2, self.on_test_timeout_fail) else: loop.call_later(2, self.on_test_timeout_succeed) loop.start() if self.fail_msg: self.fail(self.fail_msg) return r return test_method def should_auth(method): return with_ioloop(method, True) def should_not_auth(method): return with_ioloop(method, False) class TestIOLoopAuthentication(BaseAuthTestCase): """Test authentication running in ioloop""" def setUp(self): self.fail_msg = None self.io_loop = ioloop.IOLoop() super(TestIOLoopAuthentication, self).setUp() self.server = self.socket(zmq.PUSH) self.client = self.socket(zmq.PULL) self.pushstream = zmqstream.ZMQStream(self.server, self.io_loop) self.pullstream = zmqstream.ZMQStream(self.client, self.io_loop) def make_auth(self): return IOLoopAuthenticator(self.context, io_loop=self.io_loop) def tearDown(self): if self.auth: self.auth.stop() self.auth = None self.io_loop.close(all_fds=True) super(TestIOLoopAuthentication, self).tearDown() def attempt_connection(self): """Check if client can connect to server using tcp transport""" iface = 'tcp://127.0.0.1' port = self.server.bind_to_random_port(iface) self.client.connect("%s:%i" % (iface, port)) def send_msg(self): """Send a message from server to a client""" msg = [b"Hello World"] self.pushstream.send_multipart(msg) def on_message_succeed(self, frames): """A message was received, as expected.""" if frames != [b"Hello World"]: self.fail_msg = "Unexpected message received" self.io_loop.stop() def on_message_fail(self, frames): """A message was received, unexpectedly.""" self.fail_msg = 'Received messaged unexpectedly, security failed' self.io_loop.stop() def on_test_timeout_succeed(self): """Test timer expired, indicates test success""" self.io_loop.stop() def on_test_timeout_fail(self): """Test timer expired, indicates test failure""" self.fail_msg = 'Test timed out' self.io_loop.stop() @should_auth def test_none(self): """ioloop auth - NONE""" # A default NULL connection should always succeed, and not # go through our authentication infrastructure at all. # no auth should be running self.auth.stop() self.auth = None @should_auth def test_null(self): """ioloop auth - NULL""" # By setting a domain we switch on authentication for NULL sockets, # though no policies are configured yet. The client connection # should still be allowed. 
self.server.zap_domain = b'global' @should_not_auth def test_blacklist(self): """ioloop auth - Blacklist""" # Blacklist 127.0.0.1, connection should fail self.auth.deny('127.0.0.1') self.server.zap_domain = b'global' @should_auth def test_whitelist(self): """ioloop auth - Whitelist""" # Whitelist 127.0.0.1, which overrides the blacklist, connection should pass" self.auth.allow('127.0.0.1') self.server.setsockopt(zmq.ZAP_DOMAIN, b'global') @should_not_auth def test_plain_unconfigured_server(self): """ioloop auth - PLAIN, unconfigured server""" self.client.plain_username = b'admin' self.client.plain_password = b'Password' # Try PLAIN authentication - without configuring server, connection should fail self.server.plain_server = True @should_auth def test_plain_configured_server(self): """ioloop auth - PLAIN, configured server""" self.client.plain_username = b'admin' self.client.plain_password = b'Password' # Try PLAIN authentication - with server configured, connection should pass self.server.plain_server = True self.auth.configure_plain(domain='*', passwords={'admin': 'Password'}) @should_not_auth def test_plain_bogus_credentials(self): """ioloop auth - PLAIN, bogus credentials""" self.client.plain_username = b'admin' self.client.plain_password = b'Bogus' self.server.plain_server = True self.auth.configure_plain(domain='*', passwords={'admin': 'Password'}) @should_not_auth def test_curve_unconfigured_server(self): """ioloop auth - CURVE, unconfigured server""" certs = self.load_certs(self.secret_keys_dir) server_public, server_secret, client_public, client_secret = certs self.auth.allow('127.0.0.1') self.server.curve_publickey = server_public self.server.curve_secretkey = server_secret self.server.curve_server = True self.client.curve_publickey = client_public self.client.curve_secretkey = client_secret self.client.curve_serverkey = server_public @should_auth def test_curve_allow_any(self): """ioloop auth - CURVE, CURVE_ALLOW_ANY""" certs = self.load_certs(self.secret_keys_dir) server_public, server_secret, client_public, client_secret = certs self.auth.allow('127.0.0.1') self.auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) self.server.curve_publickey = server_public self.server.curve_secretkey = server_secret self.server.curve_server = True self.client.curve_publickey = client_public self.client.curve_secretkey = client_secret self.client.curve_serverkey = server_public @should_auth def test_curve_configured_server(self): """ioloop auth - CURVE, configured server""" self.auth.allow('127.0.0.1') certs = self.load_certs(self.secret_keys_dir) server_public, server_secret, client_public, client_secret = certs self.auth.configure_curve(domain='*', location=self.public_keys_dir) self.server.curve_publickey = server_public self.server.curve_secretkey = server_secret self.server.curve_server = True self.client.curve_publickey = client_public self.client.curve_secretkey = client_secret self.client.curve_serverkey = server_public pyzmq-16.0.2/zmq/tests/test_cffi_backend.py000066400000000000000000000224111301503633700207210ustar00rootroot00000000000000# -*- coding: utf8 -*- import sys import time from unittest import TestCase from zmq.tests import BaseZMQTestCase, SkipTest try: from zmq.backend.cffi import ( zmq_version_info, PUSH, PULL, IDENTITY, REQ, REP, POLLIN, POLLOUT, ) from zmq.backend.cffi._cffi import ffi, C have_ffi_backend = True except ImportError: have_ffi_backend = False class TestCFFIBackend(TestCase): def setUp(self): if not have_ffi_backend: raise SkipTest('CFFI 
not available') def test_zmq_version_info(self): version = zmq_version_info() assert version[0] in range(2,11) def test_zmq_ctx_new_destroy(self): ctx = C.zmq_ctx_new() assert ctx != ffi.NULL assert 0 == C.zmq_ctx_destroy(ctx) def test_zmq_socket_open_close(self): ctx = C.zmq_ctx_new() socket = C.zmq_socket(ctx, PUSH) assert ctx != ffi.NULL assert ffi.NULL != socket assert 0 == C.zmq_close(socket) assert 0 == C.zmq_ctx_destroy(ctx) def test_zmq_setsockopt(self): ctx = C.zmq_ctx_new() socket = C.zmq_socket(ctx, PUSH) identity = ffi.new('char[3]', b'zmq') ret = C.zmq_setsockopt(socket, IDENTITY, ffi.cast('void*', identity), 3) assert ret == 0 assert ctx != ffi.NULL assert ffi.NULL != socket assert 0 == C.zmq_close(socket) assert 0 == C.zmq_ctx_destroy(ctx) def test_zmq_getsockopt(self): ctx = C.zmq_ctx_new() socket = C.zmq_socket(ctx, PUSH) identity = ffi.new('char[]', b'zmq') ret = C.zmq_setsockopt(socket, IDENTITY, ffi.cast('void*', identity), 3) assert ret == 0 option_len = ffi.new('size_t*', 3) option = ffi.new('char[3]') ret = C.zmq_getsockopt(socket, IDENTITY, ffi.cast('void*', option), option_len) assert ret == 0 assert ffi.string(ffi.cast('char*', option))[0:1] == b"z" assert ffi.string(ffi.cast('char*', option))[1:2] == b"m" assert ffi.string(ffi.cast('char*', option))[2:3] == b"q" assert ctx != ffi.NULL assert ffi.NULL != socket assert 0 == C.zmq_close(socket) assert 0 == C.zmq_ctx_destroy(ctx) def test_zmq_bind(self): ctx = C.zmq_ctx_new() socket = C.zmq_socket(ctx, 8) assert 0 == C.zmq_bind(socket, b'tcp://*:4444') assert ctx != ffi.NULL assert ffi.NULL != socket assert 0 == C.zmq_close(socket) assert 0 == C.zmq_ctx_destroy(ctx) def test_zmq_bind_connect(self): ctx = C.zmq_ctx_new() socket1 = C.zmq_socket(ctx, PUSH) socket2 = C.zmq_socket(ctx, PULL) assert 0 == C.zmq_bind(socket1, b'tcp://*:4444') assert 0 == C.zmq_connect(socket2, b'tcp://127.0.0.1:4444') assert ctx != ffi.NULL assert ffi.NULL != socket1 assert ffi.NULL != socket2 assert 0 == C.zmq_close(socket1) assert 0 == C.zmq_close(socket2) assert 0 == C.zmq_ctx_destroy(ctx) def test_zmq_msg_init_close(self): zmq_msg = ffi.new('zmq_msg_t*') assert ffi.NULL != zmq_msg assert 0 == C.zmq_msg_init(zmq_msg) assert 0 == C.zmq_msg_close(zmq_msg) def test_zmq_msg_init_size(self): zmq_msg = ffi.new('zmq_msg_t*') assert ffi.NULL != zmq_msg assert 0 == C.zmq_msg_init_size(zmq_msg, 10) assert 0 == C.zmq_msg_close(zmq_msg) def test_zmq_msg_init_data(self): zmq_msg = ffi.new('zmq_msg_t*') message = ffi.new('char[5]', b'Hello') assert 0 == C.zmq_msg_init_data(zmq_msg, ffi.cast('void*', message), 5, ffi.NULL, ffi.NULL) assert ffi.NULL != zmq_msg assert 0 == C.zmq_msg_close(zmq_msg) def test_zmq_msg_data(self): zmq_msg = ffi.new('zmq_msg_t*') message = ffi.new('char[]', b'Hello') assert 0 == C.zmq_msg_init_data(zmq_msg, ffi.cast('void*', message), 5, ffi.NULL, ffi.NULL) data = C.zmq_msg_data(zmq_msg) assert ffi.NULL != zmq_msg assert ffi.string(ffi.cast("char*", data)) == b'Hello' assert 0 == C.zmq_msg_close(zmq_msg) def test_zmq_send(self): ctx = C.zmq_ctx_new() sender = C.zmq_socket(ctx, REQ) receiver = C.zmq_socket(ctx, REP) assert 0 == C.zmq_bind(receiver, b'tcp://*:7777') assert 0 == C.zmq_connect(sender, b'tcp://127.0.0.1:7777') time.sleep(0.1) zmq_msg = ffi.new('zmq_msg_t*') message = ffi.new('char[5]', b'Hello') C.zmq_msg_init_data(zmq_msg, ffi.cast('void*', message), ffi.cast('size_t', 5), ffi.NULL, ffi.NULL) assert 5 == C.zmq_msg_send(zmq_msg, sender, 0) assert 0 == C.zmq_msg_close(zmq_msg) assert C.zmq_close(sender) == 0 assert 
C.zmq_close(receiver) == 0 assert C.zmq_ctx_destroy(ctx) == 0 def test_zmq_recv(self): ctx = C.zmq_ctx_new() sender = C.zmq_socket(ctx, REQ) receiver = C.zmq_socket(ctx, REP) assert 0 == C.zmq_bind(receiver, b'tcp://*:2222') assert 0 == C.zmq_connect(sender, b'tcp://127.0.0.1:2222') time.sleep(0.1) zmq_msg = ffi.new('zmq_msg_t*') message = ffi.new('char[5]', b'Hello') C.zmq_msg_init_data(zmq_msg, ffi.cast('void*', message), ffi.cast('size_t', 5), ffi.NULL, ffi.NULL) zmq_msg2 = ffi.new('zmq_msg_t*') C.zmq_msg_init(zmq_msg2) assert 5 == C.zmq_msg_send(zmq_msg, sender, 0) assert 5 == C.zmq_msg_recv(zmq_msg2, receiver, 0) assert 5 == C.zmq_msg_size(zmq_msg2) assert b"Hello" == ffi.buffer(C.zmq_msg_data(zmq_msg2), C.zmq_msg_size(zmq_msg2))[:] assert C.zmq_close(sender) == 0 assert C.zmq_close(receiver) == 0 assert C.zmq_ctx_destroy(ctx) == 0 def test_zmq_poll(self): ctx = C.zmq_ctx_new() sender = C.zmq_socket(ctx, REQ) receiver = C.zmq_socket(ctx, REP) r1 = C.zmq_bind(receiver, b'tcp://*:3333') r2 = C.zmq_connect(sender, b'tcp://127.0.0.1:3333') zmq_msg = ffi.new('zmq_msg_t*') message = ffi.new('char[5]', b'Hello') C.zmq_msg_init_data(zmq_msg, ffi.cast('void*', message), ffi.cast('size_t', 5), ffi.NULL, ffi.NULL) receiver_pollitem = ffi.new('zmq_pollitem_t*') receiver_pollitem.socket = receiver receiver_pollitem.fd = 0 receiver_pollitem.events = POLLIN | POLLOUT receiver_pollitem.revents = 0 ret = C.zmq_poll(ffi.NULL, 0, 0) assert ret == 0 ret = C.zmq_poll(receiver_pollitem, 1, 0) assert ret == 0 ret = C.zmq_msg_send(zmq_msg, sender, 0) print(ffi.string(C.zmq_strerror(C.zmq_errno()))) assert ret == 5 time.sleep(0.2) ret = C.zmq_poll(receiver_pollitem, 1, 0) assert ret == 1 assert int(receiver_pollitem.revents) & POLLIN assert not int(receiver_pollitem.revents) & POLLOUT zmq_msg2 = ffi.new('zmq_msg_t*') C.zmq_msg_init(zmq_msg2) ret_recv = C.zmq_msg_recv(zmq_msg2, receiver, 0) assert ret_recv == 5 assert 5 == C.zmq_msg_size(zmq_msg2) assert b"Hello" == ffi.buffer(C.zmq_msg_data(zmq_msg2), C.zmq_msg_size(zmq_msg2))[:] sender_pollitem = ffi.new('zmq_pollitem_t*') sender_pollitem.socket = sender sender_pollitem.fd = 0 sender_pollitem.events = POLLIN | POLLOUT sender_pollitem.revents = 0 ret = C.zmq_poll(sender_pollitem, 1, 0) assert ret == 0 zmq_msg_again = ffi.new('zmq_msg_t*') message_again = ffi.new('char[11]', b'Hello Again') C.zmq_msg_init_data(zmq_msg_again, ffi.cast('void*', message_again), ffi.cast('size_t', 11), ffi.NULL, ffi.NULL) assert 11 == C.zmq_msg_send(zmq_msg_again, receiver, 0) time.sleep(0.2) assert 0 <= C.zmq_poll(sender_pollitem, 1, 0) assert int(sender_pollitem.revents) & POLLIN assert 11 == C.zmq_msg_recv(zmq_msg2, sender, 0) assert 11 == C.zmq_msg_size(zmq_msg2) assert b"Hello Again" == ffi.buffer(C.zmq_msg_data(zmq_msg2), int(C.zmq_msg_size(zmq_msg2)))[:] assert 0 == C.zmq_close(sender) assert 0 == C.zmq_close(receiver) assert 0 == C.zmq_ctx_destroy(ctx) assert 0 == C.zmq_msg_close(zmq_msg) assert 0 == C.zmq_msg_close(zmq_msg2) assert 0 == C.zmq_msg_close(zmq_msg_again) pyzmq-16.0.2/zmq/tests/test_constants.py000066400000000000000000000107321301503633700203620ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
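# --- Illustrative sketch, not part of the original test suite ---
# The tests below verify that constants appear and disappear with the libzmq
# version. A minimal form of that check; the constant name is only an example.
def _example_constant_available(name='CURVE_SERVER'):
    import zmq
    # getattr with a default avoids AttributeError for constants that this
    # libzmq build does not define
    return getattr(zmq, name, None) is not None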
import json from unittest import TestCase import pytest import zmq from zmq.utils import constant_names from zmq.sugar import constants as sugar_constants from zmq.backend import constants as backend_constants all_set = set(constant_names.all_names) class TestConstants(TestCase): def _duplicate_test(self, namelist, listname): """test that a given list has no duplicates""" dupes = {} for name in set(namelist): cnt = namelist.count(name) if cnt > 1: dupes[name] = cnt if dupes: self.fail("The following names occur more than once in %s: %s" % (listname, json.dumps(dupes, indent=2))) def test_duplicate_all(self): return self._duplicate_test(constant_names.all_names, "all_names") def _change_key(self, change, version): """return changed-in key""" return "%s-in %d.%d.%d" % tuple([change] + list(version)) def test_duplicate_changed(self): all_changed = [] for change in ("new", "removed"): d = getattr(constant_names, change + "_in") for version, namelist in d.items(): all_changed.extend(namelist) self._duplicate_test(namelist, self._change_key(change, version)) self._duplicate_test(all_changed, "all-changed") def test_changed_in_all(self): missing = {} for change in ("new", "removed"): d = getattr(constant_names, change + "_in") for version, namelist in d.items(): key = self._change_key(change, version) for name in namelist: if name not in all_set: if key not in missing: missing[key] = [] missing[key].append(name) if missing: self.fail( "The following names are missing in `all_names`: %s" % json.dumps(missing, indent=2) ) def test_no_negative_constants(self): for name in sugar_constants.__all__: self.assertNotEqual(getattr(zmq, name), sugar_constants._UNDEFINED) def test_undefined_constants(self): all_aliases = [] for alias_group in sugar_constants.aliases: all_aliases.extend(alias_group) for name in all_set.difference(all_aliases): raw = getattr(backend_constants, name) if raw == sugar_constants._UNDEFINED: self.assertRaises(AttributeError, getattr, zmq, name) else: self.assertEqual(getattr(zmq, name), raw) def test_new(self): zmq_version = zmq.zmq_version_info() for version, new_names in constant_names.new_in.items(): should_have = zmq_version >= version for name in new_names: try: value = getattr(zmq, name) except AttributeError: if should_have: self.fail("AttributeError: zmq.%s" % name) else: if not should_have: self.fail("Shouldn't have: zmq.%s=%s" % (name, value)) @pytest.mark.skipif(not zmq.DRAFT_API, reason="Only test draft API if built with draft API") def test_draft(self): zmq_version = zmq.zmq_version_info() for version, new_names in constant_names.draft_in.items(): should_have = zmq_version >= version for name in new_names: try: value = getattr(zmq, name) except AttributeError: if should_have: self.fail("AttributeError: zmq.%s" % name) else: if not should_have: self.fail("Shouldn't have: zmq.%s=%s" % (name, value)) def test_removed(self): zmq_version = zmq.zmq_version_info() for version, new_names in constant_names.removed_in.items(): should_have = zmq_version < version for name in new_names: try: value = getattr(zmq, name) except AttributeError: if should_have: self.fail("AttributeError: zmq.%s" % name) else: if not should_have: self.fail("Shouldn't have: zmq.%s=%s" % (name, value)) pyzmq-16.0.2/zmq/tests/test_context.py000066400000000000000000000235071301503633700200360ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
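# --- Illustrative sketch, not part of the original test suite ---
# The basic Context lifecycle exercised by the tests below: a Context is
# usable as a context manager, and leaving the block terminates it.
def _example_context_lifecycle():
    import zmq
    with zmq.Context() as ctx:
        s = ctx.socket(zmq.PUSH)
        s.close()   # close the socket so term() does not block
    # exiting the with-block calls term(), marking the context closed
    assert ctx.closed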
import copy import gc import sys import time from threading import Thread, Event try: from queue import Queue except ImportError: from Queue import Queue import zmq from zmq.tests import ( BaseZMQTestCase, have_gevent, GreenTest, skip_green, PYPY, SkipTest, ) class KwargTestSocket(zmq.Socket): test_kwarg_value = None def __init__(self, *args, **kwargs): self.test_kwarg_value = kwargs.pop('test_kwarg', None) super(KwargTestSocket, self).__init__(*args, **kwargs) class KwargTestContext(zmq.Context): _socket_class = KwargTestSocket class TestContext(BaseZMQTestCase): def test_init(self): c1 = self.Context() self.assert_(isinstance(c1, self.Context)) del c1 c2 = self.Context() self.assert_(isinstance(c2, self.Context)) del c2 c3 = self.Context() self.assert_(isinstance(c3, self.Context)) del c3 def test_dir(self): ctx = self.Context() self.assertTrue('socket' in dir(ctx)) if zmq.zmq_version_info() > (3,): self.assertTrue('IO_THREADS' in dir(ctx)) ctx.term() def test_term(self): c = self.Context() c.term() self.assert_(c.closed) def test_context_manager(self): with self.Context() as c: pass self.assert_(c.closed) def test_fail_init(self): self.assertRaisesErrno(zmq.EINVAL, self.Context, -1) def test_term_hang(self): rep,req = self.create_bound_pair(zmq.ROUTER, zmq.DEALER) req.setsockopt(zmq.LINGER, 0) req.send(b'hello', copy=False) req.close() rep.close() self.context.term() def test_instance(self): ctx = self.Context.instance() c2 = self.Context.instance(io_threads=2) self.assertTrue(c2 is ctx) c2.term() c3 = self.Context.instance() c4 = self.Context.instance() self.assertFalse(c3 is c2) self.assertFalse(c3.closed) self.assertTrue(c3 is c4) def test_instance_threadsafe(self): self.context.term() # clear default context q = Queue() # slow context initialization, # to ensure that we are both trying to create one at the same time class SlowContext(self.Context): def __init__(self, *a, **kw): time.sleep(1) super(SlowContext, self).__init__(*a, **kw) def f(): q.put(SlowContext.instance()) # call ctx.instance() in several threads at once N = 16 threads = [ Thread(target=f) for i in range(N) ] [ t.start() for t in threads ] # also call it in the main thread (not first) ctx = SlowContext.instance() assert isinstance(ctx, SlowContext) # check that all the threads got the same context for i in range(N): thread_ctx = q.get(timeout=5) assert thread_ctx is ctx # cleanup ctx.term() [ t.join(timeout=5) for t in threads ] def test_socket_passes_kwargs(self): test_kwarg_value = 'testing one two three' with KwargTestContext() as ctx: with ctx.socket(zmq.DEALER, test_kwarg=test_kwarg_value) as socket: self.assertTrue(socket.test_kwarg_value is test_kwarg_value) def test_many_sockets(self): """opening and closing many sockets shouldn't cause problems""" ctx = self.Context() for i in range(16): sockets = [ ctx.socket(zmq.REP) for i in range(65) ] [ s.close() for s in sockets ] # give the reaper a chance time.sleep(1e-2) ctx.term() def test_sockopts(self): """setting socket options with ctx attributes""" ctx = self.Context() ctx.linger = 5 self.assertEqual(ctx.linger, 5) s = ctx.socket(zmq.REQ) self.assertEqual(s.linger, 5) self.assertEqual(s.getsockopt(zmq.LINGER), 5) s.close() # check that subscribe doesn't get set on sockets that don't subscribe: ctx.subscribe = b'' s = ctx.socket(zmq.REQ) s.close() ctx.term() def test_destroy(self): """Context.destroy should close sockets""" ctx = self.Context() sockets = [ ctx.socket(zmq.REP) for i in range(65) ] # close half of the sockets [ s.close() for s in sockets[::2] ] 
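# destroy() should close the sockets that are still open and then
# terminate the context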
ctx.destroy() # reaper is not instantaneous time.sleep(1e-2) for s in sockets: self.assertTrue(s.closed) def test_destroy_linger(self): """Context.destroy should set linger on closing sockets""" req,rep = self.create_bound_pair(zmq.REQ, zmq.REP) req.send(b'hi') time.sleep(1e-2) self.context.destroy(linger=0) # reaper is not instantaneous time.sleep(1e-2) for s in (req,rep): self.assertTrue(s.closed) def test_term_noclose(self): """Context.term won't close sockets""" ctx = self.Context() s = ctx.socket(zmq.REQ) self.assertFalse(s.closed) t = Thread(target=ctx.term) t.start() t.join(timeout=0.1) self.assertTrue(t.is_alive(), "Context should be waiting") s.close() t.join(timeout=0.1) self.assertFalse(t.is_alive(), "Context should have closed") def test_gc(self): """test close&term by garbage collection alone""" if PYPY: raise SkipTest("GC doesn't work ") # test credit @dln (GH #137): def gcf(): def inner(): ctx = self.Context() s = ctx.socket(zmq.PUSH) inner() gc.collect() t = Thread(target=gcf) t.start() t.join(timeout=1) self.assertFalse(t.is_alive(), "Garbage collection should have cleaned up context") def test_cyclic_destroy(self): """ctx.destroy should succeed when cyclic ref prevents gc""" # test credit @dln (GH #137): class CyclicReference(object): def __init__(self, parent=None): self.parent = parent def crash(self, sock): self.sock = sock self.child = CyclicReference(self) def crash_zmq(): ctx = self.Context() sock = ctx.socket(zmq.PULL) c = CyclicReference() c.crash(sock) ctx.destroy() crash_zmq() def test_term_thread(self): """ctx.term should not crash active threads (#139)""" ctx = self.Context() evt = Event() evt.clear() def block(): s = ctx.socket(zmq.REP) s.bind_to_random_port('tcp://127.0.0.1') evt.set() try: s.recv() except zmq.ZMQError as e: self.assertEqual(e.errno, zmq.ETERM) return finally: s.close() self.fail("recv should have been interrupted with ETERM") t = Thread(target=block) t.start() evt.wait(1) self.assertTrue(evt.is_set(), "sync event never fired") time.sleep(0.01) ctx.term() t.join(timeout=1) self.assertFalse(t.is_alive(), "term should have interrupted s.recv()") def test_destroy_no_sockets(self): ctx = self.Context() s = ctx.socket(zmq.PUB) s.bind_to_random_port('tcp://127.0.0.1') s.close() ctx.destroy() assert s.closed assert ctx.closed def test_ctx_opts(self): if zmq.zmq_version_info() < (3,): raise SkipTest("context options require libzmq 3") ctx = self.Context() ctx.set(zmq.MAX_SOCKETS, 2) self.assertEqual(ctx.get(zmq.MAX_SOCKETS), 2) ctx.max_sockets = 100 self.assertEqual(ctx.max_sockets, 100) self.assertEqual(ctx.get(zmq.MAX_SOCKETS), 100) def test_copy(self): c1 = self.Context() c2 = copy.copy(c1) c2b = copy.deepcopy(c1) c3 = copy.deepcopy(c2) self.assert_(c2._shadow) self.assert_(c3._shadow) self.assertEqual(c1.underlying, c2.underlying) self.assertEqual(c1.underlying, c3.underlying) self.assertEqual(c1.underlying, c2b.underlying) s = c3.socket(zmq.PUB) s.close() c1.term() def test_shadow(self): ctx = self.Context() ctx2 = self.Context.shadow(ctx.underlying) self.assertEqual(ctx.underlying, ctx2.underlying) s = ctx.socket(zmq.PUB) s.close() del ctx2 self.assertFalse(ctx.closed) s = ctx.socket(zmq.PUB) ctx2 = self.Context.shadow(ctx.underlying) s2 = ctx2.socket(zmq.PUB) s.close() s2.close() ctx.term() self.assertRaisesErrno(zmq.EFAULT, ctx2.socket, zmq.PUB) del ctx2 def test_shadow_pyczmq(self): try: from pyczmq import zctx, zsocket, zstr except Exception: raise SkipTest("Requires pyczmq") ctx = zctx.new() a = zsocket.new(ctx, zmq.PUSH) zsocket.bind(a, 
"inproc://a") ctx2 = self.Context.shadow_pyczmq(ctx) b = ctx2.socket(zmq.PULL) b.connect("inproc://a") zstr.send(a, b'hi') rcvd = self.recv(b) self.assertEqual(rcvd, b'hi') b.close() if False: # disable green context tests class TestContextGreen(GreenTest, TestContext): """gevent subclass of context tests""" # skip tests that use real threads: test_gc = GreenTest.skip_green test_term_thread = GreenTest.skip_green test_destroy_linger = GreenTest.skip_green pyzmq-16.0.2/zmq/tests/test_decorators.py000066400000000000000000000224271301503633700205170ustar00rootroot00000000000000import threading import zmq from pytest import raises from zmq.decorators import context, socket ############################################## # Test cases for @context ############################################## def test_ctx(): @context() def test(ctx): assert isinstance(ctx, zmq.Context), ctx test() def test_ctx_orig_args(): @context() def f(foo, bar, ctx, baz=None): assert isinstance(ctx, zmq.Context), ctx assert foo == 42 assert bar is True assert baz == 'mock' f(42, True, baz='mock') def test_ctx_arg_naming(): @context('myctx') def test(myctx): assert isinstance(myctx, zmq.Context), myctx test() def test_ctx_args(): @context('ctx', 5) def test(ctx): assert isinstance(ctx, zmq.Context), ctx assert ctx.IO_THREADS == 5, ctx.IO_THREADS test() def test_ctx_arg_kwarg(): @context('ctx', io_threads=5) def test(ctx): assert isinstance(ctx, zmq.Context), ctx assert ctx.IO_THREADS == 5, ctx.IO_THREADS test() def test_ctx_kw_naming(): @context(name='myctx') def test(myctx): assert isinstance(myctx, zmq.Context), myctx test() def test_ctx_kwargs(): @context(name='ctx', io_threads=5) def test(ctx): assert isinstance(ctx, zmq.Context), ctx assert ctx.IO_THREADS == 5, ctx.IO_THREADS test() def test_ctx_kwargs_default(): @context(name='ctx', io_threads=5) def test(ctx=None): assert isinstance(ctx, zmq.Context), ctx assert ctx.IO_THREADS == 5, ctx.IO_THREADS test() def test_ctx_keyword_miss(): @context(name='ctx') def test(other_name): pass # the keyword ``ctx`` not found with raises(TypeError): test() def test_ctx_multi_assign(): @context(name='ctx') def test(ctx): pass # explosion with raises(TypeError): test('mock') def test_ctx_reinit(): result = {'foo': None, 'bar': None} @context() def f(key, ctx): assert isinstance(ctx, zmq.Context), ctx result[key] = ctx foo_t = threading.Thread(target=f, args=('foo',)) bar_t = threading.Thread(target=f, args=('bar',)) foo_t.start() bar_t.start() foo_t.join() bar_t.join() assert result['foo'] is not None, result assert result['bar'] is not None, result assert result['foo'] is not result['bar'], result def test_ctx_multi_thread(): @context() @context() def f(foo, bar): assert isinstance(foo, zmq.Context), foo assert isinstance(bar, zmq.Context), bar assert len(set(map(id, [foo, bar]))) == 2, set(map(id, [foo, bar])) threads = [threading.Thread(target=f) for i in range(8)] [t.start() for t in threads] [t.join() for t in threads] ############################################## # Test cases for @socket ############################################## def test_ctx_skt(): @context() @socket(zmq.PUB) def test(ctx, skt): assert isinstance(ctx, zmq.Context), ctx assert isinstance(skt, zmq.Socket), skt assert skt.type == zmq.PUB test() def test_skt_name(): @context() @socket('myskt', zmq.PUB) def test(ctx, myskt): assert isinstance(myskt, zmq.Socket), myskt assert isinstance(ctx, zmq.Context), ctx assert myskt.type == zmq.PUB test() def test_skt_kwarg(): @context() @socket(zmq.PUB, name='myskt') def 
test(ctx, myskt): assert isinstance(myskt, zmq.Socket), myskt assert isinstance(ctx, zmq.Context), ctx assert myskt.type == zmq.PUB test() def test_ctx_skt_name(): @context('ctx') @socket('skt', zmq.PUB, context_name='ctx') def test(ctx, skt): assert isinstance(skt, zmq.Socket), skt assert isinstance(ctx, zmq.Context), ctx assert skt.type == zmq.PUB test() def test_skt_default_ctx(): @socket(zmq.PUB) def test(skt): assert isinstance(skt, zmq.Socket), skt assert skt.context is zmq.Context.instance() assert skt.type == zmq.PUB test() def test_skt_reinit(): result = {'foo': None, 'bar': None} @socket(zmq.PUB) def f(key, skt): assert isinstance(skt, zmq.Socket), skt result[key] = skt foo_t = threading.Thread(target=f, args=('foo',)) bar_t = threading.Thread(target=f, args=('bar',)) foo_t.start() bar_t.start() foo_t.join() bar_t.join() assert result['foo'] is not None, result assert result['bar'] is not None, result assert result['foo'] is not result['bar'], result def test_ctx_skt_reinit(): result = {'foo': {'ctx': None, 'skt': None}, 'bar': {'ctx': None, 'skt': None}} @context() @socket(zmq.PUB) def f(key, ctx, skt): assert isinstance(ctx, zmq.Context), ctx assert isinstance(skt, zmq.Socket), skt result[key]['ctx'] = ctx result[key]['skt'] = skt foo_t = threading.Thread(target=f, args=('foo',)) bar_t = threading.Thread(target=f, args=('bar',)) foo_t.start() bar_t.start() foo_t.join() bar_t.join() assert result['foo']['ctx'] is not None, result assert result['foo']['skt'] is not None, result assert result['bar']['ctx'] is not None, result assert result['bar']['skt'] is not None, result assert result['foo']['ctx'] is not result['bar']['ctx'], result assert result['foo']['skt'] is not result['bar']['skt'], result def test_skt_type_miss(): @context() @socket('myskt') def f(ctx, myskt): pass # the socket type is missing with raises(TypeError): f() def test_multi_skts(): @socket(zmq.PUB) @socket(zmq.SUB) @socket(zmq.PUSH) def test(pub, sub, push): assert isinstance(pub, zmq.Socket), pub assert isinstance(sub, zmq.Socket), sub assert isinstance(push, zmq.Socket), push assert pub.context is zmq.Context.instance() assert sub.context is zmq.Context.instance() assert push.context is zmq.Context.instance() assert pub.type == zmq.PUB assert sub.type == zmq.SUB assert push.type == zmq.PUSH test() def test_multi_skts_single_ctx(): @context() @socket(zmq.PUB) @socket(zmq.SUB) @socket(zmq.PUSH) def test(ctx, pub, sub, push): assert isinstance(ctx, zmq.Context), ctx assert isinstance(pub, zmq.Socket), pub assert isinstance(sub, zmq.Socket), sub assert isinstance(push, zmq.Socket), push assert pub.context is ctx assert sub.context is ctx assert push.context is ctx assert pub.type == zmq.PUB assert sub.type == zmq.SUB assert push.type == zmq.PUSH test() def test_multi_skts_with_name(): @socket('foo', zmq.PUSH) @socket('bar', zmq.SUB) @socket('baz', zmq.PUB) def test(foo, bar, baz): assert isinstance(foo, zmq.Socket), foo assert isinstance(bar, zmq.Socket), bar assert isinstance(baz, zmq.Socket), baz assert foo.context is zmq.Context.instance() assert bar.context is zmq.Context.instance() assert baz.context is zmq.Context.instance() assert foo.type == zmq.PUSH assert bar.type == zmq.SUB assert baz.type == zmq.PUB test() def test_func_return(): @context() def f(ctx): assert isinstance(ctx, zmq.Context), ctx return 'something' assert f() == 'something' def test_skt_multi_thread(): @socket(zmq.PUB) @socket(zmq.SUB) @socket(zmq.PUSH) def f(pub, sub, push): assert isinstance(pub, zmq.Socket), pub assert 
isinstance(sub, zmq.Socket), sub assert isinstance(push, zmq.Socket), push assert pub.context is zmq.Context.instance() assert sub.context is zmq.Context.instance() assert push.context is zmq.Context.instance() assert pub.type == zmq.PUB assert sub.type == zmq.SUB assert push.type == zmq.PUSH assert len(set(map(id, [pub, sub, push]))) == 3 threads = [threading.Thread(target=f) for i in range(8)] [t.start() for t in threads] [t.join() for t in threads] class TestMethodDecorators(): @context() @socket(zmq.PUB) @socket(zmq.SUB) def multi_skts_method(self, ctx, pub, sub, foo='bar'): assert isinstance(self, TestMethodDecorators), self assert isinstance(ctx, zmq.Context), ctx assert isinstance(pub, zmq.Socket), pub assert isinstance(sub, zmq.Socket), sub assert foo == 'bar' assert pub.context is ctx assert sub.context is ctx assert pub.type is zmq.PUB assert sub.type is zmq.SUB def test_multi_skts_method(self): self.multi_skts_method() def multi_skts_method_other_args(self): @socket(zmq.PUB) @socket(zmq.SUB) def f(foo, pub, sub, bar=None): assert isinstance(pub, zmq.Socket), pub assert isinstance(sub, zmq.Socket), sub assert foo == 'mock' assert bar == 'fake' assert pub.context is zmq.Context.instance() assert sub.context is zmq.Context.instance() assert pub.type is zmq.PUB assert sub.type is zmq.SUB f('mock', bar='fake') def test_multi_skts_method_other_args(self): self.multi_skts_method_other_args() pyzmq-16.0.2/zmq/tests/test_device.py000066400000000000000000000117671301503633700176160ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import time import zmq from zmq import devices from zmq.tests import BaseZMQTestCase, SkipTest, have_gevent, GreenTest, PYPY from zmq.utils.strtypes import (bytes,unicode,basestring) if PYPY: # cleanup of shared Context doesn't work on PyPy devices.Device.context_factory = zmq.Context class TestDevice(BaseZMQTestCase): def test_device_types(self): for devtype in (zmq.STREAMER, zmq.FORWARDER, zmq.QUEUE): dev = devices.Device(devtype, zmq.PAIR, zmq.PAIR) self.assertEqual(dev.device_type, devtype) del dev def test_device_attributes(self): dev = devices.Device(zmq.QUEUE, zmq.SUB, zmq.PUB) self.assertEqual(dev.in_type, zmq.SUB) self.assertEqual(dev.out_type, zmq.PUB) self.assertEqual(dev.device_type, zmq.QUEUE) self.assertEqual(dev.daemon, True) del dev def test_single_socket_forwarder_connect(self): if zmq.zmq_version() in ('4.1.1', '4.0.6'): raise SkipTest("libzmq-%s broke single-socket devices" % zmq.zmq_version()) dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1) req = self.context.socket(zmq.REQ) port = req.bind_to_random_port('tcp://127.0.0.1') dev.connect_in('tcp://127.0.0.1:%i'%port) dev.start() time.sleep(.25) msg = b'hello' req.send(msg) self.assertEqual(msg, self.recv(req)) del dev req.close() dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1) req = self.context.socket(zmq.REQ) port = req.bind_to_random_port('tcp://127.0.0.1') dev.connect_out('tcp://127.0.0.1:%i'%port) dev.start() time.sleep(.25) msg = b'hello again' req.send(msg) self.assertEqual(msg, self.recv(req)) del dev req.close() def test_single_socket_forwarder_bind(self): if zmq.zmq_version() in ('4.1.1', '4.0.6'): raise SkipTest("libzmq-%s broke single-socket devices" % zmq.zmq_version()) dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1) # select random port: binder = self.context.socket(zmq.REQ) port = binder.bind_to_random_port('tcp://127.0.0.1') binder.close() time.sleep(0.1) req = self.context.socket(zmq.REQ) 
req.connect('tcp://127.0.0.1:%i'%port) dev.bind_in('tcp://127.0.0.1:%i'%port) dev.start() time.sleep(.25) msg = b'hello' req.send(msg) self.assertEqual(msg, self.recv(req)) del dev req.close() dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1) # select random port: binder = self.context.socket(zmq.REQ) port = binder.bind_to_random_port('tcp://127.0.0.1') binder.close() time.sleep(0.1) req = self.context.socket(zmq.REQ) req.connect('tcp://127.0.0.1:%i'%port) dev.bind_in('tcp://127.0.0.1:%i'%port) dev.start() time.sleep(.25) msg = b'hello again' req.send(msg) self.assertEqual(msg, self.recv(req)) del dev req.close() def test_proxy(self): if zmq.zmq_version_info() < (3,2): raise SkipTest("Proxies only in libzmq >= 3") dev = devices.ThreadProxy(zmq.PULL, zmq.PUSH, zmq.PUSH) binder = self.context.socket(zmq.REQ) iface = 'tcp://127.0.0.1' port = binder.bind_to_random_port(iface) port2 = binder.bind_to_random_port(iface) port3 = binder.bind_to_random_port(iface) binder.close() time.sleep(0.1) dev.bind_in("%s:%i" % (iface, port)) dev.bind_out("%s:%i" % (iface, port2)) dev.bind_mon("%s:%i" % (iface, port3)) dev.start() time.sleep(0.25) msg = b'hello' push = self.context.socket(zmq.PUSH) push.connect("%s:%i" % (iface, port)) pull = self.context.socket(zmq.PULL) pull.connect("%s:%i" % (iface, port2)) mon = self.context.socket(zmq.PULL) mon.connect("%s:%i" % (iface, port3)) push.send(msg) self.sockets.extend([push, pull, mon]) self.assertEqual(msg, self.recv(pull)) self.assertEqual(msg, self.recv(mon)) if have_gevent: import gevent import zmq.green class TestDeviceGreen(GreenTest, BaseZMQTestCase): def test_green_device(self): rep = self.context.socket(zmq.REP) req = self.context.socket(zmq.REQ) self.sockets.extend([req, rep]) port = rep.bind_to_random_port('tcp://127.0.0.1') g = gevent.spawn(zmq.green.device, zmq.QUEUE, rep, rep) req.connect('tcp://127.0.0.1:%i' % port) req.send(b'hi') timeout = gevent.Timeout(3) timeout.start() receiver = gevent.spawn(req.recv) self.assertEqual(receiver.get(2), b'hi') timeout.cancel() g.kill(block=True) pyzmq-16.0.2/zmq/tests/test_error.py000066400000000000000000000023201301503633700174710ustar00rootroot00000000000000# -*- coding: utf8 -*- # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import sys import time import zmq from zmq import ZMQError, strerror, Again, ContextTerminated from zmq.tests import BaseZMQTestCase if sys.version_info[0] >= 3: long = int class TestZMQError(BaseZMQTestCase): def test_strerror(self): """test that strerror gets the right type.""" for i in range(10): e = strerror(i) self.assertTrue(isinstance(e, str)) def test_zmqerror(self): for errno in range(10): e = ZMQError(errno) self.assertEqual(e.errno, errno) self.assertEqual(str(e), strerror(errno)) def test_again(self): s = self.context.socket(zmq.REP) self.assertRaises(Again, s.recv, zmq.NOBLOCK) self.assertRaisesErrno(zmq.EAGAIN, s.recv, zmq.NOBLOCK) s.close() def atest_ctxterm(self): s = self.context.socket(zmq.REP) t = Thread(target=self.context.term) t.start() self.assertRaises(ContextTerminated, s.recv, zmq.NOBLOCK) self.assertRaisesErrno(zmq.TERM, s.recv, zmq.NOBLOCK) s.close() t.join() pyzmq-16.0.2/zmq/tests/test_etc.py000066400000000000000000000010151301503633700171130ustar00rootroot00000000000000# Copyright (c) PyZMQ Developers. # Distributed under the terms of the Modified BSD License. 
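# --- Illustrative sketch, not part of the original test suite ---
# zmq.has() (libzmq >= 4.1) reports optional transport/security capabilities;
# the capability names queried here are just examples.
def _example_capabilities():
    import zmq
    if zmq.zmq_version_info() < (4, 1):
        return None
    return {name: zmq.has(name) for name in ('ipc', 'curve')}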
import sys import zmq from pytest import mark @mark.skipif('zmq.zmq_version_info() < (4,1)') def test_has(): assert not zmq.has('something weird') has_ipc = zmq.has('ipc') not_windows = not sys.platform.startswith('win') assert has_ipc == not_windows @mark.skipif(not hasattr(zmq, '_libzmq'), reason="bundled libzmq") def test_has_curve(): """bundled libzmq has curve support""" assert zmq.has('curve') pyzmq-16.0.2/zmq/tests/test_future.py000066400000000000000000000165401301503633700176630ustar00rootroot00000000000000# coding: utf-8 # Copyright (c) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from datetime import timedelta import os import pytest gen = pytest.importorskip('tornado.gen') import zmq from zmq.eventloop import future from zmq.eventloop.ioloop import IOLoop from zmq.utils.strtypes import u from zmq.tests import BaseZMQTestCase class TestFutureSocket(BaseZMQTestCase): Context = future.Context def setUp(self): self.loop = IOLoop() self.loop.make_current() super(TestFutureSocket, self).setUp() def tearDown(self): super(TestFutureSocket, self).tearDown() self.loop.close(all_fds=True) def test_socket_class(self): s = self.context.socket(zmq.PUSH) assert isinstance(s, future.Socket) s.close() def test_recv_multipart(self): @gen.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = b.recv_multipart() assert not f.done() yield a.send(b'hi') recvd = yield f self.assertEqual(recvd, [b'hi']) self.loop.run_sync(test) def test_recv(self): @gen.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f1 = b.recv() f2 = b.recv() assert not f1.done() assert not f2.done() yield a.send_multipart([b'hi', b'there']) recvd = yield f2 assert f1.done() self.assertEqual(f1.result(), b'hi') self.assertEqual(recvd, b'there') self.loop.run_sync(test) def test_recv_cancel(self): @gen.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f1 = b.recv() f2 = b.recv_multipart() assert f1.cancel() assert f1.done() assert not f2.done() yield a.send_multipart([b'hi', b'there']) recvd = yield f2 assert f1.cancelled() assert f2.done() self.assertEqual(recvd, [b'hi', b'there']) self.loop.run_sync(test) @pytest.mark.skipif(not hasattr(zmq, 'RCVTIMEO'), reason="requires RCVTIMEO") def test_recv_timeout(self): @gen.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) b.rcvtimeo = 100 f1 = b.recv() b.rcvtimeo = 1000 f2 = b.recv_multipart() with pytest.raises(zmq.Again): yield f1 yield a.send_multipart([b'hi', b'there']) recvd = yield f2 assert f2.done() self.assertEqual(recvd, [b'hi', b'there']) self.loop.run_sync(test) @pytest.mark.skipif(not hasattr(zmq, 'SNDTIMEO'), reason="requires SNDTIMEO") def test_send_timeout(self): @gen.coroutine def test(): s = self.socket(zmq.PUSH) s.sndtimeo = 100 with pytest.raises(zmq.Again): yield s.send(b'not going anywhere') self.loop.run_sync(test) @pytest.mark.now def test_send_noblock(self): @gen.coroutine def test(): s = self.socket(zmq.PUSH) with pytest.raises(zmq.Again): yield s.send(b'not going anywhere', flags=zmq.NOBLOCK) self.loop.run_sync(test) @pytest.mark.now def test_send_multipart_noblock(self): @gen.coroutine def test(): s = self.socket(zmq.PUSH) with pytest.raises(zmq.Again): yield s.send_multipart([b'not going anywhere'], flags=zmq.NOBLOCK) self.loop.run_sync(test) def test_recv_string(self): @gen.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = b.recv_string() assert not f.done() msg = u('πøøπ') yield a.send_string(msg) recvd = yield f 
assert f.done() self.assertEqual(f.result(), msg) self.assertEqual(recvd, msg) self.loop.run_sync(test) def test_recv_json(self): @gen.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = b.recv_json() assert not f.done() obj = dict(a=5) yield a.send_json(obj) recvd = yield f assert f.done() self.assertEqual(f.result(), obj) self.assertEqual(recvd, obj) self.loop.run_sync(test) def test_recv_json_cancelled(self): @gen.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = b.recv_json() assert not f.done() f.cancel() # cycle eventloop to allow cancel events to fire yield gen.sleep(0) obj = dict(a=5) yield a.send_json(obj) with pytest.raises(future.CancelledError): recvd = yield f assert f.done() # give it a chance to incorrectly consume the event events = yield b.poll(timeout=5) assert events yield gen.sleep(0) # make sure cancelled recv didn't eat up event recvd = yield gen.with_timeout(timedelta(seconds=5), b.recv_json()) assert recvd == obj self.loop.run_sync(test) def test_recv_pyobj(self): @gen.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = b.recv_pyobj() assert not f.done() obj = dict(a=5) yield a.send_pyobj(obj) recvd = yield f assert f.done() self.assertEqual(f.result(), obj) self.assertEqual(recvd, obj) self.loop.run_sync(test) def test_poll(self): @gen.coroutine def test(): a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL) f = b.poll(timeout=0) self.assertEqual(f.result(), 0) f = b.poll(timeout=1) assert not f.done() evt = yield f self.assertEqual(evt, 0) f = b.poll(timeout=1000) assert not f.done() yield a.send_multipart([b'hi', b'there']) evt = yield f self.assertEqual(evt, zmq.POLLIN) recvd = yield b.recv_multipart() self.assertEqual(recvd, [b'hi', b'there']) self.loop.run_sync(test) def test_poll_raw(self): @gen.coroutine def test(): p = future.Poller() # make a pipe r, w = os.pipe() r = os.fdopen(r, 'rb') w = os.fdopen(w, 'wb') # POLLOUT p.register(r, zmq.POLLIN) p.register(w, zmq.POLLOUT) evts = yield p.poll(timeout=1) evts = dict(evts) assert r.fileno() not in evts assert w.fileno() in evts assert evts[w.fileno()] == zmq.POLLOUT # POLLIN p.unregister(w) w.write(b'x') w.flush() evts = yield p.poll(timeout=1000) evts = dict(evts) assert r.fileno() in evts assert evts[r.fileno()] == zmq.POLLIN assert r.read(1) == b'x' r.close() w.close() self.loop.run_sync(test) pyzmq-16.0.2/zmq/tests/test_imports.py000066400000000000000000000033771301503633700200520ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
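# --- Illustrative sketch, not part of the original test suite ---
# The tornado-coroutine socket API exercised in test_future.py above: send/recv
# on zmq.eventloop.future sockets return Futures that a gen.coroutine can
# yield. Assumes tornado is installed; helper name and endpoint are illustrative.
def _example_future_ping():
    import zmq
    from tornado import gen
    from zmq.eventloop import future
    from zmq.eventloop.ioloop import IOLoop

    loop = IOLoop()
    loop.make_current()
    ctx = future.Context()

    @gen.coroutine
    def ping():
        push = ctx.socket(zmq.PUSH)
        pull = ctx.socket(zmq.PULL)
        port = pull.bind_to_random_port('tcp://127.0.0.1')
        push.connect('tcp://127.0.0.1:%i' % port)
        yield push.send(b'ping')
        msg = yield pull.recv()
        push.close()
        pull.close()
        raise gen.Return(msg)

    try:
        return loop.run_sync(ping)
    finally:
        ctx.term()
        loop.close(all_fds=True)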
import sys from unittest import TestCase class TestImports(TestCase): """Test Imports - the quickest test to ensure that we haven't introduced version-incompatible syntax errors.""" def test_toplevel(self): """test toplevel import""" import zmq def test_core(self): """test core imports""" from zmq import Context from zmq import Socket from zmq import Poller from zmq import Frame from zmq import constants from zmq import device, proxy from zmq import ( zmq_version, zmq_version_info, pyzmq_version, pyzmq_version_info, ) def test_devices(self): """test device imports""" import zmq.devices from zmq.devices import basedevice from zmq.devices import monitoredqueue from zmq.devices import monitoredqueuedevice def test_log(self): """test log imports""" import zmq.log from zmq.log import handlers def test_eventloop(self): """test eventloop imports""" import zmq.eventloop from zmq.eventloop import ioloop from zmq.eventloop import zmqstream from zmq.eventloop.minitornado.platform import auto from zmq.eventloop.minitornado import ioloop def test_utils(self): """test util imports""" import zmq.utils from zmq.utils import strtypes from zmq.utils import jsonapi def test_ssh(self): """test ssh imports""" from zmq.ssh import tunnel def test_decorators(self): """test decorators imports""" from zmq.decorators import context, socket pyzmq-16.0.2/zmq/tests/test_includes.py000066400000000000000000000017651301503633700201620ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from unittest import TestCase import zmq import os class TestIncludes(TestCase): def test_get_includes(self): from os.path import dirname, basename includes = zmq.get_includes() self.assertTrue(isinstance(includes, list)) self.assertTrue(len(includes) >= 2) parent = includes[0] self.assertTrue(isinstance(parent, str)) utilsdir = includes[1] self.assertTrue(isinstance(utilsdir, str)) utils = basename(utilsdir) self.assertEqual(utils, "utils") def test_get_library_dirs(self): from os.path import dirname, basename libdirs = zmq.get_library_dirs() self.assertTrue(isinstance(libdirs, list)) self.assertEqual(len(libdirs), 1) parent = libdirs[0] self.assertTrue(isinstance(parent, str)) libdir = basename(parent) self.assertEqual(libdir, "zmq") pyzmq-16.0.2/zmq/tests/test_ioloop.py000066400000000000000000000102321301503633700176420ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
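# --- Illustrative sketch, not part of the original test suite ---
# The path helpers checked in test_includes.py above are what a downstream
# C extension would feed to its build configuration, for example:
def _example_build_paths():
    import zmq
    include_dirs = zmq.get_includes()       # pyzmq package dir plus its utils/ dir
    library_dirs = zmq.get_library_dirs()   # directory containing the zmq extension
    return include_dirs, library_dirs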
import time import os import threading import pytest import zmq from zmq.tests import BaseZMQTestCase, have_gevent from zmq.eventloop import ioloop try: from tornado.ioloop import IOLoop as BaseIOLoop except ImportError: from zmq.eventloop.minitornado.ioloop import IOLoop as BaseIOLoop def printer(): os.system("say hello") raise Exception print (time.time()) class Delay(threading.Thread): def __init__(self, f, delay=1): self.f=f self.delay=delay self.aborted=False self.cond=threading.Condition() super(Delay, self).__init__() def run(self): self.cond.acquire() self.cond.wait(self.delay) self.cond.release() if not self.aborted: self.f() def abort(self): self.aborted=True self.cond.acquire() self.cond.notify() self.cond.release() class TestIOLoop(BaseZMQTestCase): def tearDown(self): super(TestIOLoop, self).tearDown() BaseIOLoop.clear_current() BaseIOLoop.clear_instance() def test_simple(self): """simple IOLoop creation test""" loop = ioloop.IOLoop() dc = ioloop.PeriodicCallback(loop.stop, 200, loop) pc = ioloop.PeriodicCallback(lambda : None, 10, loop) pc.start() dc.start() t = Delay(loop.stop,1) t.start() loop.start() if t.isAlive(): t.abort() else: self.fail("IOLoop failed to exit") def test_poller_events(self): """Tornado poller implementation maps events correctly""" req,rep = self.create_bound_pair(zmq.REQ, zmq.REP) poller = ioloop.ZMQPoller() poller.register(req, ioloop.IOLoop.READ) poller.register(rep, ioloop.IOLoop.READ) events = dict(poller.poll(0)) self.assertEqual(events.get(rep), None) self.assertEqual(events.get(req), None) poller.register(req, ioloop.IOLoop.WRITE) poller.register(rep, ioloop.IOLoop.WRITE) events = dict(poller.poll(1)) self.assertEqual(events.get(req), ioloop.IOLoop.WRITE) self.assertEqual(events.get(rep), None) poller.register(rep, ioloop.IOLoop.READ) req.send(b'hi') events = dict(poller.poll(1)) self.assertEqual(events.get(rep), ioloop.IOLoop.READ) self.assertEqual(events.get(req), None) def test_instance(self): """Green IOLoop.instance returns the right object""" loop = ioloop.IOLoop.instance() assert isinstance(loop, ioloop.IOLoop) base_loop = BaseIOLoop.instance() assert base_loop is loop def test_current(self): """Green IOLoop.current returns the right object""" loop = ioloop.IOLoop.current() assert isinstance(loop, ioloop.IOLoop) base_loop = BaseIOLoop.current() assert base_loop is loop def test_close_all(self): """Test close(all_fds=True)""" loop = ioloop.IOLoop.instance() req,rep = self.create_bound_pair(zmq.REQ, zmq.REP) loop.add_handler(req, lambda msg: msg, ioloop.IOLoop.READ) loop.add_handler(rep, lambda msg: msg, ioloop.IOLoop.READ) self.assertEqual(req.closed, False) self.assertEqual(rep.closed, False) loop.close(all_fds=True) self.assertEqual(req.closed, True) self.assertEqual(rep.closed, True) if have_gevent: import zmq.green.eventloop.ioloop as green_ioloop class TestIOLoopGreen(BaseZMQTestCase): def test_instance(self): """Green IOLoop.instance returns the right object""" loop = green_ioloop.IOLoop.instance() assert isinstance(loop, green_ioloop.IOLoop) base_loop = BaseIOLoop.instance() assert base_loop is loop def test_current(self): """Green IOLoop.current returns the right object""" loop = green_ioloop.IOLoop.current() assert isinstance(loop, green_ioloop.IOLoop) base_loop = BaseIOLoop.current() assert base_loop is loop pyzmq-16.0.2/zmq/tests/test_log.py000066400000000000000000000073771301503633700171420ustar00rootroot00000000000000# encoding: utf-8 # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
import logging import time from unittest import TestCase import zmq from zmq.log import handlers from zmq.utils.strtypes import b, u from zmq.tests import BaseZMQTestCase class TestPubLog(BaseZMQTestCase): iface = 'inproc://zmqlog' topic= 'zmq' @property def logger(self): # print dir(self) logger = logging.getLogger('zmqtest') logger.setLevel(logging.DEBUG) return logger def connect_handler(self, topic=None): topic = self.topic if topic is None else topic logger = self.logger pub,sub = self.create_bound_pair(zmq.PUB, zmq.SUB) handler = handlers.PUBHandler(pub) handler.setLevel(logging.DEBUG) handler.root_topic = topic logger.addHandler(handler) sub.setsockopt(zmq.SUBSCRIBE, b(topic)) time.sleep(0.1) return logger, handler, sub def test_init_iface(self): logger = self.logger ctx = self.context handler = handlers.PUBHandler(self.iface) self.assertFalse(handler.ctx is ctx) self.sockets.append(handler.socket) # handler.ctx.term() handler = handlers.PUBHandler(self.iface, self.context) self.sockets.append(handler.socket) self.assertTrue(handler.ctx is ctx) handler.setLevel(logging.DEBUG) handler.root_topic = self.topic logger.addHandler(handler) sub = ctx.socket(zmq.SUB) self.sockets.append(sub) sub.setsockopt(zmq.SUBSCRIBE, b(self.topic)) sub.connect(self.iface) import time; time.sleep(0.25) msg1 = 'message' logger.info(msg1) (topic, msg2) = sub.recv_multipart() self.assertEqual(topic, b'zmq.INFO') self.assertEqual(msg2, b(msg1)+b'\n') logger.removeHandler(handler) def test_init_socket(self): pub,sub = self.create_bound_pair(zmq.PUB, zmq.SUB) logger = self.logger handler = handlers.PUBHandler(pub) handler.setLevel(logging.DEBUG) handler.root_topic = self.topic logger.addHandler(handler) self.assertTrue(handler.socket is pub) self.assertTrue(handler.ctx is pub.context) self.assertTrue(handler.ctx is self.context) sub.setsockopt(zmq.SUBSCRIBE, b(self.topic)) import time; time.sleep(0.1) msg1 = 'message' logger.info(msg1) (topic, msg2) = sub.recv_multipart() self.assertEqual(topic, b'zmq.INFO') self.assertEqual(msg2, b(msg1)+b'\n') logger.removeHandler(handler) def test_root_topic(self): logger, handler, sub = self.connect_handler() handler.socket.bind(self.iface) sub2 = sub.context.socket(zmq.SUB) self.sockets.append(sub2) sub2.connect(self.iface) sub2.setsockopt(zmq.SUBSCRIBE, b'') handler.root_topic = b'twoonly' msg1 = 'ignored' logger.info(msg1) self.assertRaisesErrno(zmq.EAGAIN, sub.recv, zmq.NOBLOCK) topic,msg2 = sub2.recv_multipart() self.assertEqual(topic, b'twoonly.INFO') self.assertEqual(msg2, b(msg1)+b'\n') logger.removeHandler(handler) def test_unicode_message(self): logger, handler, sub = self.connect_handler() base_topic = b(self.topic + '.INFO') for msg, expected in [ (u('hello'), [base_topic, b('hello\n')]), (u('héllo'), [base_topic, b('héllo\n')]), (u('tøpic::héllo'), [base_topic + b('.tøpic'), b('héllo\n')]), ]: logger.info(msg) received = sub.recv_multipart() self.assertEqual(received, expected) pyzmq-16.0.2/zmq/tests/test_message.py000066400000000000000000000276501301503633700200010ustar00rootroot00000000000000# -*- coding: utf8 -*- # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
import copy import sys try: from sys import getrefcount as grc except ImportError: grc = None import time from pprint import pprint from unittest import TestCase import zmq from zmq.tests import BaseZMQTestCase, SkipTest, skip_pypy, PYPY from zmq.utils.strtypes import unicode, bytes, b, u # some useful constants: x = b'x' try: view = memoryview except NameError: view = buffer if grc: rc0 = grc(x) v = view(x) view_rc = grc(x) - rc0 def await_gc(obj, rc): """wait for refcount on an object to drop to an expected value Necessary because of the zero-copy gc thread, which can take some time to receive its DECREF message. """ for i in range(50): # rc + 2 because of the refs in this function if grc(obj) <= rc + 2: return time.sleep(0.05) class TestFrame(BaseZMQTestCase): @skip_pypy def test_above_30(self): """Message above 30 bytes are never copied by 0MQ.""" for i in range(5, 16): # 32, 64,..., 65536 s = (2**i)*x self.assertEqual(grc(s), 2) m = zmq.Frame(s) self.assertEqual(grc(s), 4) del m await_gc(s, 2) self.assertEqual(grc(s), 2) del s def test_str(self): """Test the str representations of the Frames.""" for i in range(16): s = (2**i)*x m = zmq.Frame(s) m_str = str(m) m_str_b = b(m_str) # py3compat self.assertEqual(s, m_str_b) def test_bytes(self): """Test the Frame.bytes property.""" for i in range(1,16): s = (2**i)*x m = zmq.Frame(s) b = m.bytes self.assertEqual(s, m.bytes) if not PYPY: # check that it copies self.assert_(b is not s) # check that it copies only once self.assert_(b is m.bytes) def test_unicode(self): """Test the unicode representations of the Frames.""" s = u('asdf') self.assertRaises(TypeError, zmq.Frame, s) for i in range(16): s = (2**i)*u('§') m = zmq.Frame(s.encode('utf8')) self.assertEqual(s, unicode(m.bytes,'utf8')) def test_len(self): """Test the len of the Frames.""" for i in range(16): s = (2**i)*x m = zmq.Frame(s) self.assertEqual(len(s), len(m)) @skip_pypy def test_lifecycle1(self): """Run through a ref counting cycle with a copy.""" for i in range(5, 16): # 32, 64,..., 65536 s = (2**i)*x rc = 2 self.assertEqual(grc(s), rc) m = zmq.Frame(s) rc += 2 self.assertEqual(grc(s), rc) m2 = copy.copy(m) rc += 1 self.assertEqual(grc(s), rc) buf = m2.buffer rc += view_rc self.assertEqual(grc(s), rc) self.assertEqual(s, b(str(m))) self.assertEqual(s, bytes(m2)) self.assertEqual(s, m.bytes) # self.assert_(s is str(m)) # self.assert_(s is str(m2)) del m2 rc -= 1 self.assertEqual(grc(s), rc) rc -= view_rc del buf self.assertEqual(grc(s), rc) del m rc -= 2 await_gc(s, rc) self.assertEqual(grc(s), rc) self.assertEqual(rc, 2) del s @skip_pypy def test_lifecycle2(self): """Run through a different ref counting cycle with a copy.""" for i in range(5, 16): # 32, 64,..., 65536 s = (2**i)*x rc = 2 self.assertEqual(grc(s), rc) m = zmq.Frame(s) rc += 2 self.assertEqual(grc(s), rc) m2 = copy.copy(m) rc += 1 self.assertEqual(grc(s), rc) buf = m.buffer rc += view_rc self.assertEqual(grc(s), rc) self.assertEqual(s, b(str(m))) self.assertEqual(s, bytes(m2)) self.assertEqual(s, m2.bytes) self.assertEqual(s, m.bytes) # self.assert_(s is str(m)) # self.assert_(s is str(m2)) del buf self.assertEqual(grc(s), rc) del m # m.buffer is kept until m is del'd rc -= view_rc rc -= 1 self.assertEqual(grc(s), rc) del m2 rc -= 2 await_gc(s, rc) self.assertEqual(grc(s), rc) self.assertEqual(rc, 2) del s @skip_pypy def test_tracker(self): m = zmq.Frame(b'asdf', track=True) self.assertFalse(m.tracker.done) pm = zmq.MessageTracker(m) self.assertFalse(pm.done) del m for i in range(10): if pm.done: break 
time.sleep(0.1) self.assertTrue(pm.done) def test_no_tracker(self): m = zmq.Frame(b'asdf', track=False) self.assertEqual(m.tracker, None) m2 = copy.copy(m) self.assertEqual(m2.tracker, None) self.assertRaises(ValueError, zmq.MessageTracker, m) @skip_pypy def test_multi_tracker(self): m = zmq.Frame(b'asdf', track=True) m2 = zmq.Frame(b'whoda', track=True) mt = zmq.MessageTracker(m,m2) self.assertFalse(m.tracker.done) self.assertFalse(mt.done) self.assertRaises(zmq.NotDone, mt.wait, 0.1) del m time.sleep(0.1) self.assertRaises(zmq.NotDone, mt.wait, 0.1) self.assertFalse(mt.done) del m2 self.assertTrue(mt.wait() is None) self.assertTrue(mt.done) def test_buffer_in(self): """test using a buffer as input""" ins = b("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√") m = zmq.Frame(view(ins)) def test_bad_buffer_in(self): """test using a bad object""" self.assertRaises(TypeError, zmq.Frame, 5) self.assertRaises(TypeError, zmq.Frame, object()) def test_buffer_out(self): """receiving buffered output""" ins = b("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√") m = zmq.Frame(ins) outb = m.buffer self.assertTrue(isinstance(outb, view)) self.assert_(outb is m.buffer) self.assert_(m.buffer is m.buffer) @skip_pypy def test_memoryview_shape(self): """memoryview shape info""" if sys.version_info < (3,): raise SkipTest("only test memoryviews on Python 3") data = b("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√") n = len(data) f = zmq.Frame(data) view1 = f.buffer self.assertEqual(view1.ndim, 1) self.assertEqual(view1.shape, (n,)) self.assertEqual(view1.tobytes(), data) view2 = memoryview(f) self.assertEqual(view2.ndim, 1) self.assertEqual(view2.shape, (n,)) self.assertEqual(view2.tobytes(), data) def test_multisend(self): """ensure that a message remains intact after multiple sends""" a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR) s = b"message" m = zmq.Frame(s) self.assertEqual(s, m.bytes) a.send(m, copy=False) time.sleep(0.1) self.assertEqual(s, m.bytes) a.send(m, copy=False) time.sleep(0.1) self.assertEqual(s, m.bytes) a.send(m, copy=True) time.sleep(0.1) self.assertEqual(s, m.bytes) a.send(m, copy=True) time.sleep(0.1) self.assertEqual(s, m.bytes) for i in range(4): r = b.recv() self.assertEqual(s,r) self.assertEqual(s, m.bytes) def test_buffer_numpy(self): """test non-copying numpy array messages""" try: import numpy except ImportError: raise SkipTest("numpy required") rand = numpy.random.randint shapes = [ rand(2,16) for i in range(5) ] for i in range(1,len(shapes)+1): shape = shapes[:i] A = numpy.random.random(shape) m = zmq.Frame(A) if view.__name__ == 'buffer': self.assertEqual(A.data, m.buffer) B = numpy.frombuffer(m.buffer,dtype=A.dtype).reshape(A.shape) else: self.assertEqual(memoryview(A), m.buffer) B = numpy.array(m.buffer,dtype=A.dtype).reshape(A.shape) self.assertEqual((A==B).all(), True) def test_memoryview(self): """test messages from memoryview""" major,minor = sys.version_info[:2] if not (major >= 3 or (major == 2 and minor >= 7)): raise SkipTest("memoryviews only in python >= 2.7") s = b'carrotjuice' v = memoryview(s) m = zmq.Frame(s) buf = m.buffer s2 = buf.tobytes() self.assertEqual(s2,s) self.assertEqual(m.bytes,s) def test_noncopying_recv(self): """check for clobbering message buffers""" null = b'\0'*64 sa,sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR) for i in range(32): # try a few times sb.send(null, copy=False) m = sa.recv(copy=False) mb = m.bytes # buf = view(m) buf = m.buffer del m for i in range(5): ff=b'\xff'*(40 + i*10) sb.send(ff, copy=False) m2 = sa.recv(copy=False) if view.__name__ == 'buffer': b = bytes(buf) else: b = buf.tobytes() 
self.assertEqual(b, null) self.assertEqual(mb, null) self.assertEqual(m2.bytes, ff) @skip_pypy def test_buffer_numpy(self): """test non-copying numpy array messages""" try: import numpy except ImportError: raise SkipTest("requires numpy") if sys.version_info < (2,7): raise SkipTest("requires new-style buffer interface (py >= 2.7)") rand = numpy.random.randint shapes = [ rand(2,5) for i in range(5) ] a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR) dtypes = [int, float, '>i4', 'B'] for i in range(1,len(shapes)+1): shape = shapes[:i] for dt in dtypes: A = numpy.empty(shape, dtype=dt) while numpy.isnan(A).any(): # don't let nan sneak in A = numpy.ndarray(shape, dtype=dt) a.send(A, copy=False) msg = b.recv(copy=False) B = numpy.frombuffer(msg, A.dtype).reshape(A.shape) self.assertEqual(A.shape, B.shape) self.assertTrue((A==B).all()) A = numpy.empty(shape, dtype=[('a', int), ('b', float), ('c', 'a32')]) A['a'] = 1024 A['b'] = 1e9 A['c'] = 'hello there' a.send(A, copy=False) msg = b.recv(copy=False) B = numpy.frombuffer(msg, A.dtype).reshape(A.shape) self.assertEqual(A.shape, B.shape) self.assertTrue((A==B).all()) def test_frame_more(self): """test Frame.more attribute""" frame = zmq.Frame(b"hello") self.assertFalse(frame.more) sa,sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR) sa.send_multipart([b'hi', b'there']) frame = self.recv(sb, copy=False) self.assertTrue(frame.more) if zmq.zmq_version_info()[0] >= 3 and not PYPY: self.assertTrue(frame.get(zmq.MORE)) frame = self.recv(sb, copy=False) self.assertFalse(frame.more) if zmq.zmq_version_info()[0] >= 3 and not PYPY: self.assertFalse(frame.get(zmq.MORE)) pyzmq-16.0.2/zmq/tests/test_monitor.py000066400000000000000000000056461301503633700200450ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import errno import sys import time import struct from unittest import TestCase from pytest import mark import zmq from zmq.tests import BaseZMQTestCase, skip_pypy, require_zmq_4 from zmq.utils.monitor import recv_monitor_message class TestSocketMonitor(BaseZMQTestCase): @require_zmq_4 def test_monitor(self): """Test monitoring interface for sockets.""" s_rep = self.context.socket(zmq.REP) s_req = self.context.socket(zmq.REQ) self.sockets.extend([s_rep, s_req]) s_req.bind("tcp://127.0.0.1:6666") # try monitoring the REP socket s_rep.monitor("inproc://monitor.rep", zmq.EVENT_ALL) # create listening socket for monitor s_event = self.context.socket(zmq.PAIR) self.sockets.append(s_event) s_event.connect("inproc://monitor.rep") s_event.linger = 0 # test receive event for connect event s_rep.connect("tcp://127.0.0.1:6666") m = recv_monitor_message(s_event) if m['event'] == zmq.EVENT_CONNECT_DELAYED: self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6666") # test receive event for connected event m = recv_monitor_message(s_event) self.assertEqual(m['event'], zmq.EVENT_CONNECTED) self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6666") # test monitor can be disabled. 
s_rep.disable_monitor() m = recv_monitor_message(s_event) self.assertEqual(m['event'], zmq.EVENT_MONITOR_STOPPED) @require_zmq_4 def test_monitor_repeat(self): s = self.socket(zmq.PULL) m = s.get_monitor_socket() self.sockets.append(m) m2 = s.get_monitor_socket() assert m is m2 s.disable_monitor() evt = recv_monitor_message(m) self.assertEqual(evt['event'], zmq.EVENT_MONITOR_STOPPED) m.close() s.close() @require_zmq_4 def test_monitor_connected(self): """Test connected monitoring socket.""" s_rep = self.context.socket(zmq.REP) s_req = self.context.socket(zmq.REQ) self.sockets.extend([s_rep, s_req]) s_req.bind("tcp://127.0.0.1:6667") # try monitoring the REP socket # create listening socket for monitor s_event = s_rep.get_monitor_socket() s_event.linger = 0 self.sockets.append(s_event) # test receive event for connect event s_rep.connect("tcp://127.0.0.1:6667") m = recv_monitor_message(s_event) if m['event'] == zmq.EVENT_CONNECT_DELAYED: self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6667") # test receive event for connected event m = recv_monitor_message(s_event) self.assertEqual(m['event'], zmq.EVENT_CONNECTED) self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6667") pyzmq-16.0.2/zmq/tests/test_monqueue.py000066400000000000000000000203421301503633700202020ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import time from unittest import TestCase import zmq from zmq import devices from zmq.tests import BaseZMQTestCase, SkipTest, PYPY from zmq.utils.strtypes import unicode if PYPY or zmq.zmq_version_info() >= (4,1): # cleanup of shared Context doesn't work on PyPy # there also seems to be a bug in cleanup in libzmq-4.1 (zeromq/libzmq#1052) devices.Device.context_factory = zmq.Context class TestMonitoredQueue(BaseZMQTestCase): sockets = [] def build_device(self, mon_sub=b"", in_prefix=b'in', out_prefix=b'out'): self.device = devices.ThreadMonitoredQueue(zmq.PAIR, zmq.PAIR, zmq.PUB, in_prefix, out_prefix) alice = self.context.socket(zmq.PAIR) bob = self.context.socket(zmq.PAIR) mon = self.context.socket(zmq.SUB) aport = alice.bind_to_random_port('tcp://127.0.0.1') bport = bob.bind_to_random_port('tcp://127.0.0.1') mport = mon.bind_to_random_port('tcp://127.0.0.1') mon.setsockopt(zmq.SUBSCRIBE, mon_sub) self.device.connect_in("tcp://127.0.0.1:%i"%aport) self.device.connect_out("tcp://127.0.0.1:%i"%bport) self.device.connect_mon("tcp://127.0.0.1:%i"%mport) self.device.start() time.sleep(.2) try: # this is currenlty necessary to ensure no dropped monitor messages # see LIBZMQ-248 for more info mon.recv_multipart(zmq.NOBLOCK) except zmq.ZMQError: pass self.sockets.extend([alice, bob, mon]) return alice, bob, mon def teardown_device(self): for socket in self.sockets: socket.close() del socket del self.device def test_reply(self): alice, bob, mon = self.build_device() alices = b"hello bob".split() alice.send_multipart(alices) bobs = self.recv_multipart(bob) self.assertEqual(alices, bobs) bobs = b"hello alice".split() bob.send_multipart(bobs) alices = self.recv_multipart(alice) self.assertEqual(alices, bobs) self.teardown_device() def test_queue(self): alice, bob, mon = self.build_device() alices = b"hello bob".split() alice.send_multipart(alices) alices2 = b"hello again".split() alice.send_multipart(alices2) alices3 = b"hello again and again".split() alice.send_multipart(alices3) bobs = self.recv_multipart(bob) self.assertEqual(alices, bobs) bobs = self.recv_multipart(bob) self.assertEqual(alices2, bobs) bobs = 
self.recv_multipart(bob) self.assertEqual(alices3, bobs) bobs = b"hello alice".split() bob.send_multipart(bobs) alices = self.recv_multipart(alice) self.assertEqual(alices, bobs) self.teardown_device() def test_monitor(self): alice, bob, mon = self.build_device() alices = b"hello bob".split() alice.send_multipart(alices) alices2 = b"hello again".split() alice.send_multipart(alices2) alices3 = b"hello again and again".split() alice.send_multipart(alices3) bobs = self.recv_multipart(bob) self.assertEqual(alices, bobs) mons = self.recv_multipart(mon) self.assertEqual([b'in']+bobs, mons) bobs = self.recv_multipart(bob) self.assertEqual(alices2, bobs) bobs = self.recv_multipart(bob) self.assertEqual(alices3, bobs) mons = self.recv_multipart(mon) self.assertEqual([b'in']+alices2, mons) bobs = b"hello alice".split() bob.send_multipart(bobs) alices = self.recv_multipart(alice) self.assertEqual(alices, bobs) mons = self.recv_multipart(mon) self.assertEqual([b'in']+alices3, mons) mons = self.recv_multipart(mon) self.assertEqual([b'out']+bobs, mons) self.teardown_device() def test_prefix(self): alice, bob, mon = self.build_device(b"", b'foo', b'bar') alices = b"hello bob".split() alice.send_multipart(alices) alices2 = b"hello again".split() alice.send_multipart(alices2) alices3 = b"hello again and again".split() alice.send_multipart(alices3) bobs = self.recv_multipart(bob) self.assertEqual(alices, bobs) mons = self.recv_multipart(mon) self.assertEqual([b'foo']+bobs, mons) bobs = self.recv_multipart(bob) self.assertEqual(alices2, bobs) bobs = self.recv_multipart(bob) self.assertEqual(alices3, bobs) mons = self.recv_multipart(mon) self.assertEqual([b'foo']+alices2, mons) bobs = b"hello alice".split() bob.send_multipart(bobs) alices = self.recv_multipart(alice) self.assertEqual(alices, bobs) mons = self.recv_multipart(mon) self.assertEqual([b'foo']+alices3, mons) mons = self.recv_multipart(mon) self.assertEqual([b'bar']+bobs, mons) self.teardown_device() def test_monitor_subscribe(self): alice, bob, mon = self.build_device(b"out") alices = b"hello bob".split() alice.send_multipart(alices) alices2 = b"hello again".split() alice.send_multipart(alices2) alices3 = b"hello again and again".split() alice.send_multipart(alices3) bobs = self.recv_multipart(bob) self.assertEqual(alices, bobs) bobs = self.recv_multipart(bob) self.assertEqual(alices2, bobs) bobs = self.recv_multipart(bob) self.assertEqual(alices3, bobs) bobs = b"hello alice".split() bob.send_multipart(bobs) alices = self.recv_multipart(alice) self.assertEqual(alices, bobs) mons = self.recv_multipart(mon) self.assertEqual([b'out']+bobs, mons) self.teardown_device() def test_router_router(self): """test router-router MQ devices""" dev = devices.ThreadMonitoredQueue(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out') self.device = dev dev.setsockopt_in(zmq.LINGER, 0) dev.setsockopt_out(zmq.LINGER, 0) dev.setsockopt_mon(zmq.LINGER, 0) binder = self.context.socket(zmq.DEALER) porta = binder.bind_to_random_port('tcp://127.0.0.1') portb = binder.bind_to_random_port('tcp://127.0.0.1') binder.close() time.sleep(0.1) a = self.context.socket(zmq.DEALER) a.identity = b'a' b = self.context.socket(zmq.DEALER) b.identity = b'b' self.sockets.extend([a, b]) a.connect('tcp://127.0.0.1:%i'%porta) dev.bind_in('tcp://127.0.0.1:%i'%porta) b.connect('tcp://127.0.0.1:%i'%portb) dev.bind_out('tcp://127.0.0.1:%i'%portb) dev.start() time.sleep(0.2) if zmq.zmq_version_info() >= (3,1,0): # flush erroneous poll state, due to LIBZMQ-280 ping_msg = [ b'ping', b'pong' ] for s in 
(a,b): s.send_multipart(ping_msg) try: s.recv(zmq.NOBLOCK) except zmq.ZMQError: pass msg = [ b'hello', b'there' ] a.send_multipart([b'b']+msg) bmsg = self.recv_multipart(b) self.assertEqual(bmsg, [b'a']+msg) b.send_multipart(bmsg) amsg = self.recv_multipart(a) self.assertEqual(amsg, [b'b']+msg) self.teardown_device() def test_default_mq_args(self): self.device = dev = devices.ThreadMonitoredQueue(zmq.ROUTER, zmq.DEALER, zmq.PUB) dev.setsockopt_in(zmq.LINGER, 0) dev.setsockopt_out(zmq.LINGER, 0) dev.setsockopt_mon(zmq.LINGER, 0) # this will raise if default args are wrong dev.start() self.teardown_device() def test_mq_check_prefix(self): ins = self.context.socket(zmq.ROUTER) outs = self.context.socket(zmq.DEALER) mons = self.context.socket(zmq.PUB) self.sockets.extend([ins, outs, mons]) ins = unicode('in') outs = unicode('out') self.assertRaises(TypeError, devices.monitoredqueue, ins, outs, mons) pyzmq-16.0.2/zmq/tests/test_multipart.py000066400000000000000000000016601301503633700203670ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import zmq from zmq.tests import BaseZMQTestCase, SkipTest, have_gevent, GreenTest class TestMultipart(BaseZMQTestCase): def test_router_dealer(self): router, dealer = self.create_bound_pair(zmq.ROUTER, zmq.DEALER) msg1 = b'message1' dealer.send(msg1) ident = self.recv(router) more = router.rcvmore self.assertEqual(more, True) msg2 = self.recv(router) self.assertEqual(msg1, msg2) more = router.rcvmore self.assertEqual(more, False) def test_basic_multipart(self): a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR) msg = [ b'hi', b'there', b'b'] a.send_multipart(msg) recvd = b.recv_multipart() self.assertEqual(msg, recvd) if have_gevent: class TestMultipartGreen(GreenTest, TestMultipart): pass pyzmq-16.0.2/zmq/tests/test_pair.py000066400000000000000000000023541301503633700173020ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import zmq from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest x = b' ' class TestPair(BaseZMQTestCase): def test_basic(self): s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) msg1 = b'message1' msg2 = self.ping_pong(s1, s2, msg1) self.assertEqual(msg1, msg2) def test_multiple(self): s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) for i in range(10): msg = i*x s1.send(msg) for i in range(10): msg = i*x s2.send(msg) for i in range(10): msg = s1.recv() self.assertEqual(msg, i*x) for i in range(10): msg = s2.recv() self.assertEqual(msg, i*x) def test_json(self): s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) o = dict(a=10,b=list(range(10))) o2 = self.ping_pong_json(s1, s2, o) def test_pyobj(self): s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) o = dict(a=10,b=range(10)) o2 = self.ping_pong_pyobj(s1, s2, o) if have_gevent: class TestReqRepGreen(GreenTest, TestPair): pass pyzmq-16.0.2/zmq/tests/test_poll.py000066400000000000000000000160231301503633700173130ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import os import time from unittest import TestCase import zmq from zmq.tests import PollZMQTestCase, have_gevent, GreenTest def wait(): time.sleep(.25) class TestPoll(PollZMQTestCase): Poller = zmq.Poller def test_pair(self): s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) # Sleep to allow sockets to connect. 
wait() poller = self.Poller() poller.register(s1, zmq.POLLIN|zmq.POLLOUT) poller.register(s2, zmq.POLLIN|zmq.POLLOUT) # Poll result should contain both sockets socks = dict(poller.poll()) # Now make sure that both are send ready. self.assertEqual(socks[s1], zmq.POLLOUT) self.assertEqual(socks[s2], zmq.POLLOUT) # Now do a send on both, wait and test for zmq.POLLOUT|zmq.POLLIN s1.send(b'msg1') s2.send(b'msg2') wait() socks = dict(poller.poll()) self.assertEqual(socks[s1], zmq.POLLOUT|zmq.POLLIN) self.assertEqual(socks[s2], zmq.POLLOUT|zmq.POLLIN) # Make sure that both are in POLLOUT after recv. s1.recv() s2.recv() socks = dict(poller.poll()) self.assertEqual(socks[s1], zmq.POLLOUT) self.assertEqual(socks[s2], zmq.POLLOUT) poller.unregister(s1) poller.unregister(s2) def test_reqrep(self): s1, s2 = self.create_bound_pair(zmq.REP, zmq.REQ) # Sleep to allow sockets to connect. wait() poller = self.Poller() poller.register(s1, zmq.POLLIN|zmq.POLLOUT) poller.register(s2, zmq.POLLIN|zmq.POLLOUT) # Make sure that s1 is in state 0 and s2 is in POLLOUT socks = dict(poller.poll()) self.assertEqual(s1 in socks, 0) self.assertEqual(socks[s2], zmq.POLLOUT) # Make sure that s2 goes immediately into state 0 after send. s2.send(b'msg1') socks = dict(poller.poll()) self.assertEqual(s2 in socks, 0) # Make sure that s1 goes into POLLIN state after a time.sleep(). time.sleep(0.5) socks = dict(poller.poll()) self.assertEqual(socks[s1], zmq.POLLIN) # Make sure that s1 goes into POLLOUT after recv. s1.recv() socks = dict(poller.poll()) self.assertEqual(socks[s1], zmq.POLLOUT) # Make sure s1 goes into state 0 after send. s1.send(b'msg2') socks = dict(poller.poll()) self.assertEqual(s1 in socks, 0) # Wait and then see that s2 is in POLLIN. time.sleep(0.5) socks = dict(poller.poll()) self.assertEqual(socks[s2], zmq.POLLIN) # Make sure that s2 is in POLLOUT after recv. s2.recv() socks = dict(poller.poll()) self.assertEqual(socks[s2], zmq.POLLOUT) poller.unregister(s1) poller.unregister(s2) def test_no_events(self): s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) poller = self.Poller() poller.register(s1, zmq.POLLIN|zmq.POLLOUT) poller.register(s2, 0) self.assertTrue(s1 in poller) self.assertFalse(s2 in poller) poller.register(s1, 0) self.assertFalse(s1 in poller) def test_pubsub(self): s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB) s2.setsockopt(zmq.SUBSCRIBE, b'') # Sleep to allow sockets to connect. wait() poller = self.Poller() poller.register(s1, zmq.POLLIN|zmq.POLLOUT) poller.register(s2, zmq.POLLIN) # Now make sure that both are send ready. socks = dict(poller.poll()) self.assertEqual(socks[s1], zmq.POLLOUT) self.assertEqual(s2 in socks, 0) # Make sure that s1 stays in POLLOUT after a send. s1.send(b'msg1') socks = dict(poller.poll()) self.assertEqual(socks[s1], zmq.POLLOUT) # Make sure that s2 is POLLIN after waiting. wait() socks = dict(poller.poll()) self.assertEqual(socks[s2], zmq.POLLIN) # Make sure that s2 goes into 0 after recv. 
s2.recv() socks = dict(poller.poll()) self.assertEqual(s2 in socks, 0) poller.unregister(s1) poller.unregister(s2) def test_raw(self): r, w = os.pipe() r = os.fdopen(r, 'rb') w = os.fdopen(w, 'wb') p = self.Poller() p.register(r, zmq.POLLIN) socks = dict(p.poll(1)) assert socks == {} w.write(b'x') w.flush() socks = dict(p.poll(1)) assert socks == {r.fileno(): zmq.POLLIN} w.close() r.close() def test_timeout(self): """make sure Poller.poll timeout has the right units (milliseconds).""" s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) poller = self.Poller() poller.register(s1, zmq.POLLIN) tic = time.time() evt = poller.poll(.005) toc = time.time() self.assertTrue(toc-tic < 0.1) tic = time.time() evt = poller.poll(5) toc = time.time() self.assertTrue(toc-tic < 0.1) self.assertTrue(toc-tic > .001) tic = time.time() evt = poller.poll(500) toc = time.time() self.assertTrue(toc-tic < 1) self.assertTrue(toc-tic > 0.1) class TestSelect(PollZMQTestCase): def test_pair(self): s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) # Sleep to allow sockets to connect. wait() rlist, wlist, xlist = zmq.select([s1, s2], [s1, s2], [s1, s2]) self.assert_(s1 in wlist) self.assert_(s2 in wlist) self.assert_(s1 not in rlist) self.assert_(s2 not in rlist) def test_timeout(self): """make sure select timeout has the right units (seconds).""" s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) tic = time.time() r,w,x = zmq.select([s1,s2],[],[],.005) toc = time.time() self.assertTrue(toc-tic < 1) self.assertTrue(toc-tic > 0.001) tic = time.time() r,w,x = zmq.select([s1,s2],[],[],.25) toc = time.time() self.assertTrue(toc-tic < 1) self.assertTrue(toc-tic > 0.1) if have_gevent: import gevent from zmq import green as gzmq class TestPollGreen(GreenTest, TestPoll): Poller = gzmq.Poller def test_wakeup(self): s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) poller = self.Poller() poller.register(s2, zmq.POLLIN) tic = time.time() r = gevent.spawn(lambda: poller.poll(10000)) s = gevent.spawn(lambda: s1.send(b'msg1')) r.join() toc = time.time() self.assertTrue(toc-tic < 1) def test_socket_poll(self): s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) tic = time.time() r = gevent.spawn(lambda: s2.poll(10000)) s = gevent.spawn(lambda: s1.send(b'msg1')) r.join() toc = time.time() self.assertTrue(toc-tic < 1) pyzmq-16.0.2/zmq/tests/test_pubsub.py000066400000000000000000000020461301503633700176450ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import time from unittest import TestCase import zmq from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest class TestPubSub(BaseZMQTestCase): pass # We are disabling this test while an issue is being resolved. def test_basic(self): s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB) s2.setsockopt(zmq.SUBSCRIBE,b'') time.sleep(0.1) msg1 = b'message' s1.send(msg1) msg2 = s2.recv() # This is blocking! self.assertEqual(msg1, msg2) def test_topic(self): s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB) s2.setsockopt(zmq.SUBSCRIBE, b'x') time.sleep(0.1) msg1 = b'message' s1.send(msg1) self.assertRaisesErrno(zmq.EAGAIN, s2.recv, zmq.NOBLOCK) msg1 = b'xmessage' s1.send(msg1) msg2 = s2.recv() self.assertEqual(msg1, msg2) if have_gevent: class TestPubSubGreen(GreenTest, TestPubSub): pass pyzmq-16.0.2/zmq/tests/test_reqrep.py000066400000000000000000000034611301503633700176450ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
from unittest import TestCase import zmq from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest class TestReqRep(BaseZMQTestCase): def test_basic(self): s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP) msg1 = b'message 1' msg2 = self.ping_pong(s1, s2, msg1) self.assertEqual(msg1, msg2) def test_multiple(self): s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP) for i in range(10): msg1 = i*b' ' msg2 = self.ping_pong(s1, s2, msg1) self.assertEqual(msg1, msg2) def test_bad_send_recv(self): s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP) if zmq.zmq_version() != '2.1.8': # this doesn't work on 2.1.8 for copy in (True,False): self.assertRaisesErrno(zmq.EFSM, s1.recv, copy=copy) self.assertRaisesErrno(zmq.EFSM, s2.send, b'asdf', copy=copy) # I have to have this or we die on an Abort trap. msg1 = b'asdf' msg2 = self.ping_pong(s1, s2, msg1) self.assertEqual(msg1, msg2) def test_json(self): s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP) o = dict(a=10,b=list(range(10))) o2 = self.ping_pong_json(s1, s2, o) def test_pyobj(self): s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP) o = dict(a=10,b=range(10)) o2 = self.ping_pong_pyobj(s1, s2, o) def test_large_msg(self): s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP) msg1 = 10000*b'X' for i in range(10): msg2 = self.ping_pong(s1, s2, msg1) self.assertEqual(msg1, msg2) if have_gevent: class TestReqRepGreen(GreenTest, TestReqRep): pass pyzmq-16.0.2/zmq/tests/test_retry_eintr.py000066400000000000000000000056021301503633700207140ustar00rootroot00000000000000# -*- coding: utf8 -*- # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import signal import time from threading import Thread from pytest import mark import zmq from zmq.tests import ( BaseZMQTestCase, SkipTest, skip_pypy ) from zmq.utils.strtypes import b # Partially based on EINTRBaseTest from CPython 3.5 eintr_tester class TestEINTRSysCall(BaseZMQTestCase): """ Base class for EINTR tests. """ # delay for initial signal delivery signal_delay = 0.1 # timeout for tests. Must be > signal_delay timeout = .25 timeout_ms = int(timeout * 1e3) @mark.skipif(not hasattr(signal, 'setitimer'), reason='EINTR tests require setitimer') def alarm(self, t=None): """start a timer to fire only once like signal.alarm, but with better resolution than integer seconds. 
""" if t is None: t = self.signal_delay self.timer_fired = False self.orig_handler = signal.signal(signal.SIGALRM, self.stop_timer) # signal_period ignored, since only one timer event is allowed to fire signal.setitimer(signal.ITIMER_REAL, t, 1000) def stop_timer(self, *args): self.timer_fired = True signal.setitimer(signal.ITIMER_REAL, 0, 0) signal.signal(signal.SIGALRM, self.orig_handler) @mark.skipif(not hasattr(zmq, 'RCVTIMEO'), reason="requires RCVTIMEO") def test_retry_recv(self): pull = self.socket(zmq.PULL) pull.rcvtimeo = self.timeout_ms self.alarm() self.assertRaises(zmq.Again, pull.recv) assert self.timer_fired @mark.skipif(not hasattr(zmq, 'SNDTIMEO'), reason="requires SNDTIMEO") def test_retry_send(self): push = self.socket(zmq.PUSH) push.sndtimeo = self.timeout_ms self.alarm() self.assertRaises(zmq.Again, push.send, b('buf')) assert self.timer_fired def test_retry_poll(self): x, y = self.create_bound_pair() poller = zmq.Poller() poller.register(x, zmq.POLLIN) self.alarm() def send(): time.sleep(2 * self.signal_delay) y.send(b('ping')) t = Thread(target=send) t.start() evts = dict(poller.poll(2 * self.timeout_ms)) t.join() assert x in evts assert self.timer_fired x.recv() def test_retry_term(self): push = self.socket(zmq.PUSH) push.linger = self.timeout_ms push.connect('tcp://127.0.0.1:5555') push.send(b('ping')) time.sleep(0.1) self.alarm() self.context.destroy() assert self.timer_fired assert self.context.closed def test_retry_getsockopt(self): raise SkipTest("TODO: find a way to interrupt getsockopt") def test_retry_setsockopt(self): raise SkipTest("TODO: find a way to interrupt setsockopt") pyzmq-16.0.2/zmq/tests/test_security.py000066400000000000000000000161061301503633700202160ustar00rootroot00000000000000"""Test libzmq security (libzmq >= 3.3.0)""" # -*- coding: utf8 -*- # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
import os from threading import Thread import zmq from zmq.tests import ( BaseZMQTestCase, SkipTest, PYPY ) from zmq.utils import z85 USER = b"admin" PASS = b"password" class TestSecurity(BaseZMQTestCase): def setUp(self): if zmq.zmq_version_info() < (4,0): raise SkipTest("security is new in libzmq 4.0") try: zmq.curve_keypair() except zmq.ZMQError: raise SkipTest("security requires libzmq to be built with CURVE support") super(TestSecurity, self).setUp() def zap_handler(self): socket = self.context.socket(zmq.REP) socket.bind("inproc://zeromq.zap.01") try: msg = self.recv_multipart(socket) version, sequence, domain, address, identity, mechanism = msg[:6] if mechanism == b'PLAIN': username, password = msg[6:] elif mechanism == b'CURVE': key = msg[6] self.assertEqual(version, b"1.0") self.assertEqual(identity, b"IDENT") reply = [version, sequence] if mechanism == b'CURVE' or \ (mechanism == b'PLAIN' and username == USER and password == PASS) or \ (mechanism == b'NULL'): reply.extend([ b"200", b"OK", b"anonymous", b"\5Hello\0\0\0\5World", ]) else: reply.extend([ b"400", b"Invalid username or password", b"", b"", ]) socket.send_multipart(reply) finally: socket.close() def start_zap(self): self.zap_thread = Thread(target=self.zap_handler) self.zap_thread.start() def stop_zap(self): self.zap_thread.join() def bounce(self, server, client, test_metadata=True): msg = [os.urandom(64), os.urandom(64)] client.send_multipart(msg) frames = self.recv_multipart(server, copy=False) recvd = list(map(lambda x: x.bytes, frames)) try: if test_metadata and not PYPY: for frame in frames: self.assertEqual(frame.get('User-Id'), 'anonymous') self.assertEqual(frame.get('Hello'), 'World') self.assertEqual(frame['Socket-Type'], 'DEALER') except zmq.ZMQVersionError: pass self.assertEqual(recvd, msg) server.send_multipart(recvd) msg2 = self.recv_multipart(client) self.assertEqual(msg2, msg) def test_null(self): """test NULL (default) security""" server = self.socket(zmq.DEALER) client = self.socket(zmq.DEALER) self.assertEqual(client.MECHANISM, zmq.NULL) self.assertEqual(server.mechanism, zmq.NULL) self.assertEqual(client.plain_server, 0) self.assertEqual(server.plain_server, 0) iface = 'tcp://127.0.0.1' port = server.bind_to_random_port(iface) client.connect("%s:%i" % (iface, port)) self.bounce(server, client, False) def test_plain(self): """test PLAIN authentication""" server = self.socket(zmq.DEALER) server.identity = b'IDENT' client = self.socket(zmq.DEALER) self.assertEqual(client.plain_username, b'') self.assertEqual(client.plain_password, b'') client.plain_username = USER client.plain_password = PASS self.assertEqual(client.getsockopt(zmq.PLAIN_USERNAME), USER) self.assertEqual(client.getsockopt(zmq.PLAIN_PASSWORD), PASS) self.assertEqual(client.plain_server, 0) self.assertEqual(server.plain_server, 0) server.plain_server = True self.assertEqual(server.mechanism, zmq.PLAIN) self.assertEqual(client.mechanism, zmq.PLAIN) assert not client.plain_server assert server.plain_server self.start_zap() iface = 'tcp://127.0.0.1' port = server.bind_to_random_port(iface) client.connect("%s:%i" % (iface, port)) self.bounce(server, client) self.stop_zap() def skip_plain_inauth(self): """test PLAIN failed authentication""" server = self.socket(zmq.DEALER) server.identity = b'IDENT' client = self.socket(zmq.DEALER) self.sockets.extend([server, client]) client.plain_username = USER client.plain_password = b'incorrect' server.plain_server = True self.assertEqual(server.mechanism, zmq.PLAIN) self.assertEqual(client.mechanism, 
zmq.PLAIN) self.start_zap() iface = 'tcp://127.0.0.1' port = server.bind_to_random_port(iface) client.connect("%s:%i" % (iface, port)) client.send(b'ping') server.rcvtimeo = 250 self.assertRaisesErrno(zmq.EAGAIN, server.recv) self.stop_zap() def test_keypair(self): """test curve_keypair""" try: public, secret = zmq.curve_keypair() except zmq.ZMQError: raise SkipTest("CURVE unsupported") self.assertEqual(type(secret), bytes) self.assertEqual(type(public), bytes) self.assertEqual(len(secret), 40) self.assertEqual(len(public), 40) # verify that it is indeed Z85 bsecret, bpublic = [ z85.decode(key) for key in (public, secret) ] self.assertEqual(type(bsecret), bytes) self.assertEqual(type(bpublic), bytes) self.assertEqual(len(bsecret), 32) self.assertEqual(len(bpublic), 32) def test_curve(self): """test CURVE encryption""" server = self.socket(zmq.DEALER) server.identity = b'IDENT' client = self.socket(zmq.DEALER) self.sockets.extend([server, client]) try: server.curve_server = True except zmq.ZMQError as e: # will raise EINVAL if no CURVE support if e.errno == zmq.EINVAL: raise SkipTest("CURVE unsupported") server_public, server_secret = zmq.curve_keypair() client_public, client_secret = zmq.curve_keypair() server.curve_secretkey = server_secret server.curve_publickey = server_public client.curve_serverkey = server_public client.curve_publickey = client_public client.curve_secretkey = client_secret self.assertEqual(server.mechanism, zmq.CURVE) self.assertEqual(client.mechanism, zmq.CURVE) self.assertEqual(server.get(zmq.CURVE_SERVER), True) self.assertEqual(client.get(zmq.CURVE_SERVER), False) self.start_zap() iface = 'tcp://127.0.0.1' port = server.bind_to_random_port(iface) client.connect("%s:%i" % (iface, port)) self.bounce(server, client) self.stop_zap() pyzmq-16.0.2/zmq/tests/test_socket.py000066400000000000000000000441351301503633700176420ustar00rootroot00000000000000# -*- coding: utf8 -*- # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. 
import copy import os import platform import time import warnings import socket import sys from pytest import mark import zmq from zmq.tests import ( BaseZMQTestCase, SkipTest, have_gevent, GreenTest, skip_pypy ) from zmq.utils.strtypes import unicode pypy = platform.python_implementation().lower() == 'pypy' on_travis = bool(os.environ.get('TRAVIS_PYTHON_VERSION')) class TestSocket(BaseZMQTestCase): def test_create(self): ctx = self.Context() s = ctx.socket(zmq.PUB) # Superluminal protocol not yet implemented self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.bind, 'ftl://a') self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.connect, 'ftl://a') self.assertRaisesErrno(zmq.EINVAL, s.bind, 'tcp://') s.close() del ctx def test_context_manager(self): url = 'inproc://a' with self.Context() as ctx: with ctx.socket(zmq.PUSH) as a: a.bind(url) with ctx.socket(zmq.PULL) as b: b.connect(url) msg = b'hi' a.send(msg) rcvd = self.recv(b) self.assertEqual(rcvd, msg) self.assertEqual(b.closed, True) self.assertEqual(a.closed, True) self.assertEqual(ctx.closed, True) def test_dir(self): ctx = self.Context() s = ctx.socket(zmq.PUB) self.assertTrue('send' in dir(s)) self.assertTrue('IDENTITY' in dir(s)) self.assertTrue('AFFINITY' in dir(s)) self.assertTrue('FD' in dir(s)) s.close() ctx.term() def test_bind_unicode(self): s = self.socket(zmq.PUB) p = s.bind_to_random_port(unicode("tcp://*")) def test_connect_unicode(self): s = self.socket(zmq.PUB) s.connect(unicode("tcp://127.0.0.1:5555")) def test_bind_to_random_port(self): # Check that bind_to_random_port do not hide useful exception ctx = self.Context() c = ctx.socket(zmq.PUB) # Invalid format try: c.bind_to_random_port('tcp:*') except zmq.ZMQError as e: self.assertEqual(e.errno, zmq.EINVAL) # Invalid protocol try: c.bind_to_random_port('rand://*') except zmq.ZMQError as e: self.assertEqual(e.errno, zmq.EPROTONOSUPPORT) def test_identity(self): s = self.context.socket(zmq.PULL) self.sockets.append(s) ident = b'identity\0\0' s.identity = ident self.assertEqual(s.get(zmq.IDENTITY), ident) def test_unicode_sockopts(self): """test setting/getting sockopts with unicode strings""" topic = "tést" if str is not unicode: topic = topic.decode('utf8') p,s = self.create_bound_pair(zmq.PUB, zmq.SUB) self.assertEqual(s.send_unicode, s.send_unicode) self.assertEqual(p.recv_unicode, p.recv_unicode) self.assertRaises(TypeError, s.setsockopt, zmq.SUBSCRIBE, topic) self.assertRaises(TypeError, s.setsockopt, zmq.IDENTITY, topic) s.setsockopt_unicode(zmq.IDENTITY, topic, 'utf16') self.assertRaises(TypeError, s.setsockopt, zmq.AFFINITY, topic) s.setsockopt_unicode(zmq.SUBSCRIBE, topic) self.assertRaises(TypeError, s.getsockopt_unicode, zmq.AFFINITY) self.assertRaisesErrno(zmq.EINVAL, s.getsockopt_unicode, zmq.SUBSCRIBE) identb = s.getsockopt(zmq.IDENTITY) identu = identb.decode('utf16') identu2 = s.getsockopt_unicode(zmq.IDENTITY, 'utf16') self.assertEqual(identu, identu2) time.sleep(0.1) # wait for connection/subscription p.send_unicode(topic,zmq.SNDMORE) p.send_unicode(topic*2, encoding='latin-1') self.assertEqual(topic, s.recv_unicode()) self.assertEqual(topic*2, s.recv_unicode(encoding='latin-1')) def test_int_sockopts(self): "test integer sockopts" v = zmq.zmq_version_info() if v < (3,0): default_hwm = 0 else: default_hwm = 1000 p,s = self.create_bound_pair(zmq.PUB, zmq.SUB) p.setsockopt(zmq.LINGER, 0) self.assertEqual(p.getsockopt(zmq.LINGER), 0) p.setsockopt(zmq.LINGER, -1) self.assertEqual(p.getsockopt(zmq.LINGER), -1) self.assertEqual(p.hwm, default_hwm) p.hwm = 11 
self.assertEqual(p.hwm, 11) # p.setsockopt(zmq.EVENTS, zmq.POLLIN) self.assertEqual(p.getsockopt(zmq.EVENTS), zmq.POLLOUT) self.assertRaisesErrno(zmq.EINVAL, p.setsockopt,zmq.EVENTS, 2**7-1) self.assertEqual(p.getsockopt(zmq.TYPE), p.socket_type) self.assertEqual(p.getsockopt(zmq.TYPE), zmq.PUB) self.assertEqual(s.getsockopt(zmq.TYPE), s.socket_type) self.assertEqual(s.getsockopt(zmq.TYPE), zmq.SUB) # check for overflow / wrong type: errors = [] backref = {} constants = zmq.constants for name in constants.__all__: value = getattr(constants, name) if isinstance(value, int): backref[value] = name for opt in zmq.constants.int_sockopts.union(zmq.constants.int64_sockopts): sopt = backref[opt] if sopt.startswith(( 'ROUTER', 'XPUB', 'TCP', 'FAIL', 'REQ_', 'CURVE_', 'PROBE_ROUTER', 'IPC_FILTER', 'GSSAPI', 'STREAM_', 'VMCI_BUFFER_SIZE', 'VMCI_BUFFER_MIN_SIZE', 'VMCI_BUFFER_MAX_SIZE', 'VMCI_CONNECT_TIMEOUT', )): # some sockopts are write-only continue try: n = p.getsockopt(opt) except zmq.ZMQError as e: errors.append("getsockopt(zmq.%s) raised '%s'."%(sopt, e)) else: if n > 2**31: errors.append("getsockopt(zmq.%s) returned a ridiculous value." " It is probably the wrong type."%sopt) if errors: self.fail('\n'.join([''] + errors)) def test_bad_sockopts(self): """Test that appropriate errors are raised on bad socket options""" s = self.context.socket(zmq.PUB) self.sockets.append(s) s.setsockopt(zmq.LINGER, 0) # unrecognized int sockopts pass through to libzmq, and should raise EINVAL self.assertRaisesErrno(zmq.EINVAL, s.setsockopt, 9999, 5) self.assertRaisesErrno(zmq.EINVAL, s.getsockopt, 9999) # but only int sockopts are allowed through this way, otherwise raise a TypeError self.assertRaises(TypeError, s.setsockopt, 9999, b"5") # some sockopts are valid in general, but not on every socket: self.assertRaisesErrno(zmq.EINVAL, s.setsockopt, zmq.SUBSCRIBE, b'hi') def test_sockopt_roundtrip(self): "test set/getsockopt roundtrip." 
p = self.context.socket(zmq.PUB) self.sockets.append(p) p.setsockopt(zmq.LINGER, 11) self.assertEqual(p.getsockopt(zmq.LINGER), 11) def test_send_unicode(self): "test sending unicode objects" a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR) self.sockets.extend([a,b]) u = "çπ§" if str is not unicode: u = u.decode('utf8') self.assertRaises(TypeError, a.send, u,copy=False) self.assertRaises(TypeError, a.send, u,copy=True) a.send_unicode(u) s = b.recv() self.assertEqual(s,u.encode('utf8')) self.assertEqual(s.decode('utf8'),u) a.send_unicode(u,encoding='utf16') s = b.recv_unicode(encoding='utf16') self.assertEqual(s,u) def test_send_multipart_check_type(self): "check type on all frames in send_multipart" a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR) self.sockets.extend([a,b]) self.assertRaises(TypeError, a.send_multipart, [b'a', 5]) a.send_multipart([b'b']) rcvd = self.recv_multipart(b) self.assertEqual(rcvd, [b'b']) @skip_pypy def test_tracker(self): "test the MessageTracker object for tracking when zmq is done with a buffer" addr = 'tcp://127.0.0.1' # get a port: sock = socket.socket() sock.bind(('127.0.0.1', 0)) port = sock.getsockname()[1] iface = "%s:%i" % (addr, port) sock.close() time.sleep(0.1) a = self.context.socket(zmq.PUSH) b = self.context.socket(zmq.PULL) self.sockets.extend([a,b]) a.connect(iface) time.sleep(0.1) p1 = a.send(b'something', copy=False, track=True) self.assertTrue(isinstance(p1, zmq.MessageTracker)) self.assertFalse(p1.done) p2 = a.send_multipart([b'something', b'else'], copy=False, track=True) self.assert_(isinstance(p2, zmq.MessageTracker)) self.assertEqual(p2.done, False) self.assertEqual(p1.done, False) b.bind(iface) msg = self.recv_multipart(b) for i in range(10): if p1.done: break time.sleep(0.1) self.assertEqual(p1.done, True) self.assertEqual(msg, [b'something']) msg = self.recv_multipart(b) for i in range(10): if p2.done: break time.sleep(0.1) self.assertEqual(p2.done, True) self.assertEqual(msg, [b'something', b'else']) m = zmq.Frame(b"again", track=True) self.assertEqual(m.tracker.done, False) p1 = a.send(m, copy=False) p2 = a.send(m, copy=False) self.assertEqual(m.tracker.done, False) self.assertEqual(p1.done, False) self.assertEqual(p2.done, False) msg = self.recv_multipart(b) self.assertEqual(m.tracker.done, False) self.assertEqual(msg, [b'again']) msg = self.recv_multipart(b) self.assertEqual(m.tracker.done, False) self.assertEqual(msg, [b'again']) self.assertEqual(p1.done, False) self.assertEqual(p2.done, False) pm = m.tracker del m for i in range(10): if p1.done: break time.sleep(0.1) self.assertEqual(p1.done, True) self.assertEqual(p2.done, True) m = zmq.Frame(b'something', track=False) self.assertRaises(ValueError, a.send, m, copy=False, track=True) def test_close(self): ctx = self.Context() s = ctx.socket(zmq.PUB) s.close() self.assertRaisesErrno(zmq.ENOTSOCK, s.bind, b'') self.assertRaisesErrno(zmq.ENOTSOCK, s.connect, b'') self.assertRaisesErrno(zmq.ENOTSOCK, s.setsockopt, zmq.SUBSCRIBE, b'') self.assertRaisesErrno(zmq.ENOTSOCK, s.send, b'asdf') self.assertRaisesErrno(zmq.ENOTSOCK, s.recv) del ctx def test_attr(self): """set setting/getting sockopts as attributes""" s = self.context.socket(zmq.DEALER) self.sockets.append(s) linger = 10 s.linger = linger self.assertEqual(linger, s.linger) self.assertEqual(linger, s.getsockopt(zmq.LINGER)) self.assertEqual(s.fd, s.getsockopt(zmq.FD)) def test_bad_attr(self): s = self.context.socket(zmq.DEALER) self.sockets.append(s) try: s.apple='foo' except AttributeError: pass else: self.fail("bad setattr 
should have raised AttributeError") try: s.apple except AttributeError: pass else: self.fail("bad getattr should have raised AttributeError") def test_subclass(self): """subclasses can assign attributes""" class S(zmq.Socket): a = None def __init__(self, *a, **kw): self.a=-1 super(S, self).__init__(*a, **kw) s = S(self.context, zmq.REP) self.sockets.append(s) self.assertEqual(s.a, -1) s.a=1 self.assertEqual(s.a, 1) a=s.a self.assertEqual(a, 1) def test_recv_multipart(self): a,b = self.create_bound_pair() msg = b'hi' for i in range(3): a.send(msg) time.sleep(0.1) for i in range(3): self.assertEqual(self.recv_multipart(b), [msg]) def test_close_after_destroy(self): """s.close() after ctx.destroy() should be fine""" ctx = self.Context() s = ctx.socket(zmq.REP) ctx.destroy() # reaper is not instantaneous time.sleep(1e-2) s.close() self.assertTrue(s.closed) def test_poll(self): a,b = self.create_bound_pair() tic = time.time() evt = a.poll(50) self.assertEqual(evt, 0) evt = a.poll(50, zmq.POLLOUT) self.assertEqual(evt, zmq.POLLOUT) msg = b'hi' a.send(msg) evt = b.poll(50) self.assertEqual(evt, zmq.POLLIN) msg2 = self.recv(b) evt = b.poll(50) self.assertEqual(evt, 0) self.assertEqual(msg2, msg) def test_ipc_path_max_length(self): """IPC_PATH_MAX_LEN is a sensible value""" if zmq.IPC_PATH_MAX_LEN == 0: raise SkipTest("IPC_PATH_MAX_LEN undefined") msg = "Surprising value for IPC_PATH_MAX_LEN: %s" % zmq.IPC_PATH_MAX_LEN self.assertTrue(zmq.IPC_PATH_MAX_LEN > 30, msg) self.assertTrue(zmq.IPC_PATH_MAX_LEN < 1025, msg) def test_ipc_path_max_length_msg(self): if zmq.IPC_PATH_MAX_LEN == 0: raise SkipTest("IPC_PATH_MAX_LEN undefined") s = self.context.socket(zmq.PUB) self.sockets.append(s) try: s.bind('ipc://{0}'.format('a' * (zmq.IPC_PATH_MAX_LEN + 1))) except zmq.ZMQError as e: self.assertTrue(str(zmq.IPC_PATH_MAX_LEN) in e.strerror) def test_hwm(self): zmq3 = zmq.zmq_version_info()[0] >= 3 for stype in (zmq.PUB, zmq.ROUTER, zmq.SUB, zmq.REQ, zmq.DEALER): s = self.context.socket(stype) s.hwm = 100 self.assertEqual(s.hwm, 100) if zmq3: try: self.assertEqual(s.sndhwm, 100) except AttributeError: pass try: self.assertEqual(s.rcvhwm, 100) except AttributeError: pass s.close() def test_copy(self): s = self.socket(zmq.PUB) scopy = copy.copy(s) sdcopy = copy.deepcopy(s) self.assert_(scopy._shadow) self.assert_(sdcopy._shadow) self.assertEqual(s.underlying, scopy.underlying) self.assertEqual(s.underlying, sdcopy.underlying) s.close() def test_shadow(self): p = self.socket(zmq.PUSH) p.bind("tcp://127.0.0.1:5555") p2 = zmq.Socket.shadow(p.underlying) self.assertEqual(p.underlying, p2.underlying) s = self.socket(zmq.PULL) s2 = zmq.Socket.shadow(s.underlying) self.assertNotEqual(s.underlying, p.underlying) self.assertEqual(s.underlying, s2.underlying) s2.connect("tcp://127.0.0.1:5555") sent = b'hi' p2.send(sent) rcvd = self.recv(s2) self.assertEqual(rcvd, sent) def test_shadow_pyczmq(self): try: from pyczmq import zctx, zsocket except Exception: raise SkipTest("Requires pyczmq") ctx = zctx.new() ca = zsocket.new(ctx, zmq.PUSH) cb = zsocket.new(ctx, zmq.PULL) a = zmq.Socket.shadow(ca) b = zmq.Socket.shadow(cb) a.bind("inproc://a") b.connect("inproc://a") a.send(b'hi') rcvd = self.recv(b) self.assertEqual(rcvd, b'hi') def test_subscribe_method(self): pub, sub = self.create_bound_pair(zmq.PUB, zmq.SUB) sub.subscribe('prefix') sub.subscribe = 'c' p = zmq.Poller() p.register(sub, zmq.POLLIN) # wait for subscription handshake for i in range(100): pub.send(b'canary') events = p.poll(250) if events: break self.recv(sub) 
        pub.send(b'prefixmessage')
        msg = self.recv(sub)
        self.assertEqual(msg, b'prefixmessage')
        sub.unsubscribe('prefix')
        pub.send(b'prefixmessage')
        events = p.poll(1000)
        self.assertEqual(events, [])

    # Travis can't handle how much memory PyPy uses on this test
    @mark.skipif(
        (pypy and on_travis) or (sys.maxsize < 2**32),
        reason="only run on 64b and not on Travis."
    )
    def test_large_send(self):
        try:
            buf = os.urandom(1) * (2**31 + 1)
        except MemoryError:
            raise SkipTest()
        a, b = self.create_bound_pair()
        a.send(buf, copy=False)
        rcvd = b.recv()
        assert rcvd == buf

if have_gevent:
    import gevent

    class TestSocketGreen(GreenTest, TestSocket):
        test_bad_attr = GreenTest.skip_green
        test_close_after_destroy = GreenTest.skip_green

        def test_timeout(self):
            a, b = self.create_bound_pair()
            g = gevent.spawn_later(0.5, lambda: a.send(b'hi'))
            timeout = gevent.Timeout(0.1)
            timeout.start()
            self.assertRaises(gevent.Timeout, b.recv)
            g.kill()

        @mark.skipif(not hasattr(zmq, 'RCVTIMEO'), reason="requires RCVTIMEO")
        def test_warn_set_timeo(self):
            s = self.context.socket(zmq.REQ)
            with warnings.catch_warnings(record=True) as w:
                s.rcvtimeo = 5
            s.close()
            self.assertEqual(len(w), 1)
            self.assertEqual(w[0].category, UserWarning)

        @mark.skipif(not hasattr(zmq, 'SNDTIMEO'), reason="requires SNDTIMEO")
        def test_warn_get_timeo(self):
            s = self.context.socket(zmq.REQ)
            with warnings.catch_warnings(record=True) as w:
                s.sndtimeo
            s.close()
            self.assertEqual(len(w), 1)
            self.assertEqual(w[0].category, UserWarning)

pyzmq-16.0.2/zmq/tests/test_ssh.py

from zmq.ssh.tunnel import select_random_ports

def test_random_ports():
    for i in range(4096):
        ports = select_random_ports(10)
        assert len(ports) == 10
        for p in ports:
            assert ports.count(p) == 1

pyzmq-16.0.2/zmq/tests/test_version.py

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from unittest import TestCase import zmq from zmq.sugar import version class TestVersion(TestCase): def test_pyzmq_version(self): vs = zmq.pyzmq_version() vs2 = zmq.__version__ self.assertTrue(isinstance(vs, str)) if zmq.__revision__: self.assertEqual(vs, '@'.join([vs2, zmq.__revision__])) else: self.assertEqual(vs, vs2) if version.VERSION_EXTRA: self.assertTrue(version.VERSION_EXTRA in vs) self.assertTrue(version.VERSION_EXTRA in vs2) def test_pyzmq_version_info(self): info = zmq.pyzmq_version_info() self.assertTrue(isinstance(info, tuple)) for n in info[:3]: self.assertTrue(isinstance(n, int)) if version.VERSION_EXTRA: self.assertEqual(len(info), 4) self.assertEqual(info[-1], float('inf')) else: self.assertEqual(len(info), 3) def test_zmq_version_info(self): info = zmq.zmq_version_info() self.assertTrue(isinstance(info, tuple)) for n in info[:3]: self.assertTrue(isinstance(n, int)) def test_zmq_version(self): v = zmq.zmq_version() self.assertTrue(isinstance(v, str)) pyzmq-16.0.2/zmq/tests/test_win32_shim.py000066400000000000000000000032621301503633700203300ustar00rootroot00000000000000from __future__ import print_function import os from functools import wraps from zmq.tests import BaseZMQTestCase from zmq.utils.win32 import allow_interrupt def count_calls(f): @wraps(f) def _(*args, **kwds): try: return f(*args, **kwds) finally: _.__calls__ += 1 _.__calls__ = 0 return _ class TestWindowsConsoleControlHandler(BaseZMQTestCase): def test_handler(self): @count_calls def interrupt_polling(): print('Caught CTRL-C!') if os.name == 'nt': from ctypes import windll from ctypes.wintypes import BOOL, DWORD kernel32 = windll.LoadLibrary('kernel32') GenerateConsoleCtrlEvent = kernel32.GenerateConsoleCtrlEvent GenerateConsoleCtrlEvent.argtypes = (DWORD, DWORD) GenerateConsoleCtrlEvent.restype = BOOL try: # Simulate CTRL-C event while handler is active. with allow_interrupt(interrupt_polling): result = GenerateConsoleCtrlEvent(0, 0) if result == 0: raise WindowsError except KeyboardInterrupt: pass else: self.fail('Expecting `KeyboardInterrupt` exception!') # Make sure our handler was called. self.assertEqual(interrupt_polling.__calls__, 1) else: # On non-Windows systems, this utility is just a no-op! with allow_interrupt(interrupt_polling): pass self.assertEqual(interrupt_polling.__calls__, 0) pyzmq-16.0.2/zmq/tests/test_z85.py000066400000000000000000000042701301503633700167740ustar00rootroot00000000000000# -*- coding: utf8 -*- """Test Z85 encoding confirm values and roundtrip with test values from the reference implementation. """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License.
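# Illustrative sketch (not part of the original test file): Z85 maps every
# 4 input bytes to 5 ASCII characters, so a 32-byte CURVE key always encodes
# to a 40-character string. A hypothetical all-zero key makes the mapping easy
# to see, since index 0 of the Z85 alphabet is '0':
from zmq.utils import z85 as _z85_sketch
assert _z85_sketch.encode(b'\x00' * 32) == b'0' * 40
assert _z85_sketch.decode(b'0' * 40) == b'\x00' * 32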
from unittest import TestCase from zmq.utils import z85 class TestZ85(TestCase): def test_client_public(self): client_public = \ b"\xBB\x88\x47\x1D\x65\xE2\x65\x9B" \ b"\x30\xC5\x5A\x53\x21\xCE\xBB\x5A" \ b"\xAB\x2B\x70\xA3\x98\x64\x5C\x26" \ b"\xDC\xA2\xB2\xFC\xB4\x3F\xC5\x18" encoded = z85.encode(client_public) self.assertEqual(encoded, b"Yne@$w-vo}U?@Lns47E1%kR.o@n%FcmmsL/@{H8]yf7") decoded = z85.decode(encoded) self.assertEqual(decoded, server_public) def test_server_secret(self): server_secret = \ b"\x8E\x0B\xDD\x69\x76\x28\xB9\x1D" \ b"\x8F\x24\x55\x87\xEE\x95\xC5\xB0" \ b"\x4D\x48\x96\x3F\x79\x25\x98\x77" \ b"\xB4\x9C\xD9\x06\x3A\xEA\xD3\xB7" encoded = z85.encode(server_secret) self.assertEqual(encoded, b"JTKVSB%%)wK0E.X)V>+}o?pNmC{O&4W4b!Ni{Lh6") decoded = z85.decode(encoded) self.assertEqual(decoded, server_secret) pyzmq-16.0.2/zmq/tests/test_zmqstream.py000066400000000000000000000017021301503633700203660ustar00rootroot00000000000000# -*- coding: utf8 -*- # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import sys import time from unittest import TestCase import zmq from zmq.eventloop import ioloop, zmqstream class TestZMQStream(TestCase): def setUp(self): self.context = zmq.Context() self.socket = self.context.socket(zmq.REP) self.loop = ioloop.IOLoop.instance() self.stream = zmqstream.ZMQStream(self.socket) def tearDown(self): self.socket.close() self.context.term() def test_callable_check(self): """Ensure callable check works (py3k).""" self.stream.on_send(lambda *args: None) self.stream.on_recv(lambda *args: None) self.assertRaises(AssertionError, self.stream.on_recv, 1) self.assertRaises(AssertionError, self.stream.on_send, 1) self.assertRaises(AssertionError, self.stream.on_recv, zmq) pyzmq-16.0.2/zmq/utils/000077500000000000000000000000001301503633700147305ustar00rootroot00000000000000pyzmq-16.0.2/zmq/utils/__init__.py000066400000000000000000000000001301503633700170270ustar00rootroot00000000000000pyzmq-16.0.2/zmq/utils/buffers.pxd000066400000000000000000000232051301503633700171030ustar00rootroot00000000000000"""Python version-independent methods for C/Python buffers. This file was copied and adapted from mpi4py. Authors ------- * MinRK """ #----------------------------------------------------------------------------- # Copyright (c) 2010 Lisandro Dalcin # All rights reserved. # Used under BSD License: http://www.opensource.org/licenses/bsd-license.php # # Retrieval: # Jul 23, 2010 18:00 PST (r539) # http://code.google.com/p/mpi4py/source/browse/trunk/src/MPI/asbuffer.pxi # # Modifications from original: # Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Python includes. 
#----------------------------------------------------------------------------- # get version-independent aliases: cdef extern from "pyversion_compat.h": pass # Python 3 buffer interface (PEP 3118) cdef extern from "Python.h": int PY_MAJOR_VERSION int PY_MINOR_VERSION ctypedef int Py_ssize_t ctypedef struct PyMemoryViewObject: pass ctypedef struct Py_buffer: void *buf Py_ssize_t len int readonly char *format int ndim Py_ssize_t *shape Py_ssize_t *strides Py_ssize_t *suboffsets Py_ssize_t itemsize void *internal cdef enum: PyBUF_SIMPLE PyBUF_WRITABLE PyBUF_FORMAT PyBUF_ANY_CONTIGUOUS int PyObject_CheckBuffer(object) int PyObject_GetBuffer(object, Py_buffer *, int) except -1 void PyBuffer_Release(Py_buffer *) int PyBuffer_FillInfo(Py_buffer *view, object obj, void *buf, Py_ssize_t len, int readonly, int infoflags) except -1 object PyMemoryView_FromBuffer(Py_buffer *info) object PyMemoryView_FromObject(object) # Python 2 buffer interface (legacy) cdef extern from "Python.h": ctypedef void const_void "const void" Py_ssize_t Py_END_OF_BUFFER int PyObject_CheckReadBuffer(object) int PyObject_AsReadBuffer (object, const_void **, Py_ssize_t *) except -1 int PyObject_AsWriteBuffer(object, void **, Py_ssize_t *) except -1 object PyBuffer_FromMemory(void *ptr, Py_ssize_t s) object PyBuffer_FromReadWriteMemory(void *ptr, Py_ssize_t s) object PyBuffer_FromObject(object, Py_ssize_t offset, Py_ssize_t size) object PyBuffer_FromReadWriteObject(object, Py_ssize_t offset, Py_ssize_t size) #----------------------------------------------------------------------------- # asbuffer: C buffer from python object #----------------------------------------------------------------------------- cdef inline int memoryview_available(): return PY_MAJOR_VERSION >= 3 or (PY_MAJOR_VERSION >=2 and PY_MINOR_VERSION >= 7) cdef inline int oldstyle_available(): return PY_MAJOR_VERSION < 3 cdef inline int check_buffer(object ob): """Version independent check for whether an object is a buffer. Parameters ---------- object : object Any Python object Returns ------- int : 0 if no buffer interface, 3 if newstyle buffer interface, 2 if oldstyle. """ if PyObject_CheckBuffer(ob): return 3 if oldstyle_available(): return PyObject_CheckReadBuffer(ob) and 2 return 0 cdef inline object asbuffer(object ob, int writable, int format, void **base, Py_ssize_t *size, Py_ssize_t *itemsize): """Turn an object into a C buffer in a Python version-independent way. Parameters ---------- ob : object The object to be turned into a buffer. Must provide a Python Buffer interface writable : int Whether the resulting buffer should be allowed to write to the object. format : int The format of the buffer. See Python buffer docs. base : void ** The pointer that will be used to store the resulting C buffer. size : Py_ssize_t * The size of the buffer(s). itemsize : Py_ssize_t * The size of an item, if the buffer is non-contiguous. Returns ------- An object describing the buffer format. Generally a str, such as 'B'. 
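Examples
--------
An illustrative sketch (not in the original docstring); ``obj`` stands for
any Python object exposing a buffer interface::

    cdef void *buf = NULL
    cdef Py_ssize_t size = 0
    asbuffer(obj, 0, 0, &buf, &size, NULL)   # readonly view, no format info requested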
""" cdef void *bptr = NULL cdef Py_ssize_t blen = 0, bitemlen = 0 cdef Py_buffer view cdef int flags = PyBUF_SIMPLE cdef int mode = 0 bfmt = None mode = check_buffer(ob) if mode == 0: raise TypeError("%r does not provide a buffer interface."%ob) if mode == 3: flags = PyBUF_ANY_CONTIGUOUS if writable: flags |= PyBUF_WRITABLE if format: flags |= PyBUF_FORMAT PyObject_GetBuffer(ob, &view, flags) bptr = view.buf blen = view.len if format: if view.format != NULL: bfmt = view.format bitemlen = view.itemsize PyBuffer_Release(&view) else: # oldstyle if writable: PyObject_AsWriteBuffer(ob, &bptr, &blen) else: PyObject_AsReadBuffer(ob, &bptr, &blen) if format: try: # numpy.ndarray dtype = ob.dtype bfmt = dtype.char bitemlen = dtype.itemsize except AttributeError: try: # array.array bfmt = ob.typecode bitemlen = ob.itemsize except AttributeError: if isinstance(ob, bytes): bfmt = b"B" bitemlen = 1 else: # nothing found bfmt = None bitemlen = 0 if base: base[0] = bptr if size: size[0] = blen if itemsize: itemsize[0] = bitemlen if PY_MAJOR_VERSION >= 3 and bfmt is not None: return bfmt.decode('ascii') return bfmt cdef inline object asbuffer_r(object ob, void **base, Py_ssize_t *size): """Wrapper for standard calls to asbuffer with a readonly buffer.""" asbuffer(ob, 0, 0, base, size, NULL) return ob cdef inline object asbuffer_w(object ob, void **base, Py_ssize_t *size): """Wrapper for standard calls to asbuffer with a writable buffer.""" asbuffer(ob, 1, 0, base, size, NULL) return ob #------------------------------------------------------------------------------ # frombuffer: python buffer/view from C buffer #------------------------------------------------------------------------------ cdef inline object frombuffer_3(void *ptr, Py_ssize_t s, int readonly): """Python 3 version of frombuffer. This is the Python 3 model, but will work on Python >= 2.6. Currently, we use it only on >= 3.0. """ cdef Py_buffer pybuf cdef Py_ssize_t *shape = [s] cdef str astr="" PyBuffer_FillInfo(&pybuf, astr, ptr, s, readonly, PyBUF_SIMPLE) pybuf.format = "B" pybuf.shape = shape pybuf.ndim = 1 return PyMemoryView_FromBuffer(&pybuf) cdef inline object frombuffer_2(void *ptr, Py_ssize_t s, int readonly): """Python 2 version of frombuffer. This must be used for Python <= 2.6, but we use it for all Python < 3. """ if oldstyle_available(): if readonly: return PyBuffer_FromMemory(ptr, s) else: return PyBuffer_FromReadWriteMemory(ptr, s) else: raise NotImplementedError("Old style buffers not available.") cdef inline object frombuffer(void *ptr, Py_ssize_t s, int readonly): """Create a Python Buffer/View of a C array. Parameters ---------- ptr : void * Pointer to the array to be copied. s : size_t Length of the buffer. readonly : int whether the resulting object should be allowed to write to the buffer. Returns ------- Python Buffer/View of the C buffer. 
""" # oldstyle first priority for now if oldstyle_available(): return frombuffer_2(ptr, s, readonly) else: return frombuffer_3(ptr, s, readonly) cdef inline object frombuffer_r(void *ptr, Py_ssize_t s): """Wrapper for readonly view frombuffer.""" return frombuffer(ptr, s, 1) cdef inline object frombuffer_w(void *ptr, Py_ssize_t s): """Wrapper for writable view frombuffer.""" return frombuffer(ptr, s, 0) #------------------------------------------------------------------------------ # viewfromobject: python buffer/view from python object, refcounts intact # frombuffer(asbuffer(obj)) would lose track of refs #------------------------------------------------------------------------------ cdef inline object viewfromobject(object obj, int readonly): """Construct a Python Buffer/View object from another Python object. This work in a Python version independent manner. Parameters ---------- obj : object The input object to be cast as a buffer readonly : int Whether the result should be prevented from overwriting the original. Returns ------- Buffer/View of the original object. """ if not memoryview_available(): if readonly: return PyBuffer_FromObject(obj, 0, Py_END_OF_BUFFER) else: return PyBuffer_FromReadWriteObject(obj, 0, Py_END_OF_BUFFER) else: return PyMemoryView_FromObject(obj) cdef inline object viewfromobject_r(object obj): """Wrapper for readonly viewfromobject.""" return viewfromobject(obj, 1) cdef inline object viewfromobject_w(object obj): """Wrapper for writable viewfromobject.""" return viewfromobject(obj, 0) pyzmq-16.0.2/zmq/utils/constant_names.py000066400000000000000000000173561301503633700203320ustar00rootroot00000000000000"""0MQ Constant names""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. # dictionaries of constants new or removed in particular versions new_in = { (2,2,0) : [ 'RCVTIMEO', 'SNDTIMEO', ], (3,2,2) : [ # errnos 'EMSGSIZE', 'EAFNOSUPPORT', 'ENETUNREACH', 'ECONNABORTED', 'ECONNRESET', 'ENOTCONN', 'ETIMEDOUT', 'EHOSTUNREACH', 'ENETRESET', # ctx opts 'IO_THREADS', 'MAX_SOCKETS', 'IO_THREADS_DFLT', 'MAX_SOCKETS_DFLT', # socket opts 'IPV4ONLY', 'LAST_ENDPOINT', 'ROUTER_BEHAVIOR', 'ROUTER_MANDATORY', 'FAIL_UNROUTABLE', 'TCP_KEEPALIVE', 'TCP_KEEPALIVE_CNT', 'TCP_KEEPALIVE_IDLE', 'TCP_KEEPALIVE_INTVL', 'DELAY_ATTACH_ON_CONNECT', 'XPUB_VERBOSE', # msg opts 'MORE', 'EVENT_CONNECTED', 'EVENT_CONNECT_DELAYED', 'EVENT_CONNECT_RETRIED', 'EVENT_LISTENING', 'EVENT_BIND_FAILED', 'EVENT_ACCEPTED', 'EVENT_ACCEPT_FAILED', 'EVENT_CLOSED', 'EVENT_CLOSE_FAILED', 'EVENT_DISCONNECTED', 'EVENT_ALL', ], (4,0,0) : [ # socket types 'STREAM', # socket opts 'IMMEDIATE', 'ROUTER_RAW', 'IPV6', 'MECHANISM', 'PLAIN_SERVER', 'PLAIN_USERNAME', 'PLAIN_PASSWORD', 'CURVE_SERVER', 'CURVE_PUBLICKEY', 'CURVE_SECRETKEY', 'CURVE_SERVERKEY', 'PROBE_ROUTER', 'REQ_RELAXED', 'REQ_CORRELATE', 'CONFLATE', 'ZAP_DOMAIN', # security 'NULL', 'PLAIN', 'CURVE', # events 'EVENT_MONITOR_STOPPED', ], (4,1,0) : [ # ctx opts 'SOCKET_LIMIT', 'THREAD_PRIORITY', 'THREAD_PRIORITY_DFLT', 'THREAD_SCHED_POLICY', 'THREAD_SCHED_POLICY_DFLT', # socket opts 'ROUTER_HANDOVER', 'TOS', 'IPC_FILTER_PID', 'IPC_FILTER_UID', 'IPC_FILTER_GID', 'CONNECT_RID', 'GSSAPI_SERVER', 'GSSAPI_PRINCIPAL', 'GSSAPI_SERVICE_PRINCIPAL', 'GSSAPI_PLAINTEXT', 'HANDSHAKE_IVL', 'XPUB_NODROP', 'SOCKS_PROXY', # msg opts 'SRCFD', 'SHARED', # security 'GSSAPI', ], (4,2,0) : [ # polling 'POLLPRI', ] } draft_in = { (4,2,0): [ # socket types 'SERVER', 'CLIENT', 'RADIO', 'DISH', 'GATHER', 'SCATTER', 'DGRAM', # ctx options 
'BLOCKY', # socket options 'XPUB_MANUAL', 'XPUB_WELCOME_MSG', 'STREAM_NOTIFY', 'INVERT_MATCHING', 'HEARTBEAT_IVL', 'HEARTBEAT_TTL', 'HEARTBEAT_TIMEOUT', 'XPUB_VERBOSER', 'CONNECT_TIMEOUT', 'TCP_MAXRT', 'THREAD_SAFE', 'MULTICAST_MAXTPDU', 'VMCI_BUFFER_SIZE', 'VMCI_BUFFER_MIN_SIZE', 'VMCI_BUFFER_MAX_SIZE', 'VMCI_CONNECT_TIMEOUT', 'USE_FD', ] } removed_in = { (3,2,2) : [ 'UPSTREAM', 'DOWNSTREAM', 'HWM', 'SWAP', 'MCAST_LOOP', 'RECOVERY_IVL_MSEC', ] } # collections of zmq constant names based on their role # base names have no specific use # opt names are validated in get/set methods of various objects base_names = [ # base 'VERSION', 'VERSION_MAJOR', 'VERSION_MINOR', 'VERSION_PATCH', 'NOBLOCK', 'DONTWAIT', 'POLLIN', 'POLLOUT', 'POLLERR', 'POLLPRI', 'SNDMORE', 'STREAMER', 'FORWARDER', 'QUEUE', 'IO_THREADS_DFLT', 'MAX_SOCKETS_DFLT', 'POLLITEMS_DFLT', 'THREAD_PRIORITY_DFLT', 'THREAD_SCHED_POLICY_DFLT', # socktypes 'PAIR', 'PUB', 'SUB', 'REQ', 'REP', 'DEALER', 'ROUTER', 'XREQ', 'XREP', 'PULL', 'PUSH', 'XPUB', 'XSUB', 'UPSTREAM', 'DOWNSTREAM', 'STREAM', 'SERVER', 'CLIENT', 'RADIO', 'DISH', 'GATHER', 'SCATTER', 'DGRAM', # events 'EVENT_CONNECTED', 'EVENT_CONNECT_DELAYED', 'EVENT_CONNECT_RETRIED', 'EVENT_LISTENING', 'EVENT_BIND_FAILED', 'EVENT_ACCEPTED', 'EVENT_ACCEPT_FAILED', 'EVENT_CLOSED', 'EVENT_CLOSE_FAILED', 'EVENT_DISCONNECTED', 'EVENT_ALL', 'EVENT_MONITOR_STOPPED', # security 'NULL', 'PLAIN', 'CURVE', 'GSSAPI', ## ERRNO # Often used (these are else in errno.) 'EAGAIN', 'EINVAL', 'EFAULT', 'ENOMEM', 'ENODEV', 'EMSGSIZE', 'EAFNOSUPPORT', 'ENETUNREACH', 'ECONNABORTED', 'ECONNRESET', 'ENOTCONN', 'ETIMEDOUT', 'EHOSTUNREACH', 'ENETRESET', # For Windows compatibility 'HAUSNUMERO', 'ENOTSUP', 'EPROTONOSUPPORT', 'ENOBUFS', 'ENETDOWN', 'EADDRINUSE', 'EADDRNOTAVAIL', 'ECONNREFUSED', 'EINPROGRESS', 'ENOTSOCK', # 0MQ Native 'EFSM', 'ENOCOMPATPROTO', 'ETERM', 'EMTHREAD', ] int64_sockopt_names = [ 'AFFINITY', 'MAXMSGSIZE', # sockopts removed in 3.0.0 'HWM', 'SWAP', 'MCAST_LOOP', 'RECOVERY_IVL_MSEC', # new in 4.2 'VMCI_BUFFER_SIZE', 'VMCI_BUFFER_MIN_SIZE', 'VMCI_BUFFER_MAX_SIZE', ] bytes_sockopt_names = [ 'IDENTITY', 'SUBSCRIBE', 'UNSUBSCRIBE', 'LAST_ENDPOINT', 'TCP_ACCEPT_FILTER', 'PLAIN_USERNAME', 'PLAIN_PASSWORD', 'CURVE_PUBLICKEY', 'CURVE_SECRETKEY', 'CURVE_SERVERKEY', 'ZAP_DOMAIN', 'CONNECT_RID', 'GSSAPI_PRINCIPAL', 'GSSAPI_SERVICE_PRINCIPAL', 'SOCKS_PROXY', 'XPUB_WELCOME_MSG', ] fd_sockopt_names = [ 'FD', ] int_sockopt_names = [ # sockopts 'RECONNECT_IVL_MAX', # sockopts new in 2.2.0 'SNDTIMEO', 'RCVTIMEO', # new in 3.x 'SNDHWM', 'RCVHWM', 'MULTICAST_HOPS', 'IPV4ONLY', 'ROUTER_BEHAVIOR', 'TCP_KEEPALIVE', 'TCP_KEEPALIVE_CNT', 'TCP_KEEPALIVE_IDLE', 'TCP_KEEPALIVE_INTVL', 'DELAY_ATTACH_ON_CONNECT', 'XPUB_VERBOSE', 'EVENTS', 'TYPE', 'LINGER', 'RECONNECT_IVL', 'BACKLOG', 'ROUTER_MANDATORY', 'FAIL_UNROUTABLE', 'ROUTER_RAW', 'IMMEDIATE', 'IPV6', 'MECHANISM', 'PLAIN_SERVER', 'CURVE_SERVER', 'PROBE_ROUTER', 'REQ_RELAXED', 'REQ_CORRELATE', 'CONFLATE', 'ROUTER_HANDOVER', 'TOS', 'IPC_FILTER_PID', 'IPC_FILTER_UID', 'IPC_FILTER_GID', 'GSSAPI_SERVER', 'GSSAPI_PLAINTEXT', 'HANDSHAKE_IVL', 'XPUB_NODROP', # new in 4.2 'XPUB_MANUAL', 'STREAM_NOTIFY', 'INVERT_MATCHING', 'XPUB_VERBOSER', 'HEARTBEAT_IVL', 'HEARTBEAT_TTL', 'HEARTBEAT_TIMEOUT', 'CONNECT_TIMEOUT', 'TCP_MAXRT', 'THREAD_SAFE', 'MULTICAST_MAXTPDU', 'VMCI_CONNECT_TIMEOUT', 'USE_FD', ] switched_sockopt_names = [ 'RATE', 'RECOVERY_IVL', 'SNDBUF', 'RCVBUF', 'RCVMORE', ] ctx_opt_names = [ 'IO_THREADS', 'MAX_SOCKETS', 'SOCKET_LIMIT', 'THREAD_PRIORITY', 
'THREAD_SCHED_POLICY', 'BLOCKY', ] msg_opt_names = [ 'MORE', 'SRCFD', 'SHARED', ] from itertools import chain all_names = list(chain( base_names, ctx_opt_names, bytes_sockopt_names, fd_sockopt_names, int_sockopt_names, int64_sockopt_names, switched_sockopt_names, msg_opt_names, )) del chain def no_prefix(name): """does the given constant have a ZMQ_ prefix?""" return name.startswith('E') and not name.startswith('EVENT') pyzmq-16.0.2/zmq/utils/garbage.py000066400000000000000000000125271301503633700167010ustar00rootroot00000000000000"""Garbage collection thread for representing zmq refcount of Python objects used in zero-copy sends. """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import atexit import struct from os import getpid from collections import namedtuple from threading import Thread, Event, Lock import warnings import zmq gcref = namedtuple('gcref', ['obj', 'event']) class GarbageCollectorThread(Thread): """Thread in which garbage collection actually happens.""" def __init__(self, gc): super(GarbageCollectorThread, self).__init__() self.gc = gc self.daemon = True self.pid = getpid() self.ready = Event() def run(self): # detect fork at beginning of the thread if getpid is None or getpid() != self.pid: self.ready.set() return try: s = self.gc.context.socket(zmq.PULL) s.linger = 0 s.bind(self.gc.url) finally: self.ready.set() while True: # detect fork if getpid is None or getpid() != self.pid: return msg = s.recv() if msg == b'DIE': break fmt = 'L' if len(msg) == 4 else 'Q' key = struct.unpack(fmt, msg)[0] tup = self.gc.refs.pop(key, None) if tup and tup.event: tup.event.set() del tup s.close() class GarbageCollector(object): """PyZMQ Garbage Collector Used for representing the reference held by libzmq during zero-copy sends. This object holds a dictionary, keyed by Python id, of the Python objects whose memory are currently in use by zeromq. When zeromq is done with the memory, it sends a message on an inproc PUSH socket containing the packed size_t (32 or 64-bit unsigned int), which is the key in the dict. When the PULL socket in the gc thread receives that message, the reference is popped from the dict, and any tracker events that should be signaled fire. """ refs = None _context = None _lock = None url = "inproc://pyzmq.gc.01" def __init__(self, context=None): super(GarbageCollector, self).__init__() self.refs = {} self.pid = None self.thread = None self._context = context self._lock = Lock() self._stay_down = False atexit.register(self._atexit) @property def context(self): if self._context is None: if Thread.__module__.startswith('gevent'): # gevent has monkey-patched Thread, use green Context from zmq import green self._context = green.Context() else: self._context = zmq.Context() return self._context @context.setter def context(self, ctx): if self.is_alive(): if self.refs: warnings.warn("Replacing gc context while gc is running", RuntimeWarning) self.stop() self._context = ctx def _atexit(self): """atexit callback sets _stay_down flag so that gc doesn't try to start up again in other atexit handlers """ self._stay_down = True self.stop() def stop(self): """stop the garbage-collection thread""" if not self.is_alive(): return self._stop() def _stop(self): push = self.context.socket(zmq.PUSH) push.connect(self.url) push.send(b'DIE') push.close() self.thread.join() self.context.term() self.refs.clear() self.context = None def start(self): """Start a new garbage collection thread. Creates a new zmq Context used for garbage collection. 
Under most circumstances, this will only be called once per process. """ if self.thread is not None and self.pid != getpid(): # It's re-starting, must free earlier thread's context # since a fork probably broke it self._stop() self.pid = getpid() self.refs = {} self.thread = GarbageCollectorThread(self) self.thread.start() self.thread.ready.wait() def is_alive(self): """Is the garbage collection thread currently running? Includes checks for process shutdown or fork. """ if (getpid is None or getpid() != self.pid or self.thread is None or not self.thread.is_alive() ): return False return True def store(self, obj, event=None): """store an object and (optionally) event for zero-copy""" if not self.is_alive(): if self._stay_down: return 0 # safely start the gc thread # use lock and double check, # so we don't start multiple threads with self._lock: if not self.is_alive(): self.start() tup = gcref(obj, event) theid = id(tup) self.refs[theid] = tup return theid def __del__(self): if not self.is_alive(): return try: self.stop() except Exception as e: raise (e) gc = GarbageCollector() pyzmq-16.0.2/zmq/utils/getpid_compat.h000066400000000000000000000001471301503633700177220ustar00rootroot00000000000000#ifdef _WIN32 #include #define getpid _getpid #else #include #endif pyzmq-16.0.2/zmq/utils/interop.py000066400000000000000000000013051301503633700167610ustar00rootroot00000000000000"""Utils for interoperability with other libraries. Just CFFI pointer casting for now. """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. try: long except NameError: long = int # Python 3 def cast_int_addr(n): """Cast an address to a Python int This could be a Python integer or a CFFI pointer """ if isinstance(n, (int, long)): return n try: import cffi except ImportError: pass else: # from pyzmq, this is an FFI void * ffi = cffi.FFI() if isinstance(n, ffi.CData): return int(ffi.cast("size_t", n)) raise ValueError("Cannot cast %r to int" % n) pyzmq-16.0.2/zmq/utils/ipcmaxlen.h000066400000000000000000000007061301503633700170640ustar00rootroot00000000000000/* Platform-independant detection of IPC path max length Copyright (c) 2012 Godefroid Chapelle Distributed under the terms of the New BSD License. The full license is in the file COPYING.BSD, distributed as part of this software. */ #if defined(HAVE_SYS_UN_H) #include "sys/un.h" int get_ipc_path_max_len(void) { struct sockaddr_un *dummy; return sizeof(dummy->sun_path) - 1; } #else int get_ipc_path_max_len(void) { return 0; } #endif pyzmq-16.0.2/zmq/utils/jsonapi.py000066400000000000000000000025611301503633700167510ustar00rootroot00000000000000"""Priority based json library imports. Always serializes to bytes instead of unicode for zeromq compatibility on Python 2 and 3. Use ``jsonapi.loads()`` and ``jsonapi.dumps()`` for guaranteed symmetry. Priority: ``simplejson`` > ``jsonlib2`` > stdlib ``json`` ``jsonapi.loads/dumps`` provide kwarg-compatibility with stdlib json. ``jsonapi.jsonmod`` will be the module of the actual underlying implementation. """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from zmq.utils.strtypes import bytes, unicode jsonmod = None priority = ['simplejson', 'jsonlib2', 'json'] for mod in priority: try: jsonmod = __import__(mod) except ImportError: pass else: break def dumps(o, **kwargs): """Serialize object to JSON bytes (utf-8). See jsonapi.jsonmod.dumps for details on kwargs. 
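Example (an illustrative sketch, not from the original docstring)::

    jsonapi.dumps({'a': [1, 2]})  # -> b'{"a":[1,2]}' (compact separators, utf-8 bytes)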
""" if 'separators' not in kwargs: kwargs['separators'] = (',', ':') s = jsonmod.dumps(o, **kwargs) if isinstance(s, unicode): s = s.encode('utf8') return s def loads(s, **kwargs): """Load object from JSON bytes (utf-8). See jsonapi.jsonmod.loads for details on kwargs. """ if str is unicode and isinstance(s, bytes): s = s.decode('utf8') return jsonmod.loads(s, **kwargs) __all__ = ['jsonmod', 'dumps', 'loads'] pyzmq-16.0.2/zmq/utils/monitor.py000066400000000000000000000040501301503633700167700ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Module holding utility and convenience functions for zmq event monitoring.""" # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import struct import zmq from zmq.error import _check_version def parse_monitor_message(msg): """decode zmq_monitor event messages. Parameters ---------- msg : list(bytes) zmq multipart message that has arrived on a monitor PAIR socket. First frame is:: 16 bit event id 32 bit event value no padding Second frame is the endpoint as a bytestring Returns ------- event : dict event description as dict with the keys `event`, `value`, and `endpoint`. """ if len(msg) != 2 or len(msg[0]) != 6: raise RuntimeError("Invalid event message format: %s" % msg) event = {'event': struct.unpack("=hi", msg[0])[0], 'value': struct.unpack("=hi", msg[0])[1], 'endpoint': msg[1]} return event def recv_monitor_message(socket, flags=0): """Receive and decode the given raw message from the monitoring socket and return a dict. Requires libzmq ≥ 4.0 The returned dict will have the following entries: event : int, the event id as described in libzmq.zmq_socket_monitor value : int, the event value associated with the event, see libzmq.zmq_socket_monitor endpoint : string, the affected endpoint Parameters ---------- socket : zmq PAIR socket The PAIR socket (created by other.get_monitor_socket()) on which to recv the message flags : bitfield (int) standard zmq recv flags Returns ------- event : dict event description as dict with the keys `event`, `value`, and `endpoint`. 
""" _check_version((4,0), 'libzmq event API') # will always return a list msg = socket.recv_multipart(flags) # 4.0-style event API return parse_monitor_message(msg) __all__ = ['parse_monitor_message', 'recv_monitor_message'] pyzmq-16.0.2/zmq/utils/pyversion_compat.h000066400000000000000000000024541301503633700205070ustar00rootroot00000000000000#include "Python.h" #if PY_VERSION_HEX < 0x02070000 #define PyMemoryView_FromBuffer(info) (PyErr_SetString(PyExc_NotImplementedError, \ "new buffer interface is not available"), (PyObject *)NULL) #define PyMemoryView_FromObject(object) (PyErr_SetString(PyExc_NotImplementedError, \ "new buffer interface is not available"), (PyObject *)NULL) #endif #if PY_VERSION_HEX >= 0x03000000 // for buffers #define Py_END_OF_BUFFER ((Py_ssize_t) 0) #define PyObject_CheckReadBuffer(object) (0) #define PyBuffer_FromMemory(ptr, s) (PyErr_SetString(PyExc_NotImplementedError, \ "old buffer interface is not available"), (PyObject *)NULL) #define PyBuffer_FromReadWriteMemory(ptr, s) (PyErr_SetString(PyExc_NotImplementedError, \ "old buffer interface is not available"), (PyObject *)NULL) #define PyBuffer_FromObject(object, offset, size) (PyErr_SetString(PyExc_NotImplementedError, \ "old buffer interface is not available"), (PyObject *)NULL) #define PyBuffer_FromReadWriteObject(object, offset, size) (PyErr_SetString(PyExc_NotImplementedError, \ "old buffer interface is not available"), (PyObject *)NULL) #endif pyzmq-16.0.2/zmq/utils/sixcerpt.py000066400000000000000000000035361301503633700171520ustar00rootroot00000000000000"""Excerpts of six.py""" # Copyright (C) 2010-2014 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import sys # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 if PY3: def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) pyzmq-16.0.2/zmq/utils/strtypes.py000066400000000000000000000022621301503633700172010ustar00rootroot00000000000000"""Declare basic string types unambiguously for various Python versions. 
Authors ------- * MinRK """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import sys if sys.version_info[0] >= 3: bytes = bytes unicode = str basestring = (bytes, unicode) else: unicode = unicode bytes = str basestring = basestring def cast_bytes(s, encoding='utf8', errors='strict'): """cast unicode or bytes to bytes""" if isinstance(s, bytes): return s elif isinstance(s, unicode): return s.encode(encoding, errors) else: raise TypeError("Expected unicode or bytes, got %r" % s) def cast_unicode(s, encoding='utf8', errors='strict'): """cast bytes or unicode to unicode""" if isinstance(s, bytes): return s.decode(encoding, errors) elif isinstance(s, unicode): return s else: raise TypeError("Expected unicode or bytes, got %r" % s) # give short 'b' alias for cast_bytes, so that we can use fake b('stuff') # to simulate b'stuff' b = asbytes = cast_bytes u = cast_unicode __all__ = ['asbytes', 'bytes', 'unicode', 'basestring', 'b', 'u', 'cast_bytes', 'cast_unicode'] pyzmq-16.0.2/zmq/utils/win32.py000066400000000000000000000120041301503633700162410ustar00rootroot00000000000000"""Win32 compatibility utilities.""" #----------------------------------------------------------------------------- # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. #----------------------------------------------------------------------------- import os # No-op implementation for other platforms. class _allow_interrupt(object): """Utility for fixing CTRL-C events on Windows. On Windows, the Python interpreter intercepts CTRL-C events in order to translate them into ``KeyboardInterrupt`` exceptions. It (presumably) does this by setting a flag in its "console control handler" and checking it later at a convenient location in the interpreter. However, when the Python interpreter is blocked waiting for the ZMQ poll operation to complete, it must wait for ZMQ's ``select()`` operation to complete before translating the CTRL-C event into the ``KeyboardInterrupt`` exception. The only way to fix this seems to be to add our own "console control handler" and perform some application-defined operation that will unblock the ZMQ polling operation in order to force ZMQ to pass control back to the Python interpreter. This context manager performs all that Windows-y stuff, providing you with a hook that is called when a CTRL-C event is intercepted. This hook allows you to unblock your ZMQ poll operation immediately, which will then result in the expected ``KeyboardInterrupt`` exception. Without this context manager, your ZMQ-based application will not respond normally to CTRL-C events on Windows. If a CTRL-C event occurs while blocked on ZMQ socket polling, the translation to a ``KeyboardInterrupt`` exception will be delayed until the I/O completes and control returns to the Python interpreter (this may never happen if you use an infinite timeout). A no-op implementation is provided on non-Win32 systems to avoid the application from having to conditionally use it. Example usage: .. sourcecode:: python def stop_my_application(): # ... with allow_interrupt(stop_my_application): # main polling loop. In a typical ZMQ application, you would use the "self pipe trick" to send message to a ``PAIR`` socket in order to interrupt your blocking socket polling operation. In a Tornado event loop, you can use the ``IOLoop.stop`` method to unblock your I/O loop. """ def __init__(self, action=None): """Translate ``action`` into a CTRL-C handler. 
``action`` is a callable that takes no arguments and returns no value (returned value is ignored). It must *NEVER* raise an exception. If unspecified, a no-op will be used. """ self._init_action(action) def _init_action(self, action): pass def __enter__(self): return self def __exit__(self, *args): return if os.name == 'nt': from ctypes import WINFUNCTYPE, windll from ctypes.wintypes import BOOL, DWORD kernel32 = windll.LoadLibrary('kernel32') PHANDLER_ROUTINE = WINFUNCTYPE(BOOL, DWORD) SetConsoleCtrlHandler = kernel32.SetConsoleCtrlHandler SetConsoleCtrlHandler.argtypes = (PHANDLER_ROUTINE, BOOL) SetConsoleCtrlHandler.restype = BOOL class allow_interrupt(_allow_interrupt): __doc__ = _allow_interrupt.__doc__ def _init_action(self, action): if action is None: action = lambda: None self.action = action @PHANDLER_ROUTINE def handle(event): if event == 0: # CTRL_C_EVENT action() # Typical C implementations would return 1 to indicate that # the event was processed and other control handlers in the # stack should not be executed. However, that would # prevent the Python interpreter's handler from translating # CTRL-C to a `KeyboardInterrupt` exception, so we pretend # that we didn't handle it. return 0 self.handle = handle def __enter__(self): """Install the custom CTRL-C handler.""" result = SetConsoleCtrlHandler(self.handle, 1) if result == 0: # Have standard library automatically call `GetLastError()` and # `FormatMessage()` into a nice exception object :-) raise WindowsError() def __exit__(self, *args): """Remove the custom CTRL-C handler.""" result = SetConsoleCtrlHandler(self.handle, 0) if result == 0: # Have standard library automatically call `GetLastError()` and # `FormatMessage()` into a nice exception object :-) raise WindowsError() else: class allow_interrupt(_allow_interrupt): __doc__ = _allow_interrupt.__doc__ pass pyzmq-16.0.2/zmq/utils/z85.py000066400000000000000000000037201301503633700157320ustar00rootroot00000000000000"""Python implementation of Z85 85-bit encoding Z85 encoding is a plaintext encoding for a bytestring interpreted as 32bit integers. Since the chunks are 32bit, a bytestring must be a multiple of 4 bytes. See ZMQ RFC 32 for details. """ # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. import sys import struct PY3 = sys.version_info[0] >= 3 # Z85CHARS is the base 85 symbol table Z85CHARS = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.-:+=^!/*?&<>()[]{}@%$#" # Z85MAP maps each symbol in Z85CHARS to its integer index in [0,84] Z85MAP = dict([(c, idx) for idx, c in enumerate(Z85CHARS)]) _85s = [ 85**i for i in range(5) ][::-1] def encode(rawbytes): """encode raw bytes into Z85""" # Accepts only bytestrings whose length is a multiple of 4 bytes if len(rawbytes) % 4: raise ValueError("length must be multiple of 4, not %i" % len(rawbytes)) nvalues = len(rawbytes) / 4 values = struct.unpack('>%dI' % nvalues, rawbytes) encoded = [] for v in values: for offset in _85s: encoded.append(Z85CHARS[(v // offset) % 85]) # In Python 3, encoded is a list of integers (obviously?!)
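# (added note, not in the original source) on Python 3, indexing a bytes object yields ints, so bytes(encoded) below reassembles them into a bytestring; on Python 2 the same indexing yields 1-character strs, hence the b''.join branch.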
if PY3: return bytes(encoded) else: return b''.join(encoded) def decode(z85bytes): """decode Z85 bytes to raw bytes, accepts ASCII string""" if PY3 and isinstance(z85bytes, str): try: z85bytes = z85bytes.encode('ascii') except UnicodeEncodeError: raise ValueError('string argument should contain only ASCII characters') if len(z85bytes) % 5: raise ValueError("Z85 length must be multiple of 5, not %i" % len(z85bytes)) nvalues = len(z85bytes) / 5 values = [] for i in range(0, len(z85bytes), 5): value = 0 for j, offset in enumerate(_85s): value += Z85MAP[z85bytes[i+j]] * offset values.append(value) return struct.pack('>%dI' % nvalues, *values) pyzmq-16.0.2/zmq/utils/zmq_compat.h000066400000000000000000000045731301503633700172640ustar00rootroot00000000000000//----------------------------------------------------------------------------- // Copyright (c) 2010 Brian Granger, Min Ragan-Kelley // // Distributed under the terms of the New BSD License. The full license is in // the file COPYING.BSD, distributed as part of this software. //----------------------------------------------------------------------------- #if defined(_MSC_VER) #define pyzmq_int64_t __int64 #else #include #define pyzmq_int64_t int64_t #endif #include "zmq.h" // version compatibility for constants: #include "zmq_constants.h" #define _missing (-1) // define fd type (from libzmq's fd.hpp) #ifdef _WIN32 #if defined(_MSC_VER) && _MSC_VER <= 1400 #define ZMQ_FD_T UINT_PTR #else #define ZMQ_FD_T SOCKET #endif #else #define ZMQ_FD_T int #endif // use unambiguous aliases for zmq_send/recv functions #if ZMQ_VERSION_MAJOR >= 4 // nothing to remove #if ZMQ_VERSION_MINOR == 0 // zmq 4.1 deprecates zmq_utils.h // we only get zmq_curve_keypair from it #include "zmq_utils.h" #endif #else #define zmq_curve_keypair(z85_public_key, z85_secret_key) _missing #endif #if ZMQ_VERSION_MAJOR >= 4 && ZMQ_VERSION_MINOR >= 1 // nothing to remove #else #define zmq_msg_gets(msg, prop) _missing #define zmq_has(capability) _missing #endif #if ZMQ_VERSION_MAJOR >= 3 #define zmq_sendbuf zmq_send #define zmq_recvbuf zmq_recv // 3.x deprecations - these symbols haven't been removed, // but let's protect against their planned removal #define zmq_device(device_type, isocket, osocket) _missing #define zmq_init(io_threads) ((void*)NULL) #define zmq_term zmq_ctx_destroy #else #define zmq_ctx_set(ctx, opt, val) _missing #define zmq_ctx_get(ctx, opt) _missing #define zmq_ctx_destroy zmq_term #define zmq_ctx_new() ((void*)NULL) #define zmq_proxy(a,b,c) _missing #define zmq_disconnect(s, addr) _missing #define zmq_unbind(s, addr) _missing #define zmq_msg_more(msg) _missing #define zmq_msg_get(msg, opt) _missing #define zmq_msg_set(msg, opt, val) _missing #define zmq_msg_send(msg, s, flags) zmq_send(s, msg, flags) #define zmq_msg_recv(msg, s, flags) zmq_recv(s, msg, flags) #define zmq_sendbuf(s, buf, len, flags) _missing #define zmq_recvbuf(s, buf, len, flags) _missing #define zmq_socket_monitor(s, addr, flags) _missing #endif pyzmq-16.0.2/zmq/utils/zmq_constants.h000066400000000000000000000332421301503633700200100ustar00rootroot00000000000000#ifndef _PYZMQ_CONSTANT_DEFS #define _PYZMQ_CONSTANT_DEFS #ifdef ZMQ_BUILD_DRAFT_API #define PYZMQ_DRAFT_API 1 #else #define PYZMQ_DRAFT_API 0 #endif #define _PYZMQ_UNDEFINED (-9999) #ifndef ZMQ_VERSION #define ZMQ_VERSION (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_VERSION_MAJOR #define ZMQ_VERSION_MAJOR (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_VERSION_MINOR #define ZMQ_VERSION_MINOR (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_VERSION_PATCH #define 
ZMQ_VERSION_PATCH (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_NOBLOCK #define ZMQ_NOBLOCK (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_DONTWAIT #define ZMQ_DONTWAIT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_POLLIN #define ZMQ_POLLIN (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_POLLOUT #define ZMQ_POLLOUT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_POLLERR #define ZMQ_POLLERR (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_POLLPRI #define ZMQ_POLLPRI (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SNDMORE #define ZMQ_SNDMORE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_STREAMER #define ZMQ_STREAMER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_FORWARDER #define ZMQ_FORWARDER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_QUEUE #define ZMQ_QUEUE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_IO_THREADS_DFLT #define ZMQ_IO_THREADS_DFLT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_MAX_SOCKETS_DFLT #define ZMQ_MAX_SOCKETS_DFLT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_POLLITEMS_DFLT #define ZMQ_POLLITEMS_DFLT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_THREAD_PRIORITY_DFLT #define ZMQ_THREAD_PRIORITY_DFLT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_THREAD_SCHED_POLICY_DFLT #define ZMQ_THREAD_SCHED_POLICY_DFLT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_PAIR #define ZMQ_PAIR (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_PUB #define ZMQ_PUB (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SUB #define ZMQ_SUB (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_REQ #define ZMQ_REQ (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_REP #define ZMQ_REP (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_DEALER #define ZMQ_DEALER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_ROUTER #define ZMQ_ROUTER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_XREQ #define ZMQ_XREQ (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_XREP #define ZMQ_XREP (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_PULL #define ZMQ_PULL (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_PUSH #define ZMQ_PUSH (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_XPUB #define ZMQ_XPUB (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_XSUB #define ZMQ_XSUB (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_UPSTREAM #define ZMQ_UPSTREAM (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_DOWNSTREAM #define ZMQ_DOWNSTREAM (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_STREAM #define ZMQ_STREAM (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SERVER #define ZMQ_SERVER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_CLIENT #define ZMQ_CLIENT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_RADIO #define ZMQ_RADIO (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_DISH #define ZMQ_DISH (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_GATHER #define ZMQ_GATHER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SCATTER #define ZMQ_SCATTER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_DGRAM #define ZMQ_DGRAM (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENT_CONNECTED #define ZMQ_EVENT_CONNECTED (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENT_CONNECT_DELAYED #define ZMQ_EVENT_CONNECT_DELAYED (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENT_CONNECT_RETRIED #define ZMQ_EVENT_CONNECT_RETRIED (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENT_LISTENING #define ZMQ_EVENT_LISTENING (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENT_BIND_FAILED #define ZMQ_EVENT_BIND_FAILED (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENT_ACCEPTED #define ZMQ_EVENT_ACCEPTED (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENT_ACCEPT_FAILED #define ZMQ_EVENT_ACCEPT_FAILED (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENT_CLOSED #define ZMQ_EVENT_CLOSED (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENT_CLOSE_FAILED #define ZMQ_EVENT_CLOSE_FAILED (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENT_DISCONNECTED #define ZMQ_EVENT_DISCONNECTED (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENT_ALL #define ZMQ_EVENT_ALL (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENT_MONITOR_STOPPED #define 
ZMQ_EVENT_MONITOR_STOPPED (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_NULL #define ZMQ_NULL (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_PLAIN #define ZMQ_PLAIN (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_CURVE #define ZMQ_CURVE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_GSSAPI #define ZMQ_GSSAPI (_PYZMQ_UNDEFINED) #endif #ifndef EAGAIN #define EAGAIN (_PYZMQ_UNDEFINED) #endif #ifndef EINVAL #define EINVAL (_PYZMQ_UNDEFINED) #endif #ifndef EFAULT #define EFAULT (_PYZMQ_UNDEFINED) #endif #ifndef ENOMEM #define ENOMEM (_PYZMQ_UNDEFINED) #endif #ifndef ENODEV #define ENODEV (_PYZMQ_UNDEFINED) #endif #ifndef EMSGSIZE #define EMSGSIZE (_PYZMQ_UNDEFINED) #endif #ifndef EAFNOSUPPORT #define EAFNOSUPPORT (_PYZMQ_UNDEFINED) #endif #ifndef ENETUNREACH #define ENETUNREACH (_PYZMQ_UNDEFINED) #endif #ifndef ECONNABORTED #define ECONNABORTED (_PYZMQ_UNDEFINED) #endif #ifndef ECONNRESET #define ECONNRESET (_PYZMQ_UNDEFINED) #endif #ifndef ENOTCONN #define ENOTCONN (_PYZMQ_UNDEFINED) #endif #ifndef ETIMEDOUT #define ETIMEDOUT (_PYZMQ_UNDEFINED) #endif #ifndef EHOSTUNREACH #define EHOSTUNREACH (_PYZMQ_UNDEFINED) #endif #ifndef ENETRESET #define ENETRESET (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_HAUSNUMERO #define ZMQ_HAUSNUMERO (_PYZMQ_UNDEFINED) #endif #ifndef ENOTSUP #define ENOTSUP (_PYZMQ_UNDEFINED) #endif #ifndef EPROTONOSUPPORT #define EPROTONOSUPPORT (_PYZMQ_UNDEFINED) #endif #ifndef ENOBUFS #define ENOBUFS (_PYZMQ_UNDEFINED) #endif #ifndef ENETDOWN #define ENETDOWN (_PYZMQ_UNDEFINED) #endif #ifndef EADDRINUSE #define EADDRINUSE (_PYZMQ_UNDEFINED) #endif #ifndef EADDRNOTAVAIL #define EADDRNOTAVAIL (_PYZMQ_UNDEFINED) #endif #ifndef ECONNREFUSED #define ECONNREFUSED (_PYZMQ_UNDEFINED) #endif #ifndef EINPROGRESS #define EINPROGRESS (_PYZMQ_UNDEFINED) #endif #ifndef ENOTSOCK #define ENOTSOCK (_PYZMQ_UNDEFINED) #endif #ifndef EFSM #define EFSM (_PYZMQ_UNDEFINED) #endif #ifndef ENOCOMPATPROTO #define ENOCOMPATPROTO (_PYZMQ_UNDEFINED) #endif #ifndef ETERM #define ETERM (_PYZMQ_UNDEFINED) #endif #ifndef EMTHREAD #define EMTHREAD (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_IO_THREADS #define ZMQ_IO_THREADS (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_MAX_SOCKETS #define ZMQ_MAX_SOCKETS (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SOCKET_LIMIT #define ZMQ_SOCKET_LIMIT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_THREAD_PRIORITY #define ZMQ_THREAD_PRIORITY (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_THREAD_SCHED_POLICY #define ZMQ_THREAD_SCHED_POLICY (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_BLOCKY #define ZMQ_BLOCKY (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_IDENTITY #define ZMQ_IDENTITY (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SUBSCRIBE #define ZMQ_SUBSCRIBE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_UNSUBSCRIBE #define ZMQ_UNSUBSCRIBE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_LAST_ENDPOINT #define ZMQ_LAST_ENDPOINT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_TCP_ACCEPT_FILTER #define ZMQ_TCP_ACCEPT_FILTER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_PLAIN_USERNAME #define ZMQ_PLAIN_USERNAME (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_PLAIN_PASSWORD #define ZMQ_PLAIN_PASSWORD (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_CURVE_PUBLICKEY #define ZMQ_CURVE_PUBLICKEY (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_CURVE_SECRETKEY #define ZMQ_CURVE_SECRETKEY (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_CURVE_SERVERKEY #define ZMQ_CURVE_SERVERKEY (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_ZAP_DOMAIN #define ZMQ_ZAP_DOMAIN (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_CONNECT_RID #define ZMQ_CONNECT_RID (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_GSSAPI_PRINCIPAL #define ZMQ_GSSAPI_PRINCIPAL (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_GSSAPI_SERVICE_PRINCIPAL 
#define ZMQ_GSSAPI_SERVICE_PRINCIPAL (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SOCKS_PROXY #define ZMQ_SOCKS_PROXY (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_XPUB_WELCOME_MSG #define ZMQ_XPUB_WELCOME_MSG (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_FD #define ZMQ_FD (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_RECONNECT_IVL_MAX #define ZMQ_RECONNECT_IVL_MAX (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SNDTIMEO #define ZMQ_SNDTIMEO (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_RCVTIMEO #define ZMQ_RCVTIMEO (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SNDHWM #define ZMQ_SNDHWM (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_RCVHWM #define ZMQ_RCVHWM (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_MULTICAST_HOPS #define ZMQ_MULTICAST_HOPS (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_IPV4ONLY #define ZMQ_IPV4ONLY (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_ROUTER_BEHAVIOR #define ZMQ_ROUTER_BEHAVIOR (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_TCP_KEEPALIVE #define ZMQ_TCP_KEEPALIVE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_TCP_KEEPALIVE_CNT #define ZMQ_TCP_KEEPALIVE_CNT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_TCP_KEEPALIVE_IDLE #define ZMQ_TCP_KEEPALIVE_IDLE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_TCP_KEEPALIVE_INTVL #define ZMQ_TCP_KEEPALIVE_INTVL (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_DELAY_ATTACH_ON_CONNECT #define ZMQ_DELAY_ATTACH_ON_CONNECT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_XPUB_VERBOSE #define ZMQ_XPUB_VERBOSE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_EVENTS #define ZMQ_EVENTS (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_TYPE #define ZMQ_TYPE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_LINGER #define ZMQ_LINGER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_RECONNECT_IVL #define ZMQ_RECONNECT_IVL (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_BACKLOG #define ZMQ_BACKLOG (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_ROUTER_MANDATORY #define ZMQ_ROUTER_MANDATORY (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_FAIL_UNROUTABLE #define ZMQ_FAIL_UNROUTABLE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_ROUTER_RAW #define ZMQ_ROUTER_RAW (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_IMMEDIATE #define ZMQ_IMMEDIATE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_IPV6 #define ZMQ_IPV6 (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_MECHANISM #define ZMQ_MECHANISM (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_PLAIN_SERVER #define ZMQ_PLAIN_SERVER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_CURVE_SERVER #define ZMQ_CURVE_SERVER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_PROBE_ROUTER #define ZMQ_PROBE_ROUTER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_REQ_RELAXED #define ZMQ_REQ_RELAXED (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_REQ_CORRELATE #define ZMQ_REQ_CORRELATE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_CONFLATE #define ZMQ_CONFLATE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_ROUTER_HANDOVER #define ZMQ_ROUTER_HANDOVER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_TOS #define ZMQ_TOS (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_IPC_FILTER_PID #define ZMQ_IPC_FILTER_PID (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_IPC_FILTER_UID #define ZMQ_IPC_FILTER_UID (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_IPC_FILTER_GID #define ZMQ_IPC_FILTER_GID (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_GSSAPI_SERVER #define ZMQ_GSSAPI_SERVER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_GSSAPI_PLAINTEXT #define ZMQ_GSSAPI_PLAINTEXT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_HANDSHAKE_IVL #define ZMQ_HANDSHAKE_IVL (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_XPUB_NODROP #define ZMQ_XPUB_NODROP (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_XPUB_MANUAL #define ZMQ_XPUB_MANUAL (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_STREAM_NOTIFY #define ZMQ_STREAM_NOTIFY (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_INVERT_MATCHING #define ZMQ_INVERT_MATCHING (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_XPUB_VERBOSER #define 
ZMQ_XPUB_VERBOSER (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_HEARTBEAT_IVL #define ZMQ_HEARTBEAT_IVL (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_HEARTBEAT_TTL #define ZMQ_HEARTBEAT_TTL (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_HEARTBEAT_TIMEOUT #define ZMQ_HEARTBEAT_TIMEOUT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_CONNECT_TIMEOUT #define ZMQ_CONNECT_TIMEOUT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_TCP_MAXRT #define ZMQ_TCP_MAXRT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_THREAD_SAFE #define ZMQ_THREAD_SAFE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_MULTICAST_MAXTPDU #define ZMQ_MULTICAST_MAXTPDU (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_VMCI_CONNECT_TIMEOUT #define ZMQ_VMCI_CONNECT_TIMEOUT (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_USE_FD #define ZMQ_USE_FD (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_AFFINITY #define ZMQ_AFFINITY (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_MAXMSGSIZE #define ZMQ_MAXMSGSIZE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_HWM #define ZMQ_HWM (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SWAP #define ZMQ_SWAP (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_MCAST_LOOP #define ZMQ_MCAST_LOOP (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_RECOVERY_IVL_MSEC #define ZMQ_RECOVERY_IVL_MSEC (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_VMCI_BUFFER_SIZE #define ZMQ_VMCI_BUFFER_SIZE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_VMCI_BUFFER_MIN_SIZE #define ZMQ_VMCI_BUFFER_MIN_SIZE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_VMCI_BUFFER_MAX_SIZE #define ZMQ_VMCI_BUFFER_MAX_SIZE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_RATE #define ZMQ_RATE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_RECOVERY_IVL #define ZMQ_RECOVERY_IVL (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SNDBUF #define ZMQ_SNDBUF (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_RCVBUF #define ZMQ_RCVBUF (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_RCVMORE #define ZMQ_RCVMORE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_MORE #define ZMQ_MORE (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SRCFD #define ZMQ_SRCFD (_PYZMQ_UNDEFINED) #endif #ifndef ZMQ_SHARED #define ZMQ_SHARED (_PYZMQ_UNDEFINED) #endif #endif // ifndef _PYZMQ_CONSTANT_DEFS pyzmq-16.0.2/zmqversion.py000066400000000000000000000075141301503633700155570ustar00rootroot00000000000000"""A simply script to scrape zmq.h for the zeromq version. This is similar to the version.sh script in a zeromq source dir, but it searches for an installed header, rather than in the current dir. """ # Copyright (c) PyZMQ Developers # Distributed under the terms of the Modified BSD License. from __future__ import with_statement import os import sys import re import traceback from warnings import warn try: from configparser import ConfigParser except: from ConfigParser import ConfigParser pjoin = os.path.join MAJOR_PAT='^#define +ZMQ_VERSION_MAJOR +[0-9]+$' MINOR_PAT='^#define +ZMQ_VERSION_MINOR +[0-9]+$' PATCH_PAT='^#define +ZMQ_VERSION_PATCH +[0-9]+$' def include_dirs_from_path(): """Check the exec path for include dirs.""" include_dirs = [] for p in os.environ['PATH'].split(os.path.pathsep): if p.endswith('/'): p = p[:-1] if p.endswith('bin'): include_dirs.append(p[:-3]+'include') return include_dirs def default_include_dirs(): """Default to just /usr/local/include:/usr/include""" return ['/usr/local/include', '/usr/include'] def find_zmq_version(): """check setup.cfg, then /usr/local/include, then /usr/include for zmq.h. Then scrape zmq.h for the version tuple. 
Returns ------- ((major,minor,patch), "/path/to/zmq.h")""" include_dirs = [] if os.path.exists('setup.cfg'): cfg = ConfigParser() cfg.read('setup.cfg') if 'build_ext' in cfg.sections(): items = cfg.items('build_ext') for name,val in items: if name == 'include_dirs': include_dirs = val.split(os.path.pathsep) if not include_dirs: include_dirs = default_include_dirs() for include in include_dirs: zmq_h = pjoin(include, 'zmq.h') if os.path.isfile(zmq_h): with open(zmq_h) as f: contents = f.read() else: continue line = re.findall(MAJOR_PAT, contents, re.MULTILINE)[0] major = int(re.findall('[0-9]+',line)[0]) line = re.findall(MINOR_PAT, contents, re.MULTILINE)[0] minor = int(re.findall('[0-9]+',line)[0]) line = re.findall(PATCH_PAT, contents, re.MULTILINE)[0] patch = int(re.findall('[0-9]+',line)[0]) return ((major,minor,patch), zmq_h) raise IOError("Couldn't find zmq.h") def ver_str(version): """version tuple as string""" return '.'.join(map(str, version)) def check_zmq_version(min_version): """Check that zmq.h has an appropriate version.""" sv = ver_str(min_version) try: found, zmq_h = find_zmq_version() sf = ver_str(found) if found < min_version: print ("This pyzmq requires zeromq >= %s"%sv) print ("but it appears you are building against %s"%zmq_h) print ("which has zeromq %s"%sf) sys.exit(1) except IOError: msg = '\n'.join(["Couldn't find zmq.h to check for version compatibility.", "If you see 'undeclared identifier' errors, your ZeroMQ is likely too old.", "This pyzmq requires zeromq >= %s"%sv]) warn(msg) except IndexError: msg = '\n'.join(["Couldn't find ZMQ_VERSION macros in zmq.h to check for version compatibility.", "This probably means that you have ZeroMQ <= 2.0.9", "If you see 'undeclared identifier' errors, your ZeroMQ is likely too old.", "This pyzmq requires zeromq >= %s"%sv]) warn(msg) except Exception: traceback.print_exc() msg = '\n'.join(["Unexpected Error checking for zmq version.", "If you see 'undeclared identifier' errors, your ZeroMQ is likely too old.", "This pyzmq requires zeromq >= %s"%sv]) warn(msg) if __name__ == '__main__': v,h = find_zmq_version() print (h) print (ver_str(v))
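# Illustrative usage sketch (not part of the original script): build helper code
# could guard compilation with a minimum-version check, e.g.
#
#     from zmqversion import check_zmq_version
#     check_zmq_version((2, 1, 4))   # hypothetical minimum: exits if zmq.h is older,
#                                    # and only warns if no zmq.h can be found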