pax_global_header00006660000000000000000000000064147737437020014530gustar00rootroot0000000000000052 comment=b273c0915abd45003dc6378a3c6e8750d39a34c0 pyzmq-26.4.0/000077500000000000000000000000001477374370200130015ustar00rootroot00000000000000pyzmq-26.4.0/.coveragerc000066400000000000000000000002721477374370200151230ustar00rootroot00000000000000[run] plugins = Cython.Coverage parallel = True branch = False omit = zmq/eventloop/minitornado/* zmq/tests/* [report] exclude_lines = pragma: no cover ignore_errors = True pyzmq-26.4.0/.flake8000066400000000000000000000003551477374370200141570ustar00rootroot00000000000000# flake8 no longer used, # ruff config in pyproject.toml [flake8] exclude = .git,dist,docs,zmq/eventloop/minitornado,buildutils/templates ignore = E,W per-file-ignores = **/__init__.py:F401,F403 zmq/tests/**:F841 examples/**:F841 pyzmq-26.4.0/.github/000077500000000000000000000000001477374370200143415ustar00rootroot00000000000000pyzmq-26.4.0/.github/ISSUE_TEMPLATE/000077500000000000000000000000001477374370200165245ustar00rootroot00000000000000pyzmq-26.4.0/.github/ISSUE_TEMPLATE/bug.yml000066400000000000000000000052111477374370200200230ustar00rootroot00000000000000name: Bug Report description: File a pyzmq-specific bug report title: "BUG: " labels: ["bug"] body: - type: markdown attributes: value: > Thanks for taking the time to fill out this bug report! Please only report pyzmq-specific bugs on this repo, i.e. missing options, pyzmq build failures, event loop problems. General problems or questions about zeromq sockets (e.g. missing messages, etc.) are best directed towards [zeromq/libzmq](https://github.com/zeromq/libzmq) instead. - type: checkboxes id: not-libzmq attributes: label: This is a pyzmq bug description: Make sure to open issues about zmq socket behavior (e.g. HWM, LINGER, missed or dropped messages) on zeromq/libzmq itself, not here. options: - label: This is a pyzmq-specific bug, not an issue of zmq socket behavior. Don't worry if you're not sure! We'll figure it out together. required: true - type: input id: pyzmq-version attributes: label: What pyzmq version? placeholder: | print(zmq.__version__) validations: required: true - type: input id: libzmq-version attributes: label: What libzmq version? description: | If you installed libzmq separately, mention that. placeholder: print(zmq.zmq_version()) validations: required: true - type: input id: python attributes: label: Python version (and how it was installed) description: | For example: Python 3.10 installed via conda-forge placeholder: python 3.87, via Python.org validations: required: true - type: input id: os attributes: label: OS description: | What Operation System and Version? placeholder: What OS (ubuntu 22.04, macOS 12, etc.)? validations: required: true - type: textarea id: what-happened attributes: label: What happened? description: | What did you expect to happen, and what happened instead? placeholder: Tell us what you see! value: A bug happened! validations: required: true - type: textarea id: repro attributes: label: Code to reproduce bug description: Please provide _complete_ Python code required to reproduce the bug, if you can. render: python - type: textarea id: logs attributes: label: Traceback, if applicable description: Provide full Python traceback of the error you see, if there is one. render: shell - type: textarea id: more attributes: label: More info description: Anything more you want to share? 
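The version fields in the bug template above assume the reporter runs a line or two of Python. A small, self-contained snippet (not part of the template itself, just an illustration) that gathers everything the form asks for in one go:

```python
import platform
import sys

import zmq

print("pyzmq:", zmq.__version__)       # "What pyzmq version?"
print("libzmq:", zmq.zmq_version())    # "What libzmq version?"
print("python:", sys.version)          # Python version (and how it was installed)
print("os:", platform.platform())      # OS field
```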
pyzmq-26.4.0/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000010031477374370200205060ustar00rootroot00000000000000contact_links: - name: "\U0001F914 Question about how to use zeromq" url: https://stackoverflow.com/questions/tagged/zeromq about: The pyzmq development repo is not the place to ask about how to use zeromq. There just aren't that many eyes on it! - name: "\U0001F914 Bug reports for zeromq" url: https://github.com/zeromq/libzmq/issues about: Report general issues about zeromq socket behavior on the libzmq repo. pyzmq only exposes libzmq to Python, it does not influence zmq socket behavior. pyzmq-26.4.0/.github/dependabot.yml000066400000000000000000000006331477374370200171730ustar00rootroot00000000000000# dependabot.yaml reference: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file version: 2 updates: # Maintain dependencies in our GitHub Workflows - package-ecosystem: github-actions directory: "/" schedule: interval: monthly - package-ecosystem: pip directory: "/tools" schedule: interval: weekly pyzmq-26.4.0/.github/workflows/000077500000000000000000000000001477374370200163765ustar00rootroot00000000000000pyzmq-26.4.0/.github/workflows/test-docs.yml000066400000000000000000000022451477374370200210310ustar00rootroot00000000000000name: Test docs # The tests defined in docs/ are currently influenced by changes to _version.py # and scopes.py. on: pull_request: paths: - "docs/**" - "zmq/**" - ".github/workflows/test-docs.yml" push: paths: - "docs/**" - "zmq/**" - ".github/workflows/test-docs.yml" branches-ignore: - "dependabot/**" - "pre-commit-ci-update-config" tags: - "**" workflow_dispatch: env: LANG: C.UTF-8 SPHINXOPTS: "-W" jobs: test-docs: runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: "3.12" cache: pip - name: Install libzmq run: | sudo apt-get -y install libzmq3-dev - name: Install pyzmq run: | pip install -v . 
- name: Install requirements run: | pip install -r docs/requirements.txt # readthedocs doesn't halt on warnings, # so raise any warnings here - name: build docs run: | cd docs make html - name: check links run: | cd docs make linkcheck pyzmq-26.4.0/.github/workflows/test.yml000066400000000000000000000146751477374370200201150ustar00rootroot00000000000000name: Test on: push: branches-ignore: - "dependabot/**" - "pre-commit-ci-update-config" paths-ignore: - "docs/**" - "tools/**" - ".circleci/**" - ".github/workflows/*" - "!.github/workflows/test.yml" pull_request: paths-ignore: - "docs/**" - "tools/**" - ".circleci/**" - ".github/workflows/*" - "!.github/workflows/test.yml" concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true env: FORCE_COLOR: "1" # only affects Windows, but easiest to set here for now SETUPTOOLS_ENABLE_FEATURES: "legacy-editable" jobs: test: runs-on: ${{ matrix.os }} timeout-minutes: 20 continue-on-error: ${{ matrix.zmq == 'head' }} env: MACOSX_DEPLOYMENT_TARGET: "13.7" strategy: fail-fast: true matrix: include: - os: macos-13 python: "3.8" - os: macos-14 python: "3.12" zmq: bundled - os: macos-14 python: "3.13" zmq: bundled - os: macos-13 python: pypy-3.9 zmq: bundled - os: ubuntu-22.04 python: "3.8" zmq: bundled tornado: none - os: ubuntu-22.04 python: pypy-3.9 zmq: bundled - os: ubuntu-22.04 python: pypy-3.8 - os: ubuntu-22.04 python: "3.9" tornado: head - os: ubuntu-22.04 python: "3.10" - os: ubuntu-22.04 python: "3.11" zmq: head - os: ubuntu-24.04 python: "3.12" - os: ubuntu-24.04 python: "3.13" backend: cffi - os: ubuntu-24.04 python: "3.13" - os: ubuntu-24.04 python: "3.13" free_threading: free_threading - os: windows-2022 python: "3.8" arch: x86 - os: windows-2022 python: "3.11" arch: x64 - os: windows-2022 python: "3.13" arch: x64 steps: - uses: actions/checkout@v4 - name: setup python uses: actions/setup-python@v5 if: ${{ !matrix.free_threading }} with: python-version: ${{ matrix.python }} architecture: ${{ matrix.arch || 'x64' }} # allows us to use '3.12' and get '-dev' while we wait allow-prereleases: true cache: pip - name: setup python (free threading) if: ${{ matrix.free_threading }} uses: mamba-org/setup-micromamba@v2 with: environment-name: nogil condarc: | channels: - ad-testing/label/py313_nogil - defaults create-args: >- python=${{ matrix.python-version }} pip - name: activate python (free threading) if: ${{ matrix.free_threading }} # light activate, just add env to path # run this in micromamba-shell # so we don't have to for the rest run: | echo $PATH echo "PATH=$PATH" >> "$GITHUB_ENV" echo "PYTHON_GIL=0" >> "$GITHUB_ENV" # need prerelease Cython until 3.1 is out echo "PIP_PRE=1" >> "$GITHUB_ENV" echo "PIP_EXTRA_INDEX_URL=https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" >> "$GITHUB_ENV" shell: micromamba-shell {0} - name: setup coverage if: startsWith(matrix.python, 'pypy') || startsWith(matrix.python, '3.12') run: | grep -v plugins .coveragerc > .coveragerc-save mv .coveragerc-save .coveragerc # - name: enable Cython coverage # if: "! 
(startsWith(matrix.python, 'pypy') || startsWith(matrix.python, '3.12'))" # run: | # echo "PYZMQ_CYTHON_COVERAGE=1" >> "$GITHUB_ENV" - name: install dependencies run: | pip install --upgrade pip wheel pip install -r test-requirements.txt - name: remove tornado if: matrix.tornado == 'none' run: | pip uninstall -y tornado - name: install tornado-head if: matrix.tornado == 'head' run: | pip install https://github.com/tornadoweb/tornado/archive/HEAD.zip - name: install pinned tornado if: matrix.tornado && matrix.tornado != 'none' && matrix.tornado != 'head' run: | pip install tornado=="${{ matrix.tornado }}" - name: show environment run: | pip freeze - name: install mac dependencies if: startsWith(matrix.os, 'mac') && matrix.zmq != 'bundled' run: | brew install libsodium zeromq - name: install linux dependencies if: startsWith(matrix.os, 'ubuntu') run: | sudo apt-get update sudo apt-get -y remove libzmq5 || true # workaround https://github.com/actions/virtual-environments/issues/3317 sudo apt-get -y install libzmq3-dev libsodium-dev - name: set $ZMQ_PREFIX if: matrix.zmq run: | echo "ZMQ_PREFIX=${{ matrix.zmq }}" >> "$GITHUB_ENV" - name: set $PYZMQ_BACKEND if: matrix.backend run: | echo "PYZMQ_BACKEND=${{ matrix.backend }}" >> "$GITHUB_ENV" pip install cffi - name: install libzmq-dev if: matrix.zmq == 'head' run: | wget https://github.com/zeromq/libzmq/archive/HEAD.zip -O libzmq.zip unzip libzmq.zip pushd libzmq-* ./autogen.sh ./configure --enable-drafts make -j4 sudo make install sudo ldconfig popd echo "ZMQ_PREFIX=/usr/local" >> "$GITHUB_ENV" echo ZMQ_DRAFT_API=1 >> "$GITHUB_ENV" - name: build pyzmq run: | pip install -v -e . - name: import zmq run: | python -I -c "import zmq" - name: run tests run: | pytest --maxfail 2 --cov zmq -m "not wheel and not new_console" -v - name: upload coverage run: codecov # FIXME: what's the right incantation to call this? # the same line from appveyor doesn't work here # - name: run extra windows tests # if: startsWith(matrix.os, 'win') # run: | # cmd /c start /wait test pytest -vsx -m new_console --pyargs zmq.tests --junit-xml=./results.xml # python tools/check_junit_result.py ./results.xml pyzmq-26.4.0/.github/workflows/wheels.yml000066400000000000000000000142211477374370200204100ustar00rootroot00000000000000name: Release on: push: branches-ignore: - "dependabot/**" - "pre-commit-ci-update-config" tags: - "**" pull_request: paths: - pyproject.toml - CMakeLists.txt - cmake/** - buildutils/** - .github/workflows/wheels.yml - tools/install_libzmq.sh - zmq/utils/*.h concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true env: PYTHONUNBUFFERED: "1" # CIBW_PRERELEASE_PYTHONS: "1" jobs: sdist: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - name: setup python uses: actions/setup-python@v5 with: python-version: "3.11" cache: pip - name: install dependencies run: | pip install --upgrade pip build pytest pip install -r tools/wheel-requirements.txt - name: build sdist run: | python -m build --sdist . 
- uses: actions/upload-artifact@v4 with: name: sdist path: "dist/*.tar.gz" if-no-files-found: error - name: verify sdist files run: pytest -v tools/test_sdist.py wheel: runs-on: ${{ matrix.os || 'ubuntu-22.04' }} name: wheel-${{ matrix.name }} env: MACOSX_DEPLOYMENT_TARGET: "10.15" CIBW_BUILD: "${{ matrix.cibw.build || '*' }}" CIBW_SKIP: "${{ matrix.cibw.skip || '' }}" CIBW_ARCHS: "${{ matrix.cibw.arch || 'auto' }}" CIBW_MANYLINUX_X86_64_IMAGE: "${{ matrix.cibw.manylinux_x86_64_image || '' }}" strategy: fail-fast: false matrix: include: - os: macos-13 name: mac-pypy cibw: build: "pp*" - os: macos-14 name: mac-cpython cibw: arch: universal2 build: "cp*" - name: manylinux-x86_64 cibw: arch: x86_64 build: "*manylinux*" - name: manylinux-i686 cibw: arch: i686 build: "*manylinux*" - name: manylinux-arm os: ubuntu-24.04-arm cibw: build: "*manylinux*" # additional manylinux variants, not specified in pyproject.toml: # build with newer 2_28 for cpython >= 3.10, pypy 3.9 - name: manylinux-x86_64-2_28 cibw: arch: x86_64 build: "cp31*-manylinux* pp39-manylinux*" manylinux_x86_64_image: manylinux_2_28 - name: musllinux cibw: build: "*musllinux*" - name: musllinux-arm os: ubuntu-24.04-arm cibw: build: "*musllinux*" - name: win32 os: windows-2019 architecture: x86 cibw: build: "cp*win32" - os: windows-2019 name: win-pypy architecture: x64 cibw: build: "pp*win_amd64" - os: windows-2019 name: win_amd64 architecture: x64 cibw: build: "cp*win_amd64" - os: windows-2022 name: win_arm64 architecture: x64 cibw: arch: ARM64 skip: "cp37*" steps: - uses: actions/checkout@v4 - name: setup python uses: actions/setup-python@v5 with: python-version: "3.11" architecture: ${{ matrix.architecture }} - name: locate windows-arm vcredist if: matrix.name == 'win_arm64' run: | python tools/find_vcredist.py - name: customize mac-arm-64 if: contains(matrix.os, 'macos') && matrix.cibw.arch run: | echo 'MACOSX_DEPLOYMENT_TARGET=10.15' >> "$GITHUB_ENV" - name: install dependencies run: | pip install --upgrade setuptools pip wheel pip install -r tools/wheel-requirements.txt - name: show environment run: | pip freeze - name: list target wheels run: | python -m cibuildwheel . --print-build-identifiers - name: build wheels run: | python -m cibuildwheel . - uses: actions/upload-artifact@v4 with: name: wheels-${{ matrix.name }} path: "wheelhouse/*" if-no-files-found: error github-release: permissions: contents: write environment: release runs-on: ubuntu-22.04 if: startsWith(github.ref, 'refs/tags/') steps: - id: version # strip leading `v` from tag, since it's not part of the version # see https://github.com/orgs/community/discussions/26625 for substring feature request run: | echo "${{ github.ref_name }}" | sed s@^v@v=@ >> "${GITHUB_OUTPUT}" - uses: ncipollo/release-action@v1 with: # mark as prerelease if it looks like one prerelease: ${{ contains(github.ref_name, 'a') || contains(github.ref_name, 'b') || contains(github.ref_name, 'rc') }} # link to release notes, PyPI body: | See [release notes][], or [pyzmq on PyPI][]. 
Install with: ``` pip install 'pyzmq==${{ steps.version.outputs.v }}' ``` [release notes]: https://pyzmq.readthedocs.io/en/latest/changelog.html [pyzmq on PyPI]: https://pypi.org/project/pyzmq/${{ steps.version.outputs.v }}/ list-wheels: runs-on: ubuntu-22.04 needs: - sdist - wheel steps: - uses: actions/checkout@v4 - uses: actions/download-artifact@v4 with: path: dist merge-multiple: true - name: list wheels run: | ls -l dist python3 tools/wheel_summary.py dist | tee "$GITHUB_STEP_SUMMARY" upload-pypi: permissions: id-token: write environment: release runs-on: ubuntu-22.04 if: startsWith(github.ref, 'refs/tags/') needs: - sdist - wheel steps: - uses: actions/download-artifact@v4 with: path: dist merge-multiple: true - name: Publish wheels to PyPI uses: pypa/gh-action-pypi-publish@release/v1 pyzmq-26.4.0/.gitignore000066400000000000000000000012041477374370200147660ustar00rootroot00000000000000.idea .vagrant *.pyc zmq/backend/cython/*.c zmq/backend/cffi/*.o zmq/devices/*.c zmq/utils/*.json zmq/include/*.h __pycache__ build dist conf bundled libzmq-dll *.egg-info *.so *.pyd *.dll *.dylib docs/source/api/generated docs/gh-pages setup.cfg MANIFEST .tox examples/security/public_keys examples/security/private_keys wheelhouse .coverage .cache .pytest_cache win-dist *.pickle .ipynb_checkpoints venv *.code-workspace .vscode .mypy_cache .coverage htmlcov coverage.xml env .eggs CMakeFiles CMakeCache.txt cmake_install.cmake _deps /Makefile _src licenses/* !licenses/LICENSE.tornado.txt .virtual_documents /libsodium-* /zeromq-* *.tar.gz pyzmq-26.4.0/.mailmap000066400000000000000000000013351477374370200144240ustar00rootroot00000000000000Brian E. Granger Brian Granger Chris Laws Chris Laws Daniel Lundin Daniel Lundin Min Ragan-Kelley Min RK Min Ragan-Kelley MinRK Michel Pelletier Michel Pelletier Nicholas Piël nicholas Felipe Cruz felipecruz Felipe Cruz Felipe cruz Yannick Hold Yannick Hold pyzmq-26.4.0/.obs/000077500000000000000000000000001477374370200136425ustar00rootroot00000000000000pyzmq-26.4.0/.obs/workflows.yml000066400000000000000000000002001477374370200164120ustar00rootroot00000000000000workflow: steps: - link_package: source_project: network:messaging:zeromq:git-draft source_package: pyzmq pyzmq-26.4.0/.pre-commit-config.yaml000066400000000000000000000031461477374370200172660ustar00rootroot00000000000000ci: autoupdate_schedule: monthly exclude: ^zmq/eventloop/minitornado/ repos: - repo: local hooks: - id: constants name: constants files: "^.*/constants.py" description: Generate constants files entry: python -m buildutils.constants language: python pass_filenames: false additional_dependencies: - ruff - repo: https://github.com/executablebooks/mdformat rev: 0.7.22 # Use the ref you want to point at hooks: - id: mdformat # Optionally add plugins additional_dependencies: - mdformat-black - mdformat-myst exclude: LICENSE.md # autoformat and lint Python code - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.9.9 hooks: - id: ruff types_or: - python - jupyter - pyi args: ["--fix", "--show-fixes"] - id: ruff-format types_or: - python - jupyter - pyi # don't format zmq/constants.py twice exclude: zmq/constants.py - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.15.0 hooks: - id: mypy files: zmq/.* # mypy gets the wrong results # if we pass specific files instead of the zmq dir # no idea why pass_filenames: false args: [zmq] additional_dependencies: - types-paramiko - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: - id: end-of-file-fixer - id: 
check-executables-have-shebangs - id: requirements-txt-fixer pyzmq-26.4.0/.prettierignore000066400000000000000000000000411477374370200160370ustar00rootroot00000000000000docs/source/_templates externals pyzmq-26.4.0/.readthedocs.yml000066400000000000000000000005211477374370200160650ustar00rootroot00000000000000# Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details version: 2 formats: all build: os: ubuntu-22.04 tools: python: "3.11" sphinx: configuration: docs/source/conf.py fail_on_warning: true python: install: - requirements: docs/requirements.txt - path: . pyzmq-26.4.0/AUTHORS.md000066400000000000000000000113121477374370200144460ustar00rootroot00000000000000## Authors This project was started and continues to be led by Brian E. Granger (ellisonbg AT gmail DOT com). Min Ragan-Kelley (benjaminrk AT gmail DOT com) is the primary developer of pyzmq at this time. The following people have contributed to the project: - Alexander Else (alexander DOT else AT team DOT telstra DOT com) - Alexander Pyhalov (apyhalov AT gmail DOT com) - Alexandr Emelin (frvzmb AT gmail DOT com) - Amr Ali (amr AT ledgerx DOT com) - Andre Caron (andre DOT l DOT caron AT gmail DOT com) - Andrea Crotti (andrea DOT crotti DOT 0 AT gmail DOT com) - Andrew Gwozdziewycz (git AT apgwoz DOT com) - Baptiste Lepilleur (baptiste DOT lepilleur AT gmail DOT com) - Brandyn A. White (bwhite AT dappervision DOT com) - Brian E. Granger (ellisonbg AT gmail DOT com) - Brian Hoffman (hoffman_brian AT bah DOT com) - Carlos A. Rocha (carlos DOT rocha AT gmail DOT com) - Chris Laws (clawsicus AT gmail DOT com) - Christian Wyglendowski (christian AT bu DOT mp) - Christoph Gohlke (cgohlke AT uci DOT edu) - Curtis (curtis AT tinbrain DOT net) - Cyril Holweck (cyril DOT holweck AT free DOT fr) - Dan Colish (dcolish AT gmail DOT com) - Daniel Lundin (dln AT eintr DOT org) - Daniel Truemper (truemped AT googlemail DOT com) - Douglas Creager (douglas DOT creager AT redjack DOT com) - Eduardo Stalinho (eduardooc DOT 86 AT gmail DOT com) - Eren Güven (erenguven0 AT gmail DOT com) - Erick Tryzelaar (erick DOT tryzelaar AT gmail DOT com) - Erik Tollerud (erik DOT tollerud AT gmail DOT com) - FELD Boris (lothiraldan AT gmail DOT com) - Fantix King (fantix DOT king AT gmail DOT com) - Felipe Cruz (felipecruz AT loogica DOT net) - Fernando Perez (Fernando DOT Perez AT berkeley DOT edu) - Frank Wiles (frank AT revsys DOT com) - Félix-Antoine Fortin (felix DOT antoine DOT fortin AT gmail DOT com) - Gavrie Philipson (gavriep AT il DOT ibm DOT com) - Godefroid Chapelle (gotcha AT bubblenet DOT be) - Greg Banks (gbanks AT mybasis DOT com) - Greg Ward (greg AT gerg DOT ca) - Guido Goldstein (github AT a-nugget DOT de) - Ian Lee (IanLee1521 AT gmail DOT com) - Ionuț Arțăriși (ionut AT artarisi DOT eu) - Ivo Danihelka (ivo AT danihelka DOT net) - Iyed (iyed DOT bennour AT gmail DOT com) - Jim Garrison (jim AT garrison DOT cc) - John Gallagher (johnkgallagher AT gmail DOT com) - Julian Taylor (jtaylor DOT debian AT googlemail DOT com) - Justin Bronder (jsbronder AT gmail DOT com) - Justin Riley (justin DOT t DOT riley AT gmail DOT com) - Marc Abramowitz (marc AT marc-abramowitz DOT com) - Matthew Aburn (mattja6 AT gmail DOT com) - Michel Pelletier (pelletier DOT michel AT gmail DOT com) - Michel Zou (xantares09 AT hotmail DOT com) - Min Ragan-Kelley (benjaminrk AT gmail DOT com) - Nell Hardcastle (nell AT dev-nell DOT com) - Nicholas Pilkington (nicholas DOT pilkington AT gmail DOT com) - Nicholas Piël 
(nicholas AT nichol DOT as) - Nick Pellegrino (npellegrino AT mozilla DOT com) - Nicolas Delaby (nicolas DOT delaby AT ezeep DOT com) - Ondrej Certik (ondrej AT certik DOT cz) - Paul Colomiets (paul AT colomiets DOT name) - Pawel Jasinski (pawel DOT jasinski AT gmail DOT com) - Phus Lu (phus DOT lu AT gmail DOT com) - Robert Buchholz (rbu AT goodpoint DOT de) - Robert Jordens (jordens AT gmail DOT com) - Ryan Cox (ryan DOT a DOT cox AT gmail DOT com) - Ryan Kelly (ryan AT rfk DOT id DOT au) - Scott Maxwell (scott AT codecobblers DOT com) - Scott Sadler (github AT mashi DOT org) - Simon Knight (simon DOT knight AT gmail DOT com) - Stefan Friesel (sf AT cloudcontrol DOT de) - Stefan van der Walt (stefan AT sun DOT ac DOT za) - Stephen Diehl (stephen DOT m DOT diehl AT gmail DOT com) - Sylvain Corlay (scorlay AT bloomberg DOT net) - Thomas Kluyver (takowl AT gmail DOT com) - Thomas Spura (tomspur AT fedoraproject DOT org) - Tigger Bear (Tigger AT Tiggers-Mac-mini DOT local) - Torsten Landschoff (torsten DOT landschoff AT dynamore DOT de) - Vadim Markovtsev (v DOT markovtsev AT samsung DOT com) - Yannick Hold (yannickhold AT gmail DOT com) - Zbigniew Jędrzejewski-Szmek (zbyszek AT in DOT waw DOT pl) - hugo shi (hugoshi AT bleb2 DOT (none)) - jdgleeson (jdgleeson AT mac DOT com) - kyledj (kyle AT bucebuce DOT com) - spez (steve AT hipmunk DOT com) - stu (stuart DOT axon AT jpcreative DOT co DOT uk) - xantares (xantares AT fujitsu-l64 DOT (none)) as reported by: ``` git log --all --format='- %aN (%aE)' | sort -u | sed 's/@/ AT /1' | sed -e 's/\.\([^ ]\)/ DOT \1/g' ``` with some adjustments. ### Not in git log - Brandon Craig-Rhodes (brandon AT rhodesmill DOT org) - Eugene Chernyshov (chernyshov DOT eugene AT gmail DOT com) - Craig Austin (craig DOT austin AT gmail DOT com) ### gevent_zeromq, now zmq.green - Travis Cline (travis DOT cline AT gmail DOT com) - Ryan Kelly (ryan AT rfk DOT id DOT au) - Zachary Voase (z AT zacharyvoase DOT com) pyzmq-26.4.0/CMakeLists.txt000066400000000000000000000375321477374370200155530ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.14...3.28) project(${SKBUILD_PROJECT_NAME} LANGUAGES C CXX) set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) list(PREPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake) find_package( Python COMPONENTS Interpreter Development.Module REQUIRED) # Python_SOABI isn't always right when cross-compiling # SKBUILD_SOABI seems to be if (DEFINED SKBUILD_SOABI AND NOT "${SKBUILD_SOABI}" STREQUAL "${Python_SOABI}") message(WARNING "SKBUILD_SOABI=${SKBUILD_SOABI} != Python_SOABI=${Python_SOABI}; likely cross-compiling, using SOABI=${SKBUILD_SOABI} from scikit-build") set(Python_SOABI "${SKBUILD_SOABI}") endif() # legacy pyzmq env options, no PYZMQ_ prefix set(ZMQ_PREFIX "auto" CACHE STRING "libzmq installation prefix or 'bundled'") option(ZMQ_DRAFT_API "whether to build the libzmq draft API" OFF) option(PYZMQ_LIBZMQ_RPATH "Add $ZMQ_PREFIX/lib to $RPATH (true by default). Set to false if libzmq will be bundled or relocated and RPATH is handled separately" ON) # anything new should start with PYZMQ_ option(PYZMQ_NO_BUNDLE "Prohibit building bundled libzmq. Useful for repackaging, to allow default search for libzmq and requiring it to succeed." 
OFF) set(PYZMQ_LIBZMQ_VERSION "4.3.5" CACHE STRING "libzmq version when bundling") set(PYZMQ_LIBSODIUM_VERSION "1.0.20" CACHE STRING "libsodium version when bundling") set(PYZMQ_LIBZMQ_URL "" CACHE STRING "full URL to download bundled libzmq") set(PYZMQ_LIBSODIUM_URL "" CACHE STRING "full URL to download bundled libsodium") set(PYZMQ_LIBSODIUM_CONFIGURE_ARGS "" CACHE STRING "semicolon-separated list of arguments to pass to ./configure for bundled libsodium") set(PYZMQ_LIBSODIUM_MSBUILD_ARGS "" CACHE STRING "semicolon-separated list of arguments to pass to msbuild for bundled libsodium") set(PYZMQ_LIBSODIUM_VS_VERSION "" CACHE STRING "Visual studio solution version for bundled libsodium (default: detect from MSVC_VERSION)") set(PYZMQ_BACKEND "" CACHE STRING "pyzmq backend to build ('cffi' or 'cython'). Default: cffi on PyPy, else Cython.") if (NOT CMAKE_BUILD_TYPE) # default to Release set(CMAKE_BUILD_TYPE "Release") endif() # get options from env # handle booleans foreach(_optname ZMQ_DRAFT_API PYZMQ_NO_BUNDLE PYZMQ_LIBZMQ_RPATH) if (DEFINED ENV{${_optname}}) if ("$ENV{${_optname}}" STREQUAL "1" OR "$ENV{${_optname}}" STREQUAL "ON") set(${_optname} TRUE) else() set(${_optname} FALSE) endif() endif() endforeach() foreach(_optname ZMQ_PREFIX PYZMQ_LIBZMQ_VERSION PYZMQ_LIBZMQ_URL PYZMQ_LIBSODIUM_VERSION PYZMQ_LIBSODIUM_URL PYZMQ_LIBSODIUM_CONFIGURE_ARGS PYZMQ_LIBSODIUM_MSBUILD_ARGS PYZMQ_LIBSODIUM_VS_VERSION PYZMQ_BACKEND ) if (DEFINED ENV{${_optname}}) if (_optname MATCHES ".*_ARGS") # if it's an _ARGS, split "-a -b" into "-a" "-b" # use native CMake lists for cmake args, # native command-line strings for env variables separate_arguments(${_optname} NATIVE_COMMAND "$ENV{${_optname}}") else() set(${_optname} "$ENV{${_optname}}") endif() endif() endforeach() if(ZMQ_DRAFT_API) message(STATUS "enabling ZMQ_DRAFT_API") add_compile_definitions(ZMQ_BUILD_DRAFT_API=1) endif() if (PYZMQ_LIBSODIUM_VERSION AND NOT PYZMQ_LIBSODIUM_URL) set(PYZMQ_LIBSODIUM_URL "https://github.com/jedisct1/libsodium/releases/download/${PYZMQ_LIBSODIUM_VERSION}-RELEASE/libsodium-${PYZMQ_LIBSODIUM_VERSION}.tar.gz") endif() if (PYZMQ_LIBZMQ_VERSION AND NOT PYZMQ_LIBZMQ_URL) set(PYZMQ_LIBZMQ_URL "https://github.com/zeromq/libzmq/releases/download/v${PYZMQ_LIBZMQ_VERSION}/zeromq-${PYZMQ_LIBZMQ_VERSION}.tar.gz") endif() #------- bundle libzmq ------ if (NOT ZMQ_PREFIX) # empty string is the same as 'auto' set(ZMQ_PREFIX "auto") endif() # default search paths: foreach(prefix $ENV{PREFIX} "/opt/homebrew" "/opt/local" "/usr/local" "/usr") if (IS_DIRECTORY "${prefix}") list(APPEND CMAKE_PREFIX_PATH "${prefix}") endif() endforeach() if (ZMQ_PREFIX STREQUAL "auto") message(CHECK_START "Looking for libzmq") find_package(ZeroMQ QUIET) if (ZeroMQ_FOUND AND TARGET libzmq) set(libzmq_target "libzmq") get_target_property(_ZMQ_LOCATION libzmq IMPORTED_LOCATION) message(CHECK_PASS "Found with cmake: ${_ZMQ_LOCATION}") endif() if (NOT ZeroMQ_FOUND) find_package(PkgConfig QUIET) if (PkgConfig_FOUND) message(CHECK_START "Looking for libzmq with pkg-config") pkg_check_modules(libzmq libzmq IMPORTED_TARGET) if (TARGET PkgConfig::libzmq) set(ZeroMQ_FOUND TRUE) set(libzmq_target "PkgConfig::libzmq") message(CHECK_PASS "found: -L${libzmq_LIBRARY_DIRS} -l${libzmq_LIBRARIES}") if (PYZMQ_LIBZMQ_RPATH) foreach(LIBZMQ_LIB_DIR IN LISTS libzmq_LIBRARY_DIRS) message(STATUS " Adding ${LIBZMQ_LIB_DIR} to RPATH, set PYZMQ_LIBZMQ_RPATH=OFF if this is not what you want.") list(APPEND CMAKE_INSTALL_RPATH "${LIBZMQ_LIB_DIR}") endforeach() endif() else() 
message(CHECK_FAIL "no") endif() endif() endif() if (NOT ZeroMQ_FOUND) message(STATUS " Fallback: looking for libzmq in ${CMAKE_PREFIX_PATH}") find_library(LIBZMQ_LIBRARY NAMES zmq) find_path(LIBZMQ_INCLUDE_DIR "zmq.h") # check if found if (LIBZMQ_LIBRARY AND LIBZMQ_INCLUDE_DIR) set(ZeroMQ_FOUND TRUE) message(CHECK_PASS "${LIBZMQ_LIBRARY}") # NOTE: we _could_ set RPATH here. Should we? Unclear. if (PYZMQ_LIBZMQ_RPATH) get_filename_component(LIBZMQ_LIB_DIR "${LIBZMQ_LIBRARY}" DIRECTORY) message(STATUS " Adding ${LIBZMQ_LIB_DIR} to RPATH, set PYZMQ_LIBZMQ_RPATH=OFF if this is not what you want.") list(APPEND CMAKE_INSTALL_RPATH "${LIBZMQ_LIB_DIR}") endif() endif() endif() if (NOT ZeroMQ_FOUND) if (PYZMQ_NO_BUNDLE) message(CHECK_FAIL "libzmq not found") message(FATAL_ERROR "aborting because bundled libzmq is disabled") else() message(CHECK_FAIL "libzmq not found, will bundle libzmq and libsodium") set(ZMQ_PREFIX "bundled") endif() endif() elseif (NOT ZMQ_PREFIX STREQUAL "bundled") message(CHECK_START "Looking for libzmq in ${ZMQ_PREFIX}") find_path( LIBZMQ_INCLUDE_DIR zmq.h PATHS "${ZMQ_PREFIX}/include" NO_DEFAULT_PATH ) find_library( LIBZMQ_LIBRARY NAMES zmq PATHS "${ZMQ_PREFIX}/lib" NO_DEFAULT_PATH ) if (LIBZMQ_LIBRARY AND LIBZMQ_INCLUDE_DIR) message(CHECK_PASS "${LIBZMQ_LIBRARY}") if (PYZMQ_LIBZMQ_RPATH) # add prefix to RPATH message(STATUS " Adding ${ZMQ_PREFIX}/lib to RPATH, set PYZMQ_LIBZMQ_RPATH=OFF if this is not what you want.") list(APPEND CMAKE_INSTALL_RPATH "${ZMQ_PREFIX}/lib") endif() else() message(CHECK_FAIL "no") message(FATAL_ERROR "libzmq not found in ZMQ_PREFIX=${ZMQ_PREFIX}") endif() else() # bundled endif() if (ZMQ_PREFIX STREQUAL "bundled") message(STATUS "Bundling libzmq and libsodium") include(FetchContent) add_compile_definitions(ZMQ_STATIC=1) set(BUNDLE_DIR "${CMAKE_CURRENT_BINARY_DIR}/bundled") file(MAKE_DIRECTORY "${BUNDLE_DIR}/lib") include_directories(${BUNDLE_DIR}/include) list(PREPEND CMAKE_PREFIX_PATH ${BUNDLE_DIR}) set(LICENSE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/licenses") file(MAKE_DIRECTORY "${LICENSE_DIR}") # libsodium if (MSVC) set(libsodium_lib "${BUNDLE_DIR}/lib/libsodium.lib") else() set(libsodium_lib "${BUNDLE_DIR}/lib/libsodium.a") endif() FetchContent_Declare(bundled_libsodium URL ${PYZMQ_LIBSODIUM_URL} PREFIX ${BUNDLE_DIR} ) FetchContent_MakeAvailable(bundled_libsodium) configure_file("${bundled_libsodium_SOURCE_DIR}/LICENSE" "${LICENSE_DIR}/LICENSE.libsodium.txt" COPYONLY) # run libsodium build explicitly here, so it's available to libzmq next set(bundled_libsodium_include "${bundled_libsodium_SOURCE_DIR}/src/libsodium/include") if(${bundled_libsodium_POPULATED} AND NOT EXISTS "${libsodium_lib}") message(STATUS "building bundled libsodium") if (MSVC) # select vs build solution by msvc version number if (NOT PYZMQ_LIBSODIUM_VS_VERSION) if(MSVC_VERSION GREATER_EQUAL 1940) message(STATUS "Unrecognized MSVC_VERSION=${MSVC_VERSION}") set(MSVC_VERSION 1939) endif() if(MSVC_VERSION GREATER_EQUAL 1930) set(PYZMQ_LIBSODIUM_VS_VERSION "2022") elseif(MSVC_VERSION GREATER_EQUAL 1920) set(PYZMQ_LIBSODIUM_VS_VERSION "2019") elseif(MSVC_VERSION GREATER_EQUAL 1910) set(PYZMQ_LIBSODIUM_VS_VERSION "2017") else() message(FATAL_ERROR "unsupported bundling libsodium for MSVC_VERSION=${MSVC_VERSION} (need at least VS2017)") endif() endif() find_package(Vcvars REQUIRED) list(APPEND libsodium_build ${Vcvars_LAUNCHER} "msbuild" "/m" "/v:n" "/p:Configuration=Static${CMAKE_BUILD_TYPE}" "/p:Platform=${CMAKE_GENERATOR_PLATFORM}" 
"builds/msvc/vs${PYZMQ_LIBSODIUM_VS_VERSION}/libsodium.sln" ) list(APPEND libsodium_build ${PYZMQ_LIBSODIUM_MSBUILD_ARGS}) execute_process( COMMAND ${libsodium_build} WORKING_DIRECTORY ${bundled_libsodium_SOURCE_DIR} COMMAND_ECHO STDOUT # COMMAND_ERROR_IS_FATAL ANY RESULT_VARIABLE _status ) if (_status) message(FATAL_ERROR "failed to build libsodium") endif() file(GLOB_RECURSE BUILT_LIB "${bundled_libsodium_SOURCE_DIR}/**/libsodium.lib") message(STATUS "copy ${BUILT_LIB} ${libsodium_lib}") configure_file(${BUILT_LIB} ${libsodium_lib} COPYONLY) else() list(APPEND libsodium_configure ./configure --prefix=${BUNDLE_DIR} --with-pic --disable-dependency-tracking --disable-shared --enable-static ) list(APPEND libsodium_configure ${PYZMQ_LIBSODIUM_CONFIGURE_ARGS}) execute_process( COMMAND ${libsodium_configure} WORKING_DIRECTORY ${bundled_libsodium_SOURCE_DIR} COMMAND_ECHO STDOUT # COMMAND_ERROR_IS_FATAL ANY RESULT_VARIABLE _status ) # COMMAND_ERROR_IS_FATAL requires cmake 3.19, ubuntu 20.04 has 3.16 if (_status) message(FATAL_ERROR "failed to configure libsodium") endif() execute_process( COMMAND make WORKING_DIRECTORY ${bundled_libsodium_SOURCE_DIR} COMMAND_ECHO STDOUT # COMMAND_ERROR_IS_FATAL ANY RESULT_VARIABLE _status ) if (_status) message(FATAL_ERROR "failed to build libsodium") endif() execute_process( COMMAND make install WORKING_DIRECTORY ${bundled_libsodium_SOURCE_DIR} COMMAND_ECHO STDOUT # COMMAND_ERROR_IS_FATAL ANY RESULT_VARIABLE _status ) if (_status) message(FATAL_ERROR "failed to install libsodium") endif() endif() endif() # use libzmq's own cmake, so we can import the libzmq-static target # libzmq uses an ancient policy minimum, no longer supported if ("${CMAKE_POLICY_VERSION_MINIMUM}" STREQUAL "") # workaround https://github.com/zeromq/libzmq/pull/4776 if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.31") set(CMAKE_POLICY_VERSION_MINIMUM "3.31") else() set(CMAKE_POLICY_VERSION_MINIMUM "${CMAKE_VERSION_MAJOR}.${CMAKE_VERSION_MINOR}") endif() message(STATUS "Setting CMAKE_POLICY_VERSION_MINIMUM=${CMAKE_POLICY_VERSION_MINIMUM} for libzmq") endif() set(ENABLE_CURVE ON) set(ENABLE_DRAFTS ${ZMQ_DRAFT_API}) set(ENABLE_LIBSODIUM_RANDOMBYTES_CLOSE "OFF") set(WITH_LIBSODIUM ON) set(WITH_LIBSODIUM_STATIC ON) set(LIBZMQ_PEDANTIC OFF) set(LIBZMQ_WERROR OFF) set(WITH_DOC OFF) set(WITH_DOCS OFF) set(BUILD_TESTS OFF) set(BUILD_SHARED OFF) set(BUILD_STATIC ON) if(NOT MSVC) # backport check for kqueue, which is wrong in libzmq 4.3.5 # libzmq's cmake will proceed with the rest # https://github.com/zeromq/libzmq/pull/4659 include(CheckCXXSymbolExists) set(POLLER "" CACHE STRING "Choose polling system for I/O threads. 
valid values are kqueue, epoll, devpoll, pollset, poll or select [default=autodetect]") if(POLLER STREQUAL "") check_cxx_symbol_exists(kqueue "sys/types.h;sys/event.h;sys/time.h" HAVE_KQUEUE) if(HAVE_KQUEUE) set(POLLER "kqueue") endif() endif() endif() if(MSVC) set(API_POLLER "select" CACHE STRING "Set API Poller (default: select)") endif() FetchContent_Declare(bundled_libzmq URL ${PYZMQ_LIBZMQ_URL} PREFIX ${BUNDLE_DIR} ) FetchContent_MakeAvailable(bundled_libzmq) configure_file("${bundled_libzmq_SOURCE_DIR}/LICENSE" "${LICENSE_DIR}/LICENSE.zeromq.txt" COPYONLY) # target for libzmq static if (TARGET libzmq-static) set(libzmq_target "libzmq-static") else() message(FATAL_ERROR "libzmq-static target not found in bundled libzmq") endif() endif() if (NOT TARGET "${libzmq_target}" AND LIBZMQ_LIBRARY AND LIBZMQ_INCLUDE_DIR) set(libzmq_target "libzmq") # construct target from find_library results # what if it was static? add_library(libzmq SHARED IMPORTED) set_property(TARGET libzmq PROPERTY IMPORTED_LOCATION ${LIBZMQ_LIBRARY}) set_property(TARGET libzmq PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${LIBZMQ_INCLUDE_DIR}) endif() #------- building pyzmq itself ------- message(STATUS "Using Python ${Python_INTERPRETER_ID} ${Python_EXECUTABLE}") set(EXT_SRC_DIR "${CMAKE_CURRENT_BINARY_DIR}/_src") set(ZMQ_BUILDUTILS "${CMAKE_CURRENT_SOURCE_DIR}/buildutils") file(MAKE_DIRECTORY "${EXT_SRC_DIR}") if (NOT PYZMQ_BACKEND) if(Python_INTERPRETER_ID STREQUAL "PyPy") set(PYZMQ_BACKEND "cffi") else() set(PYZMQ_BACKEND "cython") endif() endif() if(PYZMQ_BACKEND STREQUAL "cffi") message(STATUS "Building CFFI backend") if(NOT Python_INTERPRETER_ID STREQUAL "PyPy") message(WARNING "Building CFFI backend on ${Python_INTERPRETER_ID}, not PyPy. This is not supported and may not work.") endif() set(ZMQ_EXT_NAME "_cffi") set(ZMQ_BACKEND_DEST "zmq/backend/cffi") set(ZMQ_C "${EXT_SRC_DIR}/${ZMQ_EXT_NAME}.c") add_custom_command( OUTPUT ${ZMQ_C} VERBATIM COMMAND "${Python_EXECUTABLE}" "${ZMQ_BUILDUTILS}/build_cffi.py" "${ZMQ_C}" ) elseif(PYZMQ_BACKEND STREQUAL "cython") message(STATUS "Building Cython backend") if(NOT Python_INTERPRETER_ID STREQUAL "Python") message(WARNING "Building Cython backend on ${Python_INTERPRETER_ID}, not CPython. This is not supported and may not work.") endif() find_program(CYTHON "cython") set(ZMQ_BACKEND_DEST "zmq/backend/cython") set(ZMQ_EXT_NAME "_zmq") set(ZMQ_C "${EXT_SRC_DIR}/${ZMQ_EXT_NAME}.c") set(ZMQ_PYX "${CMAKE_CURRENT_SOURCE_DIR}/zmq/backend/cython/${ZMQ_EXT_NAME}.py") add_custom_command( OUTPUT ${ZMQ_C} DEPENDS ${ZMQ_PYX} VERBATIM COMMAND "${Python_EXECUTABLE}" -mcython --output-file ${ZMQ_C} --module-name "zmq.backend.cython._zmq" ${ZMQ_PYX} ) else() message(FATAL_ERROR "Unsupported PYZMQ_BACKEND=${PYZMQ_BACKEND}. 
Must be 'cffi' or 'cython'.") endif() file(MAKE_DIRECTORY ${ZMQ_BACKEND_DEST}) python_add_library( ${ZMQ_EXT_NAME} MODULE WITH_SOABI ${ZMQ_C} ) if (TARGET ${libzmq_target}) message(STATUS "Linking libzmq target ${libzmq_target}") target_link_libraries(${ZMQ_EXT_NAME} PUBLIC ${libzmq_target}) if ("${libzmq_target}" STREQUAL "libzmq-static" AND NOT MSVC) # seem to need stdc++ for static libzmq on non-Windows # not sure if/when this should be libc++ or how to know target_link_libraries(${ZMQ_EXT_NAME} PUBLIC stdc++) endif() else() message(FATAL_ERROR "should have a libzmq target ${libzmq_target} to link to...") endif() target_include_directories(${ZMQ_EXT_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/zmq/utils") install(TARGETS ${ZMQ_EXT_NAME} DESTINATION "${ZMQ_BACKEND_DEST}" COMPONENT pyzmq) # add custom target so we exclude bundled targets from installation # only need this because the extension name is different for cff/cython add_custom_target(pyzmq DEPENDS ${ZMQ_EXT_NAME}) pyzmq-26.4.0/CONTRIBUTING.md000066400000000000000000000043031477374370200152320ustar00rootroot00000000000000# Testing pyzmq is tested on GitHub Actions. ![Build Status](https://github.com/zeromq/pyzmq/actions/workflows/test.yml/badge.svg)\](https://github.com/zeromq/pyzmq/actions/workflows/test.yml) ## Opening an Issue For a good bug report: 1. [Search] for existing Issues, both on GitHub and in general with Google/Stack Overflow before posting a duplicate question. 1. Update to pyzmq main, if possible, especially if you are already using git. It's possible that the bug you are about to report has already been fixed. Many things reported as pyzmq Issues are often just libzmq-related, and don't have anything to do with pyzmq itself. These are better directed to [zeromq-dev]. When making a bug report, it is helpful to tell us as much as you can about your system (such as pyzmq version, libzmq version, Python version, OS Version, how you built/installed pyzmq and libzmq, etc.) The basics: ```python import sys import zmq print("libzmq-%s" % zmq.zmq_version()) print("pyzmq-%s" % zmq.pyzmq_version()) print("Python-%s" % sys.version) ``` Which will give something like: ``` libzmq-4.3.4 pyzmq-22.3.0 Python-3.9.9 | packaged by conda-forge | (main, Dec 20 2021, 02:38:53) [Clang 11.1.0 ] ``` ### Your contributions **Pull Requests are welcome!** When you contribute to PyZMQ, your contributions are made under the same license as the file you are working on. Any new, original code should be BSD licensed. We use [pre-commit] for autoformatting, so you hopefully don't need to worry too much about style. To install pre-commit: ``` pip install pre-commit pre-commit install ``` Examples are copyright their respective authors, and BSD unless otherwise specified by the author. ### Inherited licenses in pyzmq Some code outside the core is taken from other open-source projects, and inherits that project's license. - zmq/eventloop contains some code derived from [tornado], used under the Apache 2.0 license - zmq/ssh/forward.py is from [paramiko], and inherits LGPL - perf examples are (c) iMatix, and MPL [paramiko]: http://www.lag.net/paramiko [pre-commit]: https://pre-commit.com [search]: https://github.com/zeromq/pyzmq/issues [tornado]: http://www.tornadoweb.org [zeromq-dev]: mailto:zeromq-dev@zeromq.org pyzmq-26.4.0/LICENSE.md000066400000000000000000000030111477374370200144000ustar00rootroot00000000000000BSD 3-Clause License Copyright (c) 2009-2012, Brian Granger, Min Ragan-Kelley All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pyzmq-26.4.0/MANIFEST.in000066400000000000000000000004011477374370200145320ustar00rootroot00000000000000# only need to track non-version-controlled files to add include bundled/zeromq/COPYING* graft bundled/zeromq/include graft bundled/zeromq/src graft bundled/zeromq/external/wepoll exclude bundled/zeromq/src/Makefile* exclude bundled/zeromq/src/platform.hpp pyzmq-26.4.0/README.md000066400000000000000000000053701477374370200142650ustar00rootroot00000000000000# PyZMQ: Python bindings for ØMQ This package contains Python bindings for [ZeroMQ](https://zeromq.org). ØMQ is a lightweight and fast messaging implementation. PyZMQ should work with any reasonable version of Python (≥ 3.8), as well as PyPy. PyZMQ supports libzmq ≥ 3.2.2 (including 4.x). For a summary of changes to pyzmq, see our [changelog](https://pyzmq.readthedocs.io/en/latest/changelog.html). ### ØMQ 3.x, 4.x PyZMQ fully supports the stable (not DRAFT) 3.x and 4.x APIs of libzmq, developed at [zeromq/libzmq](https://github.com/zeromq/libzmq). No code to change, no flags to pass, just build pyzmq against the latest and it should work. ## Documentation See PyZMQ's Sphinx-generated documentation [on Read the Docs](https://pyzmq.readthedocs.io) for API details, and some notes on Python and Cython development. If you want to learn about using ØMQ in general, the excellent [ØMQ Guide](http://zguide.zeromq.org/py:all) is the place to start, which has a Python version of every example. We also have some information on our [wiki](https://github.com/zeromq/pyzmq/wiki). ## Downloading Unless you specifically want to develop PyZMQ, we recommend downloading the PyZMQ source code or wheels from [PyPI](https://pypi.io/project/pyzmq/), or install with conda. You can also get the latest source code from our GitHub repository, but building from the repository will require that you install recent Cython. ## Building and installation For more detail on building pyzmq, see [our docs](https://pyzmq.readthedocs.io/en/latest/howto/build.html). 
We build wheels for macOS, Windows, and Linux, so you can get a binary on those platforms with: ``` pip install pyzmq ``` but compiling from source with `pip install pyzmq` should work in most environments. Make sure you are using the latest pip, or it may not find the right wheels. If the wheel doesn't work for some reason, or you want to force pyzmq to be compiled (this is often preferable if you already have libzmq installed and configured the way you want it), you can force installation from source with: ``` pip install --no-binary=pyzmq pyzmq ``` ## Old versions pyzmq 16 drops support Python 2.6 and 3.2. If you need to use one of those Python versions, you can pin your pyzmq version to before 16: ``` pip install 'pyzmq<16' ``` For libzmq 2.0.x, use 'pyzmq\<2.1' pyzmq-2.1.11 was the last version of pyzmq to support Python 2.5, and pyzmq ≥ 2.2.0 requires Python ≥ 2.6. pyzmq-13.0.0 introduces PyPy support via CFFI, which only supports libzmq-3.2.2 and newer. PyZMQ releases ≤ 2.2.0 matched libzmq versioning, but this is no longer the case, starting with PyZMQ 13.0.0 (it was the thirteenth release, so why not?). PyZMQ ≥ 13.0 follows semantic versioning conventions accounting only for PyZMQ itself. pyzmq-26.4.0/RELICENSE/000077500000000000000000000000001477374370200143125ustar00rootroot00000000000000pyzmq-26.4.0/RELICENSE/README.md000066400000000000000000000041041477374370200155700ustar00rootroot00000000000000# Permission to Relicense under MPLv2 or BSD pyzmq starting with 26.0.0 is fully licensed under the 3-clause Modified BSD License. A small part of the core (Cython backend only) was previously licensed under LGPLv3 for historical reasons. Permission has been granted by the contributors of the vast majority of those components to relicense under MPLv2 or BSD. This backend has been completely replaced in pyzmq 26, and the new implementation is fully licensed under BSD-3-Clause, so pyzmq is now under a single license. Original text: Most of pyzmq is licensed under [3-Clause BSD](https://opensource.org/licenses/BSD-3-Clause). For historical reasons, the 'core' of pyzmq (the low-level Cython bindings) was licensed under LGPLv3, like libzmq itself. libzmq is in the process of moving away from LGPL to the [Mozilla Public License, version 2](https://www.mozilla.org/en-US/MPL/2.0/). I'd like to take this opportunity to follow libzmq's example and also eliminate LGPL from pyzmq. For a similarly copyleft license, MPLv2 can be used for the core. However, I would prefer to update the core to follow the example of the rest of pyzmq, and adopt the 3-Clause BSD license. This directory collects grants from individuals and firms that hold copyrights in pyzmq to permit licensing the pyzmq code under the MPLv2 or BSD license. See the [0MQ Licensing Page](http://zeromq.org/area:licensing) and [libzmq relicensing effort](https://github.com/zeromq/libzmq/pull/1917) for some background information. Please create a separate file in this directory for each individual or firm holding copyright in pyzmq core, named after the individual or firm holding the copyright. Each patch must be made with a GitHub handle that is clearly associated with the copyright owner, to guarantee the identity of the signatory. Please avoid changing the files created by other individuals or firms granting a copyright license over their copyrights (if rewording is required contact them and ask them to submit an updated version). This makes it easier to verify that the license grant was made by an authorized GitHub account. 
pyzmq-26.4.0/RELICENSE/authors.py000066400000000000000000000047771477374370200163700ustar00rootroot00000000000000#!/usr/bin/env python3 """Get the authors of the LGPL-licensed subset of pyzmq (Cython bindings)""" import re from collections import defaultdict from itertools import chain from os.path import abspath, dirname, join import git here = dirname(__file__) root = dirname(abspath(here)) repo = git.Repo(root) LAST_CORE_COMMIT = 'db1d4d2f2cdd97955a7db620e667a834920a938a' PRE_CORE_COMMIT = 'd4e3453b012962fc9bf6ed621019b395f968340c' EXCLUDED = { # docstring only: 'c2db4af3c591aae99bf437a223d97b30ecbfcd38', '7b1ac07a3bbffe70af3adcd663c0cbe6f2a724f7', 'ce97f46881168c4c05d7885dc48a430c520a9683', '14c16a97ffa95bf645ab27bf5b06c3eabda30e5e', # accidental swapfile '93150feb4a80712c6a379f79d561fbc87405ade8', } def get_all_commits(): return chain( repo.iter_commits('master', 'zmq/backend/cython'), repo.iter_commits(LAST_CORE_COMMIT, 'zmq/core'), repo.iter_commits(PRE_CORE_COMMIT, ['zmq/_zmq.*']), ) mailmap = {} email_names = {} pat = re.compile(r'\<([^\>]+)\>') with open(join(root, '.mailmap')) as f: for line in f: if not line.strip(): continue dest, src = pat.findall(line) mailmap[src] = dest email_names[dest] = line[: line.index('<')].strip() author_commits = defaultdict(list) for commit in get_all_commits(): # exclude some specific commits (e.g. docstring typos) if commit.hexsha in EXCLUDED: continue # exclude commits that only touch generated pxi files in backend/cython backend_cython_files = { f for f in commit.stats.files if f.startswith('zmq/backend/cython') } if backend_cython_files and backend_cython_files.issubset( { 'zmq/backend/cython/constant_enums.pxi', 'zmq/backend/cython/constants.pxi', } ): continue email = commit.author.email email = mailmap.get(email, email) name = email_names.setdefault(email, commit.author.name) author_commits[email].append(commit) def sort_key(email_commits): commits = email_commits[1] return (len(commits), commits[0].authored_date) for email, commits in sorted(author_commits.items(), key=sort_key, reverse=True): if len(commits) <= 2: msg = '{} ({})'.format( ' '.join(c.hexsha[:12] for c in commits), commits[0].authored_datetime.year, ) else: msg = f"{len(commits)} commits ({commits[-1].authored_datetime.year}-{commits[0].authored_datetime.year})" print(f"- [ ] {email_names[email]} {email}: {msg}") pyzmq-26.4.0/RELICENSE/chrislaws.md000066400000000000000000000013211477374370200166300ustar00rootroot00000000000000# Permission to Relicense under MPLv2 or any other OSI approved license chosen by the current PyZMQ BDFL This is a statement by Chris Laws that grants permission to relicense its copyrights in the Python ZeroMQ bindings (pyzmq) under the Mozilla Public License v2 (MPLv2) or any other Open Source Initiative approved license chosen by the current PyZMQ BDFL (Benevolent Dictator for Life). A portion of the commits made by the GitHub handle "claws", with commit author "Chris Laws clawsicus@gmail.com", are copyright of Chris Laws. This document hereby grants the pyzmq project team permission to relicense pyzmq, including all past, present, and future contributions of the author listed above. Chris Laws 2017/07/18 pyzmq-26.4.0/RELICENSE/ellisonbg.md000066400000000000000000000013361477374370200166150ustar00rootroot00000000000000# Permission to Relicense under MPLv2 or any other OSI approved license chosen by the current PyZMQ BDFL This is a statement by Brian E. 
Granger that grants permission to relicense its copyrights in the Python ZeroMQ bindings (pyzmq) under the Mozilla Public License v2 (MPLv2) or any other Open Source Initiative approved license chosen by the current PyZMQ BDFL (Benevolent Dictator for Life). A portion of the commits made by the GitHub handle "ellisonbg", with commit author "Brian E. Granger", are copyright of Brian E. Granger. This document hereby grants the pyzmq project team permission to relicense pyzmq, including all past, present, and future contributions of the author listed above. Brian Ellison Granger 2017/07/18 pyzmq-26.4.0/RELICENSE/frankwiles.md000066400000000000000000000011451477374370200170020ustar00rootroot00000000000000This is a statement by Frank Wiles that grants permission to relicense its copyrights in the Python ZeroMQ bindings (pyzmq) under the Mozilla Public License v2 (MPLv2) or any other Open Source Initiative approved license chosen by the current PyZMQ BDFL (Benevolent Dictator for Life). A portion of the commits made by the GitHub handle "frankwiles", with commit author "Frank Wiles ", are copyright of Frank Wiles . This document hereby grants the libzmq project team to relicense pyzmq, including all past, present and future contributions of the author listed above. Frank Wiles 2017/07/17 pyzmq-26.4.0/RELICENSE/juliantaylor.md000066400000000000000000000013461477374370200173550ustar00rootroot00000000000000# Permission to Relicense under MPLv2 or any other OSI approved license chosen by the current PyZMQ BDFL This is a statement by Julian Taylor that grants permission to relicense its copyrights in the Python ZeroMQ bindings (pyzmq) under the Mozilla Public License v2 (MPLv2) or any other Open Source Initiative approved license chosen by the current PyZMQ BDFL (Benevolent Dictator for Life). A portion of the commits made by the GitHub handle "juliantaylor", with commit author "Julian Taylor ", are copyright of Julian Taylor. This document hereby grants the libzmq project team to relicense libzmq, including all past, present and future contributions of the author listed above. Julian Taylor 2017/07/19 pyzmq-26.4.0/RELICENSE/ledgerx.md000066400000000000000000000011301477374370200162610ustar00rootroot00000000000000# Permission to Relicense under MPLv2 or any other OSI approved license chosen by the current PyZMQ BDFL This is a statement by LedgerX LLC. that grants permission to relicense its copyrights in the Python ZeroMQ bindings (pyzmq) under the Mozilla Public License v2 (MPLv2). A portion of the commits made by the GitHub handle "amrali", with commit author "Amr Ali ", are copyright of LedgerX LLC. This document hereby grants the pyzmq project team permission to relicense pyzmq, including all past, present, and future contributions of the author listed above. Amr Ali 2017/09/11 pyzmq-26.4.0/RELICENSE/lothiraldan.md000066400000000000000000000013331477374370200171350ustar00rootroot00000000000000# Permission to Relicense under MPLv2 or any other OSI approved license chosen by the current PyZMQ BDFL This is a statement by Boris Feld that grants permission to relicense its copyrights in the Python ZeroMQ bindings (pyzmq) under the Mozilla Public License v2 (MPLv2) or any other Open Source Initiative approved license chosen by the current PyZMQ BDFL (Benevolent Dictator for Life). A portion of the commits made by the GitHub handle "Lothiraldan", with commit author "FELD Boris ", are copyright of Boris Feld. 
This document hereby grants the pyzmq project team permission to relicense pyzmq, including all past, present, and future contributions of the author listed above. Boris Feld 2017/07/18 pyzmq-26.4.0/RELICENSE/minrk.md000066400000000000000000000013231477374370200157530ustar00rootroot00000000000000# Permission to Relicense under MPLv2 or any other OSI approved license chosen by the current PyZMQ BDFL This is a statement by Min Ragan-Kelley that grants permission to relicense its copyrights in the libzmq C++ library (ZeroMQ) under the Mozilla Public License v2 (MPLv2) or any other Open Source Initiative approved license chosen by the current PyZMQ BDFL (Benevolent Dictator for Life). A portion of the commits made by the GitHub handle "minrk", with commit author "Min RK benjaminrk@gmail.com", are copyright of Min Ragan-Kelley. This document hereby grants the libzmq project team to relicense libzmq, including all past, present and future contributions of the author listed above. Min Ragan-Kelley 2017/07/17 pyzmq-26.4.0/RELICENSE/takluyver.md000066400000000000000000000013321477374370200166610ustar00rootroot00000000000000# Permission to Relicense under MPLv2 or any other OSI approved license chosen by the current PyZMQ BDFL This is a statement by Thomas Kluyver that grants permission to relicense its copyrights in the Python ZeroMQ bindings (pyzmq) under the Mozilla Public License v2 (MPLv2) or any other Open Source Initiative approved license chosen by the current PyZMQ BDFL (Benevolent Dictator for Life). A portion of the commits made by the GitHub handle "takluyver", with commit author "Thomas Kluyver ", are copyright of Thomas Kluyver. This document hereby grants the libzmq project team to relicense libzmq, including all past, present and future contributions of the author listed above. Thomas Kluyver 2017/07/18 pyzmq-26.4.0/RELICENSE/templates/000077500000000000000000000000001477374370200163105ustar00rootroot00000000000000pyzmq-26.4.0/RELICENSE/templates/relicense-template-bsd.txt000066400000000000000000000011501477374370200233760ustar00rootroot00000000000000# Permission to Relicense under BSD This is a statement by {{ name of company / name of individual }} that grants permission to relicense its copyrights in the Python ZeroMQ bindings (pyzmq) under the 3-Clause BSD License (BSD3). A portion of the commits made by the Github handle "{{github username}}", with commit author "{{github commit author}}", are copyright of {{ name }}. This document hereby grants the pyzmq project team permission to relicense pyzmq, including all past, present, and future contributions of the author listed above. {{ Full Name }} {{ creation date of document (format: yyyy/mm/dd) }} pyzmq-26.4.0/RELICENSE/templates/relicense-template-mplv2-any-osi.txt000066400000000000000000000014511477374370200252470ustar00rootroot00000000000000# Permission to Relicense under MPLv2 or any other OSI approved license chosen by the current PyZMQ BDFL This is a statement by {{ name of company / name of individual }} that grants permission to relicense its copyrights in the Python ZeroMQ bindings (pyzmq) under the Mozilla Public License v2 (MPLv2) or any other Open Source Initiative approved license chosen by the current PyZMQ BDFL (Benevolent Dictator for Life). A portion of the commits made by the GitHub handle "{{github username}}", with commit author "{{github commit author}}", are copyright of {{ name }} . 
This document hereby grants the pyzmq project team permission to relicense pyzmq, including all past, present, and future contributions of the author listed above. {{ Full Name }} {{ creation date of document (format: yyyy/mm/dd) }} pyzmq-26.4.0/RELICENSE/templates/relicense-template-mplv2.txt000066400000000000000000000011601477374370200236670ustar00rootroot00000000000000# Permission to Relicense under MPLv2 This is a statement by {{ name of company / name of individual }} that grants permission to relicense its copyrights in the Python ZeroMQ bindings (pyzmq) under the Mozilla Public License v2 (MPLv2). A portion of the commits made by the GitHub handle "{{github username}}", with commit author "{{github commit author}}", are copyright of {{ name }}. This document hereby grants the pyzmq project team permission to relicense pyzmq, including all past, present, and future contributions of the author listed above. {{ Full Name }} {{ creation date of document (format: yyyy/mm/dd) }} pyzmq-26.4.0/SECURITY.md000066400000000000000000000030051477374370200145700ustar00rootroot00000000000000# Security Policies and Procedures This document outlines security procedures and general policies for the pyzmq project. - [Reporting a Bug](#reporting-a-bug) - [Disclosure Policy](#disclosure-policy) - [Comments on this Policy](#comments-on-this-policy) ## Reporting a Bug Thank you for improving the security of pyzmq. We appreciate your efforts and responsible disclosure and will make every effort to acknowledge your contributions. Report security bugs by emailing the lead maintainer at benjaminrk AT gmail.com. The lead maintainer will acknowledge your email as promptly as possible, and will follow up with a more detailed response. When the issue is confirmed, a GitHub security advisory will be created to discuss resolutions. We will endeavor to keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance. Report security bugs in libzmq itself or other packages to the mainainers of those packages. ## Disclosure Policy When the security team receives a security bug report, they will assign it to a primary handler. This person will coordinate the fix and release process, involving the following steps: - Confirm the problem and determine the affected versions. - Audit code to find any potential similar problems. - Prepare fixes for all releases still under maintenance. These fixes will be released as fast as possible to npm. ## Comments on this Policy If you have suggestions on how this process could be improved please submit a pull request. 
pyzmq-26.4.0/Vagrantfile000066400000000000000000000020561477374370200151710ustar00rootroot00000000000000#e -*- mode: ruby -*- # vi: set ft=ruby : # This will setup a clean Ubuntu1404 LTS env $script = <" ], "text/plain": [ "alt.Chart(...)" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "chart = crossover(thr, \"throughput\")\n", "chart.title = \"Throughput\"\n", "chart" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Compare the maximum throughput for small messages:" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "zero-copy max msgs/sec: ~2.1e+05\n", " copy max msgs/sec: ~4.2e+05\n" ] } ], "source": [ "zero_copy_max = thr.where(~thr[\"copy\"]).throughput.max()\n", "copy_max = thr.where(thr[\"copy\"]).throughput.max()\n", "print(f\"zero-copy max msgs/sec: ~{zero_copy_max:.1e}\")\n", "print(f\" copy max msgs/sec: ~{copy_max:.1e}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "So that's a ~5x penalty when sending 100B messages.\n", "It's still 40k msgs/sec, which isn't catastrophic,\n", "but if you want to send small messages as fast as possible,\n", "you can get closer to 250-500k msgs/sec if you skip the zero-copy logic.\n", "\n", "We can see the relative gains of zero-copy by plotting zero-copy performance\n", "normalized to message-copying performance" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "ename": "NameError", "evalue": "name 'pd' is not defined", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[6], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m chart \u001b[38;5;241m=\u001b[39m \u001b[43mrelative\u001b[49m\u001b[43m(\u001b[49m\u001b[43mthr\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mthroughput\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2\u001b[0m chart\u001b[38;5;241m.\u001b[39mtitle \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mZero-copy Throughput (relative)\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3\u001b[0m chart\n", "Cell \u001b[0;32mIn[1], line 24\u001b[0m, in \u001b[0;36mrelative\u001b[0;34m(data, column, yscale)\u001b[0m\n\u001b[1;32m 20\u001b[0m no_copy \u001b[38;5;241m=\u001b[39m data[\u001b[38;5;241m~\u001b[39mdata[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcopy\u001b[39m\u001b[38;5;124m\"\u001b[39m]]\n\u001b[1;32m 21\u001b[0m reference \u001b[38;5;241m=\u001b[39m copy_mean[no_copy[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msize\u001b[39m\u001b[38;5;124m\"\u001b[39m]]\n\u001b[1;32m 22\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m (\n\u001b[1;32m 23\u001b[0m alt\u001b[38;5;241m.\u001b[39mChart(\n\u001b[0;32m---> 24\u001b[0m \u001b[43mpd\u001b[49m\u001b[38;5;241m.\u001b[39mDataFrame(\n\u001b[1;32m 25\u001b[0m {\n\u001b[1;32m 26\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msize\u001b[39m\u001b[38;5;124m\"\u001b[39m: no_copy[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msize\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[1;32m 27\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mno-copy speedup\u001b[39m\u001b[38;5;124m\"\u001b[39m: no_copy[column] \u001b[38;5;241m/\u001b[39m reference\u001b[38;5;241m.\u001b[39marray,\n\u001b[1;32m 28\u001b[0m 
}\n\u001b[1;32m 29\u001b[0m )\n\u001b[1;32m 30\u001b[0m )\n\u001b[1;32m 31\u001b[0m \u001b[38;5;241m.\u001b[39mmark_point()\n\u001b[1;32m 32\u001b[0m \u001b[38;5;241m.\u001b[39mencode(\n\u001b[1;32m 33\u001b[0m x\u001b[38;5;241m=\u001b[39malt\u001b[38;5;241m.\u001b[39mX(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msize\u001b[39m\u001b[38;5;124m\"\u001b[39m, title\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msize (B)\u001b[39m\u001b[38;5;124m\"\u001b[39m)\u001b[38;5;241m.\u001b[39mscale(\u001b[38;5;28mtype\u001b[39m\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlog\u001b[39m\u001b[38;5;124m\"\u001b[39m),\n\u001b[1;32m 34\u001b[0m y\u001b[38;5;241m=\u001b[39malt\u001b[38;5;241m.\u001b[39mY(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mno-copy speedup\u001b[39m\u001b[38;5;124m\"\u001b[39m, title\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m)\u001b[38;5;241m.\u001b[39mscale(\u001b[38;5;28mtype\u001b[39m\u001b[38;5;241m=\u001b[39myscale),\n\u001b[1;32m 35\u001b[0m )\n\u001b[1;32m 36\u001b[0m )\n", "\u001b[0;31mNameError\u001b[0m: name 'pd' is not defined" ] } ], "source": [ "chart = relative(thr, \"throughput\")\n", "chart.title = \"Zero-copy Throughput (relative)\"\n", "chart" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "So that's ~5x penalty for using zero-copy on 100B messages\n", "and a ~2x win for using zero-copy in ~500kB messages.\n", "THe crossover where the cost balances the benefit is in the vicinity of ~64kB.\n", "\n", "This is why pyzmq 17 introduces the `zmq.COPY_THRESHOLD` behavior,\n", "which sents a bound where `copy=False` can always be used,\n", "and the zero-copy machinery will only be triggered for frames that are larger than this threshold.\n", "The default for zmq.COPY_THRESHOLD in pyzmq-17.0 is 64kB,\n", "based on these experiments." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Send-only throughput" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "So far, we've only been measuring the time it takes to actually deliver all of those messages (total application throughput).\n", "\n", "One of the big wins for zero-copy in pyzmq is that the the local `send` action is much less expensive for large messages because there is no `memcpy` in the handoff to zmq.\n", "Plotting only the time it takes to *send* messages shows a much bigger win,\n", "but similar crossover point." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "chart = crossover(thr, \"sends\")\n", "chart.title = \"Messages sent/sec\"\n", "chart" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Scaled plot, showing ratio of zero-copy to copy throughput performance:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "chart = relative(thr, \"sends\", yscale=\"log\")\n", "chart.title = \"Zero-copy sends/sec (relative speedup)\"\n", "chart" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The `socket.send` calls for ~1MB messages is ~20x faster with zero-copy than copy,\n", "but it's also ~10x *slower* for very small messages.\n", "\n", "Taking that into perspective, the penalty for zero-copy is ~10 µs per send:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "copy_small = 1e6 / thr[thr[\"copy\"] * (thr[\"size\"] == thr[\"size\"].min())][\"sends\"].mean()\n", "nocopy = 1e6 / thr[~thr[\"copy\"]][\"sends\"]\n", "penalty = nocopy - copy_small\n", "print(f\"Small copying send : {copy_small:.2f}µs\")\n", "print(f\"Small zero-copy send: {nocopy.mean():.2f}µs ± {nocopy.std():.2f}µs\")\n", "print(f\"Penalty : [{penalty.min():.2f}µs - {penalty.max():.2f}µs]\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "which is a pretty big deal for small sends that only take 2µs, but nothing for 1MB sends, where the memcpy can take almost a millisecond:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "copy_big = 1e6 / thr[thr[\"copy\"] * (thr[\"size\"] == thr[\"size\"].max())][\"sends\"].mean()\n", "print(f\"Big copying send ({thr['size'].max() / 1e6:.0f} MB): {copy_big:.2f}µs\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Latency\n", "\n", "Latency tests measure REQ-REP request/reply cycles, waiting for a reply before sending the next request.\n", "This more directly measures the cost of sending and receiving a single message,\n", "removing any instance of queuing up multiple sends in the background.\n", "\n", "This differs from the throughput test, where many messages are in flight at once.\n", "This is significant because much of the performance cost of zero-copy is in\n", "contention between the garbage collection thread and the main thread.\n", "If garbage collection events fire when the main thread is idle waiting for a message,\n", "this has ~no extra cost." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "with open(\"lat.pickle\", \"rb\") as f:\n", " lat = pickle.load(f)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "chart = crossover(lat, \"latency\", ylabel=\"µs\")\n", "chart.title = \"Latency (µs)\"\n", "chart" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "chart = relative(lat, \"latency\")\n", "chart.title = \"Relative increase in latency zero-copy / copy\"\n", "chart" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "For the latency test, we see that there is much lower overhead to the zero-copy machinery when there are few messages in flight.\n", "This is expected, because much of the performance cost comes from thread contention when the gc thread is working hard to keep up with the freeing of messages that zmq is done with.\n", "\n", "The result is a much lower penalty for zero-copy of small messages." 
] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.13" }, "widgets": { "application/vnd.jupyter.widget-state+json": { "state": {}, "version_major": 2, "version_minor": 0 } } }, "nbformat": 4, "nbformat_minor": 4 } pyzmq-26.4.0/perf/perf.py000066400000000000000000000142671477374370200152550ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. # # Some original test code Copyright (c) 2007-2010 iMatix Corporation, # Used under MPL-2.0 import argparse import time from multiprocessing import Process try: now = time.monotonic except AttributeError: now = time.time import zmq # disable copy threshold for benchmarking zmq.COPY_THRESHOLD = 0 def parse_args(argv=None): parser = argparse.ArgumentParser(description='Run a zmq performance test') parser.add_argument( '-p', '--poll', action='store_true', help='use a zmq Poller instead of raw send/recv', ) parser.add_argument( '--no-copy', action='store_false', dest='copy', help='enable zero-copy transfer (potentially faster for large messages)', ) parser.add_argument( '-s', '--size', type=int, default=1024, help='size (in bytes) of the test message', ) parser.add_argument( '-n', '--count', type=int, default=1024, help='number of test messages to send' ) parser.add_argument( '--url', dest='url', type=str, default='tcp://127.0.0.1:5555', help='the zmq URL on which to run the test', ) parser.add_argument( dest='test', nargs='?', type=str, default='lat', choices=['lat', 'thr'], help='which test to run', ) return parser.parse_args(argv) def latency_echo(url, count, size=None, poll=False, copy=True, quiet=False): """echo messages on a REP socket Should be started before `latency` """ ctx = zmq.Context() s = ctx.socket(zmq.REP) if poll: p = zmq.Poller() p.register(s) s.bind(url) block = zmq.NOBLOCK if poll else 0 for i in range(count + 1): if poll: p.poll() msg = s.recv(block, copy=copy) if poll: p.poll() s.send(msg, block, copy=copy) msg = s.recv() assert msg == b'done' s.close() ctx.term() def latency(url, count, size, poll=False, copy=True, quiet=False): """Perform a latency test""" ctx = zmq.Context() s = ctx.socket(zmq.REQ) s.setsockopt(zmq.LINGER, -1) s.connect(url) if poll: p = zmq.Poller() p.register(s) msg = b' ' * size block = zmq.NOBLOCK if poll else 0 # trigger one roundtrip before starting the timer s.send(msg) s.recv() start = now() for i in range(0, count): if poll: res = p.poll() assert res[0][1] & zmq.POLLOUT s.send(msg, block, copy=copy) if poll: res = p.poll() assert res[0][1] & zmq.POLLIN msg = s.recv(block, copy=copy) assert len(msg) == size elapsed = now() - start s.send(b'done') latency = 1e6 * elapsed / (count * 2.0) if not quiet: print(f"message size : {size:8d} [B]") print(f"roundtrip count: {count:8d} [msgs]") print(f"mean latency : {latency:12.3f} [µs]") print(f"test time : {elapsed:12.3f} [s]") ctx.destroy() return latency def thr_sink(url, count, size, poll=False, copy=True, quiet=False): """send a bunch of messages on a PUSH socket""" ctx = zmq.Context() s = ctx.socket(zmq.ROUTER) s.RCVHWM = 0 # Add your socket options here. # For example ZMQ_RATE, ZMQ_RECOVERY_IVL and ZMQ_MCAST_LOOP for PGM. 
if poll: p = zmq.Poller() p.register(s) s.bind(url) msg = s.recv_multipart() assert msg[1] == b'BEGIN', msg count = int(msg[2].decode('ascii')) s.send_multipart(msg) flags = zmq.NOBLOCK if poll else 0 for i in range(count): if poll: res = p.poll() assert res[0][1] & zmq.POLLIN msg = s.recv_multipart(flags=flags, copy=copy) s.send_multipart([msg[0], b'DONE']) s.close() ctx.term() def throughput(url, count, size, poll=False, copy=True, quiet=False): """recv a bunch of messages on a PULL socket Should be started before `pusher` """ ctx = zmq.Context() s = ctx.socket(zmq.DEALER) s.SNDHWM = 0 # Add your socket options here. # For example ZMQ_RATE, ZMQ_RECOVERY_IVL and ZMQ_MCAST_LOOP for PGM. if poll: p = zmq.Poller() p.register(s, zmq.POLLOUT) s.connect(url) data = b' ' * size flags = zmq.NOBLOCK if poll else 0 s.send_multipart([b'BEGIN', str(count).encode('ascii')]) # Wait for the other side to connect. msg = s.recv_multipart() assert msg[0] == b'BEGIN' start = now() for i in range(count): if poll: res = p.poll() assert res[0][1] & zmq.POLLOUT s.send(data, flags=flags, copy=copy) sent = now() # wait for receiver reply = s.recv_multipart() elapsed = now() - start assert reply[0] == b'DONE' send_only = sent - start send_throughput = count / send_only throughput = count / elapsed megabits = throughput * size * 8 / 1e6 if not quiet: print(f"message size : {size:8d} [B]") print(f"message count : {count:8d} [msgs]") print(f"send only : {send_throughput:8.0f} [msg/s]") print(f"mean throughput: {throughput:8.0f} [msg/s]") print(f"mean throughput: {megabits:12.3f} [Mb/s]") print(f"test time : {elapsed:12.3f} [s]") ctx.destroy() return (send_throughput, throughput) def do_run(test, **kwargs): """Do a single run""" if test == 'lat': bg_func = latency_echo fg_func = latency elif test == 'thr': bg_func = thr_sink fg_func = throughput bg = Process(target=bg_func, kwargs=kwargs) bg.start() result = fg_func(**kwargs) bg.join() return result def main(): args = parse_args() tic = time.time() do_run( args.test, url=args.url, size=args.size, count=args.count, poll=args.poll, copy=args.copy, ) toc = time.time() if (toc - tic) < 3: print("For best results, tests should take at least a few seconds.") if __name__ == '__main__': main() pyzmq-26.4.0/pyproject.toml000066400000000000000000000133731477374370200157240ustar00rootroot00000000000000# PEP 621 build info [build-system] requires = [ "cffi; implementation_name == 'pypy'", "cython>=3.0.0; implementation_name != 'pypy'", "packaging", "scikit-build-core>=0.10", ] build-backend = "scikit_build_core.build" # Project metadata # ref: https://setuptools.pypa.io/en/latest/userguide/pyproject_config.html [project] name = "pyzmq" version = "26.4.0" authors = [ { name = "PyZMQ Contributors", email = "zeromq-dev@lists.zeromq.org" }, { name = "Brian E. 
Granger" }, { name = "Min Ragan-Kelley" }, ] license = { file = "LICENSE.md" } requires-python = ">=3.8" classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "Intended Audience :: System Administrators", "License :: OSI Approved :: BSD License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Topic :: System :: Networking", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ] dependencies = ["cffi; implementation_name == 'pypy'"] description = "Python bindings for 0MQ" readme = "README.md" [project.urls] Homepage = "https://pyzmq.readthedocs.org" Documentation = "https://pyzmq.readthedocs.org" Source = "https://github.com/zeromq/pyzmq" Tracker = "https://github.com/zeromq/pyzmq/issues" [tool.scikit-build] wheel.packages = ["zmq"] wheel.license-files = ["licenses/LICENSE*"] # 3.15 is required by scikit-build-core cmake.version = ">=3.15" # only build/install the pyzmq component build.targets = ["pyzmq"] install.components = ["pyzmq"] [[tool.scikit-build.overrides]] if.env.PYZMQ_BACKEND = "cffi" build.requires = ["cffi"] [tool.ruff] [tool.ruff.format] exclude = [ "buildutils/templates/*", "zmq/eventloop/minitornado/*", ] quote-style = "preserve" [tool.ruff.lint.isort] known-first-party = ["zmq", "zmq_test_utils"] [tool.ruff.lint] select = [ "E", "F", "UP", "I", ] ignore = [ "E501", # line length (formatter is responsible) "E721", # compare types "F841", # unused variables ] exclude = ["buildutils/templates/*"] [tool.ruff.lint.per-file-ignores] "__init__.py" = ["F4", "E4"] "__init__.pyi" = ["F4", "E4"] "tests/*" = ["E4", "F4"] "docs/source/conf.py" = ["E4"] "zmq/eventloop/*" = ["E402"] "zmq/ssh/forward.py" = ["E"] # no longer used autoformatters, linters: [tool.autoflake] ignore-init-module-imports = true remove-all-unused-imports = true remove-duplicate-keys = true # remove-unused-variables = true [tool.black] skip-string-normalization = true exclude = "zmq/eventloop/minitornado|docs/source/conf.py" [tool.isort] profile = "black" multi_line_output = 3 skip = ["zmq/__init__.py"] [tool.tbump] # Uncomment this if your project is hosted on GitHub: github_url = "https://github.com/zeromq/pyzmq" [tool.tbump.version] current = "26.4.0" regex = ''' (?P\d+) \. (?P\d+) \. (?P\d+) (?P
((a|b|rc|)\d+)|.dev\d*|)
  '''

[tool.tbump.git]
message_template = "Bump to {new_version}"
tag_template = "v{new_version}"

# For each file to patch, add a [[tool.tbump.file]] config
# section containing the path of the file, relative to the
# pyproject.toml location.
[[tool.tbump.file]]
src = "pyproject.toml"
search = 'version = "{current_version}"'

[[tool.tbump.file]]
src = "zmq/sugar/version.py"
search = '__version__: str = "{current_version}"'

[tool.cibuildwheel]
build-verbosity = "1"
enable = ["cpython-freethreading", "pypy"]
test-requires = ["pytest>=6", "importlib_metadata"]
test-command = "pytest -vsx {package}/tools/test_wheel.py"

[tool.cibuildwheel.linux]
before-all = "bash tools/install_libzmq.sh"
manylinux-x86_64-image = "manylinux2014"
manylinux-i686-image = "manylinux2014"
manylinux-aarch64-image = "manylinux2014"
musllinux-aarch64-image = "musllinux_1_1"
musllinux-i686-image = "musllinux_1_1"
musllinux-x86_64-image = "musllinux_1_1"

[tool.cibuildwheel.linux.environment]
ZMQ_PREFIX = "/usr/local"
CFLAGS = "-Wl,-strip-all"
CXXFLAGS = "-Wl,-strip-all"

[tool.cibuildwheel.macos]
before-all = "bash tools/install_libzmq.sh"
repair-wheel-command = "delocate-wheel --sanitize-rpaths --require-archs {delocate_archs} -w {dest_dir} -v {wheel}"

[tool.cibuildwheel.macos.environment]
ZMQ_PREFIX = "/tmp/zmq"
MACOSX_DEPLOYMENT_TARGET = "10.15"

[tool.cibuildwheel.windows]
before-all = "python buildutils/bundle.py licenses"
# free-threaded doesn't seem to work on Windows
enable = ["pypy"]
repair-wheel-command = """\
    delvewheel repair \
        -v \
        --wheel-dir={dest_dir} \
        {wheel} \
"""

[tool.cibuildwheel.windows.config-settings]
"cmake.define.ZMQ_PREFIX" = "bundled"
# statically link MSVCP
# see https://github.com/zeromq/pyzmq/issues/2012
# and https://github.com/matplotlib/matplotlib/pull/28687
"cmake.define.CMAKE_MSVC_RUNTIME_LIBRARY" = "MultiThreaded"
"cmake.define.CMAKE_SHARED_LINKER_FLAGS" = "ucrt.lib;vcruntime.lib;/nodefaultlib:libucrt.lib;/nodefaultlib:libvcruntime.lib"
# disable IPC/epoll on Windows
# due to https://github.com/zeromq/pyzmq/issues/1981
"cmake.define.ZMQ_HAVE_IPC" = "OFF"
"cmake.define.POLLER" = "select"


# manylinux2010 for (less) old cp37-9, pp37-8
[[tool.cibuildwheel.overrides]]
select = "cp3{7,8,9}-* pp3{7,8}-*"
manylinux-x86_64-image = "manylinux2010"
manylinux-i686-image = "manylinux2010"

# note: manylinux_2_28 builds are added
# in .github/workflows/wheels.yml

[[tool.cibuildwheel.overrides]]
select = "cp313t-*"
build-frontend = "pip; args: -v --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple"
pyzmq-26.4.0/pytest.ini000066400000000000000000000006761477374370200150430ustar00rootroot00000000000000[pytest]
addopts = --durations 10 --import-mode=importlib
markers =
    large: these tests use a lot of memory
    new_console: these tests create a new console
    wheel: these tests are for installs from a wheel, not dev-installs
# import-mode=importlib doesn't put testpaths on sys.path
pythonpath = tests
testpaths =
    tests
# automatically run coroutine tests with asyncio
asyncio_mode = auto
asyncio_default_fixture_loop_scope = function
pyzmq-26.4.0/test-requirements.txt000066400000000000000000000014001477374370200172350ustar00rootroot00000000000000black; platform_python_implementation != "PyPy"
# gevent wheels on mac cause Illegal Instruction
codecov
coverage>=7.1
cython; platform_python_implementation != "PyPy" # required for Cython tests
cython>=3.0.0b3; platform_python_implementation != "PyPy" and python_version >= "3.12" # required for Cython tests
flake8
gevent; platform_python_implementation != "PyPy" and sys_platform != "win32" and sys_platform != "darwin" and python_version < "3.11"
mypy; platform_python_implementation != "PyPy"
pymongo
pytest
pytest-asyncio>=0.17
# pytest-cov 2.11 requires coverage 5, which still doesn't work with Cython
pytest-cov==2.10.*
pytest-rerunfailures
setuptools; platform_python_implementation != "PyPy" # required for Cython tests after distutils deprecation
tornado
pyzmq-26.4.0/tests/000077500000000000000000000000001477374370200141435ustar00rootroot00000000000000pyzmq-26.4.0/tests/conftest.py000066400000000000000000000126741477374370200163540ustar00rootroot00000000000000"""pytest configuration and fixtures"""

import asyncio
import inspect
import os
import signal
import time
from contextlib import contextmanager
from functools import partial
from threading import Thread

try:
    import tornado
    from tornado import version_info
except ImportError:
    tornado = None
else:
    if version_info < (5,):
        tornado = None
    from tornado.ioloop import IOLoop

import pytest

import zmq
import zmq.asyncio

test_timeout_seconds = int(os.environ.get("ZMQ_TEST_TIMEOUT") or 0)
teardown_timeout = 10


def pytest_collection_modifyitems(items):
    """This function is automatically run by pytest passing all collected test
    functions.
    We use it to add asyncio marker to all async tests and assert we don't use
    test functions that are async generators which wouldn't make sense.
    It is no longer required with pytest-asyncio >= 0.17
    """
    for item in items:
        if inspect.iscoroutinefunction(item.obj):
            item.add_marker('asyncio')
        assert not inspect.isasyncgenfunction(item.obj)


@pytest.fixture
async def io_loop(event_loop, request):
    """Create tornado io_loop on current asyncio event loop"""
    if tornado is None:
        pytest.skip()
    io_loop = IOLoop.current()
    assert asyncio.get_event_loop() is event_loop
    assert io_loop.asyncio_loop is event_loop

    def _close():
        io_loop.close(all_fds=True)

    request.addfinalizer(_close)
    return io_loop


def term_context(ctx, timeout):
    """Terminate a context with a timeout"""
    t = Thread(target=ctx.term)
    t.daemon = True
    t.start()
    t.join(timeout=timeout)
    if t.is_alive():
        # reset Context.instance, so the failure to term doesn't corrupt subsequent tests
        zmq.sugar.context.Context._instance = None
        raise RuntimeError(
            f"context {ctx} could not terminate, open sockets likely remain in test"
        )


@pytest.fixture
def event_loop():
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()
    # make sure selectors are cleared
    assert dict(zmq.asyncio._selectors) == {}


@pytest.fixture
def sigalrm_timeout():
    """Set timeout using SIGALRM

    Avoids infinite hang in context.term for an unclean context,
    raising an error instead.
    """
    if not hasattr(signal, "SIGALRM") or not test_timeout_seconds:
        return

    def _alarm_timeout(*args):
        raise TimeoutError(f"Test did not complete in {test_timeout_seconds} seconds")

    signal.signal(signal.SIGALRM, _alarm_timeout)
    signal.alarm(test_timeout_seconds)


@pytest.fixture
def Context():
    """Context class fixture

    Override in modules to specify a different class (e.g. zmq.green)
    """
    return zmq.Context


@pytest.fixture
def contexts(sigalrm_timeout):
    """Fixture to track contexts used in tests

    For cleanup purposes
    """
    contexts = set()
    yield contexts
    for ctx in contexts:
        try:
            term_context(ctx, teardown_timeout)
        except Exception:
            # reset Context.instance, so the failure to term doesn't corrupt subsequent tests
            zmq.sugar.context.Context._instance = None
            raise


@pytest.fixture
def context(Context, contexts):
    """Fixture for shared context"""
    ctx = Context()
    contexts.add(ctx)
    return ctx


@pytest.fixture
def sockets(contexts):
    sockets = []
    yield sockets
    # ensure any tracked sockets get their contexts cleaned up
    for socket in sockets:
        contexts.add(socket.context)

    # close sockets
    for socket in sockets:
        socket.close(linger=0)


@pytest.fixture
def socket(context, sockets):
    """Fixture to create sockets, while tracking them for cleanup"""

    def new_socket(*args, **kwargs):
        s = context.socket(*args, **kwargs)
        sockets.append(s)
        return s

    return new_socket
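

# Illustrative sketch (not collected by pytest, since conftest.py is not a
# test module, and not used by the suite): a test that requests the `socket`
# fixture gets a factory bound to the shared `context`, and every socket
# created through it is closed during teardown by the fixtures above.
def _example_socket_fixture_usage(socket):
    pusher = socket(zmq.PUSH)
    puller = socket(zmq.PULL)
    port = pusher.bind_to_random_port('tcp://127.0.0.1')
    puller.connect(f'tcp://127.0.0.1:{port}')
    # no explicit close() needed; teardown closes both sockets with linger=0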


@contextmanager
def assert_raises_errno(errno):
    try:
        yield
    except zmq.ZMQError as e:
        assert e.errno == errno, (
            f"wrong error raised, expected {zmq.ZMQError(errno)} got {zmq.ZMQError(e.errno)}"
        )
    else:
        pytest.fail(f"Expected {zmq.ZMQError(errno)}, no error raised")


def recv(socket, *, timeout=5, flags=0, multipart=False, **kwargs):
    """call recv[_multipart] in a way that raises if there is nothing to receive"""
    if zmq.zmq_version_info() >= (3, 1, 0):
        # zmq 3.1 has a bug, where poll can return false positives,
        # so we wait a little bit just in case
        # See LIBZMQ-280 on JIRA
        time.sleep(0.1)

    r, w, x = zmq.select([socket], [], [], timeout=timeout)
    assert r, "Should have received a message"
    flags |= zmq.DONTWAIT

    recv = socket.recv_multipart if multipart else socket.recv
    return recv(flags=flags, **kwargs)


recv_multipart = partial(recv, multipart=True)
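

# Illustrative sketch (not collected: pytest does not look for tests in
# conftest.py): a test body combining the recv helpers above with the
# push_pull fixture defined below. zmq.select bounds the wait, so a missing
# message fails the assertion instead of hanging the test.
def _example_recv_helper_usage(push_pull):
    push, pull = push_pull
    push.send_multipart([b'hello', b'world'])
    assert recv_multipart(pull) == [b'hello', b'world']
    push.send(b'single')
    assert recv(pull) == b'single'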


@pytest.fixture
def create_bound_pair(socket):
    def create_bound_pair(type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'):
        """Create a bound socket pair using a random port."""
        s1 = socket(type1)
        s1.linger = 0
        port = s1.bind_to_random_port(interface)
        s2 = socket(type2)
        s2.linger = 0
        s2.connect(f'{interface}:{port}')
        return s1, s2

    return create_bound_pair


@pytest.fixture
def bound_pair(create_bound_pair):
    return create_bound_pair()


@pytest.fixture
def push_pull(create_bound_pair):
    return create_bound_pair(zmq.PUSH, zmq.PULL)


@pytest.fixture
def dealer_router(create_bound_pair):
    return create_bound_pair(zmq.DEALER, zmq.ROUTER)
pyzmq-26.4.0/tests/cython_ext.pyx000066400000000000000000000012021477374370200170640ustar00rootroot00000000000000from zmq cimport Context, Frame, Socket, libzmq


cdef inline Frame c_send_recv(Socket a, Socket b, bytes to_send):
    cdef Frame msg = Frame(to_send)
    a.send(msg)
    cdef Frame recvd = b.recv(flags=0, copy=False)
    return recvd


cpdef bytes send_recv_test(bytes to_send):
    cdef Context ctx = Context()
    cdef Socket a = Socket(ctx, libzmq.ZMQ_PUSH)
    cdef Socket b = Socket(ctx, libzmq.ZMQ_PULL)
    url = 'inproc://test'
    a.bind(url)
    b.connect(url)
    cdef Frame recvd_frame = c_send_recv(a, b, to_send)
    a.close()
    b.close()
    ctx.term()
    cdef bytes recvd_bytes = recvd_frame.bytes
    return recvd_bytes
pyzmq-26.4.0/tests/test_asyncio.py000066400000000000000000000256521477374370200172330ustar00rootroot00000000000000"""Test asyncio support"""

# Copyright (c) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import asyncio
import json
import os
import sys
from multiprocessing import Process

import pytest
from pytest import mark

import zmq
import zmq.asyncio as zaio


@pytest.fixture
def Context(event_loop):
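    """Override the shared Context fixture from conftest.py with zmq.asyncio.Context."""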
    return zaio.Context


def test_socket_class(context):
    with context.socket(zmq.PUSH) as s:
        assert isinstance(s, zaio.Socket)


def test_instance_subclass_first(context):
    actx = zmq.asyncio.Context.instance()
    ctx = zmq.Context.instance()
    ctx.term()
    actx.term()
    assert type(ctx) is zmq.Context
    assert type(actx) is zmq.asyncio.Context


def test_instance_subclass_second(context):
    with zmq.Context.instance() as ctx:
        assert type(ctx) is zmq.Context
        with zmq.asyncio.Context.instance() as actx:
            assert type(actx) is zmq.asyncio.Context


async def test_recv_multipart(context, create_bound_pair):
    a, b = create_bound_pair(zmq.PUSH, zmq.PULL)
    f = b.recv_multipart()
    assert not f.done()
    await a.send(b"hi")
    recvd = await f
    assert recvd == [b"hi"]


async def test_recv(create_bound_pair):
    a, b = create_bound_pair(zmq.PUSH, zmq.PULL)
    f1 = b.recv()
    f2 = b.recv()
    assert not f1.done()
    assert not f2.done()
    await a.send_multipart([b"hi", b"there"])
    recvd = await f2
    assert f1.done()
    assert f1.result() == b"hi"
    assert recvd == b"there"


async def test_recv_into(create_bound_pair):
    a, b = create_bound_pair()
    b.rcvtimeo = 1000
    msg = [
        b'hello',
        b'there world',
        b'part 3',
        b'rest',
    ]
    a.send_multipart(msg)

    # default nbytes: fits in array
    buf = bytearray(10)
    nbytes = await b.recv_into(buf)
    assert nbytes == len(msg[0])
    assert buf[:nbytes] == msg[0]

    # default nbytes: truncates to sizeof(buf)
    buf = bytearray(4)
    nbytes = await b.recv_into(buf, flags=zmq.DONTWAIT)
    # returned nbytes is the actual received length,
    # which indicates truncation
    assert nbytes == len(msg[1])
    assert buf[:] == msg[1][: len(buf)]

    # specify nbytes, truncates
    buf = bytearray(10)
    nbytes = 4
    nbytes_recvd = await b.recv_into(buf, nbytes=nbytes)
    assert nbytes_recvd == len(msg[2])

    # recv_into empty buffer discards everything
    buf = bytearray(10)
    view = memoryview(buf)[:0]
    assert view.nbytes == 0
    nbytes = await b.recv_into(view)
    assert nbytes == len(msg[3])


async def test_recv_into_bad(create_bound_pair):
    a, b = create_bound_pair()
    b.rcvtimeo = 1000

    # bad calls
    # make sure flags work
    with pytest.raises(zmq.Again):
        await b.recv_into(bytearray(5), flags=zmq.DONTWAIT)

    await a.send(b'msg')
    # negative nbytes
    buf = bytearray(10)
    with pytest.raises(ValueError):
        await b.recv_into(buf, nbytes=-1)


@mark.skipif(not hasattr(zmq, "RCVTIMEO"), reason="requires RCVTIMEO")
async def test_recv_timeout(push_pull):
    a, b = push_pull
    b.rcvtimeo = 100
    f1 = b.recv()
    b.rcvtimeo = 1000
    f2 = b.recv_multipart()
    with pytest.raises(zmq.Again):
        await f1
    await a.send_multipart([b"hi", b"there"])
    recvd = await f2
    assert f2.done()
    assert recvd == [b"hi", b"there"]


@mark.skipif(not hasattr(zmq, "SNDTIMEO"), reason="requires SNDTIMEO")
async def test_send_timeout(socket):
    s = socket(zmq.PUSH)
    s.sndtimeo = 100
    with pytest.raises(zmq.Again):
        await s.send(b"not going anywhere")


async def test_recv_string(push_pull):
    a, b = push_pull
    f = b.recv_string()
    assert not f.done()
    msg = "πøøπ"
    await a.send_string(msg)
    recvd = await f
    assert f.done()
    assert f.result() == msg
    assert recvd == msg


async def test_recv_json(push_pull):
    a, b = push_pull
    f = b.recv_json()
    assert not f.done()
    obj = dict(a=5)
    await a.send_json(obj)
    recvd = await f
    assert f.done()
    assert f.result() == obj
    assert recvd == obj


async def test_recv_json_cancelled(push_pull):
    a, b = push_pull
    f = b.recv_json()
    assert not f.done()
    f.cancel()
    # cycle eventloop to allow cancel events to fire
    await asyncio.sleep(0)
    obj = dict(a=5)
    await a.send_json(obj)
    with pytest.raises(asyncio.CancelledError):
        recvd = await f
    assert f.done()
    # give it a chance to incorrectly consume the event
    events = await b.poll(timeout=5)
    assert events
    await asyncio.sleep(0)
    # make sure cancelled recv didn't eat up event
    f = b.recv_json()
    recvd = await asyncio.wait_for(f, timeout=5)
    assert recvd == obj


async def test_recv_pyobj(push_pull):
    a, b = push_pull
    f = b.recv_pyobj()
    assert not f.done()
    obj = dict(a=5)
    await a.send_pyobj(obj)
    recvd = await f
    assert f.done()
    assert f.result() == obj
    assert recvd == obj


async def test_custom_serialize(create_bound_pair):
    def serialize(msg):
        frames = []
        frames.extend(msg.get("identities", []))
        content = json.dumps(msg["content"]).encode("utf8")
        frames.append(content)
        return frames

    def deserialize(frames):
        identities = frames[:-1]
        content = json.loads(frames[-1].decode("utf8"))
        return {
            "identities": identities,
            "content": content,
        }

    a, b = create_bound_pair(zmq.DEALER, zmq.ROUTER)

    msg = {
        "content": {
            "a": 5,
            "b": "bee",
        }
    }
    await a.send_serialized(msg, serialize)
    recvd = await b.recv_serialized(deserialize)
    assert recvd["content"] == msg["content"]
    assert recvd["identities"]
    # bounce back, tests identities
    await b.send_serialized(recvd, serialize)
    r2 = await a.recv_serialized(deserialize)
    assert r2["content"] == msg["content"]
    assert not r2["identities"]


async def test_custom_serialize_error(dealer_router):
    a, b = dealer_router

    msg = {
        "content": {
            "a": 5,
            "b": "bee",
        }
    }
    with pytest.raises(TypeError):
        await a.send_serialized(json, json.dumps)

    await a.send(b"not json")
    with pytest.raises(TypeError):
        await b.recv_serialized(json.loads)


async def test_recv_dontwait(push_pull):
    push, pull = push_pull
    f = pull.recv(zmq.DONTWAIT)
    with pytest.raises(zmq.Again):
        await f
    await push.send(b"ping")
    await pull.poll()  # ensure message will be waiting
    f = pull.recv(zmq.DONTWAIT)
    assert f.done()
    msg = await f
    assert msg == b"ping"


async def test_recv_cancel(push_pull):
    a, b = push_pull
    f1 = b.recv()
    f2 = b.recv_multipart()
    assert f1.cancel()
    assert f1.done()
    assert not f2.done()
    await a.send_multipart([b"hi", b"there"])
    recvd = await f2
    assert f1.cancelled()
    assert f2.done()
    assert recvd == [b"hi", b"there"]


async def test_poll(push_pull):
    a, b = push_pull
    f = b.poll(timeout=0)
    await asyncio.sleep(0)
    assert f.result() == 0

    f = b.poll(timeout=1)
    assert not f.done()
    evt = await f

    assert evt == 0

    f = b.poll(timeout=1000)
    assert not f.done()
    await a.send_multipart([b"hi", b"there"])
    evt = await f
    assert evt == zmq.POLLIN
    recvd = await b.recv_multipart()
    assert recvd == [b"hi", b"there"]


async def test_poll_base_socket(sockets):
    ctx = zmq.Context()
    url = "inproc://test"
    a = ctx.socket(zmq.PUSH)
    b = ctx.socket(zmq.PULL)
    sockets.extend([a, b])
    a.bind(url)
    b.connect(url)

    poller = zaio.Poller()
    poller.register(b, zmq.POLLIN)

    f = poller.poll(timeout=1000)
    assert not f.done()
    a.send_multipart([b"hi", b"there"])
    evt = await f
    assert evt == [(b, zmq.POLLIN)]
    recvd = b.recv_multipart()
    assert recvd == [b"hi", b"there"]


async def test_poll_on_closed_socket(push_pull):
    a, b = push_pull

    f = b.poll(timeout=1)
    b.close()

    # The test might stall if we try to await f directly so instead just make a few
    # passes through the event loop to schedule and execute all callbacks
    for _ in range(5):
        await asyncio.sleep(0)
        if f.cancelled():
            break
    assert f.cancelled()


@pytest.mark.skipif(
    sys.platform.startswith("win"),
    reason="Windows does not support polling on files",
)
async def test_poll_raw():
    p = zaio.Poller()
    # make a pipe
    r, w = os.pipe()
    r = os.fdopen(r, "rb")
    w = os.fdopen(w, "wb")

    # POLLOUT
    p.register(r, zmq.POLLIN)
    p.register(w, zmq.POLLOUT)
    evts = await p.poll(timeout=1)
    evts = dict(evts)
    assert r.fileno() not in evts
    assert w.fileno() in evts
    assert evts[w.fileno()] == zmq.POLLOUT

    # POLLIN
    p.unregister(w)
    w.write(b"x")
    w.flush()
    evts = await p.poll(timeout=1000)
    evts = dict(evts)
    assert r.fileno() in evts
    assert evts[r.fileno()] == zmq.POLLIN
    assert r.read(1) == b"x"
    r.close()
    w.close()


def test_multiple_loops(push_pull):
    a, b = push_pull

    async def test():
        await a.send(b'buf')
        msg = await b.recv()
        assert msg == b'buf'

    for i in range(3):
        loop = asyncio.new_event_loop()
        loop.run_until_complete(asyncio.wait_for(test(), timeout=10))
        loop.close()


def test_shadow():
    with zmq.Context() as ctx:
        s = ctx.socket(zmq.PULL)
        async_s = zaio.Socket(s)
        assert isinstance(async_s, zaio.Socket)
        assert async_s.underlying == s.underlying
        assert async_s.type == s.type


async def test_poll_leak():
    ctx = zmq.asyncio.Context()
    with ctx, ctx.socket(zmq.PULL) as s:
        assert len(s._recv_futures) == 0
        for i in range(10):
            f = asyncio.ensure_future(s.poll(timeout=1000, flags=zmq.PollEvent.POLLIN))
            f.cancel()
            await asyncio.sleep(0)
        # one more sleep allows further chained cleanup
        await asyncio.sleep(0.1)
        assert len(s._recv_futures) == 0


class ProcessForTeardownTest(Process):
    def run(self):
        """Leave context, socket and event loop upon implicit disposal"""

        actx = zaio.Context.instance()
        socket = actx.socket(zmq.PAIR)
        socket.bind_to_random_port("tcp://127.0.0.1")

        async def never_ending_task(socket):
            await socket.recv()  # never ever receive anything

        loop = asyncio.new_event_loop()
        coro = asyncio.wait_for(never_ending_task(socket), timeout=1)
        try:
            loop.run_until_complete(coro)
        except asyncio.TimeoutError:
            pass  # expected timeout
        else:
            assert False, "never_ending_task was completed unexpectedly"
        finally:
            loop.close()


def test_process_teardown(request):
    proc = ProcessForTeardownTest()
    proc.start()
    request.addfinalizer(proc.terminate)
    proc.join(10)  # starting new Python process may cost a lot
    assert proc.exitcode is not None, "process teardown hangs"
    assert proc.exitcode == 0, f"Python process died with code {proc.exitcode}"
pyzmq-26.4.0/tests/test_auth.py000066400000000000000000000344231477374370200165230ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import asyncio
import logging
import os
import shutil
import sys
import warnings
from contextlib import contextmanager

import pytest

import zmq
import zmq.asyncio
import zmq.auth
from zmq_test_utils import SkipTest, skip_pypy

try:
    import tornado
except ImportError:
    tornado = None


@pytest.fixture
def Context(event_loop):
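    """Override the shared Context fixture from conftest.py with zmq.asyncio.Context."""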
    return zmq.asyncio.Context


@pytest.fixture
def create_certs(tmpdir):
    """Create CURVE certificates for a test"""

    # Create temporary CURVE key pairs for this test run. We create all keys in a
    # temp directory and then move them into the appropriate private or public
    # directory.
    base_dir = str(tmpdir.join("certs").mkdir())
    keys_dir = os.path.join(base_dir, "certificates")
    public_keys_dir = os.path.join(base_dir, 'public_keys')
    secret_keys_dir = os.path.join(base_dir, 'private_keys')

    os.mkdir(keys_dir)
    os.mkdir(public_keys_dir)
    os.mkdir(secret_keys_dir)

    server_public_file, server_secret_file = zmq.auth.create_certificates(
        keys_dir, "server"
    )
    client_public_file, client_secret_file = zmq.auth.create_certificates(
        keys_dir, "client"
    )
    for key_file in os.listdir(keys_dir):
        if key_file.endswith(".key"):
            shutil.move(
                os.path.join(keys_dir, key_file), os.path.join(public_keys_dir, '.')
            )

    for key_file in os.listdir(keys_dir):
        if key_file.endswith(".key_secret"):
            shutil.move(
                os.path.join(keys_dir, key_file), os.path.join(secret_keys_dir, '.')
            )

    return (public_keys_dir, secret_keys_dir)


def load_certs(secret_keys_dir):
    """Return server and client certificate keys"""
    server_secret_file = os.path.join(secret_keys_dir, "server.key_secret")
    client_secret_file = os.path.join(secret_keys_dir, "client.key_secret")

    server_public, server_secret = zmq.auth.load_certificate(server_secret_file)
    client_public, client_secret = zmq.auth.load_certificate(client_secret_file)

    return server_public, server_secret, client_public, client_secret


@pytest.fixture
def public_keys_dir(create_certs):
    public_keys_dir, secret_keys_dir = create_certs
    return public_keys_dir


@pytest.fixture
def secret_keys_dir(create_certs):
    public_keys_dir, secret_keys_dir = create_certs
    return secret_keys_dir


@pytest.fixture
def certs(secret_keys_dir):
    return load_certs(secret_keys_dir)


@pytest.fixture
async def _async_setup(request, event_loop):
    """pytest doesn't support async setup/teardown"""
    instance = request.instance
    await instance.async_setup()
    yield
    # make sure our teardown runs before the loop closes
    await instance.async_teardown()


@pytest.mark.usefixtures("_async_setup")
class AuthTest:
    auth = None

    async def async_setup(self):
        self.context = zmq.asyncio.Context()
        if zmq.zmq_version_info() < (4, 0):
            raise SkipTest("security is new in libzmq 4.0")
        try:
            zmq.curve_keypair()
        except zmq.ZMQError:
            raise SkipTest("security requires libzmq to have curve support")
        # enable debug logging while we run tests
        logging.getLogger('zmq.auth').setLevel(logging.DEBUG)
        self.auth = self.make_auth()
        await self.start_auth()

    async def async_teardown(self):
        # Windows seems to have an issue waiting for cleanup
        # of the closed sockets
        # perhaps due to the background selector thread
        if sys.platform.startswith("win"):
            await asyncio.sleep(0.2)

        if self.auth:
            self.auth.stop()
            self.auth = None

        if sys.platform.startswith("win"):
            await asyncio.sleep(0.2)
        self.context.term()

    def make_auth(self):
        raise NotImplementedError()

    async def start_auth(self):
        self.auth.start()

    async def can_connect(self, server, client, timeout=1000):
        """Check if client can connect to server using tcp transport"""
        result = False
        iface = 'tcp://127.0.0.1'
        port = server.bind_to_random_port(iface)
        client.connect(f"{iface}:{port}")
        msg = [b"Hello World"]
        # run poll on server twice
        # to flush spurious events
        await server.poll(100, zmq.POLLOUT)

        if await server.poll(timeout, zmq.POLLOUT):
            try:
                await server.send_multipart(msg, zmq.NOBLOCK)
            except zmq.Again:
                warnings.warn("server set POLLOUT, but cannot send", RuntimeWarning)
                return False
        else:
            return False

        if await client.poll(timeout):
            try:
                rcvd_msg = await client.recv_multipart(zmq.NOBLOCK)
            except zmq.Again:
                warnings.warn("client set POLLIN, but cannot recv", RuntimeWarning)
            else:
                assert rcvd_msg == msg
                result = True
        return result

    @contextmanager
    def push_pull(self):
        with self.context.socket(zmq.PUSH) as server, self.context.socket(
            zmq.PULL
        ) as client:
            server.linger = 0
            server.sndtimeo = 2000
            client.linger = 0
            client.rcvtimeo = 2000
            yield server, client

    @contextmanager
    def curve_push_pull(self, certs, client_key="ok"):
        server_public, server_secret, client_public, client_secret = certs
        with self.push_pull() as (server, client):
            server.curve_publickey = server_public
            server.curve_secretkey = server_secret
            server.curve_server = True
            if client_key is not None:
                client.curve_publickey = client_public
                client.curve_secretkey = client_secret
                if client_key == "ok":
                    client.curve_serverkey = server_public
                else:
                    private, public = zmq.curve_keypair()
                    client.curve_serverkey = public
            yield (server, client)

    async def test_null(self):
        """threaded auth - NULL"""
        # A default NULL connection should always succeed, and not
        # go through our authentication infrastructure at all.
        self.auth.stop()
        self.auth = None

        # use a new context, so ZAP isn't inherited
        self.context.term()
        self.context = zmq.asyncio.Context()

        with self.push_pull() as (server, client):
            assert await self.can_connect(server, client)

        # By setting a domain we switch on authentication for NULL sockets,
        # though no policies are configured yet. The client connection
        # should still be allowed.
        with self.push_pull() as (server, client):
            server.zap_domain = b'global'
            assert await self.can_connect(server, client)

    async def test_deny(self):
        # deny 127.0.0.1, connection should fail
        self.auth.deny('127.0.0.1')
        with pytest.raises(ValueError):
            self.auth.allow("127.0.0.2")
        with self.push_pull() as (server, client):
            # By setting a domain we switch on authentication for NULL sockets,
            # though no policies are configured yet.
            server.zap_domain = b'global'
            assert not await self.can_connect(server, client, timeout=100)

    async def test_allow(self):
        # allow 127.0.0.1, connection should pass
        self.auth.allow('127.0.0.1')
        with pytest.raises(ValueError):
            self.auth.deny("127.0.0.2")
        with self.push_pull() as (server, client):
            # By setting a domain we switch on authentication for NULL sockets,
            # though no policies are configured yet.
            server.zap_domain = b'global'
            assert await self.can_connect(server, client)

    @pytest.mark.parametrize(
        "enabled, password, success",
        [
            (True, "correct", True),
            (False, "correct", False),
            (True, "incorrect", False),
        ],
    )
    async def test_plain(self, enabled, password, success):
        """threaded auth - PLAIN"""

        # Try PLAIN authentication - without configuring server, connection should fail
        with self.push_pull() as (server, client):
            server.plain_server = True
            if password:
                client.plain_username = b'admin'
                client.plain_password = password.encode("ascii")
            if enabled:
                self.auth.configure_plain(domain='*', passwords={'admin': 'correct'})
            if success:
                assert await self.can_connect(server, client)
            else:
                assert not await self.can_connect(server, client, timeout=100)

        # Remove authenticator and check that a normal connection works
        self.auth.stop()
        self.auth = None
        with self.push_pull() as (server, client):
            assert await self.can_connect(server, client)

    @pytest.mark.parametrize(
        "client_key, location, success",
        [
            ('ok', zmq.auth.CURVE_ALLOW_ANY, True),
            ('ok', "public_keys", True),
            ('bad', "public_keys", False),
            (None, "public_keys", False),
        ],
    )
    async def test_curve(self, certs, public_keys_dir, client_key, location, success):
        """threaded auth - CURVE"""
        self.auth.allow('127.0.0.1')

        # Try CURVE authentication - without configuring server, connection should fail
        with self.curve_push_pull(certs, client_key=client_key) as (server, client):
            if location:
                if location == 'public_keys':
                    location = public_keys_dir
                self.auth.configure_curve(domain='*', location=location)
            if success:
                assert await self.can_connect(server, client, timeout=100)
            else:
                assert not await self.can_connect(server, client, timeout=100)

        # Remove authenticator and check that a normal connection works
        self.auth.stop()
        self.auth = None

        # Try connecting using NULL and no authentication enabled, connection should pass
        with self.push_pull() as (server, client):
            assert await self.can_connect(server, client)

    @pytest.mark.parametrize("key", ["ok", "wrong"])
    @pytest.mark.parametrize("async_", [True, False])
    async def test_curve_callback(self, certs, key, async_):
        """threaded auth - CURVE with callback authentication"""
        self.auth.allow('127.0.0.1')
        server_public, server_secret, client_public, client_secret = certs

        class CredentialsProvider:
            def __init__(self):
                if key == "ok":
                    self.client = client_public
                else:
                    self.client = server_public

            def callback(self, domain, key):
                if key == self.client:
                    return True
                else:
                    return False

            async def async_callback(self, domain, key):
                await asyncio.sleep(0.1)
                if key == self.client:
                    return True
                else:
                    return False

        if async_:
            CredentialsProvider.callback = CredentialsProvider.async_callback
        provider = CredentialsProvider()
        self.auth.configure_curve_callback(credentials_provider=provider)
        with self.curve_push_pull(certs) as (server, client):
            if key == "ok":
                assert await self.can_connect(server, client)
            else:
                assert not await self.can_connect(server, client, timeout=200)

    @skip_pypy
    async def test_curve_user_id(self, certs, public_keys_dir):
        """threaded auth - CURVE"""
        self.auth.allow('127.0.0.1')
        server_public, server_secret, client_public, client_secret = certs
        self.auth.configure_curve(domain='*', location=public_keys_dir)
        # reverse server-client relationship, so server is PULL
        with self.push_pull() as (client, server):
            server.curve_publickey = server_public
            server.curve_secretkey = server_secret
            server.curve_server = True

            client.curve_publickey = client_public
            client.curve_secretkey = client_secret
            client.curve_serverkey = server_public

            assert await self.can_connect(client, server)

            # test default user-id map
            await client.send(b'test')
            msg = await server.recv(copy=False)
            assert msg.bytes == b'test'
            try:
                user_id = msg.get('User-Id')
            except zmq.ZMQVersionError:
                pass
            else:
                assert user_id == client_public.decode("utf8")

            # test custom user-id map
            self.auth.curve_user_id = lambda client_key: 'custom'

            with self.context.socket(zmq.PUSH) as client2:
                client2.curve_publickey = client_public
                client2.curve_secretkey = client_secret
                client2.curve_serverkey = server_public
                assert await self.can_connect(client2, server)

                await client2.send(b'test2')
                msg = await server.recv(copy=False)
                assert msg.bytes == b'test2'
                try:
                    user_id = msg.get('User-Id')
                except zmq.ZMQVersionError:
                    pass
                else:
                    assert user_id == 'custom'


class TestThreadAuthentication(AuthTest):
    """Test authentication running in a thread"""

    def make_auth(self):
        from zmq.auth.thread import ThreadAuthenticator

        return ThreadAuthenticator(self.context)


class TestAsyncioAuthentication(AuthTest):
    """Test authentication running on an asyncio event loop"""

    def make_auth(self):
        from zmq.auth.asyncio import AsyncioAuthenticator

        return AsyncioAuthenticator(self.context)


async def test_ioloop_authenticator(context, event_loop, io_loop):
    from tornado.ioloop import IOLoop

    with warnings.catch_warnings():
        from zmq.auth.ioloop import IOLoopAuthenticator

    auth = IOLoopAuthenticator(context)
    assert auth.context is context

    loop = IOLoop(make_current=False)
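    # Passing io_loop= explicitly is deprecated and should emit a
    # DeprecationWarning.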
    with pytest.warns(DeprecationWarning):
        auth = IOLoopAuthenticator(io_loop=loop)
pyzmq-26.4.0/tests/test_cffi_backend.py
import time
from unittest import TestCase

from zmq_test_utils import SkipTest

try:
    from zmq.backend.cffi import (  # type: ignore
        IDENTITY,
        POLLIN,
        POLLOUT,
        PULL,
        PUSH,
        REP,
        REQ,
        zmq_version_info,
    )
    from zmq.backend.cffi._cffi import C, ffi

    have_ffi_backend = True
except ImportError:
    have_ffi_backend = False
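# The tests below drive libzmq through the raw CFFI bindings: `C` is the cffi
# handle to the libzmq library and `ffi` constructs C-level objects
# (zmq_msg_t, zmq_pollitem_t, option buffers) by hand.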


class TestCFFIBackend(TestCase):
    def setUp(self):
        if not have_ffi_backend:
            raise SkipTest('CFFI not available')

    def test_zmq_version_info(self):
        version = zmq_version_info()

        assert version[0] in range(2, 11)

    def test_zmq_ctx_new_destroy(self):
        ctx = C.zmq_ctx_new()

        assert ctx != ffi.NULL
        assert 0 == C.zmq_ctx_destroy(ctx)

    def test_zmq_socket_open_close(self):
        ctx = C.zmq_ctx_new()
        socket = C.zmq_socket(ctx, PUSH)

        assert ctx != ffi.NULL
        assert ffi.NULL != socket
        assert 0 == C.zmq_close(socket)
        assert 0 == C.zmq_ctx_destroy(ctx)

    def test_zmq_setsockopt(self):
        ctx = C.zmq_ctx_new()
        socket = C.zmq_socket(ctx, PUSH)

        identity = ffi.new('char[3]', b'zmq')
        ret = C.zmq_setsockopt(socket, IDENTITY, ffi.cast('void*', identity), 3)

        assert ret == 0
        assert ctx != ffi.NULL
        assert ffi.NULL != socket
        assert 0 == C.zmq_close(socket)
        assert 0 == C.zmq_ctx_destroy(ctx)

    def test_zmq_getsockopt(self):
        ctx = C.zmq_ctx_new()
        socket = C.zmq_socket(ctx, PUSH)

        identity = ffi.new('char[]', b'zmq')
        ret = C.zmq_setsockopt(socket, IDENTITY, ffi.cast('void*', identity), 3)
        assert ret == 0

        option_len = ffi.new('size_t*', 3)
        option = ffi.new('char[3]')
        ret = C.zmq_getsockopt(socket, IDENTITY, ffi.cast('void*', option), option_len)

        assert ret == 0
        assert ffi.string(ffi.cast('char*', option))[0:1] == b"z"
        assert ffi.string(ffi.cast('char*', option))[1:2] == b"m"
        assert ffi.string(ffi.cast('char*', option))[2:3] == b"q"
        assert ctx != ffi.NULL
        assert ffi.NULL != socket
        assert 0 == C.zmq_close(socket)
        assert 0 == C.zmq_ctx_destroy(ctx)

    def test_zmq_bind(self):
        ctx = C.zmq_ctx_new()
        socket = C.zmq_socket(ctx, PUSH)

        assert 0 == C.zmq_bind(socket, b'tcp://*:4444')
        assert ctx != ffi.NULL
        assert ffi.NULL != socket
        assert 0 == C.zmq_close(socket)
        assert 0 == C.zmq_ctx_destroy(ctx)

    def test_zmq_bind_connect(self):
        ctx = C.zmq_ctx_new()

        socket1 = C.zmq_socket(ctx, PUSH)
        socket2 = C.zmq_socket(ctx, PULL)

        assert 0 == C.zmq_bind(socket1, b'tcp://*:4444')
        assert 0 == C.zmq_connect(socket2, b'tcp://127.0.0.1:4444')
        assert ctx != ffi.NULL
        assert ffi.NULL != socket1
        assert ffi.NULL != socket2
        assert 0 == C.zmq_close(socket1)
        assert 0 == C.zmq_close(socket2)
        assert 0 == C.zmq_ctx_destroy(ctx)

    def test_zmq_msg_init_close(self):
        zmq_msg = ffi.new('zmq_msg_t*')

        assert ffi.NULL != zmq_msg
        assert 0 == C.zmq_msg_init(zmq_msg)
        assert 0 == C.zmq_msg_close(zmq_msg)

    def test_zmq_msg_init_size(self):
        zmq_msg = ffi.new('zmq_msg_t*')

        assert ffi.NULL != zmq_msg
        assert 0 == C.zmq_msg_init_size(zmq_msg, 10)
        assert 0 == C.zmq_msg_close(zmq_msg)

    def test_zmq_msg_init_data(self):
        zmq_msg = ffi.new('zmq_msg_t*')
        message = ffi.new('char[5]', b'Hello')

        assert 0 == C.zmq_msg_init_data(
            zmq_msg, ffi.cast('void*', message), 5, ffi.NULL, ffi.NULL
        )

        assert ffi.NULL != zmq_msg
        assert 0 == C.zmq_msg_close(zmq_msg)

    def test_zmq_msg_data(self):
        zmq_msg = ffi.new('zmq_msg_t*')
        message = ffi.new('char[]', b'Hello')
        assert 0 == C.zmq_msg_init_data(
            zmq_msg, ffi.cast('void*', message), 5, ffi.NULL, ffi.NULL
        )

        data = C.zmq_msg_data(zmq_msg)

        assert ffi.NULL != zmq_msg
        assert ffi.string(ffi.cast("char*", data)) == b'Hello'
        assert 0 == C.zmq_msg_close(zmq_msg)

    def test_zmq_send(self):
        ctx = C.zmq_ctx_new()

        sender = C.zmq_socket(ctx, REQ)
        receiver = C.zmq_socket(ctx, REP)

        assert 0 == C.zmq_bind(receiver, b'tcp://*:7777')
        assert 0 == C.zmq_connect(sender, b'tcp://127.0.0.1:7777')

        time.sleep(0.1)

        zmq_msg = ffi.new('zmq_msg_t*')
        message = ffi.new('char[5]', b'Hello')

        C.zmq_msg_init_data(
            zmq_msg,
            ffi.cast('void*', message),
            ffi.cast('size_t', 5),
            ffi.NULL,
            ffi.NULL,
        )

        assert 5 == C.zmq_msg_send(zmq_msg, sender, 0)
        assert 0 == C.zmq_msg_close(zmq_msg)
        assert C.zmq_close(sender) == 0
        assert C.zmq_close(receiver) == 0
        assert C.zmq_ctx_destroy(ctx) == 0

    def test_zmq_recv(self):
        ctx = C.zmq_ctx_new()

        sender = C.zmq_socket(ctx, REQ)
        receiver = C.zmq_socket(ctx, REP)

        assert 0 == C.zmq_bind(receiver, b'tcp://*:2222')
        assert 0 == C.zmq_connect(sender, b'tcp://127.0.0.1:2222')

        time.sleep(0.1)

        zmq_msg = ffi.new('zmq_msg_t*')
        message = ffi.new('char[5]', b'Hello')

        C.zmq_msg_init_data(
            zmq_msg,
            ffi.cast('void*', message),
            ffi.cast('size_t', 5),
            ffi.NULL,
            ffi.NULL,
        )

        zmq_msg2 = ffi.new('zmq_msg_t*')
        C.zmq_msg_init(zmq_msg2)

        assert 5 == C.zmq_msg_send(zmq_msg, sender, 0)
        assert 5 == C.zmq_msg_recv(zmq_msg2, receiver, 0)
        assert 5 == C.zmq_msg_size(zmq_msg2)
        assert (
            b"Hello"
            == ffi.buffer(C.zmq_msg_data(zmq_msg2), C.zmq_msg_size(zmq_msg2))[:]
        )
        assert C.zmq_close(sender) == 0
        assert C.zmq_close(receiver) == 0
        assert C.zmq_ctx_destroy(ctx) == 0

    def test_zmq_poll(self):
        ctx = C.zmq_ctx_new()

        sender = C.zmq_socket(ctx, REQ)
        receiver = C.zmq_socket(ctx, REP)

        r1 = C.zmq_bind(receiver, b'tcp://*:3333')
        r2 = C.zmq_connect(sender, b'tcp://127.0.0.1:3333')
        assert r1 == 0
        assert r2 == 0

        zmq_msg = ffi.new('zmq_msg_t*')
        message = ffi.new('char[5]', b'Hello')

        C.zmq_msg_init_data(
            zmq_msg,
            ffi.cast('void*', message),
            ffi.cast('size_t', 5),
            ffi.NULL,
            ffi.NULL,
        )

        receiver_pollitem = ffi.new('zmq_pollitem_t*')
        receiver_pollitem.socket = receiver
        receiver_pollitem.fd = 0
        receiver_pollitem.events = POLLIN | POLLOUT
        receiver_pollitem.revents = 0
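        # fd is left at 0 because zmq_poll uses the socket member when it is
        # set; events requests both readability and writability.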

        ret = C.zmq_poll(ffi.NULL, 0, 0)
        assert ret == 0

        ret = C.zmq_poll(receiver_pollitem, 1, 0)
        assert ret == 0

        ret = C.zmq_msg_send(zmq_msg, sender, 0)
        print(ffi.string(C.zmq_strerror(C.zmq_errno())))
        assert ret == 5

        time.sleep(0.2)

        ret = C.zmq_poll(receiver_pollitem, 1, 0)
        assert ret == 1

        assert int(receiver_pollitem.revents) & POLLIN
        assert not int(receiver_pollitem.revents) & POLLOUT

        zmq_msg2 = ffi.new('zmq_msg_t*')
        C.zmq_msg_init(zmq_msg2)

        ret_recv = C.zmq_msg_recv(zmq_msg2, receiver, 0)
        assert ret_recv == 5

        assert 5 == C.zmq_msg_size(zmq_msg2)
        assert (
            b"Hello"
            == ffi.buffer(C.zmq_msg_data(zmq_msg2), C.zmq_msg_size(zmq_msg2))[:]
        )

        sender_pollitem = ffi.new('zmq_pollitem_t*')
        sender_pollitem.socket = sender
        sender_pollitem.fd = 0
        sender_pollitem.events = POLLIN | POLLOUT
        sender_pollitem.revents = 0

        ret = C.zmq_poll(sender_pollitem, 1, 0)
        assert ret == 0

        zmq_msg_again = ffi.new('zmq_msg_t*')
        message_again = ffi.new('char[11]', b'Hello Again')

        C.zmq_msg_init_data(
            zmq_msg_again,
            ffi.cast('void*', message_again),
            ffi.cast('size_t', 11),
            ffi.NULL,
            ffi.NULL,
        )

        assert 11 == C.zmq_msg_send(zmq_msg_again, receiver, 0)

        time.sleep(0.2)

        assert 0 <= C.zmq_poll(sender_pollitem, 1, 0)
        assert int(sender_pollitem.revents) & POLLIN
        assert 11 == C.zmq_msg_recv(zmq_msg2, sender, 0)
        assert 11 == C.zmq_msg_size(zmq_msg2)
        assert (
            b"Hello Again"
            == ffi.buffer(C.zmq_msg_data(zmq_msg2), int(C.zmq_msg_size(zmq_msg2)))[:]
        )
        assert 0 == C.zmq_close(sender)
        assert 0 == C.zmq_close(receiver)
        assert 0 == C.zmq_ctx_destroy(ctx)
        assert 0 == C.zmq_msg_close(zmq_msg)
        assert 0 == C.zmq_msg_close(zmq_msg2)
        assert 0 == C.zmq_msg_close(zmq_msg_again)
pyzmq-26.4.0/tests/test_constants.py
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import pytest

import zmq
import zmq.constants
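# pyzmq exposes libzmq constants both as top-level names (zmq.PUSH) and as
# enum members (zmq.SocketType.PUSH); the aliases are the same objects, which
# is why the `is` comparisons below hold.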


def test_constants():
    assert zmq.POLLIN is zmq.PollEvent.POLLIN
    assert zmq.PUSH is zmq.SocketType.PUSH
    assert zmq.constants.SUBSCRIBE is zmq.SocketOption.SUBSCRIBE
    assert (
        zmq.RECONNECT_STOP_AFTER_DISCONNECT
        is zmq.constants.ReconnectStop.AFTER_DISCONNECT
    )


def test_socket_options():
    assert zmq.IDENTITY is zmq.SocketOption.ROUTING_ID
    assert zmq.IDENTITY._opt_type is zmq.constants._OptType.bytes
    assert zmq.AFFINITY._opt_type is zmq.constants._OptType.int64
    assert zmq.CURVE_SERVER._opt_type is zmq.constants._OptType.int
    assert zmq.FD._opt_type is zmq.constants._OptType.fd


@pytest.mark.parametrize("event_name", list(zmq.Event.__members__))
def test_event_reprs(event_name):
    event = getattr(zmq.Event, event_name)
    assert event_name in repr(event)
pyzmq-26.4.0/tests/test_context.py
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import copy
import gc
import os
import sys
import time
from queue import Queue
from threading import Event, Thread
from unittest import mock

import pytest
from pytest import mark

import zmq
from zmq_test_utils import PYPY, BaseZMQTestCase, GreenTest, SkipTest


class KwargTestSocket(zmq.Socket):
    test_kwarg_value = None

    def __init__(self, *args, **kwargs):
        self.test_kwarg_value = kwargs.pop('test_kwarg', None)
        super().__init__(*args, **kwargs)


class KwargTestContext(zmq.Context):
    _socket_class = KwargTestSocket


class TestContext(BaseZMQTestCase):
    def test_init(self):
        c1 = self.Context()
        assert isinstance(c1, self.Context)
        c1.term()
        c2 = self.Context()
        assert isinstance(c2, self.Context)
        c2.term()
        c3 = self.Context()
        assert isinstance(c3, self.Context)
        c3.term()

    _repr_cls = "zmq.Context"

    def test_repr(self):
        with self.Context() as ctx:
            assert f'{self._repr_cls}()' in repr(ctx)
            assert 'closed' not in repr(ctx)
            with ctx.socket(zmq.PUSH) as push:
                assert f'{self._repr_cls}(1 socket)' in repr(ctx)
                with ctx.socket(zmq.PULL) as pull:
                    assert f'{self._repr_cls}(2 sockets)' in repr(ctx)
        assert f'{self._repr_cls}()' in repr(ctx)
        assert 'closed' in repr(ctx)

    def test_dir(self):
        ctx = self.Context()
        assert 'socket' in dir(ctx)
        if zmq.zmq_version_info() > (3,):
            assert 'IO_THREADS' in dir(ctx)
        ctx.term()

    @mark.skipif(mock is None, reason="requires unittest.mock")
    def test_mockable(self):
        m = mock.Mock(spec=self.context)

    def test_term(self):
        c = self.Context()
        c.term()
        assert c.closed

    def test_context_manager(self):
        with pytest.warns(ResourceWarning):
            with self.Context() as ctx:
                s = ctx.socket(zmq.PUSH)
        # context exit destroys sockets
        assert s.closed
        assert ctx.closed

    def test_fail_init(self):
        self.assertRaisesErrno(zmq.EINVAL, self.Context, -1)

    def test_term_hang(self):
        rep, req = self.create_bound_pair(zmq.ROUTER, zmq.DEALER)
        req.setsockopt(zmq.LINGER, 0)
        req.send(b'hello', copy=False)
        req.close()
        rep.close()
        self.context.term()

    def test_instance(self):
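        # Context.instance() returns a process-wide shared instance; once that
        # instance is terminated, the next call creates a fresh context.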
        ctx = self.Context.instance()
        c2 = self.Context.instance(io_threads=2)
        assert c2 is ctx
        c2.term()
        c3 = self.Context.instance()
        c4 = self.Context.instance()
        assert c3 is not c2
        assert not c3.closed
        assert c3 is c4

    def test_instance_subclass_first(self):
        self.context.term()

        class SubContext(zmq.Context):
            pass

        sctx = SubContext.instance()
        ctx = zmq.Context.instance()
        ctx.term()
        sctx.term()
        assert type(ctx) is zmq.Context
        assert type(sctx) is SubContext

    def test_instance_subclass_second(self):
        self.context.term()

        class SubContextInherit(zmq.Context):
            pass

        class SubContextNoInherit(zmq.Context):
            _instance = None

        ctx = zmq.Context.instance()
        sctx = SubContextInherit.instance()
        sctx2 = SubContextNoInherit.instance()
        ctx.term()
        sctx.term()
        sctx2.term()
        assert type(ctx) is zmq.Context
        assert type(sctx) is zmq.Context
        assert type(sctx2) is SubContextNoInherit

    def test_instance_threadsafe(self):
        self.context.term()  # clear default context

        q = Queue()

        # slow context initialization,
        # to ensure that we are both trying to create one at the same time
        class SlowContext(self.Context):
            def __init__(self, *a, **kw):
                time.sleep(1)
                super().__init__(*a, **kw)

        def f():
            q.put(SlowContext.instance())

        # call ctx.instance() in several threads at once
        N = 16
        threads = [Thread(target=f) for i in range(N)]
        [t.start() for t in threads]
        # also call it in the main thread (not first)
        ctx = SlowContext.instance()
        assert isinstance(ctx, SlowContext)
        # check that all the threads got the same context
        for i in range(N):
            thread_ctx = q.get(timeout=5)
            assert thread_ctx is ctx
        # cleanup
        ctx.term()
        [t.join(timeout=5) for t in threads]

    def test_socket_passes_kwargs(self):
        test_kwarg_value = 'testing one two three'
        with KwargTestContext() as ctx:
            with ctx.socket(zmq.DEALER, test_kwarg=test_kwarg_value) as socket:
                assert socket.test_kwarg_value is test_kwarg_value

    def test_socket_class_arg(self):
        class CustomSocket(zmq.Socket):
            pass

        with self.Context() as ctx:
            with ctx.socket(zmq.PUSH, socket_class=CustomSocket) as s:
                assert isinstance(s, CustomSocket)

    def test_many_sockets(self):
        """opening and closing many sockets shouldn't cause problems"""
        ctx = self.Context()
        for i in range(16):
            sockets = [ctx.socket(zmq.REP) for i in range(65)]
            [s.close() for s in sockets]
            # give the reaper a chance
            time.sleep(1e-2)
        ctx.term()

    def test_sockopts(self):
        """setting socket options with ctx attributes"""
        ctx = self.Context()
        ctx.linger = 5
        assert ctx.linger == 5
        s = ctx.socket(zmq.REQ)
        assert s.linger == 5
        assert s.getsockopt(zmq.LINGER) == 5
        s.close()
        # check that subscribe doesn't get set on sockets that don't subscribe:
        ctx.subscribe = b''
        s = ctx.socket(zmq.REQ)
        s.close()

        ctx.term()

    @mark.skipif(sys.platform.startswith('win'), reason='Segfaults on Windows')
    def test_destroy(self):
        """Context.destroy should close sockets"""
        ctx = self.Context()
        sockets = [ctx.socket(zmq.REP) for i in range(65)]

        # close half of the sockets
        [s.close() for s in sockets[::2]]

        ctx.destroy()
        # reaper is not instantaneous
        time.sleep(1e-2)
        for s in sockets:
            assert s.closed

    def test_destroy_linger(self):
        """Context.destroy should set linger on closing sockets"""
        req, rep = self.create_bound_pair(zmq.REQ, zmq.REP)
        req.send(b'hi')
        time.sleep(1e-2)
        self.context.destroy(linger=0)
        # reaper is not instantaneous
        time.sleep(1e-2)
        for s in (req, rep):
            assert s.closed

    def test_term_noclose(self):
        """Context.term won't close sockets"""
        ctx = self.Context()
        s = ctx.socket(zmq.REQ)
        assert not s.closed
        t = Thread(target=ctx.term)
        t.start()
        t.join(timeout=0.1)
        assert t.is_alive(), "Context should be waiting"
        s.close()
        t.join(timeout=0.1)
        assert not t.is_alive(), "Context should have closed"

    def test_gc(self):
        """test close&term by garbage collection alone"""
        if PYPY:
            raise SkipTest("GC doesn't work on PyPy")

        # test credit @dln (GH #137):
        def gcf():
            def inner():
                ctx = self.Context()
                ctx.socket(zmq.PUSH)

            # can't seem to catch these with pytest.warns(ResourceWarning)
            inner()
            gc.collect()

        t = Thread(target=gcf)
        t.start()
        t.join(timeout=1)
        assert not t.is_alive(), "Garbage collection should have cleaned up context"

    def test_cyclic_destroy(self):
        """ctx.destroy should succeed when cyclic ref prevents gc"""

        # test credit @dln (GH #137):
        class CyclicReference:
            def __init__(self, parent=None):
                self.parent = parent

            def crash(self, sock):
                self.sock = sock
                self.child = CyclicReference(self)

        def crash_zmq():
            ctx = self.Context()
            sock = ctx.socket(zmq.PULL)
            c = CyclicReference()
            c.crash(sock)
            ctx.destroy()

        crash_zmq()

    def test_term_thread(self):
        """ctx.term should not crash active threads (#139)"""
        ctx = self.Context()
        evt = Event()
        evt.clear()

        def block():
            s = ctx.socket(zmq.REP)
            s.bind_to_random_port('tcp://127.0.0.1')
            evt.set()
            try:
                s.recv()
            except zmq.ZMQError as e:
                assert e.errno == zmq.ETERM
                return
            finally:
                s.close()
            self.fail("recv should have been interrupted with ETERM")

        t = Thread(target=block)
        t.start()

        evt.wait(1)
        assert evt.is_set(), "sync event never fired"
        time.sleep(0.01)
        ctx.term()
        t.join(timeout=1)
        assert not t.is_alive(), "term should have interrupted s.recv()"

    def test_destroy_no_sockets(self):
        ctx = self.Context()
        s = ctx.socket(zmq.PUB)
        s.bind_to_random_port('tcp://127.0.0.1')
        s.close()
        ctx.destroy()
        assert s.closed
        assert ctx.closed

    def test_ctx_opts(self):
        if zmq.zmq_version_info() < (3,):
            raise SkipTest("context options require libzmq 3")
        ctx = self.Context()
        ctx.set(zmq.MAX_SOCKETS, 2)
        assert ctx.get(zmq.MAX_SOCKETS) == 2
        ctx.max_sockets = 100
        assert ctx.max_sockets == 100
        assert ctx.get(zmq.MAX_SOCKETS) == 100

    def test_copy(self):
        c1 = self.Context()
        c2 = copy.copy(c1)
        c2b = copy.deepcopy(c1)
        c3 = copy.deepcopy(c2)
        assert c2._shadow
        assert c3._shadow
        assert c1.underlying == c2.underlying
        assert c1.underlying == c3.underlying
        assert c1.underlying == c2b.underlying
        s = c3.socket(zmq.PUB)
        s.close()
        c1.term()

    def test_shadow(self):
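        # Context.shadow() wraps an existing underlying libzmq context without
        # taking ownership, so dropping the shadow must not close the original.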
        ctx = self.Context()
        ctx2 = self.Context.shadow(ctx.underlying)
        assert ctx.underlying == ctx2.underlying
        s = ctx.socket(zmq.PUB)
        s.close()
        del ctx2
        assert not ctx.closed
        s = ctx.socket(zmq.PUB)
        ctx2 = self.Context.shadow(ctx)
        with ctx2.socket(zmq.PUB) as s2:
            pass

        assert s2.closed
        assert not s.closed
        s.close()

        ctx3 = self.Context(ctx)
        assert ctx3.underlying == ctx.underlying
        del ctx3
        assert not ctx.closed

        ctx.term()
        self.assertRaisesErrno(zmq.EFAULT, ctx2.socket, zmq.PUB)
        del ctx2

    def test_shadow_pyczmq(self):
        try:
            from pyczmq import zctx, zsocket, zstr
        except Exception:
            raise SkipTest("Requires pyczmq")

        ctx = zctx.new()
        a = zsocket.new(ctx, zmq.PUSH)
        zsocket.bind(a, "inproc://a")
        ctx2 = self.Context.shadow_pyczmq(ctx)
        b = ctx2.socket(zmq.PULL)
        b.connect("inproc://a")
        zstr.send(a, b'hi')
        rcvd = self.recv(b)
        assert rcvd == b'hi'
        b.close()

    @mark.skipif(sys.platform.startswith('win'), reason='No fork on Windows')
    def test_fork_instance(self):
        ctx = self.Context.instance()
        parent_ctx_id = id(ctx)
        r_fd, w_fd = os.pipe()
        reader = os.fdopen(r_fd, 'r')
        child_pid = os.fork()
        if child_pid == 0:
            ctx = self.Context.instance()
            writer = os.fdopen(w_fd, 'w')
            child_ctx_id = id(ctx)
            ctx.term()
            writer.write(str(child_ctx_id) + "\n")
            writer.flush()
            writer.close()
            os._exit(0)
        else:
            os.close(w_fd)

        child_id_s = reader.readline()
        reader.close()
        assert child_id_s
        assert int(child_id_s) != parent_ctx_id
        ctx.term()


if False:  # disable green context tests

    class TestContextGreen(GreenTest, TestContext):
        """gevent subclass of context tests"""

        # skip tests that use real threads:
        test_gc = GreenTest.skip_green
        test_term_thread = GreenTest.skip_green
        test_destroy_linger = GreenTest.skip_green
        _repr_cls = "zmq.green.Context"
pyzmq-26.4.0/tests/test_cython.py
import os
import sys

import pytest

import zmq

pyximport = pytest.importorskip("pyximport")

HERE = os.path.dirname(__file__)
cython_ext = os.path.join(HERE, "cython_ext.pyx")
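# pyximport compiles tests/cython_ext.pyx on the fly (into tmpdir) against the
# headers reported by zmq.get_includes(), so this also exercises building
# against pyzmq's public headers from Cython.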


@pytest.mark.skipif(
    not os.path.exists(cython_ext),
    reason=f"Requires cython test file {cython_ext}",
)
@pytest.mark.skipif(
    'zmq.backend.cython' not in sys.modules, reason="Requires cython backend"
)
@pytest.mark.skipif(
    sys.platform.startswith('win'), reason="Don't try runtime Cython on Windows"
)
@pytest.mark.skipif(
    os.environ.get("ZMQ_PREFIX") == "bundled", reason="bundled builds don't have zmq.h"
)
@pytest.mark.parametrize('language_level', [3, 2])
def test_cython(language_level, request, tmpdir):
    hook = pyximport.install(
        setup_args=dict(include_dirs=zmq.get_includes()),
        language_level=language_level,
        build_dir=str(tmpdir),
    )
    # don't actually need the hook, just the finder
    pyximport.uninstall(*hook)
    finder = hook[1]

    # loading the module tests the compilation
    spec = finder.find_spec("cython_ext", [HERE])
    cython_ext = spec.loader.create_module(spec)
    spec.loader.exec_module(cython_ext)

    assert hasattr(cython_ext, 'send_recv_test')

    # call the compiled function
    # this shouldn't do much
    msg = b'my msg'
    received = cython_ext.send_recv_test(msg)
    assert received == msg
pyzmq-26.4.0/tests/test_decorators.py
import threading

from pytest import fixture, raises

import zmq
from zmq.decorators import context, socket
from zmq_test_utils import BaseZMQTestCase, term_context

##############################################
#  Test cases for @context
##############################################
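# @context() constructs a zmq.Context and injects it into the decorated
# function as an argument named 'ctx' (or the name passed to the decorator);
# extra decorator args/kwargs are forwarded to the Context constructor.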


@fixture(autouse=True)
def term_context_instance(request):
    request.addfinalizer(lambda: term_context(zmq.Context.instance(), timeout=10))


def test_ctx():
    @context()
    def test(ctx):
        assert isinstance(ctx, zmq.Context), ctx

    test()


def test_ctx_orig_args():
    @context()
    def f(foo, bar, ctx, baz=None):
        assert isinstance(ctx, zmq.Context), ctx
        assert foo == 42
        assert bar is True
        assert baz == 'mock'

    f(42, True, baz='mock')


def test_ctx_arg_naming():
    @context('myctx')
    def test(myctx):
        assert isinstance(myctx, zmq.Context), myctx

    test()


def test_ctx_args():
    @context('ctx', 5)
    def test(ctx):
        assert isinstance(ctx, zmq.Context), ctx
        assert ctx.IO_THREADS == 5, ctx.IO_THREADS

    test()


def test_ctx_arg_kwarg():
    @context('ctx', io_threads=5)
    def test(ctx):
        assert isinstance(ctx, zmq.Context), ctx
        assert ctx.IO_THREADS == 5, ctx.IO_THREADS

    test()


def test_ctx_kw_naming():
    @context(name='myctx')
    def test(myctx):
        assert isinstance(myctx, zmq.Context), myctx

    test()


def test_ctx_kwargs():
    @context(name='ctx', io_threads=5)
    def test(ctx):
        assert isinstance(ctx, zmq.Context), ctx
        assert ctx.IO_THREADS == 5, ctx.IO_THREADS

    test()


def test_ctx_kwargs_default():
    @context(name='ctx', io_threads=5)
    def test(ctx=None):
        assert isinstance(ctx, zmq.Context), ctx
        assert ctx.IO_THREADS == 5, ctx.IO_THREADS

    test()


def test_ctx_keyword_miss():
    @context(name='ctx')
    def test(other_name):
        pass  # no parameter named 'ctx' to inject into, so the call must fail

    with raises(TypeError):
        test()


def test_ctx_multi_assign():
    @context(name='ctx')
    def test(ctx):
        pass  # 'ctx' is supplied both positionally and by the decorator

    with raises(TypeError):
        test('mock')


def test_ctx_reinit():
    result = {'foo': None, 'bar': None}

    @context()
    def f(key, ctx):
        assert isinstance(ctx, zmq.Context), ctx
        result[key] = ctx

    foo_t = threading.Thread(target=f, args=('foo',))
    bar_t = threading.Thread(target=f, args=('bar',))

    foo_t.start()
    bar_t.start()

    foo_t.join()
    bar_t.join()

    assert result['foo'] is not None, result
    assert result['bar'] is not None, result
    assert result['foo'] is not result['bar'], result


def test_ctx_multi_thread():
    @context()
    @context()
    def f(foo, bar):
        assert isinstance(foo, zmq.Context), foo
        assert isinstance(bar, zmq.Context), bar

        assert len(set(map(id, [foo, bar]))) == 2, set(map(id, [foo, bar]))

    threads = [threading.Thread(target=f) for i in range(8)]
    [t.start() for t in threads]
    [t.join() for t in threads]


##############################################
#  Test cases for @socket
##############################################
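# @socket(type) injects a zmq.Socket, using the context injected by an
# enclosing @context() or falling back to zmq.Context.instance(); stacking
# several @socket decorators injects one socket each (see test_multi_skts).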


def test_ctx_skt():
    @context()
    @socket(zmq.PUB)
    def test(ctx, skt):
        assert isinstance(ctx, zmq.Context), ctx
        assert isinstance(skt, zmq.Socket), skt
        assert skt.type == zmq.PUB

    test()


def test_skt_name():
    @context()
    @socket('myskt', zmq.PUB)
    def test(ctx, myskt):
        assert isinstance(myskt, zmq.Socket), myskt
        assert isinstance(ctx, zmq.Context), ctx
        assert myskt.type == zmq.PUB

    test()


def test_skt_kwarg():
    @context()
    @socket(zmq.PUB, name='myskt')
    def test(ctx, myskt):
        assert isinstance(myskt, zmq.Socket), myskt
        assert isinstance(ctx, zmq.Context), ctx
        assert myskt.type == zmq.PUB

    test()


def test_ctx_skt_name():
    @context('ctx')
    @socket('skt', zmq.PUB, context_name='ctx')
    def test(ctx, skt):
        assert isinstance(skt, zmq.Socket), skt
        assert isinstance(ctx, zmq.Context), ctx
        assert skt.type == zmq.PUB

    test()


def test_skt_default_ctx():
    @socket(zmq.PUB)
    def test(skt):
        assert isinstance(skt, zmq.Socket), skt
        assert skt.context is zmq.Context.instance()
        assert skt.type == zmq.PUB

    test()


def test_skt_reinit():
    result = {'foo': None, 'bar': None}

    @socket(zmq.PUB)
    def f(key, skt):
        assert isinstance(skt, zmq.Socket), skt

        result[key] = skt

    foo_t = threading.Thread(target=f, args=('foo',))
    bar_t = threading.Thread(target=f, args=('bar',))

    foo_t.start()
    bar_t.start()

    foo_t.join()
    bar_t.join()

    assert result['foo'] is not None, result
    assert result['bar'] is not None, result
    assert result['foo'] is not result['bar'], result


def test_ctx_skt_reinit():
    result = {'foo': {'ctx': None, 'skt': None}, 'bar': {'ctx': None, 'skt': None}}

    @context()
    @socket(zmq.PUB)
    def f(key, ctx, skt):
        assert isinstance(ctx, zmq.Context), ctx
        assert isinstance(skt, zmq.Socket), skt

        result[key]['ctx'] = ctx
        result[key]['skt'] = skt

    foo_t = threading.Thread(target=f, args=('foo',))
    bar_t = threading.Thread(target=f, args=('bar',))

    foo_t.start()
    bar_t.start()

    foo_t.join()
    bar_t.join()

    assert result['foo']['ctx'] is not None, result
    assert result['foo']['skt'] is not None, result
    assert result['bar']['ctx'] is not None, result
    assert result['bar']['skt'] is not None, result
    assert result['foo']['ctx'] is not result['bar']['ctx'], result
    assert result['foo']['skt'] is not result['bar']['skt'], result


def test_skt_type_miss():
    @context()
    @socket('myskt')
    def f(ctx, myskt):
        pass  # the socket type is missing

    with raises(TypeError):
        f()


def test_multi_skts():
    @socket(zmq.PUB)
    @socket(zmq.SUB)
    @socket(zmq.PUSH)
    def test(pub, sub, push):
        assert isinstance(pub, zmq.Socket), pub
        assert isinstance(sub, zmq.Socket), sub
        assert isinstance(push, zmq.Socket), push

        assert pub.context is zmq.Context.instance()
        assert sub.context is zmq.Context.instance()
        assert push.context is zmq.Context.instance()

        assert pub.type == zmq.PUB
        assert sub.type == zmq.SUB
        assert push.type == zmq.PUSH

    test()


def test_multi_skts_single_ctx():
    @context()
    @socket(zmq.PUB)
    @socket(zmq.SUB)
    @socket(zmq.PUSH)
    def test(ctx, pub, sub, push):
        assert isinstance(ctx, zmq.Context), ctx
        assert isinstance(pub, zmq.Socket), pub
        assert isinstance(sub, zmq.Socket), sub
        assert isinstance(push, zmq.Socket), push

        assert pub.context is ctx
        assert sub.context is ctx
        assert push.context is ctx

        assert pub.type == zmq.PUB
        assert sub.type == zmq.SUB
        assert push.type == zmq.PUSH

    test()


def test_multi_skts_with_name():
    @socket('foo', zmq.PUSH)
    @socket('bar', zmq.SUB)
    @socket('baz', zmq.PUB)
    def test(foo, bar, baz):
        assert isinstance(foo, zmq.Socket), foo
        assert isinstance(bar, zmq.Socket), bar
        assert isinstance(baz, zmq.Socket), baz

        assert foo.context is zmq.Context.instance()
        assert bar.context is zmq.Context.instance()
        assert baz.context is zmq.Context.instance()

        assert foo.type == zmq.PUSH
        assert bar.type == zmq.SUB
        assert baz.type == zmq.PUB

    test()


def test_func_return():
    @context()
    def f(ctx):
        assert isinstance(ctx, zmq.Context), ctx
        return 'something'

    assert f() == 'something'


def test_skt_multi_thread():
    @socket(zmq.PUB)
    @socket(zmq.SUB)
    @socket(zmq.PUSH)
    def f(pub, sub, push):
        assert isinstance(pub, zmq.Socket), pub
        assert isinstance(sub, zmq.Socket), sub
        assert isinstance(push, zmq.Socket), push

        assert pub.context is zmq.Context.instance()
        assert sub.context is zmq.Context.instance()
        assert push.context is zmq.Context.instance()

        assert pub.type == zmq.PUB
        assert sub.type == zmq.SUB
        assert push.type == zmq.PUSH

        assert len(set(map(id, [pub, sub, push]))) == 3

    threads = [threading.Thread(target=f) for i in range(8)]
    [t.start() for t in threads]
    [t.join() for t in threads]


class TestMethodDecorators(BaseZMQTestCase):
    @context()
    @socket(zmq.PUB)
    @socket(zmq.SUB)
    def multi_skts_method(self, ctx, pub, sub, foo='bar'):
        assert isinstance(self, TestMethodDecorators), self
        assert isinstance(ctx, zmq.Context), ctx
        assert isinstance(pub, zmq.Socket), pub
        assert isinstance(sub, zmq.Socket), sub
        assert foo == 'bar'

        assert pub.context is ctx
        assert sub.context is ctx

        assert pub.type == zmq.PUB
        assert sub.type == zmq.SUB

    def test_multi_skts_method(self):
        self.multi_skts_method()

    def test_multi_skts_method_other_args(self):
        @socket(zmq.PUB)
        @socket(zmq.SUB)
        def f(foo, pub, sub, bar=None):
            assert isinstance(pub, zmq.Socket), pub
            assert isinstance(sub, zmq.Socket), sub

            assert foo == 'mock'
            assert bar == 'fake'

            assert pub.context is zmq.Context.instance()
            assert sub.context is zmq.Context.instance()

            assert pub.type == zmq.PUB
            assert sub.type == zmq.SUB

        f('mock', bar='fake')
pyzmq-26.4.0/tests/test_device.py
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import time

import zmq
from zmq import devices
from zmq_test_utils import PYPY, BaseZMQTestCase, GreenTest, SkipTest, have_gevent

if PYPY:
    # cleanup of shared Context doesn't work on PyPy
    devices.Device.context_factory = zmq.Context


class TestDevice(BaseZMQTestCase):
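    # devices.ThreadDevice / ThreadProxy run a zmq device (or proxy, with an
    # extra monitor socket) in a background daemon thread; the
    # bind_*_to_random_port helpers pick a free port before start().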
    def test_device_types(self):
        for devtype in (zmq.STREAMER, zmq.FORWARDER, zmq.QUEUE):
            dev = devices.Device(devtype, zmq.PAIR, zmq.PAIR)
            assert dev.device_type == devtype
            del dev

    def test_device_attributes(self):
        dev = devices.Device(zmq.QUEUE, zmq.SUB, zmq.PUB)
        assert dev.in_type == zmq.SUB
        assert dev.out_type == zmq.PUB
        assert dev.device_type == zmq.QUEUE
        assert dev.daemon is True
        del dev

    def test_single_socket_forwarder_connect(self):
        if zmq.zmq_version() in ('4.1.1', '4.0.6'):
            raise SkipTest(f"libzmq-{zmq.zmq_version()} broke single-socket devices")
        dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
        req = self.context.socket(zmq.REQ)
        port = req.bind_to_random_port('tcp://127.0.0.1')
        dev.connect_in(f'tcp://127.0.0.1:{port}')
        dev.start()
        time.sleep(0.25)
        msg = b'hello'
        req.send(msg)
        assert msg == self.recv(req)
        del dev
        req.close()
        dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
        req = self.context.socket(zmq.REQ)
        port = req.bind_to_random_port('tcp://127.0.0.1')
        dev.connect_out(f'tcp://127.0.0.1:{port}')
        dev.start()
        time.sleep(0.25)
        msg = b'hello again'
        req.send(msg)
        assert msg == self.recv(req)
        del dev
        req.close()

    def test_single_socket_forwarder_bind(self):
        if zmq.zmq_version() in ('4.1.1', '4.0.6'):
            raise SkipTest(f"libzmq-{zmq.zmq_version()} broke single-socket devices")
        dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
        port = dev.bind_in_to_random_port('tcp://127.0.0.1')
        req = self.context.socket(zmq.REQ)
        req.connect(f'tcp://127.0.0.1:{port}')
        dev.start()
        time.sleep(0.25)
        msg = b'hello'
        req.send(msg)
        assert msg == self.recv(req)
        del dev
        req.close()
        dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1)
        port = dev.bind_in_to_random_port('tcp://127.0.0.1')
        req = self.context.socket(zmq.REQ)
        req.connect(f'tcp://127.0.0.1:{port}')
        dev.start()
        time.sleep(0.25)
        msg = b'hello again'
        req.send(msg)
        assert msg == self.recv(req)
        del dev
        req.close()

    def test_device_bind_to_random_with_args(self):
        dev = devices.ThreadDevice(zmq.PULL, zmq.PUSH, -1)
        iface = 'tcp://127.0.0.1'
        ports = []
        min, max = 5000, 5050
        ports.extend(
            [
                dev.bind_in_to_random_port(iface, min_port=min, max_port=max),
                dev.bind_out_to_random_port(iface, min_port=min, max_port=max),
            ]
        )
        for port in ports:
            if port < min or port > max:
                self.fail(f'Unexpected port number: {port}')

    def test_device_bind_to_random_binderror(self):
        dev = devices.ThreadDevice(zmq.PULL, zmq.PUSH, -1)
        iface = 'tcp://127.0.0.1'
        try:
            for i in range(11):
                dev.bind_in_to_random_port(iface, min_port=10000, max_port=10010)
        except zmq.ZMQBindError as e:
            return
        else:
            self.fail('Should have failed')

    def test_proxy(self):
        if zmq.zmq_version_info() < (3, 2):
            raise SkipTest("Proxies only in libzmq >= 3")
        dev = devices.ThreadProxy(zmq.PULL, zmq.PUSH, zmq.PUSH)
        iface = 'tcp://127.0.0.1'
        port = dev.bind_in_to_random_port(iface)
        port2 = dev.bind_out_to_random_port(iface)
        port3 = dev.bind_mon_to_random_port(iface)
        dev.start()
        time.sleep(0.25)
        msg = b'hello'
        push = self.context.socket(zmq.PUSH)
        push.connect(f"{iface}:{port}")
        pull = self.context.socket(zmq.PULL)
        pull.connect(f"{iface}:{port2}")
        mon = self.context.socket(zmq.PULL)
        mon.connect(f"{iface}:{port3}")
        push.send(msg)
        self.sockets.extend([push, pull, mon])
        assert msg == self.recv(pull)
        assert msg == self.recv(mon)

    def test_proxy_bind_to_random_with_args(self):
        if zmq.zmq_version_info() < (3, 2):
            raise SkipTest("Proxies only in libzmq >= 3")
        dev = devices.ThreadProxy(zmq.PULL, zmq.PUSH, zmq.PUSH)
        iface = 'tcp://127.0.0.1'
        ports = []
        min, max = 5000, 5050
        ports.extend(
            [
                dev.bind_in_to_random_port(iface, min_port=min, max_port=max),
                dev.bind_out_to_random_port(iface, min_port=min, max_port=max),
                dev.bind_mon_to_random_port(iface, min_port=min, max_port=max),
            ]
        )
        for port in ports:
            if port < min or port > max:
                self.fail(f'Unexpected port number: {port}')


if have_gevent:
    import gevent

    import zmq.green

    class TestDeviceGreen(GreenTest, BaseZMQTestCase):
        def test_green_device(self):
            rep = self.context.socket(zmq.REP)
            req = self.context.socket(zmq.REQ)
            self.sockets.extend([req, rep])
            port = rep.bind_to_random_port('tcp://127.0.0.1')
            g = gevent.spawn(zmq.green.device, zmq.QUEUE, rep, rep)
            req.connect(f'tcp://127.0.0.1:{port}')
            req.send(b'hi')
            timeout = gevent.Timeout(3)
            timeout.start()
            receiver = gevent.spawn(req.recv)
            assert receiver.get(2) == b'hi'
            timeout.cancel()
            g.kill(block=True)
pyzmq-26.4.0/tests/test_draft.py
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import time

import pytest

import zmq
from zmq_test_utils import BaseZMQTestCase


class TestDraftSockets(BaseZMQTestCase):
    def setUp(self):
        if not zmq.DRAFT_API:
            pytest.skip("draft api unavailable")
        super().setUp()

    def test_client_server(self):
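        # CLIENT/SERVER are draft socket types: each message received by SERVER
        # carries a routing_id, which is used to address the reply.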
        client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER)
        client.send(b'request')
        msg = self.recv(server, copy=False)
        assert msg.routing_id is not None
        server.send(b'reply', routing_id=msg.routing_id)
        reply = self.recv(client)
        assert reply == b'reply'

    def test_radio_dish(self):
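        # RADIO/DISH is best-effort group pub/sub, so this loop only requires
        # that a reasonable subset of the sent messages arrives.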
        dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO)
        dish.rcvtimeo = 250
        group = 'mygroup'
        dish.join(group)
        received_count = 0
        received = set()
        sent = set()
        for i in range(10):
            msg = str(i).encode('ascii')
            sent.add(msg)
            radio.send(msg, group=group)
            try:
                recvd = dish.recv()
            except zmq.Again:
                time.sleep(0.1)
            else:
                received.add(recvd)
                received_count += 1
        # assert that we got *something*
        assert len(received.intersection(sent)) >= 5
pyzmq-26.4.0/tests/test_error.py
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from threading import Thread

import zmq
from zmq import Again, ContextTerminated, ZMQError, strerror
from zmq_test_utils import BaseZMQTestCase


class TestZMQError(BaseZMQTestCase):
    def test_strerror(self):
        """test that strerror gets the right type."""
        for i in range(10):
            e = strerror(i)
            assert isinstance(e, str)

    def test_zmqerror(self):
        for errno in range(10):
            e = ZMQError(errno)
            assert e.errno == errno
            assert str(e) == strerror(errno)

    def test_again(self):
        s = self.context.socket(zmq.REP)
        self.assertRaises(Again, s.recv, zmq.NOBLOCK)
        self.assertRaisesErrno(zmq.EAGAIN, s.recv, zmq.NOBLOCK)
        s.close()

    def atest_ctxterm(self):
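        # The 'atest_' prefix (instead of 'test_') keeps pytest from collecting
        # this case by default.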
        s = self.context.socket(zmq.REP)
        t = Thread(target=self.context.term)
        t.start()
        self.assertRaises(ContextTerminated, s.recv, zmq.NOBLOCK)
        self.assertRaisesErrno(zmq.TERM, s.recv, zmq.NOBLOCK)
        s.close()
        t.join()
pyzmq-26.4.0/tests/test_etc.py
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.


from pytest import mark

import zmq

only_bundled = mark.skipif(not hasattr(zmq, '_libzmq'), reason="requires bundled libzmq")
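# zmq.has(name) wraps libzmq's zmq_has(): it reports build-time capabilities
# such as 'curve' and 'ipc', and returns False for unknown capability names.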


@mark.skipif('zmq.zmq_version_info() < (4, 1)')
def test_has():
    assert not zmq.has('something weird')


@only_bundled
def test_has_curve():
    """bundled libzmq has curve support"""
    assert zmq.has('curve')


@only_bundled
def test_has_ipc():
    """bundled libzmq has ipc support"""
    assert zmq.has('ipc')
pyzmq-26.4.0/tests/test_ext.py
"""tests for extending pyzmq"""

import zmq
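# Extension hooks: a zmq.Context subclass can set _socket_class to a custom
# zmq.Socket subclass, and extra keyword arguments passed to ctx.socket() are
# forwarded to that class's __init__ (exercised below).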


class CustomSocket(zmq.Socket):
    custom_attr: int

    def __init__(self, context, socket_type, custom_attr: int = 0):
        super().__init__(context, socket_type)
        self.custom_attr = custom_attr


class CustomContext(zmq.Context):
    extra_arg: str
    _socket_class = CustomSocket

    def __init__(self, extra_arg: str = 'x'):
        super().__init__()
        self.extra_arg = extra_arg


def test_custom_context():
    ctx = CustomContext('s')
    assert isinstance(ctx, CustomContext)

    assert ctx.extra_arg == 's'
    s = ctx.socket(zmq.PUSH, custom_attr=10)
    assert isinstance(s, CustomSocket)
    assert s.custom_attr == 10
    assert s.context is ctx
    assert s.type == zmq.PUSH
    s.close()
    ctx.term()
pyzmq-26.4.0/tests/test_future.py
# Copyright (c) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import json
import os
import sys
from datetime import timedelta

import pytest

gen = pytest.importorskip('tornado.gen')

from tornado.ioloop import IOLoop

import zmq
from zmq.eventloop import future
from zmq_test_utils import BaseZMQTestCase
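# zmq.eventloop.future provides Context/Socket/Poller variants whose send/recv
# and poll calls return Futures that resolve on the running tornado IOLoop.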


class TestFutureSocket(BaseZMQTestCase):
    Context = future.Context

    def setUp(self):
        self.loop = IOLoop(make_current=False)
        super().setUp()

    def tearDown(self):
        super().tearDown()
        if self.loop:
            self.loop.close(all_fds=True)

    def test_socket_class(self):
        s = self.context.socket(zmq.PUSH)
        assert isinstance(s, future.Socket)
        s.close()

    def test_instance_subclass_first(self):
        actx = self.Context.instance()
        ctx = zmq.Context.instance()
        ctx.term()
        actx.term()
        assert type(ctx) is zmq.Context
        assert type(actx) is self.Context

    def test_instance_subclass_second(self):
        ctx = zmq.Context.instance()
        actx = self.Context.instance()
        ctx.term()
        actx.term()
        assert type(ctx) is zmq.Context
        assert type(actx) is self.Context

    def test_recv_multipart(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_multipart()
            assert not f.done()
            await a.send(b"hi")
            recvd = await f
            assert recvd == [b'hi']

        self.loop.run_sync(test)

    def test_recv(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f1 = b.recv()
            f2 = b.recv()
            assert not f1.done()
            assert not f2.done()
            await a.send_multipart([b"hi", b"there"])
            recvd = await f2
            assert f1.done()
            assert f1.result() == b'hi'
            assert recvd == b'there'

        self.loop.run_sync(test)

    def test_recv_cancel(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f1 = b.recv()
            f2 = b.recv_multipart()
            assert f1.cancel()
            assert f1.done()
            assert not f2.done()
            await a.send_multipart([b"hi", b"there"])
            recvd = await f2
            assert f1.cancelled()
            assert f2.done()
            assert recvd == [b'hi', b'there']

        self.loop.run_sync(test)

    @pytest.mark.skipif(not hasattr(zmq, 'RCVTIMEO'), reason="requires RCVTIMEO")
    def test_recv_timeout(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            b.rcvtimeo = 100
            f1 = b.recv()
            b.rcvtimeo = 1000
            f2 = b.recv_multipart()
            with pytest.raises(zmq.Again):
                await f1
            await a.send_multipart([b"hi", b"there"])
            recvd = await f2
            assert f2.done()
            assert recvd == [b'hi', b'there']

        self.loop.run_sync(test)

    @pytest.mark.skipif(not hasattr(zmq, 'SNDTIMEO'), reason="requires SNDTIMEO")
    def test_send_timeout(self):
        async def test():
            s = self.socket(zmq.PUSH)
            s.sndtimeo = 100
            with pytest.raises(zmq.Again):
                await s.send(b"not going anywhere")

        self.loop.run_sync(test)

    def test_send_noblock(self):
        async def test():
            s = self.socket(zmq.PUSH)
            with pytest.raises(zmq.Again):
                await s.send(b"not going anywhere", flags=zmq.NOBLOCK)

        self.loop.run_sync(test)

    def test_send_multipart_noblock(self):
        async def test():
            s = self.socket(zmq.PUSH)
            with pytest.raises(zmq.Again):
                await s.send_multipart([b"not going anywhere"], flags=zmq.NOBLOCK)

        self.loop.run_sync(test)

    def test_recv_string(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_string()
            assert not f.done()
            msg = 'πøøπ'
            await a.send_string(msg)
            recvd = await f
            assert f.done()
            assert f.result() == msg
            assert recvd == msg

        self.loop.run_sync(test)

    def test_recv_json(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_json()
            assert not f.done()
            obj = dict(a=5)
            await a.send_json(obj)
            recvd = await f
            assert f.done()
            assert f.result() == obj
            assert recvd == obj

        self.loop.run_sync(test)

    def test_recv_json_cancelled(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_json()
            assert not f.done()
            f.cancel()
            # cycle eventloop to allow cancel events to fire
            await gen.sleep(0)
            obj = dict(a=5)
            await a.send_json(obj)
            with pytest.raises(future.CancelledError):
                recvd = await f
            assert f.done()
            # give it a chance to incorrectly consume the event
            events = await b.poll(timeout=5)
            assert events
            await gen.sleep(0)
            # make sure cancelled recv didn't eat up event
            recvd = await gen.with_timeout(timedelta(seconds=5), b.recv_json())
            assert recvd == obj

        self.loop.run_sync(test)

    def test_recv_pyobj(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_pyobj()
            assert not f.done()
            obj = dict(a=5)
            await a.send_pyobj(obj)
            recvd = await f
            assert f.done()
            assert f.result() == obj
            assert recvd == obj

        self.loop.run_sync(test)

    def test_custom_serialize(self):
        def serialize(msg):
            frames = []
            frames.extend(msg.get('identities', []))
            content = json.dumps(msg['content']).encode('utf8')
            frames.append(content)
            return frames

        def deserialize(frames):
            identities = frames[:-1]
            content = json.loads(frames[-1].decode('utf8'))
            return {
                'identities': identities,
                'content': content,
            }

        async def test():
            a, b = self.create_bound_pair(zmq.DEALER, zmq.ROUTER)

            msg = {
                'content': {
                    'a': 5,
                    'b': 'bee',
                }
            }
            await a.send_serialized(msg, serialize)
            recvd = await b.recv_serialized(deserialize)
            assert recvd['content'] == msg['content']
            assert recvd['identities']
            # bounce back, tests identities
            await b.send_serialized(recvd, serialize)
            r2 = await a.recv_serialized(deserialize)
            assert r2['content'] == msg['content']
            assert not r2['identities']

        self.loop.run_sync(test)

    def test_custom_serialize_error(self):
        async def test():
            a, b = self.create_bound_pair(zmq.DEALER, zmq.ROUTER)

            msg = {
                'content': {
                    'a': 5,
                    'b': 'bee',
                }
            }
            with pytest.raises(TypeError):
                await a.send_serialized(json, json.dumps)

            await a.send(b"not json")
            with pytest.raises(TypeError):
                await b.recv_serialized(json.loads)

        self.loop.run_sync(test)

    def test_poll(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.poll(timeout=0)
            assert f.done()
            assert f.result() == 0

            f = b.poll(timeout=1)
            assert not f.done()
            evt = await f
            assert evt == 0

            f = b.poll(timeout=1000)
            assert not f.done()
            await a.send_multipart([b"hi", b"there"])
            evt = await f
            assert evt == zmq.POLLIN
            recvd = await b.recv_multipart()
            assert recvd == [b'hi', b'there']

        self.loop.run_sync(test)

    @pytest.mark.skipif(
        sys.platform.startswith('win'), reason='Windows unsupported socket type'
    )
    def test_poll_base_socket(self):
        async def test():
            ctx = zmq.Context()
            url = 'inproc://test'
            a = ctx.socket(zmq.PUSH)
            b = ctx.socket(zmq.PULL)
            self.sockets.extend([a, b])
            a.bind(url)
            b.connect(url)

            poller = future.Poller()
            poller.register(b, zmq.POLLIN)

            f = poller.poll(timeout=1000)
            assert not f.done()
            a.send_multipart([b'hi', b'there'])
            evt = await f
            assert evt == [(b, zmq.POLLIN)]
            recvd = b.recv_multipart()
            assert recvd == [b'hi', b'there']
            a.close()
            b.close()
            ctx.term()

        self.loop.run_sync(test)

    def test_close_all_fds(self):
        s = self.socket(zmq.PUB)

        async def attach():
            s._get_loop()

        self.loop.run_sync(attach)
        self.loop.close(all_fds=True)
        self.loop = None  # avoid second close later
        assert s.closed

    @pytest.mark.skipif(
        sys.platform.startswith('win'),
        reason='Windows does not support polling on files',
    )
    def test_poll_raw(self):
        async def test():
            p = future.Poller()
            # make a pipe
            r, w = os.pipe()
            r = os.fdopen(r, 'rb')
            w = os.fdopen(w, 'wb')

            # POLLOUT
            p.register(r, zmq.POLLIN)
            p.register(w, zmq.POLLOUT)
            evts = await p.poll(timeout=1)
            evts = dict(evts)
            assert r.fileno() not in evts
            assert w.fileno() in evts
            assert evts[w.fileno()] == zmq.POLLOUT

            # POLLIN
            p.unregister(w)
            w.write(b'x')
            w.flush()
            evts = await p.poll(timeout=1000)
            evts = dict(evts)
            assert r.fileno() in evts
            assert evts[r.fileno()] == zmq.POLLIN
            assert r.read(1) == b'x'
            r.close()
            w.close()

        self.loop.run_sync(test)
pyzmq-26.4.0/tests/test_imports.py
"""
Test Imports - the quickest test to ensure that we haven't
introduced version-incompatible syntax errors.
"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

# flake8: noqa: F401

import pytest


def test_toplevel():
    """test toplevel import"""
    import zmq


def test_core():
    """test core imports"""
    from zmq import (
        Context,
        Frame,
        Poller,
        Socket,
        constants,
        device,
        proxy,
        pyzmq_version,
        pyzmq_version_info,
        zmq_version,
        zmq_version_info,
    )


def test_devices():
    """test device imports"""
    import zmq.devices
    from zmq.devices import basedevice, monitoredqueue, monitoredqueuedevice


def test_log():
    """test log imports"""
    import zmq.log
    from zmq.log import handlers


def test_eventloop():
    """test eventloop imports"""
    pytest.importorskip("tornado")
    import zmq.eventloop
    from zmq.eventloop import ioloop, zmqstream


def test_utils():
    """test util imports"""
    import zmq.utils
    from zmq.utils import jsonapi, strtypes


def test_ssh():
    """test ssh imports"""
    from zmq.ssh import tunnel


def test_decorators():
    """test decorators imports"""
    from zmq.decorators import context, socket


def test_zmq_all():
    import zmq

    for name in zmq.__all__:
        assert hasattr(zmq, name)


@pytest.mark.parametrize("pkgname", ["zmq", "zmq.green"])
@pytest.mark.parametrize(
    "attr",
    [
        "RCVTIMEO",
        "PUSH",
        "zmq_version_info",
        "SocketOption",
        "device",
        "Socket",
        "Context",
    ],
)
def test_all_exports(pkgname, attr):
    import zmq

    subpkg = pytest.importorskip(pkgname)
    for name in zmq.__all__:
        assert hasattr(subpkg, name)

    assert attr in subpkg.__all__
    if attr not in ("Socket", "Context", "device"):
        assert getattr(subpkg, attr) is getattr(zmq, attr)
pyzmq-26.4.0/tests/test_includes.py000066400000000000000000000015631477374370200173670ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.


from unittest import TestCase

import zmq


class TestIncludes(TestCase):
    def test_get_includes(self):
        from os.path import basename

        includes = zmq.get_includes()
        assert isinstance(includes, list)
        assert len(includes) >= 2
        parent = includes[0]
        assert isinstance(parent, str)
        utilsdir = includes[1]
        assert isinstance(utilsdir, str)
        utils = basename(utilsdir)
        assert utils == "utils"

    def test_get_library_dirs(self):
        from os.path import basename

        libdirs = zmq.get_library_dirs()
        assert isinstance(libdirs, list)
        assert len(libdirs) == 1
        parent = libdirs[0]
        assert isinstance(parent, str)
        libdir = basename(parent)
        assert libdir == "zmq"
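

# A hedged, illustrative sketch (not part of the test suite): the typical
# consumer of zmq.get_includes()/zmq.get_library_dirs() is a setuptools
# Extension compiled against pyzmq's bundled headers. The names "mymod" and
# "mymod.c" below are hypothetical.
def _extension_sketch():  # pragma: no cover - illustration only
    from setuptools import Extension

    return Extension(
        "mymod",  # hypothetical extension name
        sources=["mymod.c"],  # hypothetical source file
        include_dirs=zmq.get_includes(),
        library_dirs=zmq.get_library_dirs(),
    )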
pyzmq-26.4.0/tests/test_ioloop.py000066400000000000000000000007741477374370200170650ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.


import pytest

tornado = pytest.importorskip("tornado")


def test_ioloop():
    # may have been imported before,
    # can't capture the warning
    from zmq.eventloop import ioloop

    assert ioloop.IOLoop is tornado.ioloop.IOLoop
    assert ioloop.ZMQIOLoop is ioloop.IOLoop


def test_ioloop_install():
    from zmq.eventloop import ioloop

    with pytest.warns(DeprecationWarning):
        ioloop.install()
pyzmq-26.4.0/tests/test_log.py000066400000000000000000000154511477374370200163430ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.


import logging
import time

import zmq
from zmq.log import handlers
from zmq_test_utils import BaseZMQTestCase


class TestPubLog(BaseZMQTestCase):
    iface = 'inproc://zmqlog'
    topic = 'zmq'

    @property
    def logger(self):
        # print dir(self)
        logger = logging.getLogger('zmqtest')
        logger.setLevel(logging.DEBUG)
        return logger

    def connect_handler(self, topic=None):
        topic = self.topic if topic is None else topic
        logger = self.logger
        pub, sub = self.create_bound_pair(zmq.PUB, zmq.SUB)
        handler = handlers.PUBHandler(pub)
        handler.setLevel(logging.DEBUG)
        handler.root_topic = topic
        logger.addHandler(handler)
        sub.setsockopt(zmq.SUBSCRIBE, topic.encode())
        time.sleep(0.1)
        return logger, handler, sub

    def test_init_iface(self):
        logger = self.logger
        ctx = self.context
        handler = handlers.PUBHandler(self.iface)
        assert handler.ctx is not ctx
        self.sockets.append(handler.socket)
        # handler.ctx.term()
        handler = handlers.PUBHandler(self.iface, self.context)
        self.sockets.append(handler.socket)
        assert handler.ctx is ctx
        handler.setLevel(logging.DEBUG)
        handler.root_topic = self.topic
        logger.addHandler(handler)
        sub = ctx.socket(zmq.SUB)
        self.sockets.append(sub)
        sub.setsockopt(zmq.SUBSCRIBE, self.topic.encode())
        sub.connect(self.iface)
        import time

        time.sleep(0.25)
        msg1 = 'message'
        logger.info(msg1)

        (topic, msg2) = sub.recv_multipart()
        assert topic == b'zmq.INFO'
        assert msg2 == (msg1 + "\n").encode("utf8")
        logger.removeHandler(handler)

    def test_init_socket(self):
        pub, sub = self.create_bound_pair(zmq.PUB, zmq.SUB)
        logger = self.logger
        handler = handlers.PUBHandler(pub)
        handler.setLevel(logging.DEBUG)
        handler.root_topic = self.topic
        logger.addHandler(handler)

        assert handler.socket is pub
        assert handler.ctx is pub.context
        assert handler.ctx is self.context
        sub.setsockopt(zmq.SUBSCRIBE, self.topic.encode())
        import time

        time.sleep(0.1)
        msg1 = 'message'
        logger.info(msg1)

        (topic, msg2) = sub.recv_multipart()
        assert topic == b'zmq.INFO'
        assert msg2 == (msg1 + "\n").encode("utf8")
        logger.removeHandler(handler)

    def test_root_topic(self):
        logger, handler, sub = self.connect_handler()
        handler.socket.bind(self.iface)
        sub2 = sub.context.socket(zmq.SUB)
        self.sockets.append(sub2)
        sub2.connect(self.iface)
        sub2.setsockopt(zmq.SUBSCRIBE, b'')
        handler.root_topic = b'twoonly'
        msg1 = 'ignored'
        logger.info(msg1)
        self.assertRaisesErrno(zmq.EAGAIN, sub.recv, zmq.NOBLOCK)
        topic, msg2 = sub2.recv_multipart()
        assert topic == b'twoonly.INFO'
        assert msg2 == (msg1 + '\n').encode()

        logger.removeHandler(handler)

    def test_blank_root_topic(self):
        logger, handler, sub_everything = self.connect_handler()
        sub_everything.setsockopt(zmq.SUBSCRIBE, b'')
        handler.socket.bind(self.iface)
        sub_only_info = sub_everything.context.socket(zmq.SUB)
        self.sockets.append(sub_only_info)
        sub_only_info.connect(self.iface)
        sub_only_info.setsockopt(zmq.SUBSCRIBE, b'INFO')
        handler.setRootTopic(b'')
        msg_debug = 'debug_message'
        logger.debug(msg_debug)
        self.assertRaisesErrno(zmq.EAGAIN, sub_only_info.recv, zmq.NOBLOCK)
        topic, msg_debug_response = sub_everything.recv_multipart()
        assert topic == b'DEBUG'
        msg_info = 'info_message'
        logger.info(msg_info)
        topic, msg_info_response_everything = sub_everything.recv_multipart()
        assert topic == b'INFO'
        topic, msg_info_response_onlyinfo = sub_only_info.recv_multipart()
        assert topic == b'INFO'
        assert msg_info_response_everything == msg_info_response_onlyinfo

        logger.removeHandler(handler)

    def test_unicode_message(self):
        logger, handler, sub = self.connect_handler()
        base_topic = (self.topic + '.INFO').encode()
        for msg, expected in [
            ('hello', [base_topic, b'hello\n']),
            ('héllo', [base_topic, 'héllo\n'.encode()]),
            ('tøpic::héllo', [base_topic + '.tøpic'.encode(), 'héllo\n'.encode()]),
        ]:
            logger.info(msg)
            received = sub.recv_multipart()
            assert received == expected
        logger.removeHandler(handler)

    def test_set_info_formatter_via_property(self):
        logger, handler, sub = self.connect_handler()
        handler.formatters[logging.INFO] = logging.Formatter("%(message)s UNITTEST\n")
        handler.socket.bind(self.iface)
        sub.setsockopt(zmq.SUBSCRIBE, handler.root_topic.encode())
        logger.info('info message')
        topic, msg = sub.recv_multipart()
        assert msg == b'info message UNITTEST\n'
        logger.removeHandler(handler)

    def test_custom_global_formatter(self):
        logger, handler, sub = self.connect_handler()
        formatter = logging.Formatter("UNITTEST %(message)s")
        handler.setFormatter(formatter)
        handler.socket.bind(self.iface)
        sub.setsockopt(zmq.SUBSCRIBE, handler.root_topic.encode())
        logger.info('info message')
        topic, msg = sub.recv_multipart()
        assert msg == b'UNITTEST info message'
        logger.debug('debug message')
        topic, msg = sub.recv_multipart()
        assert msg == b'UNITTEST debug message'
        logger.removeHandler(handler)

    def test_custom_debug_formatter(self):
        logger, handler, sub = self.connect_handler()
        formatter = logging.Formatter("UNITTEST DEBUG %(message)s")
        handler.setFormatter(formatter, logging.DEBUG)
        handler.socket.bind(self.iface)
        sub.setsockopt(zmq.SUBSCRIBE, handler.root_topic.encode())
        logger.info('info message')
        topic, msg = sub.recv_multipart()
        assert msg == b'info message\n'
        logger.debug('debug message')
        topic, msg = sub.recv_multipart()
        assert msg == b'UNITTEST DEBUG debug message'
        logger.removeHandler(handler)

    def test_custom_message_type(self):
        class Message:
            def __init__(self, msg: str):
                self.msg = msg

            def __str__(self) -> str:
                return self.msg

        logger, handler, sub = self.connect_handler()
        msg = "hello"
        logger.info(Message(msg))
        topic, received = sub.recv_multipart()
        assert topic == b'zmq.INFO'
        assert received == b'hello\n'
        logger.removeHandler(handler)
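

# A hedged usage sketch (not exercised by the tests above): wiring a PUBHandler
# into stdlib logging. Records are published on the topic "<root_topic>.<LEVEL>",
# and a "subtopic::message" payload extends the topic, as test_unicode_message
# shows. The endpoint below is hypothetical.
def _pubhandler_sketch():  # pragma: no cover - illustration only
    ctx = zmq.Context.instance()
    pub = ctx.socket(zmq.PUB)
    pub.bind("tcp://127.0.0.1:5558")  # hypothetical endpoint
    handler = handlers.PUBHandler(pub)
    handler.root_topic = "app"
    logger = logging.getLogger("app")
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.info("worker::started")  # published on topic b"app.INFO.worker"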
pyzmq-26.4.0/tests/test_message.py000066400000000000000000000266011477374370200172050ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.


import copy
import gc
import sys

try:
    from sys import getrefcount
except ImportError:
    grc = None
else:
    grc = getrefcount

import time

import pytest

import zmq
from zmq_test_utils import PYPY, BaseZMQTestCase, SkipTest, skip_cpython_cffi, skip_pypy

# some useful constants:

x = b'x'

if grc:
    rc0 = grc(x)
    v = memoryview(x)
    view_rc = grc(x) - rc0


def await_gc(obj, rc):
    """wait for refcount on an object to drop to an expected value

    Necessary because of the zero-copy gc thread,
    which can take some time to receive its DECREF message.
    """
    # count refs for this function
    if sys.version_info < (3, 11):
        my_refs = 2
    else:
        my_refs = 1
    for i in range(50):
        # rc + my_refs to account for the references held by this function
        if grc(obj) <= rc + my_refs:
            return
        time.sleep(0.05)


class TestFrame(BaseZMQTestCase):
    def tearDown(self):
        super().tearDown()
        for i in range(3):
            gc.collect()

    @skip_pypy
    def test_above_30(self):
        """Message above 30 bytes are never copied by 0MQ."""
        for i in range(5, 16):  # 32, 64,..., 65536
            s = (2**i) * x
            rc = grc(s)
            m = zmq.Frame(s, copy=False)
            assert grc(s) == rc + 2
            del m
            await_gc(s, rc)
            assert grc(s) == rc
            del s

    def test_str(self):
        """Test the str representations of the Frames."""
        m = zmq.Frame(b'')
        assert str(m) == ""
        m = zmq.Frame(b'123456789')
        assert str(m) == ""
        m = zmq.Frame(b'x' * 20)
        assert str(m) == ""
        m = zmq.Frame(b'x' * 2000)
        assert str(m) == ""
        m = zmq.Frame(b'x' * 2_000_000)
        assert str(m) == ""

    def test_bytes(self):
        """Test the Frame.bytes property."""
        for i in range(1, 16):
            s = (2**i) * x
            m = zmq.Frame(s)
            b = m.bytes
            assert s == m.bytes
            if not PYPY:
                # check that it copies
                assert b is not s
            # check that it copies only once
            assert b is m.bytes

    def test_unicode(self):
        """Test the unicode representations of the Frames."""
        s = 'asdf'
        self.assertRaises(TypeError, zmq.Frame, s)
        for i in range(16):
            s = (2**i) * '§'
            m = zmq.Frame(s.encode('utf8'))
            assert s == m.bytes.decode('utf8')

    def test_len(self):
        """Test the len of the Frames."""
        for i in range(16):
            s = (2**i) * x
            m = zmq.Frame(s)
            assert len(s) == len(m)

    @skip_pypy
    def test_lifecycle1(self):
        """Run through a ref counting cycle with a copy."""
        for i in range(5, 16):  # 32, 64,..., 65536
            s = (2**i) * x
            rc = rc_0 = grc(s)
            m = zmq.Frame(s, copy=False)
            rc += 2
            assert grc(s) == rc
            m2 = copy.copy(m)
            rc += 1
            assert grc(s) == rc
            # no increase in refcount for accessing buffer
            # which references m2 directly
            buf = m2.buffer
            assert grc(s) == rc

            assert s == bytes(m2)
            assert s == m.bytes
            assert s == bytes(buf)
            del m2
            assert grc(s) == rc
            # buf holds direct reference to m2 which holds
            del buf
            rc -= 1
            assert grc(s) == rc
            del m
            rc -= 2
            await_gc(s, rc)
            assert grc(s) == rc
            assert rc == rc_0
            del s

    @skip_pypy
    def test_lifecycle2(self):
        """Run through a different ref counting cycle with a copy."""
        for i in range(5, 16):  # 32, 64,..., 65536
            s = (2**i) * x
            rc = rc_0 = grc(s)
            m = zmq.Frame(s, copy=False)
            rc += 2
            assert grc(s) == rc
            m2 = copy.copy(m)
            rc += 1
            assert grc(s) == rc
            # no increase in refcount for accessing buffer
            # which references m directly
            buf = m.buffer
            assert grc(s) == rc
            assert s == bytes(m2)
            assert s == m2.bytes
            assert s == m.bytes
            assert s == bytes(buf)
            del buf
            assert grc(s) == rc
            del m
            rc -= 1
            assert grc(s) == rc
            del m2
            rc -= 2
            await_gc(s, rc)
            assert grc(s) == rc
            assert rc == rc_0
            del s

    def test_tracker(self):
        m = zmq.Frame(b'asdf', copy=False, track=True)
        assert not m.tracker.done
        pm = zmq.MessageTracker(m)
        assert not pm.done
        del m
        for i in range(3):
            gc.collect()
        for i in range(10):
            if pm.done:
                break
            time.sleep(0.1)
        assert pm.done

    def test_no_tracker(self):
        m = zmq.Frame(b'asdf', track=False)
        assert m.tracker is None
        m2 = copy.copy(m)
        assert m2.tracker is None
        self.assertRaises(ValueError, zmq.MessageTracker, m)

    def test_multi_tracker(self):
        m = zmq.Frame(b'asdf', copy=False, track=True)
        m2 = zmq.Frame(b'whoda', copy=False, track=True)
        mt = zmq.MessageTracker(m, m2)
        assert not m.tracker.done
        assert not mt.done
        self.assertRaises(zmq.NotDone, mt.wait, 0.1)
        del m
        for i in range(3):
            gc.collect()
        self.assertRaises(zmq.NotDone, mt.wait, 0.1)
        assert not mt.done
        del m2
        for i in range(3):
            gc.collect()
        assert mt.wait(0.1) is None
        assert mt.done

    def test_buffer_in(self):
        """test using a buffer as input"""
        ins = "§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√".encode()
        zmq.Frame(memoryview(ins))
        zmq.Frame(bytearray(5))

    def test_bad_buffer_in(self):
        """test using a bad object"""
        with pytest.raises(TypeError):
            zmq.Frame(5)
        with pytest.raises(TypeError):
            zmq.Frame(object())
        with pytest.raises(TypeError):
            zmq.Frame("str")
        with pytest.raises(BufferError):
            zmq.Frame(memoryview(bytearray(10))[::2])

    def test_buffer_out(self):
        """receiving buffered output"""
        ins = "§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√".encode()
        m = zmq.Frame(ins)
        outb = m.buffer
        assert isinstance(outb, memoryview)
        assert outb is m.buffer
        assert m.buffer is m.buffer

    @skip_cpython_cffi
    def test_memoryview_shape(self):
        """memoryview shape info"""
        data = "§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√".encode()
        n = len(data)
        f = zmq.Frame(data)
        view1 = f.buffer
        assert view1.ndim == 1
        assert view1.shape == (n,)
        assert view1.tobytes() == data
        view2 = memoryview(f)
        assert view2.ndim == 1
        assert view2.shape == (n,)
        assert view2.tobytes() == data

    def test_multisend(self):
        """ensure that a message remains intact after multiple sends"""
        a, b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        s = b"message"
        m = zmq.Frame(s)
        assert s == m.bytes

        a.send(m, copy=False)
        time.sleep(0.1)
        assert s == m.bytes
        a.send(m, copy=False)
        time.sleep(0.1)
        assert s == m.bytes
        a.send(m, copy=True)
        time.sleep(0.1)
        assert s == m.bytes
        a.send(m, copy=True)
        time.sleep(0.1)
        assert s == m.bytes
        for i in range(4):
            r = b.recv()
            assert s == r
        assert s == m.bytes

    def test_memoryview(self):
        """test messages from memoryview"""
        s = b'carrotjuice'
        memoryview(s)
        m = zmq.Frame(s)
        buf = m.buffer
        s2 = buf.tobytes()
        assert s2 == s
        assert m.bytes == s

    @skip_cpython_cffi
    def test_noncopying_recv(self):
        """check for clobbering message buffers"""
        null = b'\0' * 64
        sa, sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        for i in range(32):
            # try a few times
            sb.send(null, copy=False)
            m = sa.recv(copy=False)
            mb = m.bytes
            # buf = memoryview(m)
            buf = m.buffer
            del m
            for i in range(5):
                ff = b'\xff' * (40 + i * 10)
                sb.send(ff, copy=False)
                m2 = sa.recv(copy=False)
                b = buf.tobytes()
                assert b == null
                assert mb == null
                assert m2.bytes == ff
                assert type(m2.bytes) is bytes

    @skip_cpython_cffi
    def test_noncopying_memoryview(self):
        """test non-copying memmoryview messages"""
        null = b'\0' * 64
        sa, sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        for i in range(32):
            # try a few times
            sb.send(memoryview(null), copy=False)
            m = sa.recv(copy=False)
            buf = memoryview(m)
            for i in range(5):
                ff = b'\xff' * (40 + i * 10)
                sb.send(memoryview(ff), copy=False)
                m2 = sa.recv(copy=False)
                buf2 = memoryview(m2)
                assert buf.tobytes() == null
                assert not buf.readonly
                assert buf2.tobytes() == ff
                assert not buf2.readonly
                assert type(buf) is memoryview

    def test_buffer_numpy(self):
        """test non-copying numpy array messages"""
        try:
            import numpy
            from numpy.testing import assert_array_equal
        except ImportError:
            raise SkipTest("requires numpy")
        rand = numpy.random.randint
        shapes = [rand(2, 5) for i in range(5)]
        a, b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        dtypes = [int, float, '>i4', 'B']
        for i in range(1, len(shapes) + 1):
            shape = shapes[:i]
            for dt in dtypes:
                A = numpy.empty(shape, dtype=dt)
                a.send(A, copy=False)
                msg = b.recv(copy=False)

                B = numpy.frombuffer(msg, A.dtype).reshape(A.shape)
                assert_array_equal(A, B)

            A = numpy.empty(shape, dtype=[('a', int), ('b', float), ('c', 'a32')])
            A['a'] = 1024
            A['b'] = 1e9
            A['c'] = 'hello there'
            a.send(A, copy=False)
            msg = b.recv(copy=False)

            B = numpy.frombuffer(msg, A.dtype).reshape(A.shape)
            assert_array_equal(A, B)

    @skip_pypy
    def test_frame_more(self):
        """test Frame.more attribute"""
        frame = zmq.Frame(b"hello")
        assert not frame.more
        sa, sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        sa.send_multipart([b'hi', b'there'])
        frame = self.recv(sb, copy=False)
        assert frame.more
        if zmq.zmq_version_info()[0] >= 3 and not PYPY:
            assert frame.get(zmq.MORE)
        frame = self.recv(sb, copy=False)
        assert not frame.more
        if zmq.zmq_version_info()[0] >= 3 and not PYPY:
            assert not frame.get(zmq.MORE)
pyzmq-26.4.0/tests/test_monitor.py000066400000000000000000000057701477374370200172540ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import zmq
import zmq.asyncio
from zmq.utils.monitor import recv_monitor_message
from zmq_test_utils import require_zmq_4

pytestmark = require_zmq_4
import pytest


@pytest.fixture(params=["zmq", "asyncio"])
def Context(request, event_loop):
    if request.param == "asyncio":
        return zmq.asyncio.Context
    else:
        return zmq.Context


async def test_monitor(context, socket):
    """Test monitoring interface for sockets."""
    s_rep = socket(zmq.REP)
    s_req = socket(zmq.REQ)
    s_req.bind("tcp://127.0.0.1:6666")
    # try monitoring the REP socket
    s_rep.monitor(
        "inproc://monitor.rep",
        zmq.EVENT_CONNECT_DELAYED | zmq.EVENT_CONNECTED | zmq.EVENT_MONITOR_STOPPED,
    )
    # create listening socket for monitor
    s_event = socket(zmq.PAIR)
    s_event.connect("inproc://monitor.rep")
    s_event.linger = 0
    # test receive event for connect event
    s_rep.connect("tcp://127.0.0.1:6666")
    m = recv_monitor_message(s_event)
    if isinstance(context, zmq.asyncio.Context):
        m = await m
    if m['event'] == zmq.EVENT_CONNECT_DELAYED:
        assert m['endpoint'] == b"tcp://127.0.0.1:6666"
        # test receive event for connected event
        m = recv_monitor_message(s_event)
        if isinstance(context, zmq.asyncio.Context):
            m = await m
    assert m['event'] == zmq.EVENT_CONNECTED
    assert m['endpoint'] == b"tcp://127.0.0.1:6666"

    # test monitor can be disabled.
    s_rep.disable_monitor()
    m = recv_monitor_message(s_event)
    if isinstance(context, zmq.asyncio.Context):
        m = await m
    assert m['event'] == zmq.EVENT_MONITOR_STOPPED


async def test_monitor_repeat(context, socket, sockets):
    s = socket(zmq.PULL)
    m = s.get_monitor_socket()
    sockets.append(m)
    m2 = s.get_monitor_socket()
    assert m is m2
    s.disable_monitor()
    evt = recv_monitor_message(m)
    if isinstance(context, zmq.asyncio.Context):
        evt = await evt
    assert evt['event'] == zmq.EVENT_MONITOR_STOPPED
    m.close()
    s.close()


async def test_monitor_connected(context, socket, sockets):
    """Test connected monitoring socket."""
    s_rep = socket(zmq.REP)
    s_req = socket(zmq.REQ)
    s_req.bind("tcp://127.0.0.1:6667")
    # try monitoring the REP socket
    # create listening socket for monitor
    s_event = s_rep.get_monitor_socket()
    s_event.linger = 0
    sockets.append(s_event)
    # test receive event for connect event
    s_rep.connect("tcp://127.0.0.1:6667")
    m = recv_monitor_message(s_event)
    if isinstance(context, zmq.asyncio.Context):
        m = await m
    if m['event'] == zmq.EVENT_CONNECT_DELAYED:
        assert m['endpoint'] == b"tcp://127.0.0.1:6667"
        # test receive event for connected event
        m = recv_monitor_message(s_event)
        if isinstance(context, zmq.asyncio.Context):
            m = await m
    assert m['event'] == zmq.EVENT_CONNECTED
    assert m['endpoint'] == b"tcp://127.0.0.1:6667"
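

# Hedged helper sketch (not used by the tests above): recv_monitor_message
# returns a dict with an integer 'event' and a bytes 'endpoint'; zmq.Event can
# render the integer as a readable name.
def _describe_monitor_event(msg: dict) -> str:  # pragma: no cover - illustration only
    return f"{zmq.Event(msg['event']).name} on {msg['endpoint'].decode()}"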
pyzmq-26.4.0/tests/test_monqueue.py000066400000000000000000000201301477374370200174060ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import threading
import time

import zmq
from zmq import devices
from zmq_test_utils import PYPY, BaseZMQTestCase

if PYPY or zmq.zmq_version_info() >= (4, 1):
    # cleanup of shared Context doesn't work on PyPy
    # there also seems to be a bug in cleanup in libzmq-4.1 (zeromq/libzmq#1052)
    devices.Device.context_factory = zmq.Context


class TestMonitoredQueue(BaseZMQTestCase):
    def build_device(self, mon_sub=b"", in_prefix=b'in', out_prefix=b'out'):
        self.device = devices.ThreadMonitoredQueue(
            zmq.PAIR, zmq.PAIR, zmq.PUB, in_prefix, out_prefix
        )
        alice = self.context.socket(zmq.PAIR)
        bob = self.context.socket(zmq.PAIR)
        mon = self.context.socket(zmq.SUB)

        aport = alice.bind_to_random_port('tcp://127.0.0.1')
        bport = bob.bind_to_random_port('tcp://127.0.0.1')
        mport = mon.bind_to_random_port('tcp://127.0.0.1')
        mon.setsockopt(zmq.SUBSCRIBE, mon_sub)

        self.device.connect_in(f"tcp://127.0.0.1:{aport}")
        self.device.connect_out(f"tcp://127.0.0.1:{bport}")
        self.device.connect_mon(f"tcp://127.0.0.1:{mport}")
        self.device.start()
        time.sleep(0.2)
        try:
            # this is currently necessary to ensure no dropped monitor messages
            # see LIBZMQ-248 for more info
            mon.recv_multipart(zmq.NOBLOCK)
        except zmq.ZMQError:
            pass
        self.sockets.extend([alice, bob, mon])
        return alice, bob, mon

    def teardown_device(self):
        # spawn term in a background thread
        for i in range(50):
            # wait for device._context to be populated
            context = getattr(self.device, "_context", None)
            if context is not None:
                break
            time.sleep(0.1)

        if context is not None:
            t = threading.Thread(target=self.device._context.term, daemon=True)
            t.start()

        for socket in self.sockets:
            socket.close()

        if context is not None:
            t.join(timeout=5)

        self.device.join(timeout=5)

    def test_reply(self):
        alice, bob, mon = self.build_device()
        alices = b"hello bob".split()
        alice.send_multipart(alices)
        bobs = self.recv_multipart(bob)
        assert alices == bobs
        bobs = b"hello alice".split()
        bob.send_multipart(bobs)
        alices = self.recv_multipart(alice)
        assert alices == bobs
        self.teardown_device()

    def test_queue(self):
        alice, bob, mon = self.build_device()
        alices = b"hello bob".split()
        alice.send_multipart(alices)
        alices2 = b"hello again".split()
        alice.send_multipart(alices2)
        alices3 = b"hello again and again".split()
        alice.send_multipart(alices3)
        bobs = self.recv_multipart(bob)
        assert alices == bobs
        bobs = self.recv_multipart(bob)
        assert alices2 == bobs
        bobs = self.recv_multipart(bob)
        assert alices3 == bobs
        bobs = b"hello alice".split()
        bob.send_multipart(bobs)
        alices = self.recv_multipart(alice)
        assert alices == bobs
        self.teardown_device()

    def test_monitor(self):
        alice, bob, mon = self.build_device()
        alices = b"hello bob".split()
        alice.send_multipart(alices)
        alices2 = b"hello again".split()
        alice.send_multipart(alices2)
        alices3 = b"hello again and again".split()
        alice.send_multipart(alices3)
        bobs = self.recv_multipart(bob)
        assert alices == bobs
        mons = self.recv_multipart(mon)
        assert [b'in'] + bobs == mons
        bobs = self.recv_multipart(bob)
        assert alices2 == bobs
        bobs = self.recv_multipart(bob)
        assert alices3 == bobs
        mons = self.recv_multipart(mon)
        assert [b'in'] + alices2 == mons
        bobs = b"hello alice".split()
        bob.send_multipart(bobs)
        alices = self.recv_multipart(alice)
        assert alices == bobs
        mons = self.recv_multipart(mon)
        assert [b'in'] + alices3 == mons
        mons = self.recv_multipart(mon)
        assert [b'out'] + bobs == mons
        self.teardown_device()

    def test_prefix(self):
        alice, bob, mon = self.build_device(b"", b'foo', b'bar')
        alices = b"hello bob".split()
        alice.send_multipart(alices)
        alices2 = b"hello again".split()
        alice.send_multipart(alices2)
        alices3 = b"hello again and again".split()
        alice.send_multipart(alices3)
        bobs = self.recv_multipart(bob)
        assert alices == bobs
        mons = self.recv_multipart(mon)
        assert [b'foo'] + bobs == mons
        bobs = self.recv_multipart(bob)
        assert alices2 == bobs
        bobs = self.recv_multipart(bob)
        assert alices3 == bobs
        mons = self.recv_multipart(mon)
        assert [b'foo'] + alices2 == mons
        bobs = b"hello alice".split()
        bob.send_multipart(bobs)
        alices = self.recv_multipart(alice)
        assert alices == bobs
        mons = self.recv_multipart(mon)
        assert [b'foo'] + alices3 == mons
        mons = self.recv_multipart(mon)
        assert [b'bar'] + bobs == mons
        self.teardown_device()

    def test_monitor_subscribe(self):
        alice, bob, mon = self.build_device(b"out")
        alices = b"hello bob".split()
        alice.send_multipart(alices)
        alices2 = b"hello again".split()
        alice.send_multipart(alices2)
        alices3 = b"hello again and again".split()
        alice.send_multipart(alices3)
        bobs = self.recv_multipart(bob)
        assert alices == bobs
        bobs = self.recv_multipart(bob)
        assert alices2 == bobs
        bobs = self.recv_multipart(bob)
        assert alices3 == bobs
        bobs = b"hello alice".split()
        bob.send_multipart(bobs)
        alices = self.recv_multipart(alice)
        assert alices == bobs
        mons = self.recv_multipart(mon)
        assert [b'out'] + bobs == mons
        self.teardown_device()

    def test_router_router(self):
        """test router-router MQ devices"""
        dev = devices.ThreadMonitoredQueue(
            zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out'
        )
        self.device = dev
        dev.setsockopt_in(zmq.LINGER, 0)
        dev.setsockopt_out(zmq.LINGER, 0)
        dev.setsockopt_mon(zmq.LINGER, 0)

        porta = dev.bind_in_to_random_port('tcp://127.0.0.1')
        portb = dev.bind_out_to_random_port('tcp://127.0.0.1')
        a = self.context.socket(zmq.DEALER)
        a.identity = b'a'
        b = self.context.socket(zmq.DEALER)
        b.identity = b'b'
        self.sockets.extend([a, b])

        a.connect(f'tcp://127.0.0.1:{porta}')
        b.connect(f'tcp://127.0.0.1:{portb}')
        dev.start()
        time.sleep(1)
        if zmq.zmq_version_info() >= (3, 1, 0):
            # flush erroneous poll state, due to LIBZMQ-280
            ping_msg = [b'ping', b'pong']
            for s in (a, b):
                s.send_multipart(ping_msg)
                try:
                    s.recv(zmq.NOBLOCK)
                except zmq.ZMQError:
                    pass
        msg = [b'hello', b'there']
        a.send_multipart([b'b'] + msg)
        bmsg = self.recv_multipart(b)
        assert bmsg == [b'a'] + msg
        b.send_multipart(bmsg)
        amsg = self.recv_multipart(a)
        assert amsg == [b'b'] + msg
        self.teardown_device()

    def test_default_mq_args(self):
        self.device = dev = devices.ThreadMonitoredQueue(
            zmq.ROUTER, zmq.DEALER, zmq.PUB
        )
        dev.setsockopt_in(zmq.LINGER, 0)
        dev.setsockopt_out(zmq.LINGER, 0)
        dev.setsockopt_mon(zmq.LINGER, 0)
        # this will raise if default args are wrong
        dev.start()
        self.teardown_device()

    def test_mq_check_prefix(self):
        ins = self.context.socket(zmq.ROUTER)
        outs = self.context.socket(zmq.DEALER)
        mons = self.context.socket(zmq.PUB)
        self.sockets.extend([ins, outs, mons])

        ins = 'in'
        outs = 'out'
        self.assertRaises(TypeError, devices.monitoredqueue, ins, outs, mons)
pyzmq-26.4.0/tests/test_multipart.py000066400000000000000000000015311477374370200175750ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.


import zmq
from zmq_test_utils import BaseZMQTestCase, GreenTest, have_gevent


class TestMultipart(BaseZMQTestCase):
    def test_router_dealer(self):
        router, dealer = self.create_bound_pair(zmq.ROUTER, zmq.DEALER)

        msg1 = b'message1'
        dealer.send(msg1)
        self.recv(router)
        more = router.rcvmore
        assert more
        msg2 = self.recv(router)
        assert msg1 == msg2
        assert not router.rcvmore

    def test_basic_multipart(self):
        a, b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        msg = [b'hi', b'there', b'b']
        a.send_multipart(msg)
        recvd = b.recv_multipart()
        assert msg == recvd


if have_gevent:

    class TestMultipartGreen(GreenTest, TestMultipart):
        pass
pyzmq-26.4.0/tests/test_mypy.py000066400000000000000000000022571477374370200165600ustar00rootroot00000000000000"""
Test our typing with mypy
"""

import sys
from pathlib import Path
from subprocess import PIPE, STDOUT, Popen

import pytest

pytest.importorskip("mypy")
pytestmark = pytest.mark.skipif(sys.version_info < (3, 10), reason="targets 3.10")

repo_root = Path(__file__).parents[1]


print(repo_root)
examples_dir = repo_root / "examples"
mypy_dir = repo_root / "mypy_tests"


def run_mypy(*mypy_args):
    """Run mypy for a path

    Captures output and reports it on errors
    """
    p = Popen(
        [sys.executable, "-m", "mypy", "--python-version=3.10"] + list(mypy_args),
        stdout=PIPE,
        stderr=STDOUT,
    )
    o, _ = p.communicate()
    out = o.decode("utf8", "replace")
    print(out)
    assert p.returncode == 0, out


examples = [path.name for path in examples_dir.glob("*") if path.is_dir()]


@pytest.mark.parametrize("example", examples)
def test_mypy_example(example):
    example_dir = examples_dir / example
    run_mypy("--disallow-untyped-calls", str(example_dir))


mypy_tests = [p.name for p in mypy_dir.glob("*.py")]


@pytest.mark.parametrize("filename", mypy_tests)
def test_mypy(filename):
    run_mypy("--disallow-untyped-calls", str(mypy_dir / filename))
pyzmq-26.4.0/tests/test_pair.py000066400000000000000000000023251477374370200165110ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.


import zmq
from zmq_test_utils import BaseZMQTestCase, GreenTest, have_gevent

x = b' '


class TestPair(BaseZMQTestCase):
    def test_basic(self):
        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)

        msg1 = b'message1'
        msg2 = self.ping_pong(s1, s2, msg1)
        assert msg1 == msg2

    def test_multiple(self):
        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)

        for i in range(10):
            msg = i * x
            s1.send(msg)

        for i in range(10):
            msg = i * x
            s2.send(msg)

        for i in range(10):
            msg = s1.recv()
            assert msg == i * x

        for i in range(10):
            msg = s2.recv()
            assert msg == i * x

    def test_json(self):
        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        o = dict(a=10, b=list(range(10)))
        self.ping_pong_json(s1, s2, o)

    def test_pyobj(self):
        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        o = dict(a=10, b=range(10))
        self.ping_pong_pyobj(s1, s2, o)


if have_gevent:

    class TestReqRepGreen(GreenTest, TestPair):
        pass
pyzmq-26.4.0/tests/test_poll.py000066400000000000000000000157071477374370200165340ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import os
import sys
import time

from pytest import mark

import zmq
from zmq_test_utils import GreenTest, PollZMQTestCase, have_gevent


def wait():
    time.sleep(0.25)


class TestPoll(PollZMQTestCase):
    Poller = zmq.Poller

    def test_pair(self):
        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)

        # Sleep to allow sockets to connect.
        wait()

        poller = self.Poller()
        poller.register(s1, zmq.POLLIN | zmq.POLLOUT)
        poller.register(s2, zmq.POLLIN | zmq.POLLOUT)
        # Poll result should contain both sockets
        socks = dict(poller.poll())
        # Now make sure that both are send ready.
        assert socks[s1] == zmq.POLLOUT
        assert socks[s2] == zmq.POLLOUT
        # Now do a send on both, wait and test for zmq.POLLOUT|zmq.POLLIN
        s1.send(b'msg1')
        s2.send(b'msg2')
        wait()
        socks = dict(poller.poll())
        assert socks[s1] == zmq.POLLOUT | zmq.POLLIN
        assert socks[s2] == zmq.POLLOUT | zmq.POLLIN
        # Make sure that both are in POLLOUT after recv.
        s1.recv()
        s2.recv()
        socks = dict(poller.poll())
        assert socks[s1] == zmq.POLLOUT
        assert socks[s2] == zmq.POLLOUT

        poller.unregister(s1)
        poller.unregister(s2)

    def test_reqrep(self):
        s1, s2 = self.create_bound_pair(zmq.REP, zmq.REQ)

        # Sleep to allow sockets to connect.
        wait()

        poller = self.Poller()
        poller.register(s1, zmq.POLLIN | zmq.POLLOUT)
        poller.register(s2, zmq.POLLIN | zmq.POLLOUT)

        # Make sure that s1 is in state 0 and s2 is in POLLOUT
        socks = dict(poller.poll())
        assert s1 not in socks
        assert socks[s2] == zmq.POLLOUT

        # Make sure that s2 goes immediately into state 0 after send.
        s2.send(b'msg1')
        socks = dict(poller.poll())
        assert s2 not in socks

        # Make sure that s1 goes into POLLIN state after a time.sleep().
        time.sleep(0.5)
        socks = dict(poller.poll())
        assert socks[s1] == zmq.POLLIN

        # Make sure that s1 goes into POLLOUT after recv.
        s1.recv()
        socks = dict(poller.poll())
        assert socks[s1] == zmq.POLLOUT

        # Make sure s1 goes into state 0 after send.
        s1.send(b'msg2')
        socks = dict(poller.poll())
        assert s1 not in socks

        # Wait and then see that s2 is in POLLIN.
        time.sleep(0.5)
        socks = dict(poller.poll())
        assert socks[s2] == zmq.POLLIN

        # Make sure that s2 is in POLLOUT after recv.
        s2.recv()
        socks = dict(poller.poll())
        assert socks[s2] == zmq.POLLOUT

        poller.unregister(s1)
        poller.unregister(s2)

    def test_no_events(self):
        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        poller = self.Poller()
        poller.register(s1, zmq.POLLIN | zmq.POLLOUT)
        poller.register(s2, 0)
        assert s1 in poller
        assert s2 not in poller
        poller.register(s1, 0)
        assert s1 not in poller

    def test_pubsub(self):
        s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
        s2.setsockopt(zmq.SUBSCRIBE, b'')

        # Sleep to allow sockets to connect.
        wait()

        poller = self.Poller()
        poller.register(s1, zmq.POLLIN | zmq.POLLOUT)
        poller.register(s2, zmq.POLLIN)

        # Now make sure that both are send ready.
        socks = dict(poller.poll())
        assert socks[s1] == zmq.POLLOUT
        assert s2 not in socks
        # Make sure that s1 stays in POLLOUT after a send.
        s1.send(b'msg1')
        socks = dict(poller.poll())
        assert socks[s1] == zmq.POLLOUT

        # Make sure that s2 is POLLIN after waiting.
        wait()
        socks = dict(poller.poll())
        assert socks[s2] == zmq.POLLIN

        # Make sure that s2 goes into 0 after recv.
        s2.recv()
        socks = dict(poller.poll())
        assert s2 not in socks

        poller.unregister(s1)
        poller.unregister(s2)

    @mark.skipif(sys.platform.startswith('win'), reason='Windows')
    def test_raw(self):
        r, w = os.pipe()
        r = os.fdopen(r, 'rb')
        w = os.fdopen(w, 'wb')
        p = self.Poller()
        p.register(r, zmq.POLLIN)
        socks = dict(p.poll(1))
        assert socks == {}
        w.write(b'x')
        w.flush()
        socks = dict(p.poll(1))
        assert socks == {r.fileno(): zmq.POLLIN}
        w.close()
        r.close()

    @mark.flaky(reruns=3)
    def test_timeout(self):
        """make sure Poller.poll timeout has the right units (milliseconds)."""
        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        poller = self.Poller()
        poller.register(s1, zmq.POLLIN)
        tic = time.perf_counter()
        poller.poll(0.005)
        toc = time.perf_counter()
        assert toc - tic < 0.5
        tic = time.perf_counter()
        poller.poll(50)
        toc = time.perf_counter()
        assert toc - tic < 0.5
        assert toc - tic > 0.01
        tic = time.perf_counter()
        poller.poll(500)
        toc = time.perf_counter()
        assert toc - tic < 1
        assert toc - tic > 0.1


class TestSelect(PollZMQTestCase):
    def test_pair(self):
        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)

        # Sleep to allow sockets to connect.
        wait()

        rlist, wlist, xlist = zmq.select([s1, s2], [s1, s2], [s1, s2])
        assert s1 in wlist
        assert s2 in wlist
        assert s1 not in rlist
        assert s2 not in rlist

    @mark.flaky(reruns=3)
    def test_timeout(self):
        """make sure select timeout has the right units (seconds)."""
        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        tic = time.perf_counter()
        r, w, x = zmq.select([s1, s2], [], [], 0.005)
        toc = time.perf_counter()
        assert toc - tic < 1
        assert toc - tic > 0.001
        tic = time.perf_counter()
        r, w, x = zmq.select([s1, s2], [], [], 0.25)
        toc = time.perf_counter()
        assert toc - tic < 1
        assert toc - tic > 0.1


if have_gevent:
    import gevent

    from zmq import green as gzmq

    class TestPollGreen(GreenTest, TestPoll):
        Poller = gzmq.Poller

        def test_wakeup(self):
            s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
            poller = self.Poller()
            poller.register(s2, zmq.POLLIN)

            tic = time.perf_counter()
            r = gevent.spawn(lambda: poller.poll(10000))
            s = gevent.spawn(lambda: s1.send(b'msg1'))
            r.join()
            toc = time.perf_counter()
            assert toc - tic < 1

        def test_socket_poll(self):
            s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)

            tic = time.perf_counter()
            r = gevent.spawn(lambda: s2.poll(10000))
            s = gevent.spawn(lambda: s1.send(b'msg1'))
            r.join()
            toc = time.perf_counter()
            assert toc - tic < 1
pyzmq-26.4.0/tests/test_proxy_steerable.py000066400000000000000000000075671477374370200210020ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import struct
import time

import zmq
from zmq import devices
from zmq_test_utils import PYPY, BaseZMQTestCase, SkipTest

if PYPY:
    # cleanup of shared Context doesn't work on PyPy
    devices.Device.context_factory = zmq.Context


class TestProxySteerable(BaseZMQTestCase):
    def setUp(self):
        if zmq.zmq_version_info() >= (4, 3, 5):
            raise SkipTest("Steerable Proxies removed in libzmq 4.3.5")
        super().setUp()

    def test_proxy_steerable(self):
        if zmq.zmq_version_info() < (4, 1):
            raise SkipTest("Steerable Proxies only in libzmq >= 4.1")
        if zmq.zmq_version_info() >= (4, 3, 5):
            raise SkipTest("Steerable Proxies removed in libzmq 4.3.5")
        dev = devices.ThreadProxySteerable(zmq.PULL, zmq.PUSH, zmq.PUSH, zmq.PAIR)
        iface = 'tcp://127.0.0.1'
        port = dev.bind_in_to_random_port(iface)
        port2 = dev.bind_out_to_random_port(iface)
        port3 = dev.bind_mon_to_random_port(iface)
        port4 = dev.bind_ctrl_to_random_port(iface)
        dev.start()
        time.sleep(0.25)
        msg = b'hello'
        push = self.context.socket(zmq.PUSH)
        push.connect(f"{iface}:{port}")
        pull = self.context.socket(zmq.PULL)
        pull.connect(f"{iface}:{port2}")
        mon = self.context.socket(zmq.PULL)
        mon.connect(f"{iface}:{port3}")
        ctrl = self.context.socket(zmq.PAIR)
        ctrl.connect(f"{iface}:{port4}")
        push.send(msg)
        self.sockets.extend([push, pull, mon, ctrl])
        assert msg == self.recv(pull)
        assert msg == self.recv(mon)
        ctrl.send(b'TERMINATE')
        dev.join()

    def test_proxy_steerable_bind_to_random_with_args(self):
        if zmq.zmq_version_info() < (4, 1):
            raise SkipTest("Steerable Proxies only in libzmq >= 4.1")
        dev = devices.ThreadProxySteerable(zmq.PULL, zmq.PUSH, zmq.PUSH, zmq.PAIR)
        iface = 'tcp://127.0.0.1'
        ports = []
        min, max = 5000, 5050
        ports.extend(
            [
                dev.bind_in_to_random_port(iface, min_port=min, max_port=max),
                dev.bind_out_to_random_port(iface, min_port=min, max_port=max),
                dev.bind_mon_to_random_port(iface, min_port=min, max_port=max),
                dev.bind_ctrl_to_random_port(iface, min_port=min, max_port=max),
            ]
        )
        for port in ports:
            if port < min or port > max:
                self.fail(f'Unexpected port number: {port}')

    def test_proxy_steerable_statistics(self):
        if zmq.zmq_version_info() < (4, 3):
            raise SkipTest("STATISTICS only in libzmq >= 4.3")
        dev = devices.ThreadProxySteerable(zmq.PULL, zmq.PUSH, zmq.PUSH, zmq.PAIR)
        iface = 'tcp://127.0.0.1'
        port = dev.bind_in_to_random_port(iface)
        port2 = dev.bind_out_to_random_port(iface)
        port3 = dev.bind_mon_to_random_port(iface)
        port4 = dev.bind_ctrl_to_random_port(iface)
        dev.start()
        time.sleep(0.25)
        msg = b'hello'
        push = self.context.socket(zmq.PUSH)
        push.connect(f"{iface}:{port}")
        pull = self.context.socket(zmq.PULL)
        pull.connect(f"{iface}:{port2}")
        mon = self.context.socket(zmq.PULL)
        mon.connect(f"{iface}:{port3}")
        ctrl = self.context.socket(zmq.PAIR)
        ctrl.connect(f"{iface}:{port4}")
        push.send(msg)
        self.sockets.extend([push, pull, mon, ctrl])
        assert msg == self.recv(pull)
        assert msg == self.recv(mon)
        ctrl.send(b'STATISTICS')
        stats = self.recv_multipart(ctrl)
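        # STATISTICS replies with unsigned 64-bit counters (hence struct "=Q")
        # covering message/byte totals for the frontend and backend; here one
        # message of len(msg) bytes went in the frontend (indices 0-1) and out
        # the backend (indices 6-7), as the assertions below check.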
        stats_int = [struct.unpack("=Q", x)[0] for x in stats]
        assert 1 == stats_int[0]
        assert len(msg) == stats_int[1]
        assert 1 == stats_int[6]
        assert len(msg) == stats_int[7]
        ctrl.send(b'TERMINATE')
        dev.join()
pyzmq-26.4.0/tests/test_pubsub.py000066400000000000000000000017741477374370200170650ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.


import time

import zmq
from zmq_test_utils import BaseZMQTestCase, GreenTest, have_gevent


class TestPubSub(BaseZMQTestCase):
    def test_basic(self):
        s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
        s2.setsockopt(zmq.SUBSCRIBE, b'')
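        # brief pause so the subscription can reach the PUB socket
        # (the classic PUB/SUB slow-joiner race)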
        time.sleep(0.1)
        msg1 = b'message'
        s1.send(msg1)
        msg2 = s2.recv()  # This is blocking!
        assert msg1 == msg2

    def test_topic(self):
        s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
        s2.setsockopt(zmq.SUBSCRIBE, b'x')
        time.sleep(0.1)
        msg1 = b'message'
        s1.send(msg1)
        self.assertRaisesErrno(zmq.EAGAIN, s2.recv, zmq.NOBLOCK)
        msg1 = b'xmessage'
        s1.send(msg1)
        msg2 = s2.recv()
        assert msg1 == msg2


if have_gevent:

    class TestPubSubGreen(GreenTest, TestPubSub):
        pass
pyzmq-26.4.0/tests/test_reqrep.py000066400000000000000000000033511477374370200170540ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.


import zmq
from zmq_test_utils import BaseZMQTestCase, GreenTest, have_gevent


class TestReqRep(BaseZMQTestCase):
    def test_basic(self):
        s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)

        msg1 = b'message 1'
        msg2 = self.ping_pong(s1, s2, msg1)
        assert msg1 == msg2

    def test_multiple(self):
        s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)

        for i in range(10):
            msg1 = i * b' '
            msg2 = self.ping_pong(s1, s2, msg1)
            assert msg1 == msg2

    def test_bad_send_recv(self):
        s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)

        if zmq.zmq_version() != '2.1.8':
            # this doesn't work on 2.1.8
            for copy in (True, False):
                self.assertRaisesErrno(zmq.EFSM, s1.recv, copy=copy)
                self.assertRaisesErrno(zmq.EFSM, s2.send, b'asdf', copy=copy)

        # I have to have this or we die on an Abort trap.
        msg1 = b'asdf'
        msg2 = self.ping_pong(s1, s2, msg1)
        assert msg1 == msg2

    def test_json(self):
        s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
        o = dict(a=10, b=list(range(10)))
        self.ping_pong_json(s1, s2, o)

    def test_pyobj(self):
        s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
        o = dict(a=10, b=range(10))
        self.ping_pong_pyobj(s1, s2, o)

    def test_large_msg(self):
        s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP)
        msg1 = 10000 * b'X'

        for i in range(10):
            msg2 = self.ping_pong(s1, s2, msg1)
            assert msg1 == msg2


if have_gevent:

    class TestReqRepGreen(GreenTest, TestReqRep):
        pass
pyzmq-26.4.0/tests/test_retry_eintr.py000066400000000000000000000054771477374370200201370ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import signal
import time
from threading import Thread

from pytest import mark

import zmq
from zmq_test_utils import BaseZMQTestCase, SkipTest

# Partially based on EINTRBaseTest from CPython 3.5 eintr_tester


class TestEINTRSysCall(BaseZMQTestCase):
    """Base class for EINTR tests."""

    # delay for initial signal delivery
    signal_delay = 0.1
    # timeout for tests. Must be > signal_delay
    timeout = 0.25
    timeout_ms = int(timeout * 1e3)

    def alarm(self, t=None):
        """start a timer to fire only once

        like signal.alarm, but with better resolution than integer seconds.
        """
        if not hasattr(signal, 'setitimer'):
            raise SkipTest('EINTR tests require setitimer')
        if t is None:
            t = self.signal_delay
        self.timer_fired = False
        self.orig_handler = signal.signal(signal.SIGALRM, self.stop_timer)
        # signal_period ignored, since only one timer event is allowed to fire
        signal.setitimer(signal.ITIMER_REAL, t, 1000)

    def stop_timer(self, *args):
        self.timer_fired = True
        signal.setitimer(signal.ITIMER_REAL, 0, 0)
        signal.signal(signal.SIGALRM, self.orig_handler)

    @mark.skipif(not hasattr(zmq, 'RCVTIMEO'), reason="requires RCVTIMEO")
    def test_retry_recv(self):
        pull = self.socket(zmq.PULL)
        pull.rcvtimeo = self.timeout_ms
        self.alarm()
        self.assertRaises(zmq.Again, pull.recv)
        assert self.timer_fired

    @mark.skipif(not hasattr(zmq, 'SNDTIMEO'), reason="requires SNDTIMEO")
    def test_retry_send(self):
        push = self.socket(zmq.PUSH)
        push.sndtimeo = self.timeout_ms
        self.alarm()
        self.assertRaises(zmq.Again, push.send, b'buf')
        assert self.timer_fired

    @mark.flaky(reruns=3)
    def test_retry_poll(self):
        x, y = self.create_bound_pair()
        poller = zmq.Poller()
        poller.register(x, zmq.POLLIN)
        self.alarm()

        def send():
            time.sleep(2 * self.signal_delay)
            y.send(b'ping')

        t = Thread(target=send)
        t.start()
        evts = dict(poller.poll(2 * self.timeout_ms))
        t.join()
        assert x in evts
        assert self.timer_fired
        x.recv()

    def test_retry_term(self):
        push = self.socket(zmq.PUSH)
        push.linger = self.timeout_ms
        push.connect('tcp://127.0.0.1:5555')
        push.send(b'ping')
        time.sleep(0.1)
        self.alarm()
        self.context.destroy()
        assert self.timer_fired
        assert self.context.closed

    def test_retry_getsockopt(self):
        raise SkipTest("TODO: find a way to interrupt getsockopt")

    def test_retry_setsockopt(self):
        raise SkipTest("TODO: find a way to interrupt setsockopt")
pyzmq-26.4.0/tests/test_security.py000066400000000000000000000171351477374370200174320ustar00rootroot00000000000000"""Test libzmq security (libzmq >= 3.3.0)"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import contextlib
import os
import time
from threading import Thread

import zmq
from zmq.utils import z85
from zmq_test_utils import PYPY, BaseZMQTestCase, SkipTest

USER = b"admin"
PASS = b"password"


class TestSecurity(BaseZMQTestCase):
    def setUp(self):
        if zmq.zmq_version_info() < (4, 0):
            raise SkipTest("security is new in libzmq 4.0")
        try:
            zmq.curve_keypair()
        except zmq.ZMQError:
            raise SkipTest("security requires libzmq to be built with CURVE support")
        super().setUp()

    def zap_handler(self):
        socket = self.context.socket(zmq.REP)
        socket.bind("inproc://zeromq.zap.01")
        try:
            msg = self.recv_multipart(socket)

            version, sequence, domain, address, identity, mechanism = msg[:6]
            if mechanism == b'PLAIN':
                username, password = msg[6:]
            elif mechanism == b'CURVE':
                msg[6]

            assert version == b"1.0"
            assert identity == b"IDENT"
            reply = [version, sequence]
            if (
                mechanism == b'CURVE'
                or (mechanism == b'PLAIN' and username == USER and password == PASS)
                or (mechanism == b'NULL')
            ):
                reply.extend(
                    [
                        b"200",
                        b"OK",
                        b"anonymous",
                        b"\5Hello\0\0\0\5World",
                    ]
                )
            else:
                reply.extend(
                    [
                        b"400",
                        b"Invalid username or password",
                        b"",
                        b"",
                    ]
                )
            socket.send_multipart(reply)
        finally:
            socket.close()
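
    # Hedged sketch (helper added for illustration, not used by the tests):
    # the b"\5Hello\0\0\0\5World" blob above is ZAP metadata in the ZMTP
    # property format -- 1-byte name length, name, 4-byte big-endian value
    # length, value -- which bounce() reads back via frame.get('Hello').
    @staticmethod
    def _encode_zap_metadata(name: bytes, value: bytes) -> bytes:
        """Encode one ZAP metadata property (illustrative sketch only)."""
        return bytes([len(name)]) + name + len(value).to_bytes(4, "big") + value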

    @contextlib.contextmanager
    def zap(self):
        self.start_zap()
        time.sleep(0.5)  # allow time for the Thread to start
        try:
            yield
        finally:
            self.stop_zap()

    def start_zap(self):
        self.zap_thread = Thread(target=self.zap_handler)
        self.zap_thread.start()

    def stop_zap(self):
        self.zap_thread.join()

    def bounce(self, server, client, test_metadata=True):
        msg = [os.urandom(64), os.urandom(64)]
        client.send_multipart(msg)
        frames = self.recv_multipart(server, copy=False)
        recvd = list(map(lambda x: x.bytes, frames))

        try:
            if test_metadata and not PYPY:
                for frame in frames:
                    assert frame.get('User-Id') == 'anonymous'
                    assert frame.get('Hello') == 'World'
                    assert frame['Socket-Type'] == 'DEALER'
        except zmq.ZMQVersionError:
            pass

        assert recvd == msg
        server.send_multipart(recvd)
        msg2 = self.recv_multipart(client)
        assert msg2 == msg

    def test_null(self):
        """test NULL (default) security"""
        server = self.socket(zmq.DEALER)
        client = self.socket(zmq.DEALER)
        assert client.MECHANISM == zmq.NULL
        assert server.mechanism == zmq.NULL
        assert client.plain_server == 0
        assert server.plain_server == 0
        iface = 'tcp://127.0.0.1'
        port = server.bind_to_random_port(iface)
        client.connect(f"{iface}:{port}")
        self.bounce(server, client, False)

    def test_plain(self):
        """test PLAIN authentication"""
        server = self.socket(zmq.DEALER)
        server.identity = b'IDENT'
        client = self.socket(zmq.DEALER)
        assert client.plain_username == b''
        assert client.plain_password == b''
        client.plain_username = USER
        client.plain_password = PASS
        assert client.getsockopt(zmq.PLAIN_USERNAME) == USER
        assert client.getsockopt(zmq.PLAIN_PASSWORD) == PASS
        assert client.plain_server == 0
        assert server.plain_server == 0
        server.plain_server = True
        assert server.mechanism == zmq.PLAIN
        assert client.mechanism == zmq.PLAIN

        assert not client.plain_server
        assert server.plain_server

        with self.zap():
            iface = 'tcp://127.0.0.1'
            port = server.bind_to_random_port(iface)
            client.connect(f"{iface}:{port}")
            self.bounce(server, client)

    def skip_plain_inauth(self):
        """test PLAIN failed authentication"""
        server = self.socket(zmq.DEALER)
        server.identity = b'IDENT'
        client = self.socket(zmq.DEALER)
        self.sockets.extend([server, client])
        client.plain_username = USER
        client.plain_password = b'incorrect'
        server.plain_server = True
        assert server.mechanism == zmq.PLAIN
        assert client.mechanism == zmq.PLAIN

        with self.zap():
            iface = 'tcp://127.0.0.1'
            port = server.bind_to_random_port(iface)
            client.connect(f"{iface}:{port}")
            client.send(b'ping')
            server.rcvtimeo = 250
            self.assertRaisesErrno(zmq.EAGAIN, server.recv)

    def test_keypair(self):
        """test curve_keypair"""
        try:
            public, secret = zmq.curve_keypair()
        except zmq.ZMQError:
            raise SkipTest("CURVE unsupported")

        assert type(secret) == bytes
        assert type(public) == bytes
        assert len(secret) == 40
        assert len(public) == 40

        # verify that it is indeed Z85
        bpublic, bsecret = (z85.decode(key) for key in (public, secret))
        assert type(bsecret) == bytes
        assert type(bpublic) == bytes
        assert len(bsecret) == 32
        assert len(bpublic) == 32

    def test_curve_public(self):
        """test curve_public"""
        try:
            public, secret = zmq.curve_keypair()
        except zmq.ZMQError:
            raise SkipTest("CURVE unsupported")
        if zmq.zmq_version_info() < (4, 2):
            raise SkipTest("curve_public is new in libzmq 4.2")

        derived_public = zmq.curve_public(secret)

        assert type(derived_public) == bytes
        assert len(derived_public) == 40

        # verify that it is indeed Z85
        bpublic = z85.decode(derived_public)
        assert type(bpublic) == bytes
        assert len(bpublic) == 32

        # verify that it is equal to the known public key
        assert derived_public == public

    def test_curve(self):
        """test CURVE encryption"""
        server = self.socket(zmq.DEALER)
        server.identity = b'IDENT'
        client = self.socket(zmq.DEALER)
        self.sockets.extend([server, client])
        try:
            server.curve_server = True
        except zmq.ZMQError as e:
            # will raise EINVAL if no CURVE support
            if e.errno == zmq.EINVAL:
                raise SkipTest("CURVE unsupported")

        server_public, server_secret = zmq.curve_keypair()
        client_public, client_secret = zmq.curve_keypair()

        server.curve_secretkey = server_secret
        server.curve_publickey = server_public
        client.curve_serverkey = server_public
        client.curve_publickey = client_public
        client.curve_secretkey = client_secret

        assert server.mechanism == zmq.CURVE
        assert client.mechanism == zmq.CURVE

        assert server.get(zmq.CURVE_SERVER)
        assert not client.get(zmq.CURVE_SERVER)

        with self.zap():
            iface = 'tcp://127.0.0.1'
            port = server.bind_to_random_port(iface)
            client.connect(f"{iface}:{port}")
            self.bounce(server, client)
pyzmq-26.4.0/tests/test_socket.py000066400000000000000000000635611477374370200170570ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import copy
import errno
import json
import os
import platform
import socket
import sys
import time
import warnings
from array import array
from unittest import mock

import pytest
from pytest import mark

import zmq
from zmq_test_utils import BaseZMQTestCase, GreenTest, SkipTest, have_gevent, skip_pypy

pypy = platform.python_implementation().lower() == 'pypy'
windows = platform.platform().lower().startswith('windows')
on_ci = bool(os.environ.get('CI'))

# polling on windows is slow
POLL_TIMEOUT = 1000 if windows else 100


class TestSocket(BaseZMQTestCase):
    def test_create(self):
        ctx = self.Context()
        s = ctx.socket(zmq.PUB)
        # Superluminal protocol not yet implemented
        self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.bind, 'ftl://a')
        self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.connect, 'ftl://a')
        self.assertRaisesErrno(zmq.EINVAL, s.bind, 'tcp://')
        s.close()
        ctx.term()

    def test_context_manager(self):
        url = 'inproc://a'
        with self.Context() as ctx:
            with ctx.socket(zmq.PUSH) as a:
                a.bind(url)
                with ctx.socket(zmq.PULL) as b:
                    b.connect(url)
                    msg = b'hi'
                    a.send(msg)
                    rcvd = self.recv(b)
                    assert rcvd == msg
                assert b.closed is True
            assert a.closed is True
        assert ctx.closed is True

    def test_connectbind_context_managers(self):
        url = 'inproc://a'
        msg = b'hi'
        with self.Context() as ctx:
            # Test connect() context manager
            with ctx.socket(zmq.PUSH) as a, ctx.socket(zmq.PULL) as b:
                a.bind(url)
                connect_context = b.connect(url)
                assert f'connect={url!r}' in repr(connect_context)
                with connect_context:
                    a.send(msg)
                    rcvd = self.recv(b)
                    assert rcvd == msg
                # b should now be disconnected, so sending and receiving don't work
                with pytest.raises(zmq.Again):
                    a.send(msg, flags=zmq.DONTWAIT)
                with pytest.raises(zmq.Again):
                    b.recv(flags=zmq.DONTWAIT)
                a.unbind(url)
            # Test bind() context manager
            with ctx.socket(zmq.PUSH) as a, ctx.socket(zmq.PULL) as b:
                # unbind() just stops accepting of new connections, so we have to disconnect to test that
                # unbind happened.
                bind_context = a.bind(url)
                assert f'bind={url!r}' in repr(bind_context)
                with bind_context:
                    b.connect(url)
                    a.send(msg)
                    rcvd = self.recv(b)
                    assert rcvd == msg
                    b.disconnect(url)
                b.connect(url)
                # Since a is unbound from url, b is not connected to anything
                with pytest.raises(zmq.Again):
                    a.send(msg, flags=zmq.DONTWAIT)
                with pytest.raises(zmq.Again):
                    b.recv(flags=zmq.DONTWAIT)

    def test_bind_random_context(self):
        with self.context.socket(zmq.PUSH) as push:
            with push.bind("tcp://127.0.0.1:0"):
                url = push.last_endpoint
                with self.context.socket(zmq.PULL) as pull:
                    pull.connect(url)
                    push.send(b"msg")
                    self.recv(pull)

    _repr_cls = "zmq.Socket"

    def test_repr(self):
        with self.context.socket(zmq.PUSH) as s:
            assert f'{self._repr_cls}(zmq.PUSH)' in repr(s)
            assert 'closed' not in repr(s)
        assert f'{self._repr_cls}(zmq.PUSH)' in repr(s)
        assert 'closed' in repr(s)

    def test_dir(self):
        ctx = self.Context()
        s = ctx.socket(zmq.PUB)
        assert 'send' in dir(s)
        assert 'IDENTITY' in dir(s)
        assert 'AFFINITY' in dir(s)
        assert 'FD' in dir(s)
        s.close()
        ctx.term()

    @mark.skipif(mock is None, reason="requires unittest.mock")
    def test_mockable(self):
        s = self.socket(zmq.SUB)
        m = mock.Mock(spec=s)
        s.close()

    def test_bind_unicode(self):
        s = self.socket(zmq.PUB)
        p = s.bind_to_random_port("tcp://*")

    def test_connect_unicode(self):
        s = self.socket(zmq.PUB)
        s.connect("tcp://127.0.0.1:5555")

    def test_bind_to_random_port(self):
        # Check that bind_to_random_port do not hide useful exception
        ctx = self.Context()
        s = ctx.socket(zmq.PUB)
        # Invalid format
        try:
            s.bind_to_random_port('tcp:*')
        except zmq.ZMQError as e:
            assert e.errno == zmq.EINVAL
        # Invalid protocol
        try:
            s.bind_to_random_port('rand://*')
        except zmq.ZMQError as e:
            assert e.errno == zmq.EPROTONOSUPPORT

        s.close()
        ctx.term()

    def test_bind_connect_addr_error(self):
        with self.socket(zmq.PUSH) as s:
            url = "tcp://1.2.3.4.5:1234567"
            with pytest.raises(zmq.ZMQError) as exc:
                s.bind(url)
            assert url in str(exc.value)

            url = "noproc://no/such/file"
            with pytest.raises(zmq.ZMQError) as exc:
                s.connect(url)
            assert url in str(exc.value)

    def test_identity(self):
        s = self.context.socket(zmq.PULL)
        self.sockets.append(s)
        ident = b'identity\0\0'
        s.identity = ident
        assert s.get(zmq.IDENTITY) == ident

    def test_unicode_sockopts(self):
        """test setting/getting sockopts with unicode strings"""
        topic = "tést"
        p, s = self.create_bound_pair(zmq.PUB, zmq.SUB)
        assert s.send_unicode == s.send_unicode
        assert p.recv_unicode == p.recv_unicode
        self.assertRaises(TypeError, s.setsockopt, zmq.SUBSCRIBE, topic)
        self.assertRaises(TypeError, s.setsockopt, zmq.IDENTITY, topic)
        s.setsockopt_unicode(zmq.IDENTITY, topic, 'utf16')
        self.assertRaises(TypeError, s.setsockopt, zmq.AFFINITY, topic)
        s.setsockopt_unicode(zmq.SUBSCRIBE, topic)
        self.assertRaises(TypeError, s.getsockopt_unicode, zmq.AFFINITY)
        self.assertRaisesErrno(zmq.EINVAL, s.getsockopt_unicode, zmq.SUBSCRIBE)

        identb = s.getsockopt(zmq.IDENTITY)
        identu = identb.decode('utf16')
        identu2 = s.getsockopt_unicode(zmq.IDENTITY, 'utf16')
        assert identu == identu2
        time.sleep(0.1)  # wait for connection/subscription
        p.send_unicode(topic, zmq.SNDMORE)
        p.send_unicode(topic * 2, encoding='latin-1')
        assert topic == s.recv_unicode()
        assert topic * 2 == s.recv_unicode(encoding='latin-1')

    def test_int_sockopts(self):
        "test integer sockopts"
        v = zmq.zmq_version_info()
        if v < (3, 0):
            default_hwm = 0
        else:
            default_hwm = 1000
        p, s = self.create_bound_pair(zmq.PUB, zmq.SUB)
        p.setsockopt(zmq.LINGER, 0)
        assert p.getsockopt(zmq.LINGER) == 0
        p.setsockopt(zmq.LINGER, -1)
        assert p.getsockopt(zmq.LINGER) == -1
        assert p.hwm == default_hwm
        p.hwm = 11
        assert p.hwm == 11
        # p.setsockopt(zmq.EVENTS, zmq.POLLIN)
        assert p.getsockopt(zmq.EVENTS) == zmq.POLLOUT
        self.assertRaisesErrno(zmq.EINVAL, p.setsockopt, zmq.EVENTS, 2**7 - 1)
        assert p.getsockopt(zmq.TYPE) == p.socket_type
        assert p.getsockopt(zmq.TYPE) == zmq.PUB
        assert s.getsockopt(zmq.TYPE) == s.socket_type
        assert s.getsockopt(zmq.TYPE) == zmq.SUB

        # check for overflow / wrong type:
        errors = []
        backref = {}
        constants = zmq.constants
        for name in constants.__all__:
            value = getattr(constants, name)
            if isinstance(value, int):
                backref[value] = name
        for opt in zmq.constants.SocketOption:
            if opt._opt_type not in {
                zmq.constants._OptType.int,
                zmq.constants._OptType.int64,
            }:
                continue
            if opt.name.startswith(
                (
                    'HWM',
                    'ROUTER',
                    'XPUB',
                    'TCP',
                    'FAIL',
                    'REQ_',
                    'CURVE_',
                    'PROBE_ROUTER',
                    'IPC_FILTER',
                    'GSSAPI',
                    'STREAM_',
                    'VMCI_BUFFER_SIZE',
                    'VMCI_BUFFER_MIN_SIZE',
                    'VMCI_BUFFER_MAX_SIZE',
                    'VMCI_CONNECT_TIMEOUT',
                    'BLOCKY',
                    'IN_BATCH_SIZE',
                    'OUT_BATCH_SIZE',
                    'WSS_TRUST_SYSTEM',
                    'ONLY_FIRST_SUBSCRIBE',
                    'PRIORITY',
                    'RECONNECT_STOP',
                    'NORM_',
                    'ROUTER_',
                    'BUSY_POLL',
                    'XSUB_VERBOSE_',
                    'TOPICS_',
                )
            ):
                # some sockopts are write-only
                continue
            try:
                n = p.getsockopt(opt)
            except zmq.ZMQError as e:
                errors.append(f"getsockopt({opt!r}) raised {e}.")
            else:
                if n > 2**31:
                    errors.append(
                        f"getsockopt({opt!r}) returned a ridiculous value."
                        " It is probably the wrong type."
                    )
        if errors:
            self.fail('\n'.join([''] + errors))

    def test_bad_sockopts(self):
        """Test that appropriate errors are raised on bad socket options"""
        s = self.context.socket(zmq.PUB)
        self.sockets.append(s)
        s.setsockopt(zmq.LINGER, 0)
        # unrecognized int sockopts pass through to libzmq, and should raise EINVAL
        self.assertRaisesErrno(zmq.EINVAL, s.setsockopt, 9999, 5)
        self.assertRaisesErrno(zmq.EINVAL, s.getsockopt, 9999)
        # but only int sockopts are allowed through this way, otherwise raise a TypeError
        self.assertRaises(TypeError, s.setsockopt, 9999, b"5")
        # some sockopts are valid in general, but not on every socket:
        self.assertRaisesErrno(zmq.EINVAL, s.setsockopt, zmq.SUBSCRIBE, b'hi')

    def test_sockopt_roundtrip(self):
        "test set/getsockopt roundtrip."
        p = self.context.socket(zmq.PUB)
        self.sockets.append(p)
        p.setsockopt(zmq.LINGER, 11)
        assert p.getsockopt(zmq.LINGER) == 11

    def test_send_unicode(self):
        "test sending unicode objects"
        a, b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        self.sockets.extend([a, b])
        u = "çπ§"
        self.assertRaises(TypeError, a.send, u, copy=False)
        self.assertRaises(TypeError, a.send, u, copy=True)
        a.send_unicode(u)
        s = b.recv()
        assert s == u.encode('utf8')
        assert s.decode('utf8') == u
        a.send_unicode(u, encoding='utf16')
        s = b.recv_unicode(encoding='utf16')
        assert s == u

    def test_send_multipart_check_type(self):
        "check type on all frames in send_multipart"
        a, b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        self.sockets.extend([a, b])
        self.assertRaises(TypeError, a.send_multipart, [b'a', 5])
        a.send_multipart([b'b'])
        rcvd = self.recv_multipart(b)
        assert rcvd == [b'b']

    @skip_pypy
    def test_tracker(self):
        "test the MessageTracker object for tracking when zmq is done with a buffer"
        addr = 'tcp://127.0.0.1'
        # get a port:
        sock = socket.socket()
        sock.bind(('127.0.0.1', 0))
        port = sock.getsockname()[1]
        iface = f"{addr}:{port}"
        sock.close()
        time.sleep(0.1)

        a = self.context.socket(zmq.PUSH)
        b = self.context.socket(zmq.PULL)
        self.sockets.extend([a, b])
        a.connect(iface)
        time.sleep(0.1)
        p1 = a.send(b'something', copy=False, track=True)
        assert isinstance(p1, zmq.MessageTracker)
        assert p1 is zmq._FINISHED_TRACKER
        # small message, should start done
        assert p1.done

        # disable zero-copy threshold
        a.copy_threshold = 0

        p2 = a.send_multipart([b'something', b'else'], copy=False, track=True)
        assert isinstance(p2, zmq.MessageTracker)
        assert not p2.done

        b.bind(iface)
        msg = self.recv_multipart(b)
        for i in range(10):
            if p1.done:
                break
            time.sleep(0.1)
        assert p1.done is True
        assert msg == [b'something']
        msg = self.recv_multipart(b)
        for i in range(10):
            if p2.done:
                break
            time.sleep(0.1)
        assert p2.done is True
        assert msg == [b'something', b'else']
        m = zmq.Frame(b"again", copy=False, track=True)
        assert m.tracker.done is False
        p1 = a.send(m, copy=False)
        p2 = a.send(m, copy=False)
        assert m.tracker.done is False
        assert p1.done is False
        assert p2.done is False
        msg = self.recv_multipart(b)
        assert m.tracker.done is False
        assert msg == [b'again']
        msg = self.recv_multipart(b)
        assert m.tracker.done is False
        assert msg == [b'again']
        assert p1.done is False
        assert p2.done is False
        m.tracker
        del m
        for i in range(10):
            if p1.done:
                break
            time.sleep(0.1)
        assert p1.done is True
        assert p2.done is True
        m = zmq.Frame(b'something', track=False)
        self.assertRaises(ValueError, a.send, m, copy=False, track=True)

    def test_close(self):
        ctx = self.Context()
        s = ctx.socket(zmq.PUB)
        s.close()
        self.assertRaisesErrno(zmq.ENOTSOCK, s.bind, b'')
        self.assertRaisesErrno(zmq.ENOTSOCK, s.connect, b'')
        self.assertRaisesErrno(zmq.ENOTSOCK, s.setsockopt, zmq.SUBSCRIBE, b'')
        self.assertRaisesErrno(zmq.ENOTSOCK, s.send, b'asdf')
        self.assertRaisesErrno(zmq.ENOTSOCK, s.recv)
        ctx.term()

    def test_attr(self):
        """set setting/getting sockopts as attributes"""
        s = self.context.socket(zmq.DEALER)
        self.sockets.append(s)
        linger = 10
        s.linger = linger
        assert linger == s.linger
        assert linger == s.getsockopt(zmq.LINGER)
        assert s.fd == s.getsockopt(zmq.FD)

    def test_bad_attr(self):
        s = self.context.socket(zmq.DEALER)
        self.sockets.append(s)
        try:
            s.apple = 'foo'
        except AttributeError:
            pass
        else:
            self.fail("bad setattr should have raised AttributeError")
        try:
            s.apple
        except AttributeError:
            pass
        else:
            self.fail("bad getattr should have raised AttributeError")

    def test_subclass(self):
        """subclasses can assign attributes"""

        class S(zmq.Socket):
            a = None

            def __init__(self, *a, **kw):
                self.a = -1
                super().__init__(*a, **kw)

        s = S(self.context, zmq.REP)
        self.sockets.append(s)
        assert s.a == -1
        s.a = 1
        assert s.a == 1
        a = s.a
        assert a == 1

    def test_recv_multipart(self):
        a, b = self.create_bound_pair()
        msg = b'hi'
        for i in range(3):
            a.send(msg)
        time.sleep(0.1)
        for i in range(3):
            assert self.recv_multipart(b) == [msg]

    def test_recv_into(self):
        a, b = self.create_bound_pair()
        if not self.green:
            b.rcvtimeo = 1000
        msg = [
            b'hello',
            b'there world',
            b'part 3',
            b'rest',
        ]
        a.send_multipart(msg)

        # default nbytes: fits in array
        # make sure itemsize > 1 is handled right
        buf = array('Q', [0])
        nbytes = b.recv_into(buf)
        assert nbytes == len(msg[0])
        assert buf.tobytes()[:nbytes] == msg[0]

        # default nbytes: truncates to sizeof(buf)
        buf = bytearray(4)
        nbytes = b.recv_into(buf)
        # returned nbytes is the actual received length,
        # which indicates truncation
        assert nbytes == len(msg[1])
        assert buf[:] == msg[1][: len(buf)]

        # specify nbytes, truncates
        buf = bytearray(10)
        nbytes = 4
        nbytes_recvd = b.recv_into(buf, nbytes=nbytes)
        assert nbytes_recvd == len(msg[2])
        assert buf[:nbytes] == msg[2][:nbytes]
        # didn't recv excess bytes
        assert buf[nbytes:] == bytearray(10 - nbytes)

        # recv_into empty buffer discards everything
        buf = bytearray(10)
        view = memoryview(buf)[:0]
        assert view.nbytes == 0
        nbytes = b.recv_into(view)
        assert nbytes == len(msg[3])
        assert buf == bytearray(10)

    def test_recv_into_bad(self):
        a, b = self.create_bound_pair()
        if not self.green:
            b.rcvtimeo = 1000

        # bad calls

        # negative nbytes
        buf = bytearray(10)
        with pytest.raises(ValueError):
            b.recv_into(buf, nbytes=-1)
        # not contiguous
        buf = memoryview(bytearray(10))[::2]
        with pytest.raises(BufferError):
            b.recv_into(buf)
        # readonly
        buf = memoryview(b"readonly")
        with pytest.raises(BufferError):
            b.recv_into(buf)
        # too big
        buf = bytearray(10)
        with pytest.raises(ValueError):
            b.recv_into(buf, nbytes=11)
        # not memory-viewable
        with pytest.raises(TypeError):
            b.recv_into(pytest)

        # make sure flags work
        with pytest.raises(zmq.Again):
            b.recv_into(bytearray(5), flags=zmq.DONTWAIT)

    def test_close_after_destroy(self):
        """s.close() after ctx.destroy() should be fine"""
        ctx = self.Context()
        s = ctx.socket(zmq.REP)
        ctx.destroy()
        # reaper is not instantaneous
        time.sleep(1e-2)
        s.close()
        assert s.closed

    def test_poll(self):
        a, b = self.create_bound_pair()
        time.time()
        evt = a.poll(POLL_TIMEOUT)
        assert evt == 0
        evt = a.poll(POLL_TIMEOUT, zmq.POLLOUT)
        assert evt == zmq.POLLOUT
        msg = b'hi'
        a.send(msg)
        evt = b.poll(POLL_TIMEOUT)
        assert evt == zmq.POLLIN
        msg2 = self.recv(b)
        evt = b.poll(POLL_TIMEOUT)
        assert evt == 0
        assert msg2 == msg

    def test_ipc_path_max_length(self):
        """IPC_PATH_MAX_LEN is a sensible value"""
        if zmq.IPC_PATH_MAX_LEN == 0:
            raise SkipTest("IPC_PATH_MAX_LEN undefined")

        msg = f"Surprising value for IPC_PATH_MAX_LEN: {zmq.IPC_PATH_MAX_LEN}"
        assert zmq.IPC_PATH_MAX_LEN > 30, msg
        assert zmq.IPC_PATH_MAX_LEN < 1025, msg

    def test_ipc_path_max_length_msg(self):
        if zmq.IPC_PATH_MAX_LEN == 0:
            raise SkipTest("IPC_PATH_MAX_LEN undefined")

        s = self.context.socket(zmq.PUB)
        self.sockets.append(s)
        try:
            s.bind('ipc://{}'.format('a' * (zmq.IPC_PATH_MAX_LEN + 1)))
        except zmq.ZMQError as e:
            assert str(zmq.IPC_PATH_MAX_LEN) in e.strerror

    @mark.skipif(windows, reason="ipc not supported on Windows.")
    def test_ipc_path_no_such_file_or_directory_message(self):
        """Display the ipc path in case of an ENOENT exception"""
        s = self.context.socket(zmq.PUB)
        self.sockets.append(s)
        invalid_path = '/foo/bar'
        with pytest.raises(zmq.ZMQError) as error:
            s.bind(f'ipc://{invalid_path}')
        assert error.value.errno == errno.ENOENT
        error_message = str(error.value)
        assert invalid_path in error_message
        assert "no such file or directory" in error_message.lower()

    def test_hwm(self):
        zmq3 = zmq.zmq_version_info()[0] >= 3
        for stype in (zmq.PUB, zmq.ROUTER, zmq.SUB, zmq.REQ, zmq.DEALER):
            s = self.context.socket(stype)
            s.hwm = 100
            assert s.hwm == 100
            if zmq3:
                try:
                    assert s.sndhwm == 100
                except AttributeError:
                    pass
                try:
                    assert s.rcvhwm == 100
                except AttributeError:
                    pass
            s.close()

    def test_copy(self):
        s = self.socket(zmq.PUB)
        scopy = copy.copy(s)
        sdcopy = copy.deepcopy(s)
        assert scopy._shadow
        assert sdcopy._shadow
        assert s.underlying == scopy.underlying
        assert s.underlying == sdcopy.underlying
        s.close()

    def test_send_buffer(self):
        a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
        for buffer_type in (memoryview, bytearray):
            rawbytes = str(buffer_type).encode('ascii')
            msg = buffer_type(rawbytes)
            a.send(msg)
            recvd = b.recv()
            assert recvd == rawbytes

    def test_shadow(self):
        p = self.socket(zmq.PUSH)
        p.bind("tcp://127.0.0.1:5555")
        p2 = zmq.Socket.shadow(p.underlying)
        assert p.underlying == p2.underlying
        s = self.socket(zmq.PULL)
        s2 = zmq.Socket.shadow(s)
        assert s2._shadow_obj is s
        assert s.underlying != p.underlying
        assert s2.underlying == s.underlying
        assert s2.context is s.context
        s3 = zmq.Socket(s)
        assert s3._shadow_obj is s
        assert s3.underlying == s.underlying
        s2.connect("tcp://127.0.0.1:5555")
        sent = b'hi'
        p2.send(sent)
        rcvd = self.recv(s2)
        assert rcvd == sent

    def test_shadow_pyczmq(self):
        try:
            from pyczmq import zctx, zsocket
        except Exception:
            raise SkipTest("Requires pyczmq")

        ctx = zctx.new()
        ca = zsocket.new(ctx, zmq.PUSH)
        cb = zsocket.new(ctx, zmq.PULL)
        a = zmq.Socket.shadow(ca)
        b = zmq.Socket.shadow(cb)
        a.bind("inproc://a")
        b.connect("inproc://a")
        a.send(b'hi')
        rcvd = self.recv(b)
        assert rcvd == b'hi'

    def test_subscribe_method(self):
        pub, sub = self.create_bound_pair(zmq.PUB, zmq.SUB)
        sub.subscribe('prefix')
        sub.subscribe = 'c'
        p = zmq.Poller()
        p.register(sub, zmq.POLLIN)
        # wait for subscription handshake
        for i in range(100):
            pub.send(b'canary')
            events = p.poll(250)
            if events:
                break
        self.recv(sub)
        pub.send(b'prefixmessage')
        msg = self.recv(sub)
        assert msg == b'prefixmessage'
        sub.unsubscribe('prefix')
        pub.send(b'prefixmessage')
        events = p.poll(1000)
        assert events == []

    # CI often can't handle how much memory PyPy uses on this test
    @mark.skipif(
        (pypy and on_ci) or (sys.maxsize < 2**32) or (windows),
        reason="skipped on 32-bit, Windows, and PyPy on CI (memory-hungry test).",
    )
    @mark.large
    def test_large_send(self):
        c = os.urandom(1)
        N = 2**31 + 1
        try:
            buf = c * N
        except MemoryError as e:
            raise SkipTest(f"Not enough memory: {e}")
        a, b = self.create_bound_pair()
        try:
            a.send(buf, copy=False)
            rcvd = b.recv(copy=False)
        except MemoryError as e:
            raise SkipTest(f"Not enough memory: {e}")
        # sample the front and back of the received message
        # without checking the whole content
        byte = ord(c)
        view = memoryview(rcvd.buffer)
        assert len(view) == N
        assert view[0] == byte
        assert view[-1] == byte

    def test_custom_serialize(self):
        a, b = self.create_bound_pair(zmq.DEALER, zmq.ROUTER)

        def serialize(msg):
            frames = []
            frames.extend(msg.get('identities', []))
            content = json.dumps(msg['content']).encode('utf8')
            frames.append(content)
            return frames

        def deserialize(frames):
            identities = frames[:-1]
            content = json.loads(frames[-1].decode('utf8'))
            return {
                'identities': identities,
                'content': content,
            }

        msg = {
            'content': {
                'a': 5,
                'b': 'bee',
            }
        }
        a.send_serialized(msg, serialize)
        recvd = b.recv_serialized(deserialize)
        assert recvd['content'] == msg['content']
        assert recvd['identities']
        # bounce back, tests identities
        b.send_serialized(recvd, serialize)
        r2 = a.recv_serialized(deserialize)
        assert r2['content'] == msg['content']
        assert not r2['identities']


if have_gevent and not windows:
    import gevent

    class TestSocketGreen(GreenTest, TestSocket):
        test_bad_attr = GreenTest.skip_green
        test_close_after_destroy = GreenTest.skip_green
        _repr_cls = "zmq.green.Socket"

        def test_timeout(self):
            a, b = self.create_bound_pair()
            g = gevent.spawn_later(0.5, lambda: a.send(b'hi'))
            timeout = gevent.Timeout(0.1)
            timeout.start()
            self.assertRaises(gevent.Timeout, b.recv)
            g.kill()

        @mark.skipif(not hasattr(zmq, 'RCVTIMEO'), reason="requires RCVTIMEO")
        def test_warn_set_timeo(self):
            s = self.context.socket(zmq.REQ)
            with warnings.catch_warnings(record=True) as w:
                s.rcvtimeo = 5
            s.close()
            assert len(w) == 1
            assert w[0].category == UserWarning

        @mark.skipif(not hasattr(zmq, 'SNDTIMEO'), reason="requires SNDTIMEO")
        def test_warn_get_timeo(self):
            s = self.context.socket(zmq.REQ)
            with warnings.catch_warnings(record=True) as w:
                s.sndtimeo
            s.close()
            assert len(w) == 1
            assert w[0].category == UserWarning
pyzmq-26.4.0/tests/test_ssh.py000066400000000000000000000003531477374370200163520ustar00rootroot00000000000000from zmq.ssh.tunnel import select_random_ports


def test_random_ports():
    for i in range(4096):
        ports = select_random_ports(10)
        assert len(ports) == 10
        for p in ports:
            assert ports.count(p) == 1
pyzmq-26.4.0/tests/test_version.py000066400000000000000000000022701477374370200172420ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.


from unittest import TestCase

import zmq
from zmq.sugar import version


class TestVersion(TestCase):
    def test_pyzmq_version(self):
        vs = zmq.pyzmq_version()
        vs2 = zmq.__version__
        assert isinstance(vs, str)
        if zmq.__revision__:
            assert vs == '@'.join([vs2, zmq.__revision__])
        else:
            assert vs == vs2
        if version.VERSION_EXTRA:
            assert version.VERSION_EXTRA in vs
            assert version.VERSION_EXTRA in vs2

    def test_pyzmq_version_info(self):
        info = zmq.pyzmq_version_info()
        assert isinstance(info, tuple)
        for n in info[:3]:
            assert isinstance(n, int)
        if version.VERSION_EXTRA:
            assert len(info) == 4
            assert info[-1] == float('inf')
        else:
            assert len(info) == 3

    def test_zmq_version_info(self):
        info = zmq.zmq_version_info()
        assert isinstance(info, tuple)
        for n in info[:3]:
            assert isinstance(n, int)

    def test_zmq_version(self):
        v = zmq.zmq_version()
        assert isinstance(v, str)
pyzmq-26.4.0/tests/test_win32_shim.py000066400000000000000000000032021477374370200175330ustar00rootroot00000000000000import sys
import time
from functools import wraps

from pytest import mark

from zmq.utils.win32 import allow_interrupt
from zmq_test_utils import BaseZMQTestCase


def count_calls(f):
    @wraps(f)
    def _(*args, **kwds):
        try:
            return f(*args, **kwds)
        finally:
            _.__calls__ += 1

    _.__calls__ = 0
    return _


@mark.new_console
class TestWindowsConsoleControlHandler(BaseZMQTestCase):
    @mark.new_console
    @mark.skipif(not sys.platform.startswith('win'), reason='Windows only test')
    def test_handler(self):
        @count_calls
        def interrupt_polling():
            print('Caught CTRL-C!')

        from ctypes import windll
        from ctypes.wintypes import BOOL, DWORD

        kernel32 = windll.LoadLibrary('kernel32')

        # GenerateConsoleCtrlEvent sends a CTRL_C_EVENT (0) to all processes attached
        # to this console, which allow_interrupt should surface as KeyboardInterrupt
        GenerateConsoleCtrlEvent = kernel32.GenerateConsoleCtrlEvent
        GenerateConsoleCtrlEvent.argtypes = (DWORD, DWORD)
        GenerateConsoleCtrlEvent.restype = BOOL

        # Simulate CTRL-C event while handler is active.
        try:
            with allow_interrupt(interrupt_polling) as context:
                result = GenerateConsoleCtrlEvent(0, 0)
                # Sleep so that we give time to the handler to
                # capture the Ctrl-C event.
                time.sleep(0.5)
        except KeyboardInterrupt:
            pass
        else:
            if result == 0:
                raise OSError()
            else:
                self.fail('Expecting `KeyboardInterrupt` exception!')

        # Make sure our handler was called.
        assert interrupt_polling.__calls__ == 1
pyzmq-26.4.0/tests/test_z85.py000066400000000000000000000040741477374370200162070ustar00rootroot00000000000000"""Test Z85 encoding

confirm values and roundtrip with test values from the reference implementation.
"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from unittest import TestCase

from zmq.utils import z85
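# Z85 (ZMQ RFC 32) maps every 4 raw bytes to 5 printable ASCII characters, so the
# 32-byte CURVE keys below encode to 40-character strings.  For orientation, the
# spec's canonical example (not part of the test data here) is:
#     z85.encode(bytes([0x86, 0x4F, 0xD2, 0x6F, 0xB5, 0x59, 0xF7, 0x5B])) == b"HelloWorld"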


class TestZ85(TestCase):
    def test_client_public(self):
        client_public = (
            b"\xbb\x88\x47\x1d\x65\xe2\x65\x9b"
            b"\x30\xc5\x5a\x53\x21\xce\xbb\x5a"
            b"\xab\x2b\x70\xa3\x98\x64\x5c\x26"
            b"\xdc\xa2\xb2\xfc\xb4\x3f\xc5\x18"
        )
        encoded = z85.encode(client_public)

        assert encoded == b"Yne@$w-vo<fVvi]a<NY6T1ed:M$fCG*[IaLV{hID"
        decoded = z85.decode(encoded)
        assert decoded == client_public

    def test_server_secret(self):
        server_secret = (
            b"\x8e\x0b\xdd\x69\x76\x28\xb9\x1d"
            b"\x8f\x24\x55\x87\xee\x95\xc5\xb0"
            b"\x4d\x48\x96\x3f\x79\x25\x98\x77"
            b"\xb4\x9c\xd9\x06\x3a\xea\xd3\xb7"
        )
        encoded = z85.encode(server_secret)

        assert encoded == b"JTKVSB%%)wK0E.X)V>+}o?pNmC{O&4W4b!Ni{Lh6"
        decoded = z85.decode(encoded)
        assert decoded == server_secret
pyzmq-26.4.0/tests/test_zmqstream.py000066400000000000000000000101411477374370200175740ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.


import asyncio
import logging
import warnings

import pytest

import zmq
import zmq.asyncio

try:
    import tornado

    from zmq.eventloop import zmqstream
except ImportError:
    tornado = None  # type: ignore


pytestmark = pytest.mark.usefixtures("io_loop")


@pytest.fixture
async def push_pull(socket):
    push = zmqstream.ZMQStream(socket(zmq.PUSH))
    pull = zmqstream.ZMQStream(socket(zmq.PULL))
    port = push.bind_to_random_port('tcp://127.0.0.1')
    pull.connect(f'tcp://127.0.0.1:{port}')
    return (push, pull)


@pytest.fixture
def push(push_pull):
    push, pull = push_pull
    return push


@pytest.fixture
def pull(push_pull):
    push, pull = push_pull
    return pull


async def test_callable_check(pull):
    """Ensure callable check works."""

    pull.on_send(lambda *args: None)
    pull.on_recv(lambda *args: None)
    with pytest.raises(AssertionError):
        pull.on_recv(1)
    with pytest.raises(AssertionError):
        pull.on_send(1)
    with pytest.raises(AssertionError):
        pull.on_recv(zmq)


async def test_on_recv_basic(push, pull):
    sent = [b'basic']
    push.send_multipart(sent)
    f = asyncio.Future()

    def callback(msg):
        f.set_result(msg)

    pull.on_recv(callback)
    recvd = await asyncio.wait_for(f, timeout=5)
    assert recvd == sent


async def test_on_recv_wake(push, pull):
    sent = [b'wake']

    f = asyncio.Future()
    pull.on_recv(f.set_result)
    await asyncio.sleep(0.5)
    push.send_multipart(sent)
    recvd = await asyncio.wait_for(f, timeout=5)
    assert recvd == sent


async def test_on_recv_async(push, pull):
    if tornado.version_info < (5,):
        pytest.skip()
    sent = [b'wake']

    f = asyncio.Future()

    async def callback(msg):
        await asyncio.sleep(0.1)
        f.set_result(msg)

    pull.on_recv(callback)
    await asyncio.sleep(0.5)
    push.send_multipart(sent)
    recvd = await asyncio.wait_for(f, timeout=5)
    assert recvd == sent


async def test_on_recv_async_error(push, pull, caplog):
    sent = [b'wake']

    f = asyncio.Future()

    async def callback(msg):
        f.set_result(msg)
        1 / 0

    pull.on_recv(callback)
    await asyncio.sleep(0.1)
    with caplog.at_level(logging.ERROR, logger=zmqstream.gen_log.name):
        push.send_multipart(sent)
        recvd = await asyncio.wait_for(f, timeout=5)
        assert recvd == sent
        # logging error takes a tick later
        await asyncio.sleep(0.5)

    messages = [
        x.message
        for x in caplog.get_records("call")
        if x.name == zmqstream.gen_log.name
    ]
    assert "Uncaught exception in ZMQStream callback" in "\n".join(messages)


async def test_shadow_socket(context):
    with context.socket(zmq.PUSH, socket_class=zmq.asyncio.Socket) as socket:
        with pytest.warns(RuntimeWarning):
            stream = zmqstream.ZMQStream(socket)
        assert type(stream.socket) is zmq.Socket
        assert stream.socket.underlying == socket.underlying
        stream.close()


async def test_shadow_socket_close(context, caplog):
    with context.socket(zmq.PUSH) as push, context.socket(zmq.PULL) as pull:
        push.linger = pull.linger = 0
        port = push.bind_to_random_port('tcp://127.0.0.1')
        pull.connect(f'tcp://127.0.0.1:{port}')
        shadow_pull = zmq.Socket.shadow(pull)
        stream = zmqstream.ZMQStream(shadow_pull)
        # send some messages
        for i in range(10):
            push.send_string(str(i))
        # make sure at least one message has been delivered
        pull.recv()
        # register callback
        # this should schedule event callback on the next tick
        stream.on_recv(print)
        # close the shadowed socket
        pull.close()
    # run the event loop, which should see some events on the shadow socket
    # but the socket has been closed!
    with warnings.catch_warnings(record=True) as records:
        await asyncio.sleep(0.2)
    warning_text = "\n".join(str(r.message) for r in records)
    assert "after closing socket" in warning_text
    assert "closed socket" in caplog.text
pyzmq-26.4.0/tests/zmq_test_utils.py000066400000000000000000000173641477374370200176160ustar00rootroot00000000000000# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.

import os
import platform
import signal
import sys
import time
import warnings
from functools import partial
from threading import Thread
from typing import List
from unittest import SkipTest, TestCase

from pytest import mark

import zmq
from zmq.utils import jsonapi

try:
    import gevent

    from zmq import green as gzmq

    have_gevent = True
except ImportError:
    have_gevent = False


# The CFFI backend is the default on PyPy but can also be selected on CPython;
# tests treat "running on the CFFI backend" the same as "running on PyPy" for skips.
CFFI = zmq.backend.Socket.__module__.startswith("zmq.backend.cffi.")
PYPY = platform.python_implementation() == 'PyPy' or CFFI

# -----------------------------------------------------------------------------
# skip decorators (directly from unittest)
# -----------------------------------------------------------------------------


def _id(x):
    return x


skip_pypy = mark.skipif(PYPY, reason="Doesn't work on CFFI backend")
skip_cpython_cffi = mark.skipif(
    platform.python_implementation() == 'CPython' and CFFI,
    reason="CFFI on CPython is unsupported",
)
require_zmq_4 = mark.skipif(zmq.zmq_version_info() < (4,), reason="requires zmq >= 4")

# -----------------------------------------------------------------------------
# Base test class
# -----------------------------------------------------------------------------


def term_context(ctx, timeout):
    """Terminate a context with a timeout"""
    t = Thread(target=ctx.term)
    t.daemon = True
    t.start()
    t.join(timeout=timeout)
    if t.is_alive():
        # reset Context.instance, so the failure to term doesn't corrupt subsequent tests
        zmq.sugar.context.Context._instance = None
        raise RuntimeError(
            "context could not terminate, open sockets likely remain in test"
        )


class BaseZMQTestCase(TestCase):
    green = False
    teardown_timeout = 10
    test_timeout_seconds = int(os.environ.get("ZMQ_TEST_TIMEOUT") or 60)
    sockets: List[zmq.Socket]

    @property
    def _should_test_timeout(self):
        return hasattr(signal, 'SIGALRM') and self.test_timeout_seconds

    @property
    def Context(self):
        if self.green:
            return gzmq.Context
        else:
            return zmq.Context

    def socket(self, socket_type):
        s = self.context.socket(socket_type)
        self.sockets.append(s)
        return s

    def _alarm_timeout(self, timeout, *args):
        raise TimeoutError(f"Test did not complete in {timeout} seconds")

    def setUp(self):
        super().setUp()
        if self.green and not have_gevent:
            raise SkipTest("requires gevent")

        self.context = self.Context.instance()
        self.sockets = []
        if self._should_test_timeout:
            # use SIGALRM to avoid test hangs
            signal.signal(
                signal.SIGALRM, partial(self._alarm_timeout, self.test_timeout_seconds)
            )
            signal.alarm(self.test_timeout_seconds)

    def tearDown(self):
        if self._should_test_timeout:
            # cancel the timeout alarm, if there was one
            signal.alarm(0)
        contexts = {self.context}
        while self.sockets:
            sock = self.sockets.pop()
            contexts.add(sock.context)  # in case additional contexts are created
            sock.close(0)
        for ctx in contexts:
            try:
                term_context(ctx, self.teardown_timeout)
            except Exception:
                # reset Context.instance, so the failure to term doesn't corrupt subsequent tests
                zmq.sugar.context.Context._instance = None
                raise

        super().tearDown()

    def create_bound_pair(
        self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'
    ):
        """Create a bound socket pair using a random port."""
        s1 = self.context.socket(type1)
        s1.setsockopt(zmq.LINGER, 0)
        port = s1.bind_to_random_port(interface)
        s2 = self.context.socket(type2)
        s2.setsockopt(zmq.LINGER, 0)
        s2.connect(f'{interface}:{port}')
        self.sockets.extend([s1, s2])
        return s1, s2

    def ping_pong(self, s1, s2, msg):
        s1.send(msg)
        msg2 = s2.recv()
        s2.send(msg2)
        msg3 = s1.recv()
        return msg3

    def ping_pong_json(self, s1, s2, o):
        if jsonapi.jsonmod is None:
            raise SkipTest("No json library")
        s1.send_json(o)
        o2 = s2.recv_json()
        s2.send_json(o2)
        o3 = s1.recv_json()
        return o3

    def ping_pong_pyobj(self, s1, s2, o):
        s1.send_pyobj(o)
        o2 = s2.recv_pyobj()
        s2.send_pyobj(o2)
        o3 = s1.recv_pyobj()
        return o3

    def assertRaisesErrno(self, errno, func, *args, **kwargs):
        try:
            func(*args, **kwargs)
        except zmq.ZMQError as e:
            self.assertEqual(
                e.errno,
                errno,
                f"wrong error raised, expected '{zmq.ZMQError(errno)}' \
got '{zmq.ZMQError(e.errno)}'",
            )
        else:
            self.fail("Function did not raise any error")

    def _select_recv(self, multipart, socket, **kwargs):
        """call recv[_multipart] in a way that raises if there is nothing to receive"""
        if zmq.zmq_version_info() >= (3, 1, 0):
            # zmq 3.1 has a bug, where poll can return false positives,
            # so we wait a little bit just in case
            # See LIBZMQ-280 on JIRA
            time.sleep(0.1)

        r, w, x = zmq.select([socket], [], [], timeout=kwargs.pop('timeout', 5))
        assert len(r) > 0, "Should have received a message"
        kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0)

        recv = socket.recv_multipart if multipart else socket.recv
        return recv(**kwargs)

    def recv(self, socket, **kwargs):
        """call recv in a way that raises if there is nothing to receive"""
        return self._select_recv(False, socket, **kwargs)

    def recv_multipart(self, socket, **kwargs):
        """call recv_multipart in a way that raises if there is nothing to receive"""
        return self._select_recv(True, socket, **kwargs)


class PollZMQTestCase(BaseZMQTestCase):
    pass


class GreenTest:
    """Mixin for making green versions of test classes"""

    green = True
    teardown_timeout = 10

    def assertRaisesErrno(self, errno, func, *args, **kwargs):
        if errno == zmq.EAGAIN:
            raise SkipTest("Skipping because we're green.")
        try:
            func(*args, **kwargs)
        except zmq.ZMQError:
            e = sys.exc_info()[1]
            self.assertEqual(
                e.errno,
                errno,
                f"wrong error raised, expected '{zmq.ZMQError(errno)}' \
got '{zmq.ZMQError(e.errno)}'",
            )
        else:
            self.fail("Function did not raise any error")

    def tearDown(self):
        if self._should_test_timeout:
            # cancel the timeout alarm, if there was one
            signal.alarm(0)
        contexts = {self.context}
        while self.sockets:
            sock = self.sockets.pop()
            contexts.add(sock.context)  # in case additional contexts are created
            sock.close()
        try:
            gevent.joinall(
                [gevent.spawn(ctx.term) for ctx in contexts],
                timeout=self.teardown_timeout,
                raise_error=True,
            )
        except gevent.Timeout:
            raise RuntimeError(
                "context could not terminate, open sockets likely remain in test"
            )

    def skip_green(self):
        raise SkipTest("Skipping because we are green")


def skip_green(f):
    def skipping_test(self, *args, **kwargs):
        if self.green:
            raise SkipTest("Skipping because we are green")
        else:
            return f(self, *args, **kwargs)

    return skipping_test
pyzmq-26.4.0/tools/000077500000000000000000000000001477374370200141415ustar00rootroot00000000000000pyzmq-26.4.0/tools/backend_imports.py000066400000000000000000000003001477374370200176500ustar00rootroot00000000000000import zmq.backend
import zmq.sugar
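# Print a "from .backend import (...)" re-export block for the backend names that
# zmq.sugar does not already provide; the intended paste target (presumably
# zmq/__init__.py) is an assumption, the script itself only writes to stdout.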

print("from .backend import (")
for name in sorted(set(zmq.backend.__all__).difference(zmq.sugar.__all__)):
    print(f"    {name} as {name},")
print(")")
pyzmq-26.4.0/tools/circle_wheels.py000066400000000000000000000054101477374370200173230ustar00rootroot00000000000000import os
import sys
import time
from pathlib import Path

import requests

s = requests.Session()
if os.getenv("CIRCLECI_TOKEN"):
    # get credentials
    # not _required_
    s.headers["Circle-Token"] = os.environ["CIRCLECI_TOKEN"]

slug = "gh/zeromq/pyzmq"
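
# What this script does: look up the CircleCI pipeline for the current commit
# (GITHUB_SHA, or PR_HEAD_SHA for pull requests), wait for all of its workflows
# to finish, then download every job's artifacts into the relative paths
# CircleCI reports for them.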


def get(url):
    """Make an API request"""
    print(f"Getting {url}")
    r = s.get(url)
    r.raise_for_status()
    return r.json()


def get_pipeline(sha):
    print(f"Getting pipeline for {sha}")
    pipelines = get(f"https://circleci.com/api/v2/project/{slug}/pipeline")
    for pipeline in pipelines["items"]:
        print(
            pipeline['number'], pipeline['vcs']['revision'], pipeline['vcs'].get('tag')
        )
        if pipeline['vcs']['revision'] == sha:
            return pipeline
    print(f"No pipeline found for {sha}")
    return None


def get_workflows(pipeline):
    print(f"Getting workflows for pipeline {pipeline['number']}")
    return get(f"https://circleci.com/api/v2/pipeline/{pipeline['id']}/workflow")[
        "items"
    ]


def get_jobs(workflow):
    print(f"Getting jobs for for workflow {workflow['name']}")
    return get(f"https://circleci.com/api/v2/workflow/{workflow['id']}/job")["items"]


def download_artifact(artifact):
    print(f"Downloading {artifact['path']}")
    p = Path(artifact['path'])
    p.parent.mkdir(exist_ok=True)
    with p.open("wb") as f:
        r = s.get(artifact["url"], stream=True)
        for chunk in r.iter_content(65536):
            f.write(chunk)


def download_artifacts(job):
    print(f"Downloading artifacts for {job['job_number']}")
    for artifact in get(
        f"https://circleci.com/api/v2/project/{slug}/{job['job_number']}/artifacts"
    )["items"]:
        download_artifact(artifact)


def main():
    # circleci tracks the PR head,
    # but github only reports the PR merge commit
    sha = os.getenv("PR_HEAD_SHA")
    if not sha:
        sha = os.environ["GITHUB_SHA"]

    for _ in range(10):
        pipeline = get_pipeline(sha)
        if pipeline is None:
            # wait and try again
            time.sleep(10)
        else:
            break
    else:
        # retries exhausted without finding a pipeline; bail out with a clear error
        sys.exit(f"No CircleCI pipeline found for {sha}")
    workflows = get_workflows(pipeline)
    while not all(w["stopped_at"] for w in workflows):
        for w in workflows:
            print(
                f"Workflow {pipeline['number']}/{w['name']}: {w['status']} started at {w['started_at']}"
            )
        time.sleep(15)
        workflows = get_workflows(pipeline)

    for workflow in workflows:
        if workflow["status"] != "success":
            sys.exit(
                f"workflow {workflow['name']} did not succeed: {workflow['status']}"
            )

    jobs = []
    for workflow in workflows:
        jobs.extend(get_jobs(workflow))
    for job in jobs:
        download_artifacts(job)


if __name__ == "__main__":
    main()
pyzmq-26.4.0/tools/collect_cmake.py000066400000000000000000000032041477374370200172770ustar00rootroot00000000000000"""
collect cmake -LH output

for inclusion in docs
"""

import sys
from pathlib import Path
from subprocess import PIPE, run
from tempfile import TemporaryDirectory

here = Path(__file__).parent.absolute()
repo = here.parent
home = str(Path.home())


def summarize_cmake_output(text: str) -> str:
    """Summarize cmake -LH output

    Formats help strings more readably and excludes common CMAKE_/FETCHCONTENT_ options.
    """
    text = text.replace(sys.prefix, "$PREFIX")
    text = text.replace(home, "~")
    chunks = text.split("\n\n")
    new_chunks = []
    for chunk in chunks:
        if not chunk:
            continue
        lines = chunk.splitlines()
        doc_lines, assignment = lines[:-1], lines[-1]
        if assignment.startswith(("CMAKE_", "FETCHCONTENT_")):
            continue
        doc_lines = [
            "# " + doc_line.lstrip("/ ")
            for doc_line in doc_lines
            if not doc_line.startswith("--")
        ]
        new_chunks.append("\n".join(doc_lines + [assignment]))
    return "\n\n".join(new_chunks)


def summarize_cmake(path: Path) -> str:
    """Collect summarized cmake -LH output from a repo"""
    path = Path(path).absolute()
    with TemporaryDirectory() as td:
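        # run cmake in a scratch directory so configure output (CMakeCache.txt,
        # CMakeFiles/) does not end up in the source tree being summarized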
        p = run(
            ["cmake", "-LH", str(path)],
            text=True,
            stderr=sys.stderr,
            stdout=PIPE,
            check=False,
            cwd=td,
        )
    return summarize_cmake_output(p.stdout)


def main():
    if len(sys.argv) < 2:
        paths = [repo]
    else:
        paths = sys.argv[1:]
    for path in paths:
        print(path)
        print(summarize_cmake(path))
        print("\n\n")


if __name__ == "__main__":
    main()
pyzmq-26.4.0/tools/find_vcredist.py000066400000000000000000000031671477374370200173450ustar00rootroot00000000000000"""
Locate the latest MSVC redist dir

and add it to $GITHUB_PATH so delvewheel can find the DLLs

finds 'C:/Program Files/Microsoft Visual Studio/2022/Enterprise/VC/Redist/MSVC/14.38.33135/arm64/Microsoft.VC143.CRT'
as of writing (2024-02-27)
"""

import os
import sys
from pathlib import Path


def log(msg):
    """Log a message to stderr"""
    print(msg, file=sys.stderr)


vs_version = "2022"
arch = os.environ.get("CIBW_ARCHS", "arm64")
vc_redist_path = (
    Path("C:/Program Files/Microsoft Visual Studio")
    / vs_version
    / "Enterprise/VC/Redist/MSVC"
)

log("Found VC redist versions:")
for v in vc_redist_path.glob("*"):
    log(v)


def _sort_key(dll_path):
    # redist paths look like
    # C:/.../MSVC/14.38.33135/
    # sort by the version number in the directory below MSVC
    version_dir = dll_path.relative_to(vc_redist_path).parents[-2]
    version_str = version_dir.name
    try:
        return tuple(int(part) for part in version_str.split("."))
    except ValueError:
        log(f"Not an apparent version: {version_str}")
        return (0, 0, 0, version_str)


log(f"Found msvcp for {arch}:")
# looking for .../MSVC/x.y.z/arm64/Microsoft.VC143.CRT/msvcp140.dll
# specifically *, not ** because we don't want onecore/arm64/...
found_arm_msvcp = sorted(
    vc_redist_path.glob(f"*/{arch}/**/msvcp140.dll"), key=_sort_key
)

for dll in found_arm_msvcp:
    log(dll)

if not found_arm_msvcp:
    sys.exit(f"No msvcp140.dll found for {arch} under {vc_redist_path}")

selected_path = found_arm_msvcp[-1].parent
log(f"Selecting {selected_path}")

if os.environ.get("GITHUB_PATH"):
    log(f"Adding {selected_path} to $GITHUB_PATH")
    with open(os.environ["GITHUB_PATH"], "a") as f:
        f.write(str(selected_path) + "\n")
pyzmq-26.4.0/tools/install_libzmq.sh000066400000000000000000000047651477374370200175350ustar00rootroot00000000000000#!/usr/bin/env bash
# script to install libzmq/libsodium for use in wheels
set -ex
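# Rough usage sketch (assumptions: run from the pyzmq repo root, with ZMQ_PREFIX set
# to the desired install prefix, defaulting to /usr/local):
#   ZMQ_PREFIX=/usr/local bash tools/install_libzmq.sh
# Builds libsodium and then libzmq (CURVE enabled via libsodium) into $ZMQ_PREFIX,
# copying their licenses into ./licenses for inclusion in wheels.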
LIBSODIUM_VERSION=$(python buildutils/bundle.py libsodium)
LIBZMQ_VERSION=$(python buildutils/bundle.py)
PYZMQ_DIR="$PWD"
LICENSE_DIR="$PYZMQ_DIR/licenses"
test -d "$LICENSE_DIR" || mkdir "$LICENSE_DIR"
SHLIB_EXT=".so"
if [[ "$(uname)" == "Darwin" ]]; then
    SHLIB_EXT=".dylib"
    # make sure deployment target is set
    echo "${MACOSX_DEPLOYMENT_TARGET=}"
    test ! -z "${MACOSX_DEPLOYMENT_TARGET}"
    # need LT_MULTI_MODULE or libtool will strip out
    # all multi-arch symbols at the last step
    export LT_MULTI_MODULE=1
    ARCHS="x86_64"
    case "${CIBW_ARCHS_MACOS:-${CIBW_ARCHS:-auto}}" in
        "universal2")
            ARCHS="x86_64 arm64"
            ;;
        "arm64")
            ARCHS="arm64"
            ;;
        "x86_64")
            ARCHS="x86_64"
            ;;
        "auto")
            ;;
        *)
            echo "Unexpected arch: ${CIBW_ARCHS_MACOS}"
            exit 1
            ;;
    esac
    echo "building libzmq for mac ${ARCHS}"
    export CXX="${CC:-clang++}"
    for arch in ${ARCHS}; do
        # seem to need ARCH in CXX for libtool
        export CXX="${CXX} -arch ${arch}"
        export CFLAGS="-arch ${arch} ${CFLAGS:-}"
        export CXXFLAGS="-arch ${arch} ${CXXFLAGS:-}"
        export LDFLAGS="-arch ${arch} ${LDFLAGS:-}"
    done
fi

PREFIX="${ZMQ_PREFIX:-/usr/local}"

if [ -f "$PREFIX/lib/libzmq${SHLIB_EXT}" ]; then
  echo "using $PREFIX/lib/libzmq${SHLIB_EXT}"
  exit 0
fi

# add rpath so auditwheel patches it
export LDFLAGS="${LDFLAGS} -Wl,-rpath,$PREFIX/lib"

curl -L -O "https://github.com/jedisct1/libsodium/releases/download/${LIBSODIUM_VERSION}-RELEASE/libsodium-${LIBSODIUM_VERSION}.tar.gz"

curl -L -O "https://github.com/zeromq/libzmq/releases/download/v${LIBZMQ_VERSION}/zeromq-${LIBZMQ_VERSION}.tar.gz"

tar -xzf libsodium-${LIBSODIUM_VERSION}.tar.gz
cd libsodium-*/
cp LICENSE "${LICENSE_DIR}/LICENSE.libsodium.txt"
./configure --prefix="$PREFIX"
make -j4
make install

cd ..

which ldconfig && ldconfig || true

# make sure to find our libsodium
export PKG_CONFIG_PATH=$PREFIX/lib/pkgconfig

tar -xzf zeromq-${LIBZMQ_VERSION}.tar.gz
cd zeromq-${LIBZMQ_VERSION}
cp LICENSE "${LICENSE_DIR}/LICENSE.zeromq.txt"
# avoid error on warning
export CXXFLAGS="-Wno-error ${CXXFLAGS:-}"

./configure --prefix="$PREFIX" --disable-perf --without-docs --enable-curve --with-libsodium --disable-drafts --disable-libsodium_randombytes_close
make -j4
make install

which ldconfig && ldconfig || true
pyzmq-26.4.0/tools/showvcvars.py000066400000000000000000000003671477374370200167260ustar00rootroot00000000000000#!/usr/bin/env python3

import pprint

from setuptools import msvc
from setuptools._distutils.util import get_platform

plat = get_platform()
print(f"platform: {plat}")

vcvars = msvc.msvc14_get_vc_env(plat)
print("vcvars:")
pprint.pprint(vcvars)
pyzmq-26.4.0/tools/test_sdist.py000066400000000000000000000027611477374370200167060ustar00rootroot00000000000000"""
Verify the contents of our sdists
"""

import tarfile
from fnmatch import fnmatch
from pathlib import Path
from subprocess import run

import pytest

repo = Path(__file__).parent.parent.resolve()


@pytest.fixture
def sdist():
    path = list(repo.glob("dist/*.tar.gz"))[0]
    return tarfile.open(path)


@pytest.fixture
def sdist_files(sdist):
    paths = set()
    for name in sdist.getnames():
        # 'pyzmq-25.1.0.dev0/zmq/utils/config.json'
        root, _, relative = name.partition("/")
        paths.add(relative)
    return paths


@pytest.fixture
def git_files():
    p = run(["git", "ls-files"], cwd=repo, capture_output=True, text=True)
    paths = set()
    for line in p.stdout.splitlines():
        paths.add(line)
    return paths


def test_git_files(sdist_files, git_files):
    missing_git_files = git_files.difference(sdist_files)
    assert missing_git_files == set()


@pytest.mark.parametrize(
    "path",
    [
        # generated files that should be in the dist
        "PKG-INFO",
    ],
)
def test_included(sdist_files, path):
    assert path in sdist_files


@pytest.mark.parametrize(
    "path",
    [
        ".git",
        "build",
        "dist",
        "**/*.dylib",
        "**/*.so",
        "**/*.a",
        "**/*.lib",
        "**/__pycache__",
        "bundled",
        "CMakeCache.txt",
        "CMakeFiles",
        "cmake_install.cmake",
    ],
)
def test_excluded(sdist_files, path):
    matches = [f for f in sdist_files if fnmatch(f, path)]
    assert not matches
pyzmq-26.4.0/tools/test_wheel.py000066400000000000000000000042601477374370200166600ustar00rootroot00000000000000"""Light tests to verify that the wheel works

Just import things
"""

import os
import platform
import sys
from fnmatch import fnmatch

import pytest

try:
    from importlib.metadata import distribution
except ImportError:
    from importlib_metadata import distribution


@pytest.mark.parametrize("feature", ["curve", "ipc"])
def test_has(feature):
    import zmq

    if feature == 'ipc' and sys.platform.startswith('win32'):
        # IPC support is broken in enough cases on Windows
        # that we can't ship wheels with it (for now)
        assert not zmq.has(feature)
    else:
        assert zmq.has(feature)


def test_simple_socket():
    import zmq

    ctx = zmq.Context()
    s = ctx.socket(zmq.PUSH)
    s.close()
    ctx.term()


@pytest.mark.skipif(
    sys.platform != "win32" or platform.python_implementation() != "CPython",
    reason="only on CPython + Windows",
)
def test_bundle_msvcp():
    import zmq

    zmq_dir = os.path.abspath(os.path.dirname(zmq.__file__))
    # pyzmq.libs is *next to* zmq itself
    pyzmq_lib_dir = os.path.join(zmq_dir, os.pardir, "pyzmq.libs")
    dlls = []
    if os.path.exists(pyzmq_lib_dir):
        dlls = sorted(
            name for name in os.listdir(pyzmq_lib_dir) if name.endswith(".dll")
        )
    print(dlls)
    # Is concrt140 needed? delvewheel doesn't detect it anymore
    # check for vcruntime?
    should_bundle = []
    shouldnt_bundle = ["msvcp140*.dll"]

    for pattern in shouldnt_bundle:
        matched = [dll for dll in dlls if fnmatch(dll, pattern)]
        assert not matched

    for pattern in should_bundle:
        matched = [dll for dll in dlls if fnmatch(dll, pattern)]
        assert matched


@pytest.mark.parametrize(
    "license_name",
    [
        "LICENSE.md",
        "LICENSE.zeromq.txt",
        "LICENSE.libsodium.txt",
    ],
)
def test_license_files(license_name):
    pyzmq = distribution("pyzmq")
    license_files = [f for f in pyzmq.files if "licenses" in str(f)]
    license_file_names = [f.name for f in license_files]
    assert license_name in license_file_names
    for license_file in license_files:
        if license_file.name == license_name:
            break
    assert license_file.locate().exists()
pyzmq-26.4.0/tools/wheel-requirements.txt000066400000000000000000000001011477374370200205170ustar00rootroot00000000000000cibuildwheel==2.23.*
delvewheel==1.10.*; sys_platform == 'win32'
pyzmq-26.4.0/tools/wheel_summary.py000066400000000000000000000016221477374370200173750ustar00rootroot00000000000000"""Print a markdown table of sdist/wheel outputs

for use in github job summary
"""

from pathlib import Path


def make_summary(dist_dir: str | Path) -> str:
    """Render a list of files as a markdown table

    For use summarizing wheel outputs
    """

    dist_dir = Path(dist_dir)
    all_dists = sorted(dist_dir.glob("*"))
    lines = [
        f"### {len(all_dists)} files",
        "",
        "| filename | size |",
        "|----------|------|",
    ]
    for path in all_dists:
        size = path.stat().st_size
        if size < 1e6:
            size_s = f"{size / 1e3:.0f} kB"
        else:
            size_s = f"{size / 1e6:.1f} MB"
        lines.append(f"| {path.name} | {size_s} |")
    return "\n".join(lines)


if __name__ == "__main__":
    import sys

    if len(sys.argv) > 1:
        dist_dir = Path(sys.argv[1])
    else:
        dist_dir = Path("dist")
    print(make_summary(dist_dir))
pyzmq-26.4.0/zmq/000077500000000000000000000000001477374370200136105ustar00rootroot00000000000000pyzmq-26.4.0/zmq/__init__.pxd000066400000000000000000000000771477374370200160700ustar00rootroot00000000000000from zmq.backend.cython cimport Context, Frame, Socket, libzmq
pyzmq-26.4.0/zmq/__init__.py000066400000000000000000000042701477374370200157240ustar00rootroot00000000000000"""Python bindings for 0MQ"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from __future__ import annotations

import os
import sys
from contextlib import contextmanager


@contextmanager
def _libs_on_path():
    """context manager for libs directory on $PATH

    Works around mysterious issue where os.add_dll_directory
    does not resolve imports (conda-forge Python >= 3.8)
    """

    if not sys.platform.startswith("win"):
        yield
        return

    libs_dir = os.path.abspath(
        os.path.join(
            os.path.dirname(__file__),
            os.pardir,
            "pyzmq.libs",
        )
    )
    if not os.path.exists(libs_dir):
        # no bundled libs
        yield
        return

    path_before = os.environ.get("PATH")
    try:
        os.environ["PATH"] = os.pathsep.join([path_before or "", libs_dir])
        yield
    finally:
        if path_before is None:
            os.environ.pop("PATH")
        else:
            os.environ["PATH"] = path_before


# zmq top-level imports

# workaround for Windows
with _libs_on_path():
    from zmq import backend

from . import constants  # noqa
from .constants import *  # noqa
from zmq.backend import *  # noqa
from zmq import sugar
from zmq.sugar import *  # noqa


def get_includes():
    """Return a list of directories to include for linking against pyzmq with cython."""
    from os.path import abspath, dirname, exists, join, pardir

    base = dirname(__file__)
    parent = abspath(join(base, pardir))
    includes = [parent] + [join(parent, base, subdir) for subdir in ('utils',)]
    if exists(join(parent, base, 'include')):
        includes.append(join(parent, base, 'include'))
    return includes


def get_library_dirs():
    """Return a list of directories used to link against pyzmq's bundled libzmq."""
    from os.path import abspath, dirname, join, pardir

    base = dirname(__file__)
    parent = abspath(join(base, pardir))
    return [join(parent, base)]


COPY_THRESHOLD = 65536
DRAFT_API = backend.has("draft")

__all__ = (
    [
        'get_includes',
        'COPY_THRESHOLD',
        'DRAFT_API',
    ]
    + constants.__all__
    + sugar.__all__
    + backend.__all__
)
pyzmq-26.4.0/zmq/__init__.pyi000066400000000000000000000016321477374370200160740ustar00rootroot00000000000000from typing import List

from . import backend, sugar

COPY_THRESHOLD: int
DRAFT_API: bool
__version__: str

# mypy doesn't like overwriting symbols with * so be explicit
# about what comes from backend, not from sugar
# see tools/backend_imports.py to generate this list
# note: `x as x` is required for re-export
# see https://github.com/python/mypy/issues/2190
from .backend import IPC_PATH_MAX_LEN as IPC_PATH_MAX_LEN
from .backend import curve_keypair as curve_keypair
from .backend import curve_public as curve_public
from .backend import has as has
from .backend import proxy as proxy
from .backend import proxy_steerable as proxy_steerable
from .backend import strerror as strerror
from .backend import zmq_errno as zmq_errno
from .backend import zmq_poll as zmq_poll
from .constants import *
from .error import *
from .sugar import *

def get_includes() -> list[str]: ...
def get_library_dirs() -> list[str]: ...
pyzmq-26.4.0/zmq/_future.py000066400000000000000000000574241477374370200156470ustar00rootroot00000000000000"""Future-returning APIs for coroutines."""

# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

import warnings
from asyncio import Future
from collections import deque
from functools import partial
from itertools import chain
from typing import (
    Any,
    Awaitable,
    Callable,
    NamedTuple,
    TypeVar,
    cast,
)

import zmq as _zmq
from zmq import EVENTS, POLLIN, POLLOUT


class _FutureEvent(NamedTuple):
    future: Future
    kind: str
    args: tuple
    kwargs: dict
    msg: Any
    timer: Any


# These are incomplete classes and need a Mixin for compatibility with an eventloop
# defining the following attributes:
#
# _Future
# _READ
# _WRITE
# _default_loop()


class _Async:
    """Mixin for common async logic"""

    _current_loop: Any = None
    _Future: type[Future]

    def _get_loop(self) -> Any:
        """Get event loop

        Notice if event loop has changed,
        and register init_io_state on activation of a new event loop
        """
        if self._current_loop is None:
            self._current_loop = self._default_loop()
            self._init_io_state(self._current_loop)
            return self._current_loop
        current_loop = self._default_loop()
        if current_loop is not self._current_loop:
            # warn? This means a socket is being used in multiple loops!
            self._current_loop = current_loop
            self._init_io_state(current_loop)
        return current_loop

    def _default_loop(self) -> Any:
        raise NotImplementedError("Must be implemented in a subclass")

    def _init_io_state(self, loop=None) -> None:
        pass


class _AsyncPoller(_Async, _zmq.Poller):
    """Poller that returns a Future on poll, instead of blocking."""

    _socket_class: type[_AsyncSocket]
    _READ: int
    _WRITE: int
    raw_sockets: list[Any]

    def _watch_raw_socket(self, loop: Any, socket: Any, evt: int, f: Callable) -> None:
        """Schedule callback for a raw socket"""
        raise NotImplementedError()

    def _unwatch_raw_sockets(self, loop: Any, *sockets: Any) -> None:
        """Unschedule callback for a raw socket"""
        raise NotImplementedError()

    def poll(self, timeout=-1) -> Awaitable[list[tuple[Any, int]]]:  # type: ignore
        """Return a Future for a poll event"""
        future = self._Future()
        if timeout == 0:
            try:
                result = super().poll(0)
            except Exception as e:
                future.set_exception(e)
            else:
                future.set_result(result)
            return future

        loop = self._get_loop()

        # register Future to be called as soon as any event is available on any socket
        watcher = self._Future()

        # watch raw sockets:
        raw_sockets: list[Any] = []

        def wake_raw(*args):
            if not watcher.done():
                watcher.set_result(None)

        watcher.add_done_callback(
            lambda f: self._unwatch_raw_sockets(loop, *raw_sockets)
        )

        wrapped_sockets: list[_AsyncSocket] = []

        def _clear_wrapper_io(f):
            for s in wrapped_sockets:
                s._clear_io_state()

        for socket, mask in self.sockets:
            if isinstance(socket, _zmq.Socket):
                if not isinstance(socket, self._socket_class):
                    # it's a blocking zmq.Socket, wrap it in async
                    socket = self._socket_class.from_socket(socket)
                    wrapped_sockets.append(socket)
                if mask & _zmq.POLLIN:
                    socket._add_recv_event('poll', future=watcher)
                if mask & _zmq.POLLOUT:
                    socket._add_send_event('poll', future=watcher)
            else:
                raw_sockets.append(socket)
                evt = 0
                if mask & _zmq.POLLIN:
                    evt |= self._READ
                if mask & _zmq.POLLOUT:
                    evt |= self._WRITE
                self._watch_raw_socket(loop, socket, evt, wake_raw)

        def on_poll_ready(f):
            if future.done():
                return
            if watcher.cancelled():
                try:
                    future.cancel()
                except RuntimeError:
                    # RuntimeError may be called during teardown
                    pass
                return
            if watcher.exception():
                future.set_exception(watcher.exception())
            else:
                try:
                    result = super(_AsyncPoller, self).poll(0)
                except Exception as e:
                    future.set_exception(e)
                else:
                    future.set_result(result)

        watcher.add_done_callback(on_poll_ready)

        if wrapped_sockets:
            watcher.add_done_callback(_clear_wrapper_io)

        if timeout is not None and timeout > 0:
            # schedule cancel to fire on poll timeout, if any
            def trigger_timeout():
                if not watcher.done():
                    watcher.set_result(None)

            timeout_handle = loop.call_later(1e-3 * timeout, trigger_timeout)

            def cancel_timeout(f):
                if hasattr(timeout_handle, 'cancel'):
                    timeout_handle.cancel()
                else:
                    loop.remove_timeout(timeout_handle)

            future.add_done_callback(cancel_timeout)

        def cancel_watcher(f):
            if not watcher.done():
                watcher.cancel()

        future.add_done_callback(cancel_watcher)

        return future
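
    # Editorial usage sketch for a concrete subclass such as zmq.asyncio.Poller
    # (the socket and timeout below are illustrative):
    #
    #     poller.register(sock, zmq.POLLIN)
    #     events = dict(await poller.poll(timeout=1000))
    #     if events.get(sock, 0) & zmq.POLLIN:
    #         msg = await sock.recv_multipart()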


class _NoTimer:
    @staticmethod
    def cancel():
        pass


T = TypeVar("T", bound="_AsyncSocket")


class _AsyncSocket(_Async, _zmq.Socket[Future]):
    # Warning: these class variables are only here to allow calling super().__setattr__.
    # They are overridden at instance initialization and are not shared across the class.
    _recv_futures = None
    _send_futures = None
    _state = 0
    _shadow_sock: _zmq.Socket
    _poller_class = _AsyncPoller
    _fd = None

    def __init__(
        self,
        context=None,
        socket_type=-1,
        io_loop=None,
        _from_socket: _zmq.Socket | None = None,
        **kwargs,
    ) -> None:
        if isinstance(context, _zmq.Socket):
            context, _from_socket = (None, context)
        if _from_socket is not None:
            super().__init__(shadow=_from_socket.underlying)  # type: ignore
            self._shadow_sock = _from_socket
        else:
            super().__init__(context, socket_type, **kwargs)  # type: ignore
            self._shadow_sock = _zmq.Socket.shadow(self.underlying)

        if io_loop is not None:
            warnings.warn(
                f"{self.__class__.__name__}(io_loop) argument is deprecated in pyzmq 22.2."
                " The currently active loop will always be used.",
                DeprecationWarning,
                stacklevel=3,
            )
        self._recv_futures = deque()
        self._send_futures = deque()
        self._state = 0
        self._fd = self._shadow_sock.FD

    @classmethod
    def from_socket(cls: type[T], socket: _zmq.Socket, io_loop: Any = None) -> T:
        """Create an async socket from an existing Socket"""
        return cls(_from_socket=socket, io_loop=io_loop)

    def close(self, linger: int | None = None) -> None:
        if not self.closed and self._fd is not None:
            event_list: list[_FutureEvent] = list(
                chain(self._recv_futures or [], self._send_futures or [])
            )
            for event in event_list:
                if not event.future.done():
                    try:
                        event.future.cancel()
                    except RuntimeError:
                        # RuntimeError may be called during teardown
                        pass
            self._clear_io_state()
        super().close(linger=linger)

    close.__doc__ = _zmq.Socket.close.__doc__

    def get(self, key):
        result = super().get(key)
        if key == EVENTS:
            self._schedule_remaining_events(result)
        return result

    get.__doc__ = _zmq.Socket.get.__doc__

    def recv_multipart(
        self, flags: int = 0, copy: bool = True, track: bool = False
    ) -> Awaitable[list[bytes] | list[_zmq.Frame]]:
        """Receive a complete multipart zmq message.

        Returns a Future whose result will be a multipart message.
        """
        return self._add_recv_event(
            'recv_multipart', kwargs=dict(flags=flags, copy=copy, track=track)
        )

    def recv(  # type: ignore
        self, flags: int = 0, copy: bool = True, track: bool = False
    ) -> Awaitable[bytes | _zmq.Frame]:
        """Receive a single zmq frame.

        Returns a Future, whose result will be the received frame.

        Recommend using recv_multipart instead.
        """
        return self._add_recv_event(
            'recv', kwargs=dict(flags=flags, copy=copy, track=track)
        )

    def recv_into(  # type: ignore
        self, buf, /, *, nbytes: int = 0, flags: int = 0
    ) -> Awaitable[int]:
        """Receive a single zmq frame into a pre-allocated buffer.

        Returns a Future, whose result will be the number of bytes received.
        """
        return self._add_recv_event(
            'recv_into', args=(buf,), kwargs=dict(nbytes=nbytes, flags=flags)
        )
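
    # Editorial usage sketch (the buffer size below is arbitrary):
    #
    #     buf = bytearray(4096)
    #     nbytes = await sock.recv_into(buf)
    #     payload = bytes(buf[:nbytes])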

    def send_multipart(  # type: ignore
        self, msg_parts: Any, flags: int = 0, copy: bool = True, track=False, **kwargs
    ) -> Awaitable[_zmq.MessageTracker | None]:
        """Send a complete multipart zmq message.

        Returns a Future that resolves when sending is complete.
        """
        kwargs['flags'] = flags
        kwargs['copy'] = copy
        kwargs['track'] = track
        return self._add_send_event('send_multipart', msg=msg_parts, kwargs=kwargs)

    def send(  # type: ignore
        self,
        data: Any,
        flags: int = 0,
        copy: bool = True,
        track: bool = False,
        **kwargs: Any,
    ) -> Awaitable[_zmq.MessageTracker | None]:
        """Send a single zmq frame.

        Returns a Future that resolves when sending is complete.

        Recommend using send_multipart instead.
        """
        kwargs['flags'] = flags
        kwargs['copy'] = copy
        kwargs['track'] = track
        kwargs.update(dict(flags=flags, copy=copy, track=track))
        return self._add_send_event('send', msg=data, kwargs=kwargs)

    def _deserialize(self, recvd, load):
        """Deserialize with Futures"""
        f = self._Future()

        def _chain(_):
            """Chain result through serialization to recvd"""
            if f.done():
                # chained future may be cancelled, which means nobody is going to get this result
                # if it's an error, that's no big deal (probably zmq.Again),
                # but if it's a successful recv, this is a dropped message!
                if not recvd.cancelled() and recvd.exception() is None:
                    warnings.warn(
                        # is there a useful stacklevel?
                        # ideally, it would point to where `f.cancel()` was called
                        f"Future {f} completed while awaiting {recvd}. A message has been dropped!",
                        RuntimeWarning,
                    )
                return
            if recvd.exception():
                f.set_exception(recvd.exception())
            else:
                buf = recvd.result()
                try:
                    loaded = load(buf)
                except Exception as e:
                    f.set_exception(e)
                else:
                    f.set_result(loaded)

        recvd.add_done_callback(_chain)

        def _chain_cancel(_):
            """Chain cancellation from f to recvd"""
            if recvd.done():
                return
            if f.cancelled():
                recvd.cancel()

        f.add_done_callback(_chain_cancel)

        return f

    def poll(self, timeout=None, flags=_zmq.POLLIN) -> Awaitable[int]:  # type: ignore
        """poll the socket for events

        returns a Future for the poll results.
        """

        if self.closed:
            raise _zmq.ZMQError(_zmq.ENOTSUP)

        p = self._poller_class()
        p.register(self, flags)
        poll_future = cast(Future, p.poll(timeout))

        future = self._Future()

        def unwrap_result(f):
            if future.done():
                return
            if poll_future.cancelled():
                try:
                    future.cancel()
                except RuntimeError:
                    # RuntimeError may be called during teardown
                    pass
                return
            if f.exception():
                future.set_exception(poll_future.exception())
            else:
                evts = dict(poll_future.result())
                future.set_result(evts.get(self, 0))

        if poll_future.done():
            # hook up result if already done
            unwrap_result(poll_future)
        else:
            poll_future.add_done_callback(unwrap_result)

        def cancel_poll(future):
            """Cancel underlying poll if request has been cancelled"""
            if not poll_future.done():
                try:
                    poll_future.cancel()
                except RuntimeError:
                    # RuntimeError may be called during teardown
                    pass

        future.add_done_callback(cancel_poll)

        return future

    def _add_timeout(self, future, timeout):
        """Add a timeout for a send or recv Future"""

        def future_timeout():
            if future.done():
                # future already resolved, do nothing
                return

            # raise EAGAIN
            future.set_exception(_zmq.Again())

        return self._call_later(timeout, future_timeout)

    def _call_later(self, delay, callback):
        """Schedule a function to be called later

        Override for different IOLoop implementations

        Tornado and asyncio happen to both have ioloop.call_later
        with the same signature.
        """
        return self._get_loop().call_later(delay, callback)

    @staticmethod
    def _remove_finished_future(future, event_list, event=None):
        """Make sure that futures are removed from the event list when they resolve

        Avoids delaying cleanup until the next send/recv event,
        which may never come.
        """
        # "future" instance is shared between sockets, but each socket has its own event list.
        if not event_list:
            return
        # only unconsumed events (e.g. cancelled calls)
        # will be present when this happens
        try:
            event_list.remove(event)
        except ValueError:
            # usually this will have been removed by being consumed
            return

    def _add_recv_event(
        self,
        kind: str,
        *,
        args: tuple | None = None,
        kwargs: dict[str, Any] | None = None,
        future: Future | None = None,
    ) -> Future:
        """Add a recv event, returning the corresponding Future"""
        f = future or self._Future()
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        if kind.startswith('recv') and kwargs.get('flags', 0) & _zmq.DONTWAIT:
            # short-circuit non-blocking calls
            recv = getattr(self._shadow_sock, kind)
            try:
                r = recv(*args, **kwargs)
            except Exception as e:
                f.set_exception(e)
            else:
                f.set_result(r)
            return f

        timer = _NoTimer
        if hasattr(_zmq, 'RCVTIMEO'):
            timeout_ms = self._shadow_sock.rcvtimeo
            if timeout_ms >= 0:
                timer = self._add_timeout(f, timeout_ms * 1e-3)

        # we add it to the list of futures before we add the timeout as the
        # timeout will remove the future from recv_futures to avoid leaks
        _future_event = _FutureEvent(
            f, kind, args=args, kwargs=kwargs, msg=None, timer=timer
        )
        self._recv_futures.append(_future_event)

        if self._shadow_sock.get(EVENTS) & POLLIN:
            # recv immediately, if we can
            self._handle_recv()
        if self._recv_futures and _future_event in self._recv_futures:
            # Don't let the Future sit in _recv_events after it's done
            # no need to register this if we've already been handled
            # (i.e. immediately-resolved recv)
            f.add_done_callback(
                partial(
                    self._remove_finished_future,
                    event_list=self._recv_futures,
                    event=_future_event,
                )
            )
            self._add_io_state(POLLIN)
        return f

    def _add_send_event(self, kind, msg=None, kwargs=None, future=None):
        """Add a send event, returning the corresponding Future"""
        f = future or self._Future()
        # attempt send with DONTWAIT if no futures are waiting
        # short-circuit for sends that will resolve immediately
        # only call if no send Futures are waiting
        if kind in ('send', 'send_multipart') and not self._send_futures:
            flags = kwargs.get('flags', 0)
            nowait_kwargs = kwargs.copy()
            nowait_kwargs['flags'] = flags | _zmq.DONTWAIT

            # short-circuit non-blocking calls
            send = getattr(self._shadow_sock, kind)
            # track if the send resolved or not
            # (EAGAIN when DONTWAIT was not requested means we should proceed with the async send)
            finish_early = True
            try:
                r = send(msg, **nowait_kwargs)
            except _zmq.Again as e:
                if flags & _zmq.DONTWAIT:
                    f.set_exception(e)
                else:
                    # EAGAIN raised and DONTWAIT not requested,
                    # proceed with async send
                    finish_early = False
            except Exception as e:
                f.set_exception(e)
            else:
                f.set_result(r)

            if finish_early:
                # short-circuit resolved, return finished Future
                # schedule wake for recv if there are any receivers waiting
                if self._recv_futures:
                    self._schedule_remaining_events()
                return f

        timer = _NoTimer
        if hasattr(_zmq, 'SNDTIMEO'):
            timeout_ms = self._shadow_sock.get(_zmq.SNDTIMEO)
            if timeout_ms >= 0:
                timer = self._add_timeout(f, timeout_ms * 1e-3)

        # we add it to the list of futures before we add the timeout as the
        # timeout will remove the future from send_futures to avoid leaks
        _future_event = _FutureEvent(
            f, kind, args=(), kwargs=kwargs, msg=msg, timer=timer
        )
        self._send_futures.append(_future_event)
        # Don't let the Future sit in _send_futures after it's done
        f.add_done_callback(
            partial(
                self._remove_finished_future,
                event_list=self._send_futures,
                event=_future_event,
            )
        )

        self._add_io_state(POLLOUT)
        return f

    def _handle_recv(self):
        """Handle recv events"""
        if not self._shadow_sock.get(EVENTS) & POLLIN:
            # event triggered, but state may have been changed between trigger and callback
            return
        f = None
        while self._recv_futures:
            f, kind, args, kwargs, _, timer = self._recv_futures.popleft()
            # skip any cancelled futures
            if f.done():
                f = None
            else:
                break

        if not self._recv_futures:
            self._drop_io_state(POLLIN)

        if f is None:
            return

        timer.cancel()

        if kind == 'poll':
            # on poll event, just signal ready, nothing else.
            f.set_result(None)
            return
        elif kind == 'recv_multipart':
            recv = self._shadow_sock.recv_multipart
        elif kind == 'recv':
            recv = self._shadow_sock.recv
        elif kind == 'recv_into':
            recv = self._shadow_sock.recv_into
        else:
            raise ValueError(f"Unhandled recv event type: {kind!r}")

        kwargs['flags'] |= _zmq.DONTWAIT
        try:
            result = recv(*args, **kwargs)
        except Exception as e:
            f.set_exception(e)
        else:
            f.set_result(result)

    def _handle_send(self):
        if not self._shadow_sock.get(EVENTS) & POLLOUT:
            # event triggered, but state may have been changed between trigger and callback
            return
        f = None
        while self._send_futures:
            f, kind, args, kwargs, msg, timer = self._send_futures.popleft()
            # skip any cancelled futures
            if f.done():
                f = None
            else:
                break

        if not self._send_futures:
            self._drop_io_state(POLLOUT)

        if f is None:
            return

        timer.cancel()

        if kind == 'poll':
            # on poll event, just signal ready, nothing else.
            f.set_result(None)
            return
        elif kind == 'send_multipart':
            send = self._shadow_sock.send_multipart
        elif kind == 'send':
            send = self._shadow_sock.send
        else:
            raise ValueError(f"Unhandled send event type: {kind!r}")

        kwargs['flags'] |= _zmq.DONTWAIT
        try:
            result = send(msg, **kwargs)
        except Exception as e:
            f.set_exception(e)
        else:
            f.set_result(result)

    # event masking from ZMQStream
    def _handle_events(self, fd=0, events=0):
        """Dispatch IO events to _handle_recv, etc."""
        if self._shadow_sock.closed:
            return

        zmq_events = self._shadow_sock.get(EVENTS)
        if zmq_events & _zmq.POLLIN:
            self._handle_recv()
        if zmq_events & _zmq.POLLOUT:
            self._handle_send()
        self._schedule_remaining_events()

    def _schedule_remaining_events(self, events=None):
        """Schedule a call to handle_events next loop iteration

        If there are still events to handle.
        """
        # edge-triggered handling
        # allow passing events in, in case this is triggered by retrieving events,
        # so we don't have to retrieve it twice.
        if self._state == 0:
            # not watching for anything, nothing to schedule
            return
        if events is None:
            events = self._shadow_sock.get(EVENTS)
        if events & self._state:
            self._call_later(0, self._handle_events)

    def _add_io_state(self, state):
        """Add io_state to poller."""
        if self._state != state:
            state = self._state = self._state | state
        self._update_handler(self._state)

    def _drop_io_state(self, state):
        """Stop poller from watching an io_state."""
        if self._state & state:
            self._state = self._state & (~state)
        self._update_handler(self._state)

    def _update_handler(self, state):
        """Update IOLoop handler with state.

        zmq FD is always read-only.
        """
        # ensure loop is registered and init_io has been called
        # if there are any events to watch for
        if state:
            self._get_loop()
        self._schedule_remaining_events()

    def _init_io_state(self, loop=None):
        """initialize the ioloop event handler"""
        if loop is None:
            loop = self._get_loop()
        loop.add_handler(self._shadow_sock, self._handle_events, self._READ)
        self._call_later(0, self._handle_events)

    def _clear_io_state(self):
        """unregister the ioloop event handler

        called once during close
        """
        fd = self._shadow_sock
        if self._shadow_sock.closed:
            fd = self._fd
        if self._current_loop is not None:
            self._current_loop.remove_handler(fd)
pyzmq-26.4.0/zmq/_future.pyi000066400000000000000000000063721477374370200160140ustar00rootroot00000000000000"""type annotations for async sockets"""

from __future__ import annotations

from asyncio import Future
from pickle import DEFAULT_PROTOCOL
from typing import Any, Awaitable, Literal, Sequence, TypeVar, overload

import zmq as _zmq

class _AsyncPoller(_zmq.Poller):
    _socket_class: type[_AsyncSocket]

    def poll(self, timeout=-1) -> Awaitable[list[tuple[Any, int]]]: ...  # type: ignore

T = TypeVar("T", bound="_AsyncSocket")

class _AsyncSocket(_zmq.Socket[Future]):
    @classmethod
    def from_socket(cls: type[T], socket: _zmq.Socket, io_loop: Any = None) -> T: ...
    def send(  # type: ignore
        self,
        data: Any,
        flags: int = 0,
        copy: bool = True,
        track: bool = False,
        routing_id: int | None = None,
        group: str | None = None,
    ) -> Awaitable[_zmq.MessageTracker | None]: ...
    @overload  # type: ignore
    def recv(self, flags: int = 0, *, track: bool = False) -> Awaitable[bytes]: ...
    @overload
    def recv(
        self, flags: int = 0, *, copy: Literal[True], track: bool = False
    ) -> Awaitable[bytes]: ...
    @overload
    def recv(
        self, flags: int = 0, *, copy: Literal[False], track: bool = False
    ) -> Awaitable[_zmq.Frame]: ...
    @overload
    def recv(
        self, flags: int = 0, copy: bool = True, track: bool = False
    ) -> Awaitable[bytes | _zmq.Frame]: ...
    def recv_into(  # type: ignore
        self, buffer: Any, /, *, nbytes: int = 0, flags: int = 0
    ) -> Awaitable[int]: ...
    def send_multipart(  # type: ignore
        self,
        msg_parts: Sequence,
        flags: int = 0,
        copy: bool = True,
        track: bool = False,
        routing_id: int | None = None,
        group: str | None = None,
    ) -> Awaitable[_zmq.MessageTracker | None]: ...
    @overload  # type: ignore
    def recv_multipart(
        self, flags: int = 0, *, track: bool = False
    ) -> Awaitable[list[bytes]]: ...
    @overload
    def recv_multipart(
        self, flags: int = 0, *, copy: Literal[True], track: bool = False
    ) -> Awaitable[list[bytes]]: ...
    @overload
    def recv_multipart(
        self, flags: int = 0, *, copy: Literal[False], track: bool = False
    ) -> Awaitable[list[_zmq.Frame]]: ...
    @overload
    def recv_multipart(
        self, flags: int = 0, copy: bool = True, track: bool = False
    ) -> Awaitable[list[bytes] | list[_zmq.Frame]]: ...

    # serialization wrappers

    def send_string(  # type: ignore
        self,
        u: str,
        flags: int = 0,
        copy: bool = True,
        *,
        encoding: str = 'utf-8',
        **kwargs,
    ) -> Awaitable[_zmq.Frame | None]: ...
    def recv_string(  # type: ignore
        self, flags: int = 0, encoding: str = 'utf-8'
    ) -> Awaitable[str]: ...
    def send_pyobj(  # type: ignore
        self, obj: Any, flags: int = 0, protocol: int = DEFAULT_PROTOCOL, **kwargs
    ) -> Awaitable[_zmq.Frame | None]: ...
    def recv_pyobj(self, flags: int = 0) -> Awaitable[Any]: ...  # type: ignore
    def send_json(  # type: ignore
        self, obj: Any, flags: int = 0, **kwargs
    ) -> Awaitable[_zmq.Frame | None]: ...
    def recv_json(self, flags: int = 0, **kwargs) -> Awaitable[Any]: ...  # type: ignore
    def poll(self, timeout=-1) -> Awaitable[list[tuple[Any, int]]]: ...  # type: ignore
pyzmq-26.4.0/zmq/_typing.py000066400000000000000000000003611477374370200156330ustar00rootroot00000000000000from __future__ import annotations

import sys

if sys.version_info >= (3, 10):
    from typing import TypeAlias
else:
    try:
        from typing_extensions import TypeAlias
    except ImportError:
        TypeAlias = type  # type: ignore
pyzmq-26.4.0/zmq/asyncio.py000066400000000000000000000145661477374370200156430ustar00rootroot00000000000000"""AsyncIO support for zmq

Requires asyncio and Python 3.
"""

# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

import asyncio
import selectors
import sys
import warnings
from asyncio import Future, SelectorEventLoop
from weakref import WeakKeyDictionary

import zmq as _zmq
from zmq import _future

# registry of asyncio loop : selector thread
_selectors: WeakKeyDictionary = WeakKeyDictionary()


class ProactorSelectorThreadWarning(RuntimeWarning):
    """Warning class for notifying about the extra thread spawned by tornado

    We automatically support proactor via tornado's AddThreadSelectorEventLoop"""


def _get_selector_windows(
    asyncio_loop,
) -> asyncio.AbstractEventLoop:
    """Get selector-compatible loop

    Returns an object with ``add_reader`` family of methods,
    either the loop itself or a SelectorThread instance.

    Workaround Windows proactor removal of
    *reader methods, which we need for zmq sockets.
    """

    if asyncio_loop in _selectors:
        return _selectors[asyncio_loop]

    # detect add_reader instead of checking for proactor?
    if hasattr(asyncio, "ProactorEventLoop") and isinstance(
        asyncio_loop,
        asyncio.ProactorEventLoop,  # type: ignore
    ):
        try:
            from tornado.platform.asyncio import AddThreadSelectorEventLoop
        except ImportError:
            raise RuntimeError(
                "Proactor event loop does not implement add_reader family of methods required for zmq."
                " zmq will work with proactor if tornado >= 6.1 can be found."
                " Use `asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())`"
                " or install 'tornado>=6.1' to avoid this error."
            )

        warnings.warn(
            "Proactor event loop does not implement add_reader family of methods required for zmq."
            " Registering an additional selector thread for add_reader support via tornado."
            " Use `asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())`"
            " to avoid this warning.",
            RuntimeWarning,
            # stacklevel 5 matches most likely zmq.asyncio.Context().socket()
            stacklevel=5,
        )

        selector_loop = _selectors[asyncio_loop] = AddThreadSelectorEventLoop(
            asyncio_loop
        )  # type: ignore

        # patch loop.close to also close the selector thread
        loop_close = asyncio_loop.close

        def _close_selector_and_loop():
            # restore original before calling selector.close,
            # which in turn calls eventloop.close!
            asyncio_loop.close = loop_close
            _selectors.pop(asyncio_loop, None)
            selector_loop.close()

        asyncio_loop.close = _close_selector_and_loop  # type: ignore # mypy bug - assign a function to method
        return selector_loop
    else:
        return asyncio_loop


def _get_selector_noop(loop) -> asyncio.AbstractEventLoop:
    """no-op on non-Windows"""
    return loop


if sys.platform == "win32":
    _get_selector = _get_selector_windows
else:
    _get_selector = _get_selector_noop


class _AsyncIO:
    _Future = Future
    _WRITE = selectors.EVENT_WRITE
    _READ = selectors.EVENT_READ

    def _default_loop(self):
        try:
            return asyncio.get_running_loop()
        except RuntimeError:
            warnings.warn(
                "No running event loop. zmq.asyncio should be used from within an asyncio loop.",
                RuntimeWarning,
                stacklevel=4,
            )
        # get_event_loop deprecated in 3.10:
        return asyncio.get_event_loop()


class Poller(_AsyncIO, _future._AsyncPoller):
    """Poller returning asyncio.Future for poll results."""

    def _watch_raw_socket(self, loop, socket, evt, f):
        """Schedule callback for a raw socket"""
        selector = _get_selector(loop)
        if evt & self._READ:
            selector.add_reader(socket, lambda *args: f())
        if evt & self._WRITE:
            selector.add_writer(socket, lambda *args: f())

    def _unwatch_raw_sockets(self, loop, *sockets):
        """Unschedule callback for a raw socket"""
        selector = _get_selector(loop)
        for socket in sockets:
            selector.remove_reader(socket)
            selector.remove_writer(socket)


class Socket(_AsyncIO, _future._AsyncSocket):
    """Socket returning asyncio Futures for send/recv/poll methods."""

    _poller_class = Poller

    def _get_selector(self, io_loop=None):
        if io_loop is None:
            io_loop = self._get_loop()
        return _get_selector(io_loop)

    def _init_io_state(self, io_loop=None):
        """initialize the ioloop event handler"""
        self._get_selector(io_loop).add_reader(
            self._fd, lambda: self._handle_events(0, 0)
        )

    def _clear_io_state(self):
        """clear any ioloop event handler

        called once at close
        """
        loop = self._current_loop
        if loop and not loop.is_closed() and self._fd != -1:
            self._get_selector(loop).remove_reader(self._fd)


Poller._socket_class = Socket


class Context(_zmq.Context[Socket]):
    """Context for creating asyncio-compatible Sockets"""

    _socket_class = Socket

    # avoid sharing instance with base Context class
    _instance = None

    # overload with no changes to satisfy pyright
    def __init__(
        self: Context,
        io_threads: int | _zmq.Context = 1,
        shadow: _zmq.Context | int = 0,
    ) -> None:
        super().__init__(io_threads, shadow)  # type: ignore
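

async def _example_roundtrip() -> bytes:
    """Editorial sketch: a minimal PUSH/PULL round-trip with asyncio sockets.

    The inproc endpoint is arbitrary; run with ``asyncio.run(_example_roundtrip())``.
    """
    ctx = Context()
    push = ctx.socket(_zmq.PUSH)
    pull = ctx.socket(_zmq.PULL)
    pull.bind("inproc://example")
    push.connect("inproc://example")
    await push.send(b"hello")  # send() returns a Future; awaiting completes the send
    msg = await pull.recv()  # recv() resolves with the received bytes
    push.close()
    pull.close()
    ctx.term()
    return msg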


class ZMQEventLoop(SelectorEventLoop):
    """DEPRECATED: AsyncIO eventloop using zmq_poll.

    pyzmq sockets should work with any asyncio event loop as of pyzmq 17.
    """

    def __init__(self, selector=None):
        _deprecated()
        return super().__init__(selector)


_loop = None


def _deprecated():
    if _deprecated.called:  # type: ignore
        return
    _deprecated.called = True  # type: ignore

    warnings.warn(
        "ZMQEventLoop and zmq.asyncio.install are deprecated in pyzmq 17. Special eventloop integration is no longer needed.",
        DeprecationWarning,
        stacklevel=3,
    )


_deprecated.called = False  # type: ignore


def install():
    """DEPRECATED: No longer needed in pyzmq 17"""
    _deprecated()


__all__ = [
    "Context",
    "Socket",
    "Poller",
    "ZMQEventLoop",
    "install",
]
pyzmq-26.4.0/zmq/auth/000077500000000000000000000000001477374370200145515ustar00rootroot00000000000000pyzmq-26.4.0/zmq/auth/__init__.py000066400000000000000000000005321477374370200166620ustar00rootroot00000000000000"""Utilities for ZAP authentication.

To run authentication in a background thread, see :mod:`zmq.auth.thread`.
For integration with the asyncio event loop, see :mod:`zmq.auth.asyncio`.

Authentication examples are provided in the pyzmq codebase, under
`/examples/security/`.

.. versionadded:: 14.1
"""

from .base import *
from .certs import *
pyzmq-26.4.0/zmq/auth/asyncio.py000066400000000000000000000034071477374370200165740ustar00rootroot00000000000000"""ZAP Authenticator integrated with the asyncio IO loop.

.. versionadded:: 15.2
"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import asyncio
import warnings
from typing import Any, Optional

import zmq
from zmq.asyncio import Poller

from .base import Authenticator


class AsyncioAuthenticator(Authenticator):
    """ZAP authentication for use in the asyncio IO loop"""

    __poller: Optional[Poller]
    __task: Any

    def __init__(
        self,
        context: Optional["zmq.Context"] = None,
        loop: Any = None,
        encoding: str = 'utf-8',
        log: Any = None,
    ):
        super().__init__(context, encoding, log)
        if loop is not None:
            warnings.warn(
                f"{self.__class__.__name__}(loop) is deprecated and ignored",
                DeprecationWarning,
                stacklevel=2,
            )
        self.__poller = None
        self.__task = None

    async def __handle_zap(self) -> None:
        while self.__poller is not None:
            events = await self.__poller.poll()
            if self.zap_socket in dict(events):
                msg = self.zap_socket.recv_multipart()
                await self.handle_zap_message(msg)

    def start(self) -> None:
        """Start ZAP authentication"""
        super().start()
        self.__poller = Poller()
        self.__poller.register(self.zap_socket, zmq.POLLIN)
        self.__task = asyncio.ensure_future(self.__handle_zap())

    def stop(self) -> None:
        """Stop ZAP authentication"""
        if self.__task:
            self.__task.cancel()
        if self.__poller:
            self.__poller.unregister(self.zap_socket)
            self.__poller = None
        super().stop()


__all__ = ["AsyncioAuthenticator"]
pyzmq-26.4.0/zmq/auth/base.py000066400000000000000000000377211477374370200160470ustar00rootroot00000000000000"""Base implementation of 0MQ authentication."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import logging
import os
from typing import Any, Awaitable, Dict, List, Optional, Set, Tuple, Union

import zmq
from zmq.error import _check_version
from zmq.utils import z85

from .certs import load_certificates

CURVE_ALLOW_ANY = '*'
VERSION = b'1.0'


class Authenticator:
    """Implementation of ZAP authentication for zmq connections.

    This authenticator class does not register with an event loop. As a result,
    you will need to manually call `handle_zap_message`::

        auth = zmq.auth.Authenticator()
        auth.allow("127.0.0.1")
        auth.start()
        while True:
            await auth.handle_zap_message(auth.zap_socket.recv_multipart())

    Alternatively, you can register `auth.zap_socket` with a poller.

    Since many users will want to run ZAP in a way that does not block the
    main thread, other authenticator classes (such as ThreadAuthenticator in
    :mod:`zmq.auth.thread`) are provided.

    Note:

    - libzmq provides four authentication mechanisms that the Authenticator can see
      (authenticated NULL, PLAIN, CURVE, and GSSAPI), plus the default NULL mode,
      which the Authenticator does not see.
    - until you add policies, all incoming NULL connections are allowed
      (classic ZeroMQ behavior), and all PLAIN and CURVE connections are denied.
    - GSSAPI requires no configuration.
    """

    context: "zmq.Context"
    encoding: str
    allow_any: bool
    credentials_providers: Dict[str, Any]
    zap_socket: "zmq.Socket"
    _allowed: Set[str]
    _denied: Set[str]
    passwords: Dict[str, Dict[str, str]]
    certs: Dict[str, Dict[bytes, Any]]
    log: Any

    def __init__(
        self,
        context: Optional["zmq.Context"] = None,
        encoding: str = 'utf-8',
        log: Any = None,
    ):
        _check_version((4, 0), "security")
        self.context = context or zmq.Context.instance()
        self.encoding = encoding
        self.allow_any = False
        self.credentials_providers = {}
        self.zap_socket = None  # type: ignore
        self._allowed = set()
        self._denied = set()
        # passwords is a dict keyed by domain and contains values
        # of dicts with username:password pairs.
        self.passwords = {}
        # certs is dict keyed by domain and contains values
        # of dicts keyed by the public keys from the specified location.
        self.certs = {}
        self.log = log or logging.getLogger('zmq.auth')

    def start(self) -> None:
        """Create and bind the ZAP socket"""
        self.zap_socket = self.context.socket(zmq.REP, socket_class=zmq.Socket)
        self.zap_socket.linger = 1
        self.zap_socket.bind("inproc://zeromq.zap.01")
        self.log.debug("Starting")

    def stop(self) -> None:
        """Close the ZAP socket"""
        if self.zap_socket:
            self.zap_socket.close()
        self.zap_socket = None  # type: ignore

    def allow(self, *addresses: str) -> None:
        """Allow IP address(es).

        Connections from addresses not explicitly allowed will be rejected.

        - For NULL, all clients from this address will be accepted.
        - For real auth setups, they will be allowed to continue with authentication.

        allow is mutually exclusive with deny.
        """
        if self._denied:
            raise ValueError("Only use allow or deny, not both")
        self.log.debug("Allowing %s", ','.join(addresses))
        self._allowed.update(addresses)

    def deny(self, *addresses: str) -> None:
        """Deny IP address(es).

        Addresses not explicitly denied will be allowed to continue with authentication.

        deny is mutually exclusive with allow.
        """
        if self._allowed:
            raise ValueError("Only use a allow or deny, not both")
        self.log.debug("Denying %s", ','.join(addresses))
        self._denied.update(addresses)

    def configure_plain(
        self, domain: str = '*', passwords: Optional[Dict[str, str]] = None
    ) -> None:
        """Configure PLAIN authentication for a given domain.

        PLAIN authentication uses a plain-text mapping of username:password pairs.
        To cover all domains, use "*".
        Call configure_plain again at any time to update the passwords for a domain.
        """
        if passwords:
            self.passwords[domain] = passwords
        self.log.debug("Configure plain: %s", domain)

    def configure_curve(
        self, domain: str = '*', location: Union[str, os.PathLike] = "."
    ) -> None:
        """Configure CURVE authentication for a given domain.

        CURVE authentication uses a directory that holds all public client certificates,
        i.e. their public keys.

        To cover all domains, use "*".

        You can add and remove certificates in that directory at any time. configure_curve must be called
        every time certificates are added or removed, in order to update the Authenticator's state

        To allow all client keys without checking, specify CURVE_ALLOW_ANY for the location.
        """
        # If location is CURVE_ALLOW_ANY then allow all clients. Otherwise
        # treat location as a directory that holds the certificates.
        self.log.debug("Configure curve: %s[%s]", domain, location)
        if location == CURVE_ALLOW_ANY:
            self.allow_any = True
        else:
            self.allow_any = False
            try:
                self.certs[domain] = load_certificates(location)
            except Exception as e:
                self.log.error("Failed to load CURVE certs from %s: %s", location, e)

    def configure_curve_callback(
        self, domain: str = '*', credentials_provider: Any = None
    ) -> None:
        """Configure CURVE authentication for a given domain.

        CURVE authentication using a callback function validating
        the client public key according to a custom mechanism, e.g. checking the
        key against records in a db. credentials_provider is an object of a class which
        implements a callback method accepting two parameters (domain and key), e.g.::

            class CredentialsProvider(object):

                def __init__(self):
                    ...e.g. db connection

                def callback(self, domain, key):
                    valid = ...lookup key and/or domain in db
                    if valid:
                        logging.info('Authorizing: {0}, {1}'.format(domain, key))
                        return True
                    else:
                        logging.warning('NOT Authorizing: {0}, {1}'.format(domain, key))
                        return False

        To cover all domains, use "*".
        """

        self.allow_any = False

        if credentials_provider is not None:
            self.credentials_providers[domain] = credentials_provider
        else:
            self.log.error("None credentials_provider provided for domain:%s", domain)

    def curve_user_id(self, client_public_key: bytes) -> str:
        """Return the User-Id corresponding to a CURVE client's public key

        Default implementation uses the z85-encoding of the public key.

        Override to define a custom mapping of public key : user-id

        This is only called on successful authentication.

        Parameters
        ----------
        client_public_key: bytes
            The client public key used for the given message

        Returns
        -------
        user_id: unicode
            The user ID as text
        """
        return z85.encode(client_public_key).decode('ascii')

    def configure_gssapi(
        self, domain: str = '*', location: Optional[str] = None
    ) -> None:
        """Configure GSSAPI authentication

        Currently this is a no-op because there is nothing to configure with GSSAPI.
        """

    async def handle_zap_message(self, msg: List[bytes]):
        """Perform ZAP authentication"""
        if len(msg) < 6:
            self.log.error("Invalid ZAP message, not enough frames: %r", msg)
            if len(msg) < 2:
                self.log.error("Not enough information to reply")
            else:
                self._send_zap_reply(msg[1], b"400", b"Not enough frames")
            return

        version, request_id, domain, address, identity, mechanism = msg[:6]
        credentials = msg[6:]

        domain = domain.decode(self.encoding, 'replace')
        address = address.decode(self.encoding, 'replace')

        if version != VERSION:
            self.log.error("Invalid ZAP version: %r", msg)
            self._send_zap_reply(request_id, b"400", b"Invalid version")
            return

        self.log.debug(
            "version: %r, request_id: %r, domain: %r,"
            " address: %r, identity: %r, mechanism: %r",
            version,
            request_id,
            domain,
            address,
            identity,
            mechanism,
        )

        # Is the address explicitly allowed or denied?
        allowed = False
        denied = False
        reason = b"NO ACCESS"

        if self._allowed:
            if address in self._allowed:
                allowed = True
                self.log.debug("PASSED (allowed) address=%s", address)
            else:
                denied = True
                reason = b"Address not allowed"
                self.log.debug("DENIED (not allowed) address=%s", address)

        elif self._denied:
            if address in self._denied:
                denied = True
                reason = b"Address denied"
                self.log.debug("DENIED (denied) address=%s", address)
            else:
                allowed = True
                self.log.debug("PASSED (not denied) address=%s", address)

        # Perform authentication mechanism-specific checks if necessary
        username = "anonymous"
        if not denied:
            if mechanism == b'NULL' and not allowed:
                # For NULL, we allow if the address wasn't denied
                self.log.debug("ALLOWED (NULL)")
                allowed = True

            elif mechanism == b'PLAIN':
                # For PLAIN, even an allowed address must authenticate
                if len(credentials) != 2:
                    self.log.error("Invalid PLAIN credentials: %r", credentials)
                    self._send_zap_reply(request_id, b"400", b"Invalid credentials")
                    return
                username, password = (
                    c.decode(self.encoding, 'replace') for c in credentials
                )
                allowed, reason = self._authenticate_plain(domain, username, password)

            elif mechanism == b'CURVE':
                # For CURVE, even an allowed address must authenticate
                if len(credentials) != 1:
                    self.log.error("Invalid CURVE credentials: %r", credentials)
                    self._send_zap_reply(request_id, b"400", b"Invalid credentials")
                    return
                key = credentials[0]
                allowed, reason = await self._authenticate_curve(domain, key)
                if allowed:
                    username = self.curve_user_id(key)

            elif mechanism == b'GSSAPI':
                if len(credentials) != 1:
                    self.log.error("Invalid GSSAPI credentials: %r", credentials)
                    self._send_zap_reply(request_id, b"400", b"Invalid credentials")
                    return
                # use principal as user-id for now
                principal = credentials[0]
                username = principal.decode("utf8")
                allowed, reason = self._authenticate_gssapi(domain, principal)

        if allowed:
            self._send_zap_reply(request_id, b"200", b"OK", username)
        else:
            self._send_zap_reply(request_id, b"400", reason)

    def _authenticate_plain(
        self, domain: str, username: str, password: str
    ) -> Tuple[bool, bytes]:
        """PLAIN ZAP authentication"""
        allowed = False
        reason = b""
        if self.passwords:
            # If no domain is specified then use the default domain
            if not domain:
                domain = '*'

            if domain in self.passwords:
                if username in self.passwords[domain]:
                    if password == self.passwords[domain][username]:
                        allowed = True
                    else:
                        reason = b"Invalid password"
                else:
                    reason = b"Invalid username"
            else:
                reason = b"Invalid domain"

            if allowed:
                self.log.debug(
                    "ALLOWED (PLAIN) domain=%s username=%s password=%s",
                    domain,
                    username,
                    password,
                )
            else:
                self.log.debug("DENIED %s", reason)

        else:
            reason = b"No passwords defined"
            self.log.debug("DENIED (PLAIN) %s", reason)

        return allowed, reason

    async def _authenticate_curve(
        self, domain: str, client_key: bytes
    ) -> Tuple[bool, bytes]:
        """CURVE ZAP authentication"""
        allowed = False
        reason = b""
        if self.allow_any:
            allowed = True
            reason = b"OK"
            self.log.debug("ALLOWED (CURVE allow any client)")
        elif self.credentials_providers != {}:
            # If no explicit domain is specified then use the default domain
            if not domain:
                domain = '*'

            if domain in self.credentials_providers:
                z85_client_key = z85.encode(client_key)
                # Callback to check whether the key is allowed
                r = self.credentials_providers[domain].callback(domain, z85_client_key)
                if isinstance(r, Awaitable):
                    r = await r
                if r:
                    allowed = True
                    reason = b"OK"
                else:
                    reason = b"Unknown key"

                status = "ALLOWED" if allowed else "DENIED"
                self.log.debug(
                    "%s (CURVE auth_callback) domain=%s client_key=%s",
                    status,
                    domain,
                    z85_client_key,
                )
            else:
                reason = b"Unknown domain"
        else:
            # If no explicit domain is specified then use the default domain
            if not domain:
                domain = '*'

            if domain in self.certs:
                # The certs dict stores keys in z85 format, convert binary key to z85 bytes
                z85_client_key = z85.encode(client_key)
                if self.certs[domain].get(z85_client_key):
                    allowed = True
                    reason = b"OK"
                else:
                    reason = b"Unknown key"

                status = "ALLOWED" if allowed else "DENIED"
                self.log.debug(
                    "%s (CURVE) domain=%s client_key=%s",
                    status,
                    domain,
                    z85_client_key,
                )
            else:
                reason = b"Unknown domain"

        return allowed, reason

    def _authenticate_gssapi(self, domain: str, principal: bytes) -> Tuple[bool, bytes]:
        """Nothing to do for GSSAPI, which has already been handled by an external service."""
        self.log.debug("ALLOWED (GSSAPI) domain=%s principal=%s", domain, principal)
        return True, b'OK'

    def _send_zap_reply(
        self,
        request_id: bytes,
        status_code: bytes,
        status_text: bytes,
        user_id: str = 'anonymous',
    ) -> None:
        """Send a ZAP reply to finish the authentication."""
        user_id = user_id if status_code == b'200' else b''
        if isinstance(user_id, str):
            user_id = user_id.encode(self.encoding, 'replace')
        metadata = b''  # not currently used
        self.log.debug("ZAP reply code=%s text=%s", status_code, status_text)
        reply = [VERSION, request_id, status_code, status_text, user_id, metadata]
        self.zap_socket.send_multipart(reply)


__all__ = ['Authenticator', 'CURVE_ALLOW_ANY']
pyzmq-26.4.0/zmq/auth/certs.py000066400000000000000000000103511477374370200162430ustar00rootroot00000000000000"""0MQ authentication related functions and classes."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import datetime
import glob
import os
from typing import Dict, Optional, Tuple, Union

import zmq

_cert_secret_banner = """#   ****  Generated on {0} by pyzmq  ****
#   ZeroMQ CURVE **Secret** Certificate
#   DO NOT PROVIDE THIS FILE TO OTHER USERS nor change its permissions.

"""


_cert_public_banner = """#   ****  Generated on {0} by pyzmq  ****
#   ZeroMQ CURVE Public Certificate
#   Exchange securely, or use a secure mechanism to verify the contents
#   of this file after exchange. Store public certificates in your home
#   directory, in the .curve subdirectory.

"""


def _write_key_file(
    key_filename: Union[str, os.PathLike],
    banner: str,
    public_key: Union[str, bytes],
    secret_key: Optional[Union[str, bytes]] = None,
    metadata: Optional[Dict[str, str]] = None,
    encoding: str = 'utf-8',
) -> None:
    """Create a certificate file"""
    if isinstance(public_key, bytes):
        public_key = public_key.decode(encoding)
    if isinstance(secret_key, bytes):
        secret_key = secret_key.decode(encoding)
    with open(key_filename, 'w', encoding='utf8') as f:
        f.write(banner.format(datetime.datetime.now()))

        f.write('metadata\n')
        if metadata:
            for k, v in metadata.items():
                if isinstance(k, bytes):
                    k = k.decode(encoding)
                if isinstance(v, bytes):
                    v = v.decode(encoding)
                f.write(f"    {k} = {v}\n")

        f.write('curve\n')
        f.write(f"    public-key = \"{public_key}\"\n")

        if secret_key:
            f.write(f"    secret-key = \"{secret_key}\"\n")


def create_certificates(
    key_dir: Union[str, os.PathLike],
    name: str,
    metadata: Optional[Dict[str, str]] = None,
) -> Tuple[str, str]:
    """Create zmq certificates.

    Returns the file paths to the public and secret certificate files.
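
    A minimal sketch (the directory and name are illustrative)::

        from zmq.auth.certs import create_certificates, load_certificate

        # "certs_dir" must already exist
        public_file, secret_file = create_certificates("certs_dir", "server")
        public_key, secret_key = load_certificate(secret_file)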
    """
    public_key, secret_key = zmq.curve_keypair()
    base_filename = os.path.join(key_dir, name)
    secret_key_file = f"{base_filename}.key_secret"
    public_key_file = f"{base_filename}.key"
    now = datetime.datetime.now()

    _write_key_file(public_key_file, _cert_public_banner.format(now), public_key)

    _write_key_file(
        secret_key_file,
        _cert_secret_banner.format(now),
        public_key,
        secret_key=secret_key,
        metadata=metadata,
    )

    return public_key_file, secret_key_file


def load_certificate(
    filename: Union[str, os.PathLike],
) -> Tuple[bytes, Optional[bytes]]:
    """Load public and secret key from a zmq certificate.

    Returns (public_key, secret_key)

    If the certificate file only contains the public key,
    secret_key will be None.

    If there is no public key found in the file, ValueError will be raised.
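
    A minimal sketch (the filename is illustrative)::

        public_key, secret_key = load_certificate("server.key_secret")
        # for a public-only ".key" file, secret_key is None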
    """
    public_key = None
    secret_key = None
    if not os.path.exists(filename):
        raise OSError(f"Invalid certificate file: {filename}")

    with open(filename, 'rb') as f:
        for line in f:
            line = line.strip()
            if line.startswith(b'#'):
                continue
            if line.startswith(b'public-key'):
                public_key = line.split(b"=", 1)[1].strip(b' \t\'"')
            if line.startswith(b'secret-key'):
                secret_key = line.split(b"=", 1)[1].strip(b' \t\'"')
            if public_key and secret_key:
                break

    if public_key is None:
        raise ValueError(f"No public key found in {filename}")

    return public_key, secret_key


def load_certificates(directory: Union[str, os.PathLike] = '.') -> Dict[bytes, bool]:
    """Load public keys from all certificates in a directory"""
    certs = {}
    if not os.path.isdir(directory):
        raise OSError(f"Invalid certificate directory: {directory}")
    # Follow czmq pattern of public keys stored in *.key files.
    glob_string = os.path.join(directory, "*.key")

    cert_files = glob.glob(glob_string)
    for cert_file in cert_files:
        public_key, _ = load_certificate(cert_file)
        if public_key:
            certs[public_key] = True
    return certs


__all__ = ['create_certificates', 'load_certificate', 'load_certificates']
pyzmq-26.4.0/zmq/auth/ioloop.py000066400000000000000000000024221477374370200164240ustar00rootroot00000000000000"""ZAP Authenticator integrated with the tornado IOLoop.

.. versionadded:: 14.1
.. deprecated:: 25
    Use zmq.auth.asyncio.AsyncioAuthenticator instead.
    Since tornado runs on asyncio, the asyncio authenticator
    offers the same functionality in tornado.
"""

import warnings

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from typing import Any, Optional

import zmq

from .asyncio import AsyncioAuthenticator

warnings.warn(
    "zmq.auth.ioloop.IOLoopAuthenticator is deprecated. Use zmq.auth.asyncio.AsyncioAuthenticator",
    DeprecationWarning,
    stacklevel=2,
)


class IOLoopAuthenticator(AsyncioAuthenticator):
    """ZAP authentication for use in the tornado IOLoop"""

    def __init__(
        self,
        context: Optional["zmq.Context"] = None,
        encoding: str = 'utf-8',
        log: Any = None,
        io_loop: Any = None,
    ):
        loop = None
        if io_loop is not None:
            warnings.warn(
                f"{self.__class__.__name__}(io_loop) is deprecated and ignored",
                DeprecationWarning,
                stacklevel=2,
            )
            loop = io_loop.asyncio_loop
        super().__init__(context=context, encoding=encoding, log=log, loop=loop)


__all__ = ['IOLoopAuthenticator']
pyzmq-26.4.0/zmq/auth/thread.py000066400000000000000000000100071477374370200163700ustar00rootroot00000000000000"""ZAP Authenticator in a Python Thread.

.. versionadded:: 14.1
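
A minimal usage sketch (the allowed address and certificate location are
illustrative; ``allow`` and ``configure_curve`` are inherited from
``zmq.auth.Authenticator``)::

    import zmq
    import zmq.auth
    from zmq.auth.thread import ThreadAuthenticator

    ctx = zmq.Context.instance()
    auth = ThreadAuthenticator(ctx)
    auth.start()
    auth.allow('127.0.0.1')
    auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY)
    # ... create and use CURVE-enabled server sockets here ...
    auth.stop()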
"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import asyncio
from threading import Event, Thread
from typing import Any, List, Optional

import zmq
import zmq.asyncio

from .base import Authenticator


class AuthenticationThread(Thread):
    """A Thread for running a zmq Authenticator

    This is run in the background by ThreadAuthenticator
    """

    pipe: zmq.Socket
    loop: asyncio.AbstractEventLoop
    authenticator: Authenticator
    poller: Optional[zmq.asyncio.Poller] = None

    def __init__(
        self,
        authenticator: Authenticator,
        pipe: zmq.Socket,
    ) -> None:
        super().__init__(daemon=True)
        self.authenticator = authenticator
        self.log = authenticator.log
        self.pipe = pipe

        self.started = Event()

    def run(self) -> None:
        """Start the Authentication Agent thread task"""

        loop = asyncio.new_event_loop()
        try:
            loop.run_until_complete(self._run())
        finally:
            if self.pipe:
                self.pipe.close()
                self.pipe = None  # type: ignore

            loop.close()

    async def _run(self):
        self.poller = zmq.asyncio.Poller()
        self.poller.register(self.pipe, zmq.POLLIN)
        self.poller.register(self.authenticator.zap_socket, zmq.POLLIN)
        self.started.set()

        while True:
            events = dict(await self.poller.poll())
            if self.pipe in events:
                msg = self.pipe.recv_multipart()
                if self._handle_pipe_message(msg):
                    return
            if self.authenticator.zap_socket in events:
                msg = self.authenticator.zap_socket.recv_multipart()
                await self.authenticator.handle_zap_message(msg)

    def _handle_pipe_message(self, msg: List[bytes]) -> bool:
        command = msg[0]
        self.log.debug("auth received API command %r", command)

        if command == b'TERMINATE':
            return True

        else:
            self.log.error("Invalid auth command from API: %r", command)
            self.pipe.send(b'ERROR')

        return False


class ThreadAuthenticator(Authenticator):
    """Run ZAP authentication in a background thread"""

    pipe: "zmq.Socket"
    pipe_endpoint: str = ''
    thread: AuthenticationThread

    def __init__(
        self,
        context: Optional["zmq.Context"] = None,
        encoding: str = 'utf-8',
        log: Any = None,
    ):
        super().__init__(context=context, encoding=encoding, log=log)
        self.pipe = None  # type: ignore
        self.pipe_endpoint = f"inproc://{id(self)}.inproc"
        self.thread = None  # type: ignore

    def start(self) -> None:
        """Start the authentication thread"""
        # start the Authenticator
        super().start()

        # create a socket pair to communicate with auth thread.
        self.pipe = self.context.socket(zmq.PAIR, socket_class=zmq.Socket)
        self.pipe.linger = 1
        self.pipe.bind(self.pipe_endpoint)
        thread_pipe = self.context.socket(zmq.PAIR, socket_class=zmq.Socket)
        thread_pipe.linger = 1
        thread_pipe.connect(self.pipe_endpoint)
        self.thread = AuthenticationThread(authenticator=self, pipe=thread_pipe)
        self.thread.start()
        if not self.thread.started.wait(timeout=10):
            raise RuntimeError("Authenticator thread failed to start")

    def stop(self) -> None:
        """Stop the authentication thread"""
        if self.pipe:
            self.pipe.send(b'TERMINATE')
            if self.is_alive():
                self.thread.join()
            self.thread = None  # type: ignore
            self.pipe.close()
            self.pipe = None  # type: ignore
        super().stop()

    def is_alive(self) -> bool:
        """Is the ZAP thread currently running?"""
        return bool(self.thread and self.thread.is_alive())

    def __del__(self) -> None:
        self.stop()


__all__ = ['ThreadAuthenticator']
pyzmq-26.4.0/zmq/backend/000077500000000000000000000000001477374370200151775ustar00rootroot00000000000000pyzmq-26.4.0/zmq/backend/__init__.py000066400000000000000000000016541477374370200173160ustar00rootroot00000000000000"""Import basic exposure of libzmq C API as a backend"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import os
import platform

from .select import public_api, select_backend

if 'PYZMQ_BACKEND' in os.environ:
    backend = os.environ['PYZMQ_BACKEND']
    if backend in ('cython', 'cffi'):
        backend = f'zmq.backend.{backend}'
    _ns = select_backend(backend)
else:
    # default to cython, fallback to cffi
    # (reverse on PyPy)
    if platform.python_implementation() == 'PyPy':
        first, second = ('zmq.backend.cffi', 'zmq.backend.cython')
    else:
        first, second = ('zmq.backend.cython', 'zmq.backend.cffi')

    try:
        _ns = select_backend(first)
    except Exception as original_error:
        try:
            _ns = select_backend(second)
        except ImportError:
            raise original_error from None

globals().update(_ns)

__all__ = public_api
pyzmq-26.4.0/zmq/backend/__init__.pyi000066400000000000000000000064521477374370200174700ustar00rootroot00000000000000from typing import Any, Callable, List, Optional, Set, Tuple, TypeVar, Union, overload

from typing_extensions import Literal

import zmq

from .select import select_backend

# avoid collision in Frame.bytes
_bytestr = bytes

T = TypeVar("T")

class Frame:
    buffer: Any
    bytes: bytes
    more: bool
    tracker: Any
    def __init__(
        self,
        data: Any = None,
        track: bool = False,
        copy: bool | None = None,
        copy_threshold: int | None = None,
    ): ...
    def copy_fast(self: T) -> T: ...
    def get(self, option: int) -> int | _bytestr | str: ...
    def set(self, option: int, value: int | _bytestr | str) -> None: ...

class Socket:
    underlying: int
    context: zmq.Context
    copy_threshold: int

    # specific option types
    FD: int

    def __init__(
        self,
        context: Context | None = None,
        socket_type: int = 0,
        shadow: int = 0,
        copy_threshold: int | None = zmq.COPY_THRESHOLD,
    ) -> None: ...
    def close(self, linger: int | None = ...) -> None: ...
    def get(self, option: int) -> int | bytes | str: ...
    def set(self, option: int, value: int | bytes | str) -> None: ...
    def connect(self, url: str): ...
    def disconnect(self, url: str) -> None: ...
    def bind(self, url: str): ...
    def unbind(self, url: str) -> None: ...
    def send(
        self,
        data: Any,
        flags: int = ...,
        copy: bool = ...,
        track: bool = ...,
    ) -> zmq.MessageTracker | None: ...
    @overload
    def recv(
        self,
        flags: int = ...,
        *,
        copy: Literal[False],
        track: bool = ...,
    ) -> zmq.Frame: ...
    @overload
    def recv(
        self,
        flags: int = ...,
        *,
        copy: Literal[True],
        track: bool = ...,
    ) -> bytes: ...
    @overload
    def recv(
        self,
        flags: int = ...,
        track: bool = False,
    ) -> bytes: ...
    @overload
    def recv(
        self,
        flags: int | None = ...,
        copy: bool = ...,
        track: bool | None = False,
    ) -> zmq.Frame | bytes: ...
    def recv_into(self, buf, /, *, nbytes: int = 0, flags: int = 0) -> int: ...
    def monitor(self, addr: str | None, events: int) -> None: ...
    # draft methods
    def join(self, group: str) -> None: ...
    def leave(self, group: str) -> None: ...

class Context:
    underlying: int
    def __init__(self, io_threads: int = 1, shadow: int = 0): ...
    def get(self, option: int) -> int | bytes | str: ...
    def set(self, option: int, value: int | bytes | str) -> None: ...
    def socket(self, socket_type: int) -> Socket: ...
    def term(self) -> None: ...

IPC_PATH_MAX_LEN: int

def has(capability: str) -> bool: ...
def curve_keypair() -> tuple[bytes, bytes]: ...
def curve_public(secret_key: bytes) -> bytes: ...
def strerror(errno: int | None = ...) -> str: ...
def zmq_errno() -> int: ...
def zmq_version() -> str: ...
def zmq_version_info() -> tuple[int, int, int]: ...
def zmq_poll(
    sockets: list[Any], timeout: int | None = ...
) -> list[tuple[Socket, int]]: ...
def proxy(frontend: Socket, backend: Socket, capture: Socket | None = None) -> int: ...
def proxy_steerable(
    frontend: Socket,
    backend: Socket,
    capture: Socket | None = ...,
    control: Socket | None = ...,
) -> int: ...

monitored_queue: Callable | None
pyzmq-26.4.0/zmq/backend/cffi/000077500000000000000000000000001477374370200161065ustar00rootroot00000000000000pyzmq-26.4.0/zmq/backend/cffi/README.md000066400000000000000000000001371477374370200173660ustar00rootroot00000000000000PyZMQ's CFFI support is designed only for (Unix) systems conforming to `have_sys_un_h = True`.
pyzmq-26.4.0/zmq/backend/cffi/__init__.py000066400000000000000000000016021477374370200202160ustar00rootroot00000000000000"""CFFI backend (for PyPy)"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

# for clearer error message on missing cffi
import cffi  # noqa

from zmq.backend.cffi import _poll, context, devices, error, message, socket, utils

from ._cffi import ffi
from ._cffi import lib as C


def zmq_version_info():
    """Get libzmq version as tuple of ints"""
    major = ffi.new('int*')
    minor = ffi.new('int*')
    patch = ffi.new('int*')

    C.zmq_version(major, minor, patch)

    return (int(major[0]), int(minor[0]), int(patch[0]))


__all__ = ["zmq_version_info"]
for submod in (error, message, context, socket, _poll, devices, utils):
    __all__.extend(submod.__all__)

from ._poll import *
from .context import *
from .devices import *
from .error import *
from .message import *
from .socket import *
from .utils import *

monitored_queue = None
pyzmq-26.4.0/zmq/backend/cffi/_cdefs.h000066400000000000000000000051211477374370200175010ustar00rootroot00000000000000void zmq_version(int *major, int *minor, int *patch);

void* zmq_socket(void *context, int type);
int zmq_close(void *socket);

int zmq_bind(void *socket, const char *endpoint);
int zmq_connect(void *socket, const char *endpoint);

int zmq_errno(void);
const char * zmq_strerror(int errnum);

int zmq_unbind(void *socket, const char *endpoint);
int zmq_disconnect(void *socket, const char *endpoint);
void* zmq_ctx_new();
int zmq_ctx_destroy(void *context);
int zmq_ctx_get(void *context, int opt);
int zmq_ctx_set(void *context, int opt, int optval);
int zmq_proxy(void *frontend, void *backend, void *capture);
int zmq_proxy_steerable(void *frontend,
                        void *backend,
                        void *capture,
                        void *control);
int zmq_socket_monitor(void *socket, const char *addr, int events);

int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);
int zmq_curve_public (char *z85_public_key, char *z85_secret_key);
int zmq_has (const char *capability);

typedef struct { ...; } zmq_msg_t;
typedef ... zmq_free_fn;

int zmq_msg_init(zmq_msg_t *msg);
int zmq_msg_init_size(zmq_msg_t *msg, size_t size);
int zmq_msg_init_data(zmq_msg_t *msg,
                      void *data,
                      size_t size,
                      zmq_free_fn *ffn,
                      void *hint);

size_t zmq_msg_size(zmq_msg_t *msg);
void *zmq_msg_data(zmq_msg_t *msg);
int zmq_msg_close(zmq_msg_t *msg);

int zmq_msg_copy(zmq_msg_t *dst, zmq_msg_t *src);
int zmq_msg_send(zmq_msg_t *msg, void *socket, int flags);
int zmq_msg_recv(zmq_msg_t *msg, void *socket, int flags);
int zmq_recv(void *socket, void *buf, int nbytes, int flags);

int zmq_getsockopt(void *socket,
                   int option_name,
                   void *option_value,
                   size_t *option_len);

int zmq_setsockopt(void *socket,
                   int option_name,
                   const void *option_value,
                   size_t option_len);

typedef int... ZMQ_FD_T;

typedef struct
{
    void *socket;
    ZMQ_FD_T fd;
    short events;
    short revents;
} zmq_pollitem_t;

int zmq_poll(zmq_pollitem_t *items, int nitems, long timeout);

// miscellany
void * memcpy(void *restrict s1, const void *restrict s2, size_t n);
void * malloc(size_t sz);
void free(void *p);
int get_ipc_path_max_len(void);

typedef struct { ...; } mutex_t;

typedef struct _zhint {
    void *sock;
    mutex_t *mutex;
    size_t id;
} zhint;

mutex_t* mutex_allocate();

int zmq_wrap_msg_init_data(zmq_msg_t *msg,
                      void *data,
                      size_t size,
                      void *hint);
pyzmq-26.4.0/zmq/backend/cffi/_cffi_src.c000066400000000000000000000024421477374370200201710ustar00rootroot00000000000000#include 
#include 

#include "pyversion_compat.h"
#include "mutex.h"
#include "ipcmaxlen.h"
#include "zmq_compat.h"
#include <zmq.h>

typedef struct _zhint {
  void *sock;
  mutex_t *mutex;
  size_t id;
} zhint;

void free_python_msg(void *data, void *vhint) {
  zmq_msg_t msg;
  zhint *hint = (zhint *)vhint;
  int rc;
  if (hint != NULL) {
    zmq_msg_init_size(&msg, sizeof(size_t));
    memcpy(zmq_msg_data(&msg), &hint->id, sizeof(size_t));
    rc = mutex_lock(hint->mutex);
    if (rc != 0) {
      fprintf(stderr, "pyzmq-gc mutex lock failed rc=%d\n", rc);
    }
    rc = zmq_msg_send(&msg, hint->sock, 0);
    if (rc < 0) {
      /*
       * gc socket could have been closed, e.g. during process teardown.
       * If so, ignore the failure because there's nothing to do.
       */
      if (zmq_errno() != ENOTSOCK) {
        fprintf(stderr, "pyzmq-gc send failed: %s\n",
                zmq_strerror(zmq_errno()));
      }
    }
    rc = mutex_unlock(hint->mutex);
    if (rc != 0) {
      fprintf(stderr, "pyzmq-gc mutex unlock failed rc=%d\n", rc);
    }
    zmq_msg_close(&msg);
    free(hint);
  }
}

int zmq_wrap_msg_init_data(zmq_msg_t *msg, void *data, size_t size,
                           void *hint) {
  return zmq_msg_init_data(msg, data, size, free_python_msg, hint);
}
pyzmq-26.4.0/zmq/backend/cffi/_poll.py000066400000000000000000000055041477374370200175710ustar00rootroot00000000000000"""zmq poll function"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

try:
    from time import monotonic
except ImportError:
    from time import clock as monotonic

import warnings

from zmq.error import InterruptedSystemCall, _check_rc

from ._cffi import ffi
from ._cffi import lib as C


def _make_zmq_pollitem(socket, flags):
    zmq_socket = socket._zmq_socket
    zmq_pollitem = ffi.new('zmq_pollitem_t*')
    zmq_pollitem.socket = zmq_socket
    zmq_pollitem.fd = 0
    zmq_pollitem.events = flags
    zmq_pollitem.revents = 0
    return zmq_pollitem[0]


def _make_zmq_pollitem_fromfd(socket_fd, flags):
    zmq_pollitem = ffi.new('zmq_pollitem_t*')
    zmq_pollitem.socket = ffi.NULL
    zmq_pollitem.fd = socket_fd
    zmq_pollitem.events = flags
    zmq_pollitem.revents = 0
    return zmq_pollitem[0]


def zmq_poll(sockets, timeout):
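    """Poll a list of ``(socket, flags)`` pairs.

    Each first element may be a zmq Socket, an integer file descriptor,
    or any object with a ``fileno()`` method. ``timeout`` is in milliseconds
    and is passed to libzmq's ``zmq_poll``.

    Returns a list of ``(socket_or_fd, revents)`` pairs for items with
    pending events.
    """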
    cffi_pollitem_list = []
    low_level_to_socket_obj = {}
    from zmq import Socket

    for item in sockets:
        if isinstance(item[0], Socket):
            low_level_to_socket_obj[item[0]._zmq_socket] = item
            cffi_pollitem_list.append(_make_zmq_pollitem(item[0], item[1]))
        else:
            if not isinstance(item[0], int):
                # not an FD, get it from fileno()
                item = (item[0].fileno(), item[1])
            low_level_to_socket_obj[item[0]] = item
            cffi_pollitem_list.append(_make_zmq_pollitem_fromfd(item[0], item[1]))
    items = ffi.new('zmq_pollitem_t[]', cffi_pollitem_list)
    list_length = ffi.cast('int', len(cffi_pollitem_list))
    while True:
        c_timeout = ffi.cast('long', timeout)
        start = monotonic()
        rc = C.zmq_poll(items, list_length, c_timeout)
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            if timeout > 0:
                ms_passed = int(1000 * (monotonic() - start))
                if ms_passed < 0:
                    # don't allow negative ms_passed,
                    # which can happen on old Python versions without time.monotonic.
                    warnings.warn(
                        f"Negative elapsed time for interrupted poll: {ms_passed}."
                        "  Did the clock change?",
                        RuntimeWarning,
                    )
                    ms_passed = 0
                timeout = max(0, timeout - ms_passed)
            continue
        else:
            break
    result = []
    for item in items:
        if item.revents > 0:
            if item.socket != ffi.NULL:
                result.append(
                    (
                        low_level_to_socket_obj[item.socket][0],
                        item.revents,
                    )
                )
            else:
                result.append((item.fd, item.revents))
    return result


__all__ = ['zmq_poll']
pyzmq-26.4.0/zmq/backend/cffi/context.py000066400000000000000000000035531477374370200201520ustar00rootroot00000000000000"""zmq Context class"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from zmq.constants import EINVAL, IO_THREADS
from zmq.error import InterruptedSystemCall, ZMQError, _check_rc

from ._cffi import ffi
from ._cffi import lib as C


class Context:
    _zmq_ctx = None
    _iothreads = None
    _closed = True
    _shadow = False

    def __init__(self, io_threads=1, shadow=None):
        if shadow:
            self._zmq_ctx = ffi.cast("void *", shadow)
            self._shadow = True
        else:
            self._shadow = False
            if not io_threads >= 0:
                raise ZMQError(EINVAL)

            self._zmq_ctx = C.zmq_ctx_new()
        if self._zmq_ctx == ffi.NULL:
            raise ZMQError(C.zmq_errno())
        if not shadow:
            C.zmq_ctx_set(self._zmq_ctx, IO_THREADS, io_threads)
        self._closed = False

    @property
    def underlying(self):
        """The address of the underlying libzmq context"""
        return int(ffi.cast('size_t', self._zmq_ctx))

    @property
    def closed(self):
        return self._closed

    def set(self, option, value):
        """set a context option

        see zmq_ctx_set
        """
        rc = C.zmq_ctx_set(self._zmq_ctx, option, value)
        _check_rc(rc)

    def get(self, option):
        """get context option

        see zmq_ctx_get
        """
        rc = C.zmq_ctx_get(self._zmq_ctx, option)
        _check_rc(rc, error_without_errno=False)
        return rc

    def term(self):
        if self.closed:
            return

        rc = C.zmq_ctx_destroy(self._zmq_ctx)
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            # ignore interrupted term
            # see PEP 475 notes about close & EINTR for why
            pass

        self._zmq_ctx = None
        self._closed = True


__all__ = ['Context']
pyzmq-26.4.0/zmq/backend/cffi/devices.py000066400000000000000000000027101477374370200201020ustar00rootroot00000000000000"""zmq device functions"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from ._cffi import ffi
from ._cffi import lib as C
from .socket import Socket
from .utils import _retry_sys_call


def proxy(frontend, backend, capture=None):
    if isinstance(capture, Socket):
        capture = capture._zmq_socket
    else:
        capture = ffi.NULL

    _retry_sys_call(C.zmq_proxy, frontend._zmq_socket, backend._zmq_socket, capture)


def proxy_steerable(frontend, backend, capture=None, control=None):
    """proxy_steerable(frontend, backend, capture, control)

    Start a zeromq proxy with control flow.

    .. versionadded:: libzmq-4.1
    .. versionadded:: 18.0

    Parameters
    ----------
    frontend : Socket
        The Socket instance for the incoming traffic.
    backend : Socket
        The Socket instance for the outbound traffic.
    capture : Socket (optional)
        The Socket instance for capturing traffic.
    control : Socket (optional)
        The Socket instance for control flow.
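
    A minimal sketch (socket types and endpoints are illustrative; the call
    blocks until the proxy is terminated through the control socket)::

        import zmq

        ctx = zmq.Context.instance()
        frontend = ctx.socket(zmq.ROUTER)
        frontend.bind("tcp://127.0.0.1:5555")
        backend = ctx.socket(zmq.DEALER)
        backend.bind("tcp://127.0.0.1:5556")
        control = ctx.socket(zmq.PAIR)
        control.bind("inproc://proxy-control")
        # a PAIR socket connected to "inproc://proxy-control" (e.g. from
        # another thread) can send b"PAUSE", b"RESUME", or b"TERMINATE"
        zmq.proxy_steerable(frontend, backend, None, control)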
    """
    if isinstance(capture, Socket):
        capture = capture._zmq_socket
    else:
        capture = ffi.NULL

    if isinstance(control, Socket):
        control = control._zmq_socket
    else:
        control = ffi.NULL

    _retry_sys_call(
        C.zmq_proxy_steerable,
        frontend._zmq_socket,
        backend._zmq_socket,
        capture,
        control,
    )


__all__ = ['proxy', 'proxy_steerable']
pyzmq-26.4.0/zmq/backend/cffi/error.py000066400000000000000000000004671477374370200176200ustar00rootroot00000000000000"""zmq error functions"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from ._cffi import ffi
from ._cffi import lib as C


def strerror(errno):
    return ffi.string(C.zmq_strerror(errno)).decode()


zmq_errno = C.zmq_errno

__all__ = ['strerror', 'zmq_errno']
pyzmq-26.4.0/zmq/backend/cffi/message.py000066400000000000000000000150231477374370200201050ustar00rootroot00000000000000"""Dummy Frame object"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import errno
from threading import Event

import zmq
import zmq.error
from zmq.constants import ETERM

from ._cffi import ffi
from ._cffi import lib as C

zmq_gc = None

try:
    from __pypy__.bufferable import bufferable as maybe_bufferable
except ImportError:
    maybe_bufferable = object


def _content(obj):
    """Return content of obj as bytes"""
    if type(obj) is bytes:
        return obj
    if not isinstance(obj, memoryview):
        obj = memoryview(obj)
    return obj.tobytes()


def _check_rc(rc):
    err = C.zmq_errno()
    if rc == -1:
        if err == errno.EINTR:
            raise zmq.error.InterruptedSystemCall(err)
        elif err == errno.EAGAIN:
            raise zmq.error.Again(err)
        elif err == ETERM:
            raise zmq.error.ContextTerminated(err)
        else:
            raise zmq.error.ZMQError(err)
    return 0


class Frame(maybe_bufferable):
    _data = None
    tracker = None
    closed = False
    more = False
    _buffer = None
    _bytes = None
    _failed_init = False
    tracker_event = None
    zmq_msg = None

    def __init__(self, data=None, track=False, copy=None, copy_threshold=None):
        self._failed_init = True

        self.zmq_msg = ffi.cast('zmq_msg_t[1]', C.malloc(ffi.sizeof("zmq_msg_t")))

        # self.tracker should start finished
        # except in the case where we are sharing memory with libzmq
        if track:
            self.tracker = zmq._FINISHED_TRACKER

        if isinstance(data, str):
            raise TypeError(
                "Unicode strings are not allowed. Only: bytes, buffer interfaces."
            )

        if data is None:
            rc = C.zmq_msg_init(self.zmq_msg)
            _check_rc(rc)
            self._failed_init = False
            return

        self._data = data
        if type(data) is bytes:
            # avoid unnecessary copy on .bytes access
            self._bytes = data

        self._buffer = memoryview(data)
        if not self._buffer.contiguous:
            raise BufferError("memoryview: underlying buffer is not contiguous")
        # from_buffer silently copies if memory is not contiguous
        c_data = ffi.from_buffer(self._buffer)
        data_len_c = self._buffer.nbytes

        if copy is None:
            if copy_threshold and data_len_c < copy_threshold:
                copy = True
            else:
                copy = False

        if copy:
            # copy message data instead of sharing memory
            rc = C.zmq_msg_init_size(self.zmq_msg, data_len_c)
            _check_rc(rc)
            ffi.buffer(C.zmq_msg_data(self.zmq_msg), data_len_c)[:] = self._buffer
            self._failed_init = False
            return

        # Getting here means that we are doing a true zero-copy Frame,
        # where libzmq and Python are sharing memory.
        # Hook up garbage collection with MessageTracker and zmq_free_fn

        # Event and MessageTracker for monitoring when zmq is done with data:
        if track:
            evt = Event()
            self.tracker_event = evt
            self.tracker = zmq.MessageTracker(evt)
        # create the hint for zmq_free_fn
        # two pointers: the zmq_gc context and a message to be sent to the zmq_gc PULL socket
        # allows libzmq to signal to Python when it is done with Python-owned memory.
        global zmq_gc
        if zmq_gc is None:
            from zmq.utils.garbage import gc as zmq_gc
        # can't use ffi.new because it will be freed at the wrong time!
        hint = ffi.cast("zhint[1]", C.malloc(ffi.sizeof("zhint")))
        hint[0].id = zmq_gc.store(data, self.tracker_event)
        if not zmq_gc._push_mutex:
            zmq_gc._push_mutex = C.mutex_allocate()

        hint[0].mutex = ffi.cast("mutex_t*", zmq_gc._push_mutex)
        hint[0].sock = ffi.cast("void*", zmq_gc._push_socket.underlying)

        # calls zmq_wrap_msg_init_data with the C.free_python_msg callback
        rc = C.zmq_wrap_msg_init_data(
            self.zmq_msg,
            c_data,
            data_len_c,
            hint,
        )
        if rc != 0:
            C.free(hint)
            C.free(self.zmq_msg)
            _check_rc(rc)
        self._failed_init = False

    def __del__(self):
        if not self.closed and not self._failed_init:
            self.close()

    def close(self):
        if self.closed or self._failed_init or self.zmq_msg is None:
            return
        self.closed = True
        rc = C.zmq_msg_close(self.zmq_msg)
        C.free(self.zmq_msg)
        self.zmq_msg = None
        if rc != 0:
            _check_rc(rc)

    def _buffer_from_zmq_msg(self):
        """one-time extract buffer from zmq_msg

        for Frames created by recv
        """
        if self._data is None:
            self._data = ffi.buffer(
                C.zmq_msg_data(self.zmq_msg), C.zmq_msg_size(self.zmq_msg)
            )
        if self._buffer is None:
            self._buffer = memoryview(self._data)

    @property
    def buffer(self):
        if self._buffer is None:
            self._buffer_from_zmq_msg()
        return self._buffer

    @property
    def bytes(self):
        if self._bytes is None:
            self._bytes = self.buffer.tobytes()
        return self._bytes

    def __len__(self):
        return self.buffer.nbytes

    def __eq__(self, other):
        return self.bytes == _content(other)

    @property
    def done(self):
        return self.tracker.done()

    def __buffer__(self, flags):
        return self.buffer

    def __copy__(self):
        """Create a shallow copy of the message.

        This does not copy the contents of the Frame, just the pointer.
        This will increment the 0MQ ref count of the message, but not
        the ref count of the Python object. That is only done once when
        the Python is first turned into a 0MQ message.
        """
        return self.fast_copy()

    def fast_copy(self):
        """Fast shallow copy of the Frame.

        Does not copy underlying data.
        """
        new_msg = Frame()
        # This does not copy the contents, but just increases the ref-count
        # of the zmq_msg by one.
        C.zmq_msg_copy(new_msg.zmq_msg, self.zmq_msg)
        # Copy the ref to underlying data
        new_msg._data = self._data
        new_msg._buffer = self._buffer

        # Frame copies share the tracker and tracker_event
        new_msg.tracker_event = self.tracker_event
        new_msg.tracker = self.tracker

        return new_msg


Message = Frame

__all__ = ['Frame', 'Message']
pyzmq-26.4.0/zmq/backend/cffi/socket.py000066400000000000000000000274141477374370200177600ustar00rootroot00000000000000"""zmq Socket class"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import errno as errno_mod

import zmq
from zmq.constants import SocketOption, _OptType
from zmq.error import ZMQError, _check_rc

from ._cffi import ffi
from ._cffi import lib as C
from .message import Frame
from .utils import _retry_sys_call

nsp = new_sizet_pointer = lambda length: ffi.new('size_t*', length)


def new_uint64_pointer():
    return ffi.new('uint64_t*'), nsp(ffi.sizeof('uint64_t'))


def new_int64_pointer():
    return ffi.new('int64_t*'), nsp(ffi.sizeof('int64_t'))


def new_int_pointer():
    return ffi.new('int*'), nsp(ffi.sizeof('int'))


def new_binary_data(length):
    return ffi.new(f'char[{length:d}]'), nsp(ffi.sizeof('char') * length)


def value_uint64_pointer(val):
    return ffi.new('uint64_t*', val), ffi.sizeof('uint64_t')


def value_int64_pointer(val):
    return ffi.new('int64_t*', val), ffi.sizeof('int64_t')


def value_int_pointer(val):
    return ffi.new('int*', val), ffi.sizeof('int')


def value_binary_data(val, length):
    return ffi.new(f'char[{length + 1:d}]', val), ffi.sizeof('char') * length


ZMQ_FD_64BIT = ffi.sizeof('ZMQ_FD_T') == 8

IPC_PATH_MAX_LEN = C.get_ipc_path_max_len()


def new_pointer_from_opt(option, length=0):
    opt_type = getattr(option, "_opt_type", _OptType.int)

    if opt_type == _OptType.int64 or (ZMQ_FD_64BIT and opt_type == _OptType.fd):
        return new_int64_pointer()
    elif opt_type == _OptType.bytes:
        return new_binary_data(length)
    else:
        # default
        return new_int_pointer()


def value_from_opt_pointer(option, opt_pointer, length=0):
    try:
        option = SocketOption(option)
    except ValueError:
        # unrecognized option,
        # assume from the future,
        # let EINVAL raise
        opt_type = _OptType.int
    else:
        opt_type = option._opt_type

    if opt_type == _OptType.bytes:
        return ffi.buffer(opt_pointer, length)[:]
    else:
        return int(opt_pointer[0])


def initialize_opt_pointer(option, value, length=0):
    opt_type = getattr(option, "_opt_type", _OptType.int)
    if opt_type == _OptType.int64 or (ZMQ_FD_64BIT and opt_type == _OptType.fd):
        return value_int64_pointer(value)
    elif opt_type == _OptType.bytes:
        return value_binary_data(value, length)
    else:
        return value_int_pointer(value)


class Socket:
    context = None
    socket_type = None
    _zmq_socket = None
    _closed = None
    _ref = None
    _shadow = False
    copy_threshold = 0

    def __init__(self, context=None, socket_type=None, shadow=0, copy_threshold=None):
        if copy_threshold is None:
            copy_threshold = zmq.COPY_THRESHOLD
        self.copy_threshold = copy_threshold

        self.context = context
        if shadow:
            self._zmq_socket = ffi.cast("void *", shadow)
            self._shadow = True
        else:
            self._shadow = False
            self._zmq_socket = C.zmq_socket(context._zmq_ctx, socket_type)
        if self._zmq_socket == ffi.NULL:
            raise ZMQError()
        self._closed = False

    @property
    def underlying(self):
        """The address of the underlying libzmq socket"""
        return int(ffi.cast('size_t', self._zmq_socket))

    def _check_closed_deep(self):
        """thorough check of whether the socket has been closed,
        even if by another entity (e.g. ctx.destroy).

        Only used by the `closed` property.

        returns True if closed, False otherwise
        """
        if self._closed:
            return True
        try:
            self.get(zmq.TYPE)
        except ZMQError as e:
            if e.errno == zmq.ENOTSOCK:
                self._closed = True
                return True
            elif e.errno == zmq.ETERM:
                pass
            else:
                raise
        return False

    @property
    def closed(self):
        return self._check_closed_deep()

    def close(self, linger=None):
        rc = 0
        if not self._closed and hasattr(self, '_zmq_socket'):
            if self._zmq_socket is not None:
                if linger is not None:
                    self.set(zmq.LINGER, linger)
                rc = C.zmq_close(self._zmq_socket)
            self._closed = True
        if rc < 0:
            _check_rc(rc)

    def bind(self, address):
        if isinstance(address, str):
            address_b = address.encode('utf8')
        else:
            address_b = address
        if isinstance(address, bytes):
            address = address_b.decode('utf8')
        rc = C.zmq_bind(self._zmq_socket, address_b)
        if rc < 0:
            if IPC_PATH_MAX_LEN and C.zmq_errno() == errno_mod.ENAMETOOLONG:
                path = address.split('://', 1)[-1]
                msg = (
                    f'ipc path "{path}" is longer than {IPC_PATH_MAX_LEN} '
                    'characters (sizeof(sockaddr_un.sun_path)).'
                )
                raise ZMQError(C.zmq_errno(), msg=msg)
            elif C.zmq_errno() == errno_mod.ENOENT:
                path = address.split('://', 1)[-1]
                msg = f'No such file or directory for ipc path "{path}".'
                raise ZMQError(C.zmq_errno(), msg=msg)
            else:
                _check_rc(rc)

    def unbind(self, address):
        if isinstance(address, str):
            address = address.encode('utf8')
        rc = C.zmq_unbind(self._zmq_socket, address)
        _check_rc(rc)

    def connect(self, address):
        if isinstance(address, str):
            address = address.encode('utf8')
        rc = C.zmq_connect(self._zmq_socket, address)
        _check_rc(rc)

    def disconnect(self, address):
        if isinstance(address, str):
            address = address.encode('utf8')
        rc = C.zmq_disconnect(self._zmq_socket, address)
        _check_rc(rc)

    def set(self, option, value):
        length = None
        if isinstance(value, str):
            raise TypeError("unicode not allowed, use bytes")

        try:
            option = SocketOption(option)
        except ValueError:
            # unrecognized option,
            # assume from the future,
            # let EINVAL raise
            opt_type = _OptType.int
        else:
            opt_type = option._opt_type

        if isinstance(value, bytes):
            if opt_type != _OptType.bytes:
                raise TypeError(f"not a bytes sockopt: {option}")
            length = len(value)

        c_value_pointer, c_sizet = initialize_opt_pointer(option, value, length)

        _retry_sys_call(
            C.zmq_setsockopt,
            self._zmq_socket,
            option,
            ffi.cast('void*', c_value_pointer),
            c_sizet,
        )

    def get(self, option):
        try:
            option = SocketOption(option)
        except ValueError:
            # unrecognized option,
            # assume from the future,
            # let EINVAL raise
            opt_type = _OptType.int
        else:
            opt_type = option._opt_type

        c_value_pointer, c_sizet_pointer = new_pointer_from_opt(option, length=255)

        _retry_sys_call(
            C.zmq_getsockopt, self._zmq_socket, option, c_value_pointer, c_sizet_pointer
        )

        sz = c_sizet_pointer[0]
        v = value_from_opt_pointer(option, c_value_pointer, sz)
        if (
            option != zmq.SocketOption.ROUTING_ID
            and opt_type == _OptType.bytes
            and v.endswith(b'\0')
        ):
            v = v[:-1]
        return v

    def _send_copy(self, buf, flags):
        """Send a copy of a bufferable"""
        zmq_msg = ffi.new('zmq_msg_t*')
        if not isinstance(buf, bytes):
            # cast any bufferable data to bytes via memoryview
            buf = memoryview(buf).tobytes()

        c_message = ffi.new('char[]', buf)
        rc = C.zmq_msg_init_size(zmq_msg, len(buf))
        _check_rc(rc)
        C.memcpy(C.zmq_msg_data(zmq_msg), c_message, len(buf))
        _retry_sys_call(C.zmq_msg_send, zmq_msg, self._zmq_socket, flags)
        rc2 = C.zmq_msg_close(zmq_msg)
        _check_rc(rc2)

    def _send_frame(self, frame, flags):
        """Send a Frame on this socket in a non-copy manner."""
        # Always copy the Frame so the original message isn't garbage collected.
        # This doesn't do a real copy, just a reference.
        frame_copy = frame.fast_copy()
        zmq_msg = frame_copy.zmq_msg
        _retry_sys_call(C.zmq_msg_send, zmq_msg, self._zmq_socket, flags)
        tracker = frame_copy.tracker
        frame_copy.close()
        return tracker

    def send(self, data, flags=0, copy=False, track=False):
        if isinstance(data, str):
            raise TypeError("Message must be in bytes, not a unicode object")

        if copy and not isinstance(data, Frame):
            return self._send_copy(data, flags)
        else:
            close_frame = False
            if isinstance(data, Frame):
                if track and not data.tracker:
                    raise ValueError('Not a tracked message')
                frame = data
            else:
                if self.copy_threshold:
                    buf = memoryview(data)
                    # always copy messages smaller than copy_threshold
                    if buf.nbytes < self.copy_threshold:
                        self._send_copy(buf, flags)
                        return zmq._FINISHED_TRACKER
                frame = Frame(data, track=track, copy_threshold=self.copy_threshold)
                close_frame = True

            tracker = self._send_frame(frame, flags)
            if close_frame:
                frame.close()
            return tracker

    def recv(self, flags=0, copy=True, track=False):
        if copy:
            zmq_msg = ffi.new('zmq_msg_t*')
            C.zmq_msg_init(zmq_msg)
        else:
            frame = zmq.Frame(track=track)
            zmq_msg = frame.zmq_msg

        try:
            _retry_sys_call(C.zmq_msg_recv, zmq_msg, self._zmq_socket, flags)
        except Exception:
            if copy:
                C.zmq_msg_close(zmq_msg)
            raise

        if not copy:
            return frame

        _buffer = ffi.buffer(C.zmq_msg_data(zmq_msg), C.zmq_msg_size(zmq_msg))
        _bytes = _buffer[:]
        rc = C.zmq_msg_close(zmq_msg)
        _check_rc(rc)
        return _bytes

    def recv_into(self, buffer, /, *, nbytes: int = 0, flags: int = 0) -> int:
        view = memoryview(buffer)
        if not view.contiguous:
            raise BufferError("Can only recv_into contiguous buffers")
        if view.readonly:
            raise BufferError("Cannot recv_into readonly buffer")
        if nbytes < 0:
            raise ValueError(f"{nbytes=} must be non-negative")
        view_bytes = view.nbytes
        if nbytes == 0:
            nbytes = view_bytes
        elif nbytes > view_bytes:
            raise ValueError(f"{nbytes=} too big for memoryview of {view_bytes}B")
        c_buf = ffi.from_buffer(view)
        rc: int = _retry_sys_call(C.zmq_recv, self._zmq_socket, c_buf, nbytes, flags)
        _check_rc(rc)
        return rc

    def monitor(self, addr, events=-1):
        """s.monitor(addr, flags)

        Start publishing socket events on inproc.
        See libzmq docs for zmq_monitor for details.

        Note: requires libzmq >= 3.2

        Parameters
        ----------
        addr : str
            The inproc url used for monitoring. Passing None as
            the addr will cause an existing socket monitor to be
            deregistered.
        events : int [default: zmq.EVENT_ALL]
            The zmq event bitmask for which events will be sent to the monitor.
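
        A minimal sketch (the inproc address is illustrative)::

            s.monitor("inproc://monitor.sock", zmq.EVENT_ALL)
            # ... read events from a PAIR socket connected to that address ...
            s.monitor(None, 0)  # deregister the existing monitor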
        """
        if events < 0:
            events = zmq.EVENT_ALL
        if addr is None:
            addr = ffi.NULL
        if isinstance(addr, str):
            addr = addr.encode('utf8')
        C.zmq_socket_monitor(self._zmq_socket, addr, events)


__all__ = ['Socket', 'IPC_PATH_MAX_LEN']
pyzmq-26.4.0/zmq/backend/cffi/utils.py000066400000000000000000000040101477374370200176130ustar00rootroot00000000000000"""miscellaneous zmq_utils wrapping"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from zmq.error import InterruptedSystemCall, _check_rc, _check_version

from ._cffi import ffi
from ._cffi import lib as C


def has(capability):
    """Check for zmq capability by name (e.g. 'ipc', 'curve')

    .. versionadded:: libzmq-4.1
    .. versionadded:: 14.1
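
    For example::

        if has('curve'):
            print("libzmq was built with CURVE support")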
    """
    _check_version((4, 1), 'zmq.has')
    if isinstance(capability, str):
        capability = capability.encode('utf8')
    return bool(C.zmq_has(capability))


def curve_keypair():
    """generate a Z85 key pair for use with zmq.CURVE security

    Requires libzmq (≥ 4.0) to have been built with CURVE support.

    Returns
    -------
    (public, secret) : two bytestrings
        The public and private key pair as 40 byte z85-encoded bytestrings.
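
    A minimal sketch::

        public, secret = curve_keypair()
        assert len(public) == len(secret) == 40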
    """
    public = ffi.new('char[64]')
    private = ffi.new('char[64]')
    rc = C.zmq_curve_keypair(public, private)
    _check_rc(rc)
    return ffi.buffer(public)[:40], ffi.buffer(private)[:40]


def curve_public(private):
    """Compute the public key corresponding to a private key for use
    with zmq.CURVE security

    Requires libzmq (≥ 4.2) to have been built with CURVE support.

    Parameters
    ----------
    private
        The private key as a 40 byte z85-encoded bytestring
    Returns
    -------
    bytestring
        The public key as a 40 byte z85-encoded bytestring.
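
    A minimal sketch (keys are freshly generated, not fixed values)::

        public, secret = curve_keypair()
        assert curve_public(secret) == public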
    """
    if isinstance(private, str):
        private = private.encode('utf8')
    _check_version((4, 2), "curve_public")
    public = ffi.new('char[64]')
    rc = C.zmq_curve_public(public, private)
    _check_rc(rc)
    return ffi.buffer(public)[:40]


def _retry_sys_call(f, *args, **kwargs):
    """make a call, retrying if interrupted with EINTR"""
    while True:
        rc = f(*args)
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            continue
        else:
            break
    return rc


__all__ = ['has', 'curve_keypair', 'curve_public']
pyzmq-26.4.0/zmq/backend/cython/000077500000000000000000000000001477374370200165035ustar00rootroot00000000000000pyzmq-26.4.0/zmq/backend/cython/__init__.pxd000066400000000000000000000000741477374370200207600ustar00rootroot00000000000000from zmq.backend.cython._zmq cimport Context, Frame, Socket
pyzmq-26.4.0/zmq/backend/cython/__init__.py000066400000000000000000000005021477374370200206110ustar00rootroot00000000000000"""Python bindings for core 0MQ objects."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from . import _zmq

# monitored_queue not in __all__
from ._zmq import *  # noqa
from ._zmq import monitored_queue  # noqa

Message = _zmq.Frame

__all__ = ["Message"]
__all__.extend(_zmq.__all__)
pyzmq-26.4.0/zmq/backend/cython/_externs.pxd000066400000000000000000000005351477374370200210520ustar00rootroot00000000000000cdef extern from "mutex.h" nogil:
    ctypedef struct mutex_t:
        pass
    cdef mutex_t* mutex_allocate()
    cdef void mutex_dallocate(mutex_t*)
    cdef int mutex_lock(mutex_t*)
    cdef int mutex_unlock(mutex_t*)

cdef extern from "getpid_compat.h":
    cdef int getpid()

cdef extern from "ipcmaxlen.h":
    cdef int get_ipc_path_max_len()
pyzmq-26.4.0/zmq/backend/cython/_zmq.pxd000066400000000000000000000043521477374370200201720ustar00rootroot00000000000000# cython: language_level = 3str
"""zmq Cython backend augmented declarations"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from zmq.backend.cython.libzmq cimport zmq_msg_t

cdef class Context:

    cdef object __weakref__  # enable weakref
    cdef void *handle  # The C handle for the underlying zmq object.
    cdef bint _shadow  # whether the Context is a shadow wrapper of another
    cdef int _pid  # the pid of the process which created me (for fork safety)

    cdef public bint closed  # bool property for a closed context.
    cdef inline int _term(self)

cdef class MessageTracker(object):
    cdef set events  # Message Event objects to track.
    cdef set peers  # Other Message or MessageTracker objects.

cdef class Frame:

    cdef zmq_msg_t zmq_msg
    cdef object _data      # The actual message data as a Python object.
    cdef object _buffer    # A Python memoryview of the message contents
    cdef object _bytes     # A bytes copy of the message.
    cdef bint _failed_init # flag to hold failed init
    cdef public object tracker_event  # Event for use with zmq_free_fn.
    cdef public object tracker        # MessageTracker object.
    cdef public bint more             # whether RCVMORE was set

    cdef Frame fast_copy(self) # Create shallow copy of Message object.

cdef class Socket:

    cdef object __weakref__     # enable weakref
    cdef void *handle           # The C handle for the underlying zmq object.
    cdef bint _shadow           # whether the Socket is a shadow wrapper of another
    # Hold on to a reference to the context to make sure it is not garbage
    # collected until the socket is done with it.
    cdef public Context context # The zmq Context object that owns this.
    cdef public bint _closed    # bool property for a closed socket.
    cdef public int copy_threshold # threshold below which pyzmq will always copy messages
    cdef int _pid               # the pid of the process which created me (for fork safety)

    # cpdef methods for direct-cython access:
    cpdef object send(self, data, int flags=*, bint copy=*, bint track=*)
    cpdef object recv(self, int flags=*, bint copy=*, bint track=*)
    cpdef int recv_into(self, buffer, int nbytes=*, int flags=*)
pyzmq-26.4.0/zmq/backend/cython/_zmq.py000066400000000000000000001642021477374370200200300ustar00rootroot00000000000000# cython: language_level = 3str
# cython: freethreading_compatible = True
"""Cython backend for pyzmq"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from __future__ import annotations

try:
    import cython

    if not cython.compiled:
        raise ImportError()
except ImportError:
    from pathlib import Path

    zmq_root = Path(__file__).parents[3]
    msg = f"""
    Attempting to import zmq Cython backend, which has not been compiled.

    This probably means you are importing zmq from its source tree.
    If this is what you want, make sure to do an in-place build first:

        pip install -e '{zmq_root}'

    If it is not, then '{zmq_root}' is probably on your sys.path,
    when it shouldn't be. Is that your current working directory?

    If neither of those is true and this file is actually installed,
    something seems to have gone wrong with the install!
    Please report at https://github.com/zeromq/pyzmq/issues
    """
    raise ImportError(msg)

import warnings
from threading import Event
from time import monotonic
from weakref import ref

import cython as C
from cython import (
    NULL,
    Py_ssize_t,
    address,
    bint,
    cast,
    cclass,
    cfunc,
    char,
    declare,
    inline,
    nogil,
    p_char,
    p_void,
    pointer,
    size_t,
    sizeof,
)
from cython.cimports.cpython.buffer import (
    Py_buffer,
    PyBUF_ANY_CONTIGUOUS,
    PyBUF_WRITABLE,
    PyBuffer_Release,
    PyObject_GetBuffer,
)
from cython.cimports.cpython.bytes import (
    PyBytes_AsString,
    PyBytes_FromStringAndSize,
    PyBytes_Size,
)
from cython.cimports.cpython.exc import PyErr_CheckSignals
from cython.cimports.libc.errno import EAGAIN, EINTR, ENAMETOOLONG, ENOENT, ENOTSOCK
from cython.cimports.libc.stdint import uint32_t
from cython.cimports.libc.stdio import fprintf
from cython.cimports.libc.stdio import stderr as cstderr
from cython.cimports.libc.stdlib import free, malloc
from cython.cimports.libc.string import memcpy
from cython.cimports.zmq.backend.cython._externs import (
    get_ipc_path_max_len,
    getpid,
    mutex_allocate,
    mutex_lock,
    mutex_t,
    mutex_unlock,
)
from cython.cimports.zmq.backend.cython.libzmq import (
    ZMQ_ENOTSOCK,
    ZMQ_ETERM,
    ZMQ_EVENT_ALL,
    ZMQ_IDENTITY,
    ZMQ_IO_THREADS,
    ZMQ_LINGER,
    ZMQ_POLLIN,
    ZMQ_RCVMORE,
    ZMQ_ROUTER,
    ZMQ_SNDMORE,
    ZMQ_TYPE,
    _zmq_version,
    fd_t,
    int64_t,
    zmq_bind,
    zmq_close,
    zmq_connect,
    zmq_ctx_destroy,
    zmq_ctx_get,
    zmq_ctx_new,
    zmq_ctx_set,
    zmq_curve_keypair,
    zmq_curve_public,
    zmq_disconnect,
    zmq_free_fn,
    zmq_getsockopt,
    zmq_has,
    zmq_join,
    zmq_leave,
    zmq_msg_close,
    zmq_msg_copy,
    zmq_msg_data,
    zmq_msg_get,
    zmq_msg_gets,
    zmq_msg_group,
    zmq_msg_init,
    zmq_msg_init_data,
    zmq_msg_init_size,
    zmq_msg_recv,
    zmq_msg_routing_id,
    zmq_msg_send,
    zmq_msg_set,
    zmq_msg_set_group,
    zmq_msg_set_routing_id,
    zmq_msg_size,
    zmq_msg_t,
    zmq_pollitem_t,
    zmq_proxy,
    zmq_proxy_steerable,
    zmq_recv,
    zmq_setsockopt,
    zmq_socket,
    zmq_socket_monitor,
    zmq_strerror,
    zmq_unbind,
)
from cython.cimports.zmq.backend.cython.libzmq import zmq_errno as _zmq_errno
from cython.cimports.zmq.backend.cython.libzmq import zmq_poll as zmq_poll_c

import zmq
from zmq.constants import SocketOption, _OptType
from zmq.error import (
    Again,
    ContextTerminated,
    InterruptedSystemCall,
    ZMQError,
    _check_version,
)

IPC_PATH_MAX_LEN: int = get_ipc_path_max_len()


@cfunc
@inline
@C.exceptval(-1)
def _check_rc(rc: C.int, error_without_errno: bint = False) -> C.int:
    """internal utility for checking zmq return condition

    and raising the appropriate Exception class
    """
    errno: C.int = _zmq_errno()
    PyErr_CheckSignals()
    if errno == 0 and not error_without_errno:
        return 0
    if rc == -1:  # if rc < -1, it's a bug in libzmq. Should we warn?
        if errno == EINTR:
            raise InterruptedSystemCall(errno)
        elif errno == EAGAIN:
            raise Again(errno)
        elif errno == ZMQ_ETERM:
            raise ContextTerminated(errno)
        else:
            raise ZMQError(errno)
    return 0


# message Frame class

_zhint = C.struct(
    sock=p_void,
    mutex=pointer(mutex_t),
    id=size_t,
)


@cfunc
@nogil
def free_python_msg(data: p_void, vhint: p_void) -> C.int:
    """A pure-C function for DECREF'ing Python-owned message data.

    Sends a message on a PUSH socket

    The hint is a `_zhint` struct with three values:

    sock (void *): pointer to the Garbage Collector's PUSH socket
    mutex (mutex_t *): mutex serializing sends on the Garbage Collector's PUSH socket
    id (size_t): the id used to construct a zmq_msg_t that is sent on the PUSH socket,
       signaling the Garbage Collector to remove its reference to the object.

    When the Garbage Collector's PULL socket receives the message,
    it deletes its reference to the object,
    allowing Python to free the memory.
    """
    msg = declare(zmq_msg_t)
    msg_ptr: pointer(zmq_msg_t) = address(msg)
    hint: pointer(_zhint) = cast(pointer(_zhint), vhint)
    rc: C.int

    if hint != NULL:
        zmq_msg_init_size(msg_ptr, sizeof(size_t))
        memcpy(zmq_msg_data(msg_ptr), address(hint.id), sizeof(size_t))
        rc = mutex_lock(hint.mutex)
        if rc != 0:
            fprintf(cstderr, "pyzmq-gc mutex lock failed rc=%d\n", rc)
        rc = zmq_msg_send(msg_ptr, hint.sock, 0)
        if rc < 0:
            # gc socket could have been closed, e.g. during process teardown.
            # If so, ignore the failure because there's nothing to do.
            if _zmq_errno() != ZMQ_ENOTSOCK:
                fprintf(
                    cstderr, "pyzmq-gc send failed: %s\n", zmq_strerror(_zmq_errno())
                )
        rc = mutex_unlock(hint.mutex)
        if rc != 0:
            fprintf(cstderr, "pyzmq-gc mutex unlock failed rc=%d\n", rc)

        zmq_msg_close(msg_ptr)
        free(hint)
        return 0


@cfunc
@inline
def _copy_zmq_msg_bytes(zmq_msg: pointer(zmq_msg_t)) -> bytes:
    """Copy the data from a zmq_msg_t"""
    data_c: p_char = NULL
    data_len_c: Py_ssize_t
    data_c = cast(p_char, zmq_msg_data(zmq_msg))
    data_len_c = zmq_msg_size(zmq_msg)
    return PyBytes_FromStringAndSize(data_c, data_len_c)


@cfunc
@inline
def _asbuffer(obj, data_c: pointer(p_void), writable: bint = False) -> size_t:
    """Get a C buffer from a memoryview"""
    pybuf = declare(Py_buffer)
    flags: C.int = PyBUF_ANY_CONTIGUOUS
    if writable:
        flags |= PyBUF_WRITABLE
    rc: C.int = PyObject_GetBuffer(obj, address(pybuf), flags)
    if rc < 0:
        raise ValueError("Couldn't create buffer")
    data_c[0] = pybuf.buf
    data_size: size_t = pybuf.len
    PyBuffer_Release(address(pybuf))
    return data_size


_gc = None


@cclass
class Frame:
    def __init__(
        self, data=None, track=False, copy=None, copy_threshold=None, **kwargs
    ):
        rc: C.int
        data_c: p_char = NULL
        data_len_c: Py_ssize_t = 0
        hint: pointer(_zhint)
        if copy_threshold is None:
            copy_threshold = zmq.COPY_THRESHOLD

        c_copy_threshold: C.size_t = 0
        if copy_threshold is not None:
            c_copy_threshold = copy_threshold

        zmq_msg_ptr: pointer(zmq_msg_t) = address(self.zmq_msg)
        # init more as False
        self.more = False

        # Save the data object in case the user wants the data as a str.
        self._data = data
        self._failed_init = True  # bool switch for dealloc
        self._buffer = None  # buffer view of data
        self._bytes = None  # bytes copy of data

        self.tracker_event = None
        self.tracker = None
        # self.tracker should start finished
        # except in the case where we are sharing memory with libzmq
        if track:
            self.tracker = zmq._FINISHED_TRACKER

        if isinstance(data, str):
            raise TypeError("Str objects not allowed. Only: bytes, buffer interfaces.")

        if data is None:
            rc = zmq_msg_init(zmq_msg_ptr)
            _check_rc(rc)
            self._failed_init = False
            return

        data_len_c = _asbuffer(data, cast(pointer(p_void), address(data_c)))

        # copy unspecified, apply copy_threshold
        c_copy: bint = True
        if copy is None:
            if c_copy_threshold and data_len_c < c_copy_threshold:
                c_copy = True
            else:
                c_copy = False
        else:
            c_copy = copy

        if c_copy:
            # copy message data instead of sharing memory
            rc = zmq_msg_init_size(zmq_msg_ptr, data_len_c)
            _check_rc(rc)
            memcpy(zmq_msg_data(zmq_msg_ptr), data_c, data_len_c)
            self._failed_init = False
            return

        # Getting here means that we are doing a true zero-copy Frame,
        # where libzmq and Python are sharing memory.
        # Hook up garbage collection with MessageTracker and zmq_free_fn

        # Event and MessageTracker for monitoring when zmq is done with data:
        if track:
            evt = Event()
            self.tracker_event = evt
            self.tracker = zmq.MessageTracker(evt)
        # create the hint for zmq_free_fn
        # it carries the gc PUSH socket, its send mutex, and the id of the stored data,
        # allowing libzmq to signal to Python when it is done with Python-owned memory.
        global _gc
        if _gc is None:
            from zmq.utils.garbage import gc as _gc

        hint: pointer(_zhint) = cast(pointer(_zhint), malloc(sizeof(_zhint)))
        hint.id = _gc.store(data, self.tracker_event)
        if not _gc._push_mutex:
            hint.mutex = mutex_allocate()
            _gc._push_mutex = cast(size_t, hint.mutex)
        else:
            hint.mutex = cast(pointer(mutex_t), cast(size_t, _gc._push_mutex))
        hint.sock = cast(p_void, cast(size_t, _gc._push_socket.underlying))

        rc = zmq_msg_init_data(
            zmq_msg_ptr,
            cast(p_void, data_c),
            data_len_c,
            cast(pointer(zmq_free_fn), free_python_msg),
            cast(p_void, hint),
        )
        if rc != 0:
            free(hint)
            _check_rc(rc)
        self._failed_init = False

    def __dealloc__(self):
        if self._failed_init:
            return
        # decrease the 0MQ ref-count of zmq_msg
        with nogil:
            rc: C.int = zmq_msg_close(address(self.zmq_msg))
        _check_rc(rc)

    def __copy__(self):
        return self.fast_copy()

    def fast_copy(self) -> Frame:
        new_msg: Frame = Frame()
        # This does not copy the contents, but just increases the ref-count
        # of the zmq_msg by one.
        zmq_msg_copy(address(new_msg.zmq_msg), address(self.zmq_msg))
        # Copy the ref to data so the copy won't create a copy when str is
        # called.
        if self._data is not None:
            new_msg._data = self._data
        if self._buffer is not None:
            new_msg._buffer = self._buffer
        if self._bytes is not None:
            new_msg._bytes = self._bytes

        # Frame copies share the tracker and tracker_event
        new_msg.tracker_event = self.tracker_event
        new_msg.tracker = self.tracker

        return new_msg

    # buffer interface code adapted from petsc4py by Lisandro Dalcin, a BSD project

    def __getbuffer__(self, buffer: pointer(Py_buffer), flags: C.int):  # noqa: F821
        # new-style (memoryview) buffer interface
        buffer.buf = zmq_msg_data(address(self.zmq_msg))
        buffer.len = zmq_msg_size(address(self.zmq_msg))

        buffer.obj = self
        buffer.readonly = 0
        buffer.format = "B"
        buffer.ndim = 1
        buffer.shape = address(buffer.len)
        buffer.strides = NULL
        buffer.suboffsets = NULL
        buffer.itemsize = 1
        buffer.internal = NULL

    def __len__(self) -> size_t:
        """Return the length of the message in bytes."""
        sz: size_t = zmq_msg_size(address(self.zmq_msg))
        return sz

    @property
    def buffer(self):
        """A memoryview of the message contents."""
        _buffer = self._buffer and self._buffer()
        if _buffer is not None:
            return _buffer
        _buffer = memoryview(self)
        self._buffer = ref(_buffer)
        return _buffer

    @property
    def bytes(self):
        """The message content as a Python bytes object.

        The first time this property is accessed, a copy of the message
        contents is made. From then on that same copy of the message is
        returned.
        """
        if self._bytes is None:
            self._bytes = _copy_zmq_msg_bytes(address(self.zmq_msg))
        return self._bytes
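    # Usage sketch (illustrative, commented out): zero-copy access to a received
    # frame. `sock` is assumed to be a connected pyzmq socket created elsewhere.
    #
    #     frame = sock.recv(copy=False)   # a Frame instead of bytes
    #     view = frame.buffer             # memoryview sharing libzmq's memory
    #     data = frame.bytes              # first access makes (and caches) a copy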

    def get(self, option):
        """
        Get a Frame option or property.

        See the 0MQ API documentation for zmq_msg_get and zmq_msg_gets
        for details on specific options.

        .. versionadded:: libzmq-3.2
        .. versionadded:: 13.0

        .. versionchanged:: 14.3
            add support for zmq_msg_gets (requires libzmq-4.1)
            All message properties are strings.

        .. versionchanged:: 17.0
            Added support for `routing_id` and `group`.
            Only available if draft API is enabled
            with libzmq >= 4.2.
        """
        rc: C.int = 0
        property_c: p_char = NULL

        # zmq_msg_get
        if isinstance(option, int):
            rc = zmq_msg_get(address(self.zmq_msg), option)
            _check_rc(rc)
            return rc

        if option == 'routing_id':
            routing_id: uint32_t = zmq_msg_routing_id(address(self.zmq_msg))
            if routing_id == 0:
                _check_rc(-1)
            return routing_id
        elif option == 'group':
            buf = zmq_msg_group(address(self.zmq_msg))
            if buf == NULL:
                _check_rc(-1)
            return buf.decode('utf8')

        # zmq_msg_gets
        _check_version((4, 1), "get string properties")
        if isinstance(option, str):
            option = option.encode('utf8')

        if not isinstance(option, bytes):
            raise TypeError(f"expected str, got: {option!r}")

        property_c = option

        result: p_char = cast(p_char, zmq_msg_gets(address(self.zmq_msg), property_c))
        if result == NULL:
            _check_rc(-1)
        return result.decode('utf8')

    def set(self, option, value):
        """Set a Frame option.

        See the 0MQ API documentation for zmq_msg_set
        for details on specific options.

        .. versionadded:: libzmq-3.2
        .. versionadded:: 13.0
        .. versionchanged:: 17.0
            Added support for `routing_id` and `group`.
            Only available if draft API is enabled
            with libzmq >= 4.2.
        """
        rc: C.int

        if option == 'routing_id':
            routing_id: uint32_t = value
            rc = zmq_msg_set_routing_id(address(self.zmq_msg), routing_id)
            _check_rc(rc)
            return
        elif option == 'group':
            if isinstance(value, str):
                value = value.encode('utf8')
            rc = zmq_msg_set_group(address(self.zmq_msg), value)
            _check_rc(rc)
            return

        rc = zmq_msg_set(address(self.zmq_msg), option, value)
        _check_rc(rc)
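    # A minimal sketch (comments only): reading frame properties after a receive.
    # zmq.MORE is an integer zmq_msg_get property; "Socket-Type" is a zmq_msg_gets
    # string property (libzmq >= 4.1). `sock` is assumed to exist.
    #
    #     frame = sock.recv(copy=False)
    #     frame.get(zmq.MORE)           # 1 if more frames follow
    #     frame.get("Socket-Type")      # e.g. "DEALER"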


@cclass
class Context:
    """
    Manage the lifecycle of a 0MQ context.

    Parameters
    ----------
    io_threads : int
        The number of IO threads.
    """

    def __init__(self, io_threads: C.int = 1, shadow: size_t = 0):
        self.handle = NULL
        self._pid = 0
        self._shadow = False

        if shadow:
            self.handle = cast(p_void, shadow)
            self._shadow = True
        else:
            self._shadow = False
            self.handle = zmq_ctx_new()

        if self.handle == NULL:
            raise ZMQError()

        rc: C.int = 0
        if not self._shadow:
            rc = zmq_ctx_set(self.handle, ZMQ_IO_THREADS, io_threads)
            _check_rc(rc)

        self.closed = False
        self._pid = getpid()

    @property
    def underlying(self):
        """The address of the underlying libzmq context"""
        return cast(size_t, self.handle)

    @cfunc
    @inline
    def _term(self) -> C.int:
        rc: C.int = 0
        if self.handle != NULL and not self.closed and getpid() == self._pid:
            with nogil:
                rc = zmq_ctx_destroy(self.handle)
        self.handle = NULL
        return rc

    def term(self):
        """
        Close or terminate the context.

        This can be called to close the context by hand. If this is not called,
        the context will automatically be closed when it is garbage collected.
        """
        rc: C.int = self._term()
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            # ignore interrupted term
            # see PEP 475 notes about close & EINTR for why
            pass

        self.closed = True

    def set(self, option: C.int, optval):
        """
        Set a context option.

        See the 0MQ API documentation for zmq_ctx_set
        for details on specific options.

        .. versionadded:: libzmq-3.2
        .. versionadded:: 13.0

        Parameters
        ----------
        option : int
            The option to set.  Available values will depend on your
            version of libzmq.  Examples include::

                zmq.IO_THREADS, zmq.MAX_SOCKETS

        optval : int
            The value of the option to set.
        """
        optval_int_c: C.int
        rc: C.int

        if self.closed:
            raise RuntimeError("Context has been destroyed")

        if not isinstance(optval, int):
            raise TypeError(f'expected int, got: {optval!r}')
        optval_int_c = optval
        rc = zmq_ctx_set(self.handle, option, optval_int_c)
        _check_rc(rc)

    def get(self, option: C.int):
        """
        Get the value of a context option.

        See the 0MQ API documentation for zmq_ctx_get
        for details on specific options.

        .. versionadded:: libzmq-3.2
        .. versionadded:: 13.0

        Parameters
        ----------
        option : int
            The option to get.  Available values will depend on your
            version of libzmq.  Examples include::

                zmq.IO_THREADS, zmq.MAX_SOCKETS

        Returns
        -------
        optval : int
            The value of the option as an integer.
        """
        rc: C.int

        if self.closed:
            raise RuntimeError("Context has been destroyed")

        rc = zmq_ctx_get(self.handle, option)
        _check_rc(rc, error_without_errno=False)
        return rc
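    # Usage sketch (illustrative, commented out): tuning a context before creating
    # sockets.
    #
    #     ctx = zmq.Context(io_threads=2)
    #     ctx.set(zmq.MAX_SOCKETS, 4096)
    #     ctx.get(zmq.IO_THREADS)   # -> 2
    #     ctx.term()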


@cfunc
@inline
def _c_addr(addr) -> p_char:
    if isinstance(addr, str):
        addr = addr.encode('utf-8')
    try:
        c_addr: p_char = addr
    except TypeError:
        raise TypeError(f"Expected addr to be str, got addr={addr!r}")
    return c_addr


@cclass
class Socket:
    """
    A 0MQ socket.

    These objects will generally be constructed via the socket() method of a Context object.

    Note: 0MQ Sockets are *not* threadsafe. **DO NOT** share them across threads.

    Parameters
    ----------
    context : Context
        The 0MQ Context this Socket belongs to.
    socket_type : int
        The socket type, which can be any of the 0MQ socket types:
        REQ, REP, PUB, SUB, PAIR, DEALER, ROUTER, PULL, PUSH, XPUB, XSUB.

    See Also
    --------
    .Context.socket : method for creating a socket bound to a Context.
    """

    def __init__(
        self,
        context=None,
        socket_type: C.int = -1,
        shadow: size_t = 0,
        copy_threshold=None,
    ):
        # pre-init
        self.handle = NULL
        self._pid = 0
        self._shadow = False
        self.context = None

        if copy_threshold is None:
            copy_threshold = zmq.COPY_THRESHOLD
        self.copy_threshold = copy_threshold

        self.handle = NULL
        self.context = context
        if shadow:
            self._shadow = True
            self.handle = cast(p_void, shadow)
        else:
            if context is None:
                raise TypeError("context must be specified")
            if socket_type < 0:
                raise TypeError("socket_type must be specified")
            self._shadow = False
            self.handle = zmq_socket(self.context.handle, socket_type)
        if self.handle == NULL:
            raise ZMQError()
        self._closed = False
        self._pid = getpid()

    @property
    def underlying(self):
        """The address of the underlying libzmq socket"""
        return cast(size_t, self.handle)

    @property
    def closed(self):
        """Whether the socket is closed"""
        return _check_closed_deep(self)

    def close(self, linger: int | None = None):
        """
        Close the socket.

        If linger is specified, LINGER sockopt will be set prior to closing.

        This can be called to close the socket by hand. If this is not
        called, the socket will automatically be closed when it is
        garbage collected.
        """
        rc: C.int = 0
        linger_c: C.int
        setlinger: bint = False

        if linger is not None:
            linger_c = linger
            setlinger = True

        if self.handle != NULL and not self._closed and getpid() == self._pid:
            if setlinger:
                zmq_setsockopt(self.handle, ZMQ_LINGER, address(linger_c), sizeof(int))
            rc = zmq_close(self.handle)
            if rc < 0 and _zmq_errno() != ENOTSOCK:
                # ignore ENOTSOCK (closed by Context)
                _check_rc(rc)
            self._closed = True
            self.handle = NULL

    def set(self, option: C.int, optval):
        """
        Set socket options.

        See the 0MQ API documentation for details on specific options.

        Parameters
        ----------
        option : int
            The option to set.  Available values will depend on your
            version of libzmq.  Examples include::

                zmq.SUBSCRIBE, UNSUBSCRIBE, IDENTITY, HWM, LINGER, FD

        optval : int or bytes
            The value of the option to set.

        Notes
        -----
        .. warning::

            All options other than zmq.SUBSCRIBE, zmq.UNSUBSCRIBE and
            zmq.LINGER only take effect for subsequent socket bind/connects.
        """
        optval_int64_c: int64_t
        optval_int_c: C.int
        optval_c: p_char
        sz: Py_ssize_t

        _check_closed(self)
        if isinstance(optval, str):
            raise TypeError("unicode not allowed, use setsockopt_string")

        try:
            sopt = SocketOption(option)
        except ValueError:
            # unrecognized option,
            # assume from the future,
            # let EINVAL raise
            opt_type = _OptType.int
        else:
            opt_type = sopt._opt_type

        if opt_type == _OptType.bytes:
            if not isinstance(optval, bytes):
                raise TypeError(f'expected bytes, got: {optval!r}')
            optval_c = PyBytes_AsString(optval)
            sz = PyBytes_Size(optval)
            _setsockopt(self.handle, option, optval_c, sz)
        elif opt_type == _OptType.int64:
            if not isinstance(optval, int):
                raise TypeError(f'expected int, got: {optval!r}')
            optval_int64_c = optval
            _setsockopt(self.handle, option, address(optval_int64_c), sizeof(int64_t))
        else:
            # default is to assume int, which is what most new sockopts will be
            # this lets pyzmq work with newer libzmq which may add constants
            # pyzmq has not yet added, rather than artificially raising. Invalid
            # sockopts will still raise just the same, but it will be libzmq doing
            # the raising.
            if not isinstance(optval, int):
                raise TypeError(f'expected int, got: {optval!r}')
            optval_int_c = optval
            _setsockopt(self.handle, option, address(optval_int_c), sizeof(int))

    def get(self, option: C.int):
        """
        Get the value of a socket option.

        See the 0MQ API documentation for details on specific options.

        Parameters
        ----------
        option : int
            The option to get.  Available values will depend on your
            version of libzmq.  Examples include::

                zmq.IDENTITY, HWM, LINGER, FD, EVENTS

        Returns
        -------
        optval : int or bytes
            The value of the option as a bytestring or int.
        """
        optval_int64_c = declare(int64_t)
        optval_int_c = declare(C.int)
        optval_fd_c = declare(fd_t)
        identity_str_c = declare(char[255])
        sz: size_t

        _check_closed(self)

        try:
            sopt = SocketOption(option)
        except ValueError:
            # unrecognized option,
            # assume from the future,
            # let EINVAL raise
            opt_type = _OptType.int
        else:
            opt_type = sopt._opt_type

        if opt_type == _OptType.bytes:
            sz = 255
            _getsockopt(self.handle, option, cast(p_void, identity_str_c), address(sz))
            # strip null-terminated strings *except* identity
            if (
                option != ZMQ_IDENTITY
                and sz > 0
                and (cast(p_char, identity_str_c))[sz - 1] == b'\0'
            ):
                sz -= 1
            result = PyBytes_FromStringAndSize(cast(p_char, identity_str_c), sz)
        elif opt_type == _OptType.int64:
            sz = sizeof(int64_t)
            _getsockopt(
                self.handle, option, cast(p_void, address(optval_int64_c)), address(sz)
            )
            result = optval_int64_c
        elif opt_type == _OptType.fd:
            sz = sizeof(fd_t)
            _getsockopt(
                self.handle, option, cast(p_void, address(optval_fd_c)), address(sz)
            )
            result = optval_fd_c
        else:
            # default is to assume int, which is what most new sockopts will be
            # this lets pyzmq work with newer libzmq which may add constants
            # pyzmq has not yet added, rather than artificially raising. Invalid
            # sockopts will still raise just the same, but it will be libzmq doing
            # the raising.
            sz = sizeof(int)
            _getsockopt(
                self.handle, option, cast(p_void, address(optval_int_c)), address(sz)
            )
            result = optval_int_c

        return result
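    # Usage sketch (comments only): bytes options take bytes, everything else is
    # treated as an int (see the set()/get() dispatch above). `ctx` is assumed to
    # exist.
    #
    #     sub = ctx.socket(zmq.SUB)
    #     sub.set(zmq.SUBSCRIBE, b"topic")   # bytes option
    #     sub.set(zmq.LINGER, 0)             # int option
    #     sub.get(zmq.LINGER)                # -> 0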

    def bind(self, addr: str | bytes):
        """
        Bind the socket to an address.

        This causes the socket to listen on a network port. Sockets on the
        other side of this connection will use ``Socket.connect(addr)`` to
        connect to this socket.

        Parameters
        ----------
        addr : str
            The address string. This has the form 'protocol://interface:port',
            for example 'tcp://127.0.0.1:5555'. Protocols supported include
            tcp, udp, pgm, epgm, inproc and ipc. If the address is unicode, it is
            encoded to utf-8 first.
        """
        c_addr: p_char = _c_addr(addr)
        _check_closed(self)
        rc: C.int = zmq_bind(self.handle, c_addr)
        if rc != 0:
            _errno: C.int = _zmq_errno()
            _ipc_max: C.int = get_ipc_path_max_len()
            if _ipc_max and _errno == ENAMETOOLONG:
                path = addr.split('://', 1)[-1]
                msg = (
                    f'ipc path "{path}" is longer than {_ipc_max} '
                    'characters (sizeof(sockaddr_un.sun_path)). '
                    'zmq.IPC_PATH_MAX_LEN constant can be used '
                    'to check addr length (if it is defined).'
                )
                raise ZMQError(msg=msg)
            elif _errno == ENOENT:
                path = addr.split('://', 1)[-1]
                msg = f'No such file or directory for ipc path "{path}".'
                raise ZMQError(msg=msg)
        while True:
            try:
                _check_rc(rc)
            except InterruptedSystemCall:
                rc = zmq_bind(self.handle, c_addr)
                continue
            else:
                break

    def connect(self, addr: str | bytes) -> None:
        """
        Connect to a remote 0MQ socket.

        Parameters
        ----------
        addr : str
            The address string. This has the form 'protocol://interface:port',
            for example 'tcp://127.0.0.1:5555'. Protocols supported are
            tcp, udp, pgm, inproc and ipc. If the address is unicode, it is
            encoded to utf-8 first.
        """
        rc: C.int
        c_addr: p_char = _c_addr(addr)
        _check_closed(self)

        while True:
            try:
                rc = zmq_connect(self.handle, c_addr)
                _check_rc(rc)
            except InterruptedSystemCall:
                # retry syscall
                continue
            else:
                break
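    # Usage sketch (comments only): a minimal bind/connect pair. `ctx` is assumed
    # to exist; the endpoint is an arbitrary example.
    #
    #     rep = ctx.socket(zmq.REP)
    #     rep.bind("tcp://127.0.0.1:5555")
    #     req = ctx.socket(zmq.REQ)
    #     req.connect("tcp://127.0.0.1:5555")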

    def unbind(self, addr: str | bytes):
        """
        Unbind from an address (undoes a call to bind).

        .. versionadded:: libzmq-3.2
        .. versionadded:: 13.0

        Parameters
        ----------
        addr : str
            The address string. This has the form 'protocol://interface:port',
            for example 'tcp://127.0.0.1:5555'. Protocols supported are
            tcp, udp, pgm, inproc and ipc. If the address is unicode, it is
            encoded to utf-8 first.
        """
        c_addr: p_char = _c_addr(addr)
        _check_closed(self)
        rc: C.int = zmq_unbind(self.handle, c_addr)
        if rc != 0:
            raise ZMQError()

    def disconnect(self, addr: str | bytes):
        """
        Disconnect from a remote 0MQ socket (undoes a call to connect).

        .. versionadded:: libzmq-3.2
        .. versionadded:: 13.0

        Parameters
        ----------
        addr : str
            The address string. This has the form 'protocol://interface:port',
            for example 'tcp://127.0.0.1:5555'. Protocols supported are
            tcp, udp, pgm, inproc and ipc. If the address is unicode, it is
            encoded to utf-8 first.
        """
        c_addr: p_char = _c_addr(addr)
        _check_closed(self)

        rc: C.int = zmq_disconnect(self.handle, c_addr)
        if rc != 0:
            raise ZMQError()

    def monitor(self, addr: str | bytes | None, events: C.int = ZMQ_EVENT_ALL):
        """
        Start publishing socket events on inproc.
        See libzmq docs for zmq_monitor for details.

        While this function is available from libzmq 3.2,
        pyzmq cannot parse monitor messages from libzmq prior to 4.0.

        .. versionadded: libzmq-3.2
        .. versionadded: 14.0

        Parameters
        ----------
        addr : str | None
            The inproc url used for monitoring. Passing None as
            the addr will cause an existing socket monitor to be
            deregistered.
        events : int
            default: zmq.EVENT_ALL
            The zmq event bitmask for which events will be sent to the monitor.
        """
        c_addr: p_char = NULL
        if addr is not None:
            c_addr = _c_addr(addr)
        _check_closed(self)

        _check_rc(zmq_socket_monitor(self.handle, c_addr, events))
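    # Illustrative sketch (comments only): consuming monitor events by hand with a
    # PAIR socket. The higher-level zmq.Socket also offers get_monitor_socket() and
    # zmq.utils.monitor.recv_monitor_message() for this. `ctx` is assumed to exist.
    #
    #     sock.monitor("inproc://events", zmq.EVENT_ALL)
    #     mon = ctx.socket(zmq.PAIR)
    #     mon.connect("inproc://events")
    #     event_frames = mon.recv_multipart()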

    def join(self, group: str | bytes):
        """
        Join a RADIO-DISH group

        Only for DISH sockets.

        libzmq and pyzmq must have been built with ZMQ_BUILD_DRAFT_API

        .. versionadded:: 17
        """
        _check_version((4, 2), "RADIO-DISH")
        if not zmq.has('draft'):
            raise RuntimeError("libzmq must be built with draft support")
        if isinstance(group, str):
            group = group.encode('utf8')
        c_group: bytes = group
        rc: C.int = zmq_join(self.handle, c_group)
        _check_rc(rc)

    def leave(self, group):
        """
        Leave a RADIO-DISH group

        Only for DISH sockets.

        libzmq and pyzmq must have been built with ZMQ_BUILD_DRAFT_API

        .. versionadded:: 17
        """
        _check_version((4, 2), "RADIO-DISH")
        if not zmq.has('draft'):
            raise RuntimeError("libzmq must be built with draft support")
        rc: C.int = zmq_leave(self.handle, group)
        _check_rc(rc)
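    # Illustrative sketch (comments only): RADIO/DISH group messaging, which needs
    # libzmq and pyzmq built with draft support (zmq.has('draft') is True).
    # `ctx` is assumed to exist; the udp endpoint is an arbitrary example.
    #
    #     dish = ctx.socket(zmq.DISH)
    #     dish.bind("udp://127.0.0.1:5556")
    #     dish.join("weather")
    #     radio = ctx.socket(zmq.RADIO)
    #     radio.connect("udp://127.0.0.1:5556")
    #     frame = zmq.Frame(b"rain")
    #     frame.set("group", "weather")
    #     radio.send(frame, copy=False)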

    def send(self, data, flags=0, copy: bint = True, track: bint = False):
        """
        Send a single zmq message frame on this socket.

        This queues the message to be sent by the IO thread at a later time.

        With flags=NOBLOCK, this raises :class:`ZMQError` if the queue is full;
        otherwise, this waits until space is available.
        See :class:`Poller` for more general non-blocking I/O.

        Parameters
        ----------
        data : bytes, Frame, memoryview
            The content of the message. This can be any object that provides
            the Python buffer API (`memoryview(data)` can be called).
        flags : int
            0, NOBLOCK, SNDMORE, or NOBLOCK|SNDMORE.
        copy : bool
            Should the message be sent in a copying or non-copying manner.
        track : bool
            Should the message be tracked for notification that ZMQ has
            finished with it? (ignored if copy=True)

        Returns
        -------
        None : if `copy` or not track
            None if message was sent, raises an exception otherwise.
        MessageTracker : if track and not copy
            a MessageTracker object, whose `done` property will
            be False until the send is completed.

        Raises
        ------
        TypeError
            If a unicode object is passed
        ValueError
            If `track=True`, but an untracked Frame is passed.
        ZMQError
            for any of the reasons zmq_msg_send might fail (including
            if NOBLOCK is set and the outgoing queue is full).

        """
        _check_closed(self)

        if isinstance(data, str):
            raise TypeError("unicode not allowed, use send_string")

        if copy and not isinstance(data, Frame):
            return _send_copy(self.handle, data, flags)
        else:
            if isinstance(data, Frame):
                if track and not data.tracker:
                    raise ValueError('Not a tracked message')
                msg = data
            else:
                if self.copy_threshold:
                    buf = memoryview(data)
                    nbytes: size_t = buf.nbytes
                    copy_threshold: size_t = self.copy_threshold
                    # always copy messages smaller than copy_threshold
                    if nbytes < copy_threshold:
                        _send_copy(self.handle, buf, flags)
                        return zmq._FINISHED_TRACKER
                msg = Frame(data, track=track, copy_threshold=self.copy_threshold)
            return _send_frame(self.handle, msg, flags)

    def recv(self, flags=0, copy: bint = True, track: bint = False):
        """
        Receive a message.

        With flags=NOBLOCK, this raises :class:`ZMQError` if no messages have
        arrived; otherwise, this waits until a message arrives.
        See :class:`Poller` for more general non-blocking I/O.

        Parameters
        ----------
        flags : int
            0 or NOBLOCK.
        copy : bool
            Should the message be received in a copying or non-copying manner?
            If False a Frame object is returned, if True a bytes copy of the
            message is returned.
        track : bool
            Should the message be tracked for notification that ZMQ has
            finished with it? (ignored if copy=True)

        Returns
        -------
        msg : bytes or Frame
            The received message frame.  If `copy` is False, then it will be a Frame,
            otherwise it will be bytes.

        Raises
        ------
        ZMQError
            for any of the reasons zmq_msg_recv might fail (including if
            NOBLOCK is set and no new messages have arrived).
        """
        _check_closed(self)

        if copy:
            return _recv_copy(self.handle, flags)
        else:
            frame = _recv_frame(self.handle, flags, track)
            more: bint = False
            sz: size_t = sizeof(bint)
            _getsockopt(
                self.handle, ZMQ_RCVMORE, cast(p_void, address(more)), address(sz)
            )
            frame.more = more
            return frame
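    # Usage sketch (comments only): copying vs. non-copying transfers. `push` and
    # `pull` are assumed to be connected PUSH/PULL sockets, `big_buffer` a large
    # bytes-like object.
    #
    #     push.send(b"small payload")                       # copying send
    #     tracker = push.send(big_buffer, copy=False, track=True)
    #     tracker.wait()                                    # block until libzmq is done
    #     frame = pull.recv(copy=False)                     # Frame, not bytes
    #     frame.more                                        # True if more frames follow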

    def recv_into(self, buffer, /, *, nbytes=0, flags=0) -> C.int:
        """
        Receive up to nbytes bytes from the socket,
        storing the data into a buffer rather than allocating a new Frame.

        The next message frame can be discarded by receiving into an empty buffer::

            sock.recv_into(bytearray())

        .. versionadded:: 26.4

        Parameters
        ----------
        buffer : memoryview
            Any object providing the buffer interface (i.e. `memoryview(buffer)` works),
            where the memoryview is contiguous and writable.
        nbytes: int, default=0
            The maximum number of bytes to receive.
            If nbytes is not specified (or 0), receive up to the size available in the given buffer.
            If the next frame is larger than this, the frame will be truncated and message content discarded.
        flags: int, default=0
            See `socket.recv`

        Returns
        -------
        bytes_received: int
            Returns the number of bytes received.
            This is always the size of the received frame.
            If the returned `bytes_received` is larger than `nbytes` (or size of `buffer` if `nbytes=0`),
            the message has been truncated and the rest of the frame discarded.
            Truncated data cannot be recovered.

        Raises
        ------
        ZMQError
            for any of the reasons `zmq_recv` might fail.
        BufferError
            for invalid buffers, such as readonly or not contiguous.
        """
        c_flags: C.int = flags
        _check_closed(self)
        if nbytes < 0:
            raise ValueError(f"{nbytes=} must be non-negative")
        c_nbytes: size_t = nbytes
        view = memoryview(buffer)
        c_data = declare(pointer(C.void))
        view_bytes: C.size_t = _asbuffer(view, address(c_data), True)
        if nbytes == 0:
            c_nbytes = view_bytes
        elif c_nbytes > view_bytes:
            raise ValueError(f"{nbytes=} too big for memoryview of {view_bytes}B")

        # call zmq_recv, with retries
        while True:
            with nogil:
                rc: C.int = zmq_recv(self.handle, c_data, c_nbytes, c_flags)
            try:
                _check_rc(rc)
            except InterruptedSystemCall:
                continue
            else:
                return rc
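    # Usage sketch (comments only): receiving directly into a preallocated buffer.
    # `sock` is assumed to be a connected socket.
    #
    #     buf = bytearray(4096)
    #     n = sock.recv_into(buf)                   # up to len(buf) bytes
    #     payload = bytes(buf[:min(n, len(buf))])   # n > len(buf) means truncation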


# inline socket methods


@inline
@cfunc
def _check_closed(s: Socket):
    """raise ENOTSUP if socket is closed

    Does not do a deep check
    """
    if s._closed:
        raise ZMQError(ENOTSOCK)


@inline
@cfunc
def _check_closed_deep(s: Socket) -> bint:
    """thorough check of whether the socket has been closed,
    even if by another entity (e.g. ctx.destroy).

    Only used by the `closed` property.

    returns True if closed, False otherwise
    """
    rc: C.int
    errno: C.int
    stype = declare(C.int)
    sz: size_t = sizeof(int)

    if s._closed:
        return True
    else:
        rc = zmq_getsockopt(
            s.handle, ZMQ_TYPE, cast(p_void, address(stype)), address(sz)
        )
        if rc < 0:
            errno = _zmq_errno()
            if errno == ENOTSOCK:
                s._closed = True
                return True
            elif errno == ZMQ_ETERM:
                # don't raise ETERM when checking if we're closed
                return False
        else:
            _check_rc(rc)
    return False


@cfunc
@inline
def _recv_frame(handle: p_void, flags: C.int = 0, track: bint = False) -> Frame:
    """Receive a message in a non-copying manner and return a Frame."""
    rc: C.int
    msg = zmq.Frame(track=track)
    cmsg: Frame = msg

    while True:
        with nogil:
            rc = zmq_msg_recv(address(cmsg.zmq_msg), handle, flags)
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            continue
        else:
            break
    return msg


@cfunc
@inline
def _recv_copy(handle: p_void, flags: C.int = 0):
    """Receive a message and return a copy"""
    zmq_msg = declare(zmq_msg_t)
    zmq_msg_p: pointer(zmq_msg_t) = address(zmq_msg)
    rc: C.int = zmq_msg_init(zmq_msg_p)
    _check_rc(rc)
    while True:
        with nogil:
            rc = zmq_msg_recv(zmq_msg_p, handle, flags)
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            continue
        except Exception:
            zmq_msg_close(zmq_msg_p)  # ensure msg is closed on failure
            raise
        else:
            break

    msg_bytes = _copy_zmq_msg_bytes(zmq_msg_p)
    zmq_msg_close(zmq_msg_p)
    return msg_bytes


@cfunc
@inline
def _send_frame(handle: p_void, msg: Frame, flags: C.int = 0):
    """Send a Frame on this socket in a non-copy manner."""
    rc: C.int
    msg_copy: Frame

    # Always copy so the original message isn't garbage collected.
    # This doesn't do a real copy, just a reference.
    msg_copy = msg.fast_copy()

    while True:
        with nogil:
            rc = zmq_msg_send(address(msg_copy.zmq_msg), handle, flags)
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            continue
        else:
            break

    return msg.tracker


@cfunc
@inline
def _send_copy(handle: p_void, buf, flags: C.int = 0):
    """Send a message on this socket by copying its content."""
    rc: C.int
    msg = declare(zmq_msg_t)
    c_bytes = declare(p_void)

    # copy to c array:
    c_bytes_len = _asbuffer(buf, address(c_bytes))

    # Copy the msg before sending. This avoids any complications with
    # the GIL, etc.
    # If zmq_msg_init_* fails we must not call zmq_msg_close (Bus Error)
    rc = zmq_msg_init_size(address(msg), c_bytes_len)
    _check_rc(rc)

    while True:
        with nogil:
            memcpy(zmq_msg_data(address(msg)), c_bytes, zmq_msg_size(address(msg)))
            rc = zmq_msg_send(address(msg), handle, flags)
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            continue
        except Exception:
            zmq_msg_close(address(msg))  # close the unused msg
            raise  # raise original exception
        else:
            rc = zmq_msg_close(address(msg))
            _check_rc(rc)
            break


@cfunc
@inline
def _getsockopt(handle: p_void, option: C.int, optval: p_void, sz: pointer(size_t)):
    """getsockopt, retrying interrupted calls

    checks rc, raising ZMQError on failure.
    """
    rc: C.int = 0
    while True:
        rc = zmq_getsockopt(handle, option, optval, sz)
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            continue
        else:
            break


@cfunc
@inline
def _setsockopt(handle: p_void, option: C.int, optval: p_void, sz: size_t):
    """setsockopt, retrying interrupted calls

    checks rc, raising ZMQError on failure.
    """
    rc: C.int = 0
    while True:
        rc = zmq_setsockopt(handle, option, optval, sz)
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            continue
        else:
            break


# General utility functions


def zmq_errno() -> C.int:
    """Return the integer errno of the most recent zmq error."""
    return _zmq_errno()


def strerror(errno: C.int) -> str:
    """
    Return the error string given the error number.
    """
    str_e: bytes = zmq_strerror(errno)
    return str_e.decode("utf8", "replace")


def zmq_version_info() -> tuple[int, int, int]:
    """Return the version of ZeroMQ itself as a 3-tuple of ints."""
    major: C.int = 0
    minor: C.int = 0
    patch: C.int = 0
    _zmq_version(address(major), address(minor), address(patch))
    return (major, minor, patch)


def has(capability: str) -> bool:
    """Check for zmq capability by name (e.g. 'ipc', 'curve')

    .. versionadded:: libzmq-4.1
    .. versionadded:: 14.1
    """
    _check_version((4, 1), 'zmq.has')
    ccap: bytes = capability.encode('utf8')
    return bool(zmq_has(ccap))
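# Usage sketch (comments only): feature-gating optional transports/security.
#
#     if zmq.has('curve'):
#         ...  # configure CURVE encryption
#     if zmq.has('ipc'):
#         ...  # prefer ipc:// endpoints on this platform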


def curve_keypair() -> tuple[bytes, bytes]:
    """generate a Z85 key pair for use with zmq.CURVE security

    Requires libzmq (≥ 4.0) to have been built with CURVE support.

    .. versionadded:: libzmq-4.0
    .. versionadded:: 14.0

    Returns
    -------
    public: bytes
        The public key as 40 byte z85-encoded bytestring.
    private: bytes
        The private key as 40 byte z85-encoded bytestring.
    """
    rc: C.int
    public_key = declare(char[64])
    secret_key = declare(char[64])
    _check_version((4, 0), "curve_keypair")
    # see huge comment in libzmq/src/random.cpp
    # about threadsafety of random initialization
    rc = zmq_curve_keypair(public_key, secret_key)
    _check_rc(rc)
    return public_key, secret_key


def curve_public(secret_key) -> bytes:
    """Compute the public key corresponding to a secret key for use
    with zmq.CURVE security

    Requires libzmq (≥ 4.2) to have been built with CURVE support.

    Parameters
    ----------
    secret_key
        The secret key as a 40 byte z85-encoded bytestring

    Returns
    -------
    bytes
        The public key as a 40 byte z85-encoded bytestring
    """
    if isinstance(secret_key, str):
        secret_key = secret_key.encode('utf8')
    if not len(secret_key) == 40:
        raise ValueError('secret key must be a 40 byte z85 encoded string')

    rc: C.int
    public_key = declare(char[64])
    c_secret_key: pointer(char) = secret_key
    _check_version((4, 2), "curve_public")
    # see huge comment in libzmq/src/random.cpp
    # about threadsafety of random initialization
    rc = zmq_curve_public(public_key, c_secret_key)
    _check_rc(rc)
    return public_key[:40]


# polling
def zmq_poll(sockets, timeout: C.int = -1):
    """zmq_poll(sockets, timeout=-1)

    Poll a set of 0MQ sockets, native file descriptors, or objects with a ``fileno()`` method.

    Parameters
    ----------
    sockets : list of tuples of (socket, flags)
        Each element of this list is a two-tuple containing a socket
        and a flags. The socket may be a 0MQ socket or any object with
        a ``fileno()`` method. The flags can be zmq.POLLIN (for detecting
        incoming messages), zmq.POLLOUT (for detecting that send is OK),
        or zmq.POLLIN|zmq.POLLOUT for detecting both.
    timeout : int
        The number of milliseconds to poll for. Negative means no timeout.
    """
    rc: C.int
    i: C.int
    fileno: fd_t
    events: C.int
    pollitems: pointer(zmq_pollitem_t) = NULL
    nsockets: C.int = len(sockets)

    if nsockets == 0:
        return []

    pollitems = cast(pointer(zmq_pollitem_t), malloc(nsockets * sizeof(zmq_pollitem_t)))
    if pollitems == NULL:
        raise MemoryError("Could not allocate poll items")

    for i in range(nsockets):
        s, events = sockets[i]
        if isinstance(s, Socket):
            pollitems[i].socket = cast(Socket, s).handle
            pollitems[i].fd = 0
            pollitems[i].events = events
            pollitems[i].revents = 0
        elif isinstance(s, int):
            fileno = s
            pollitems[i].socket = NULL
            pollitems[i].fd = fileno
            pollitems[i].events = events
            pollitems[i].revents = 0
        elif hasattr(s, 'fileno'):
            try:
                fileno = int(s.fileno())
            except Exception:
                free(pollitems)
                raise ValueError('fileno() must return a valid integer fd')
            else:
                pollitems[i].socket = NULL
                pollitems[i].fd = fileno
                pollitems[i].events = events
                pollitems[i].revents = 0
        else:
            free(pollitems)
            raise TypeError(
                "Socket must be a 0MQ socket, an integer fd or have "
                f"a fileno() method: {s!r}"
            )

    ms_passed: C.int = 0
    tic: C.double
    try:
        while True:
            start: C.double = monotonic()
            with nogil:
                rc = zmq_poll_c(pollitems, nsockets, timeout)
            try:
                _check_rc(rc)
            except InterruptedSystemCall:
                if timeout > 0:
                    tic = monotonic()
                    ms_passed = int(1000 * (tic - start))
                    if ms_passed < 0:
                        # don't allow negative ms_passed,
                        # which can happen on old Python versions without time.monotonic.
                        warnings.warn(
                            f"Negative elapsed time for interrupted poll: {ms_passed}."
                            "  Did the clock change?",
                            RuntimeWarning,
                        )
                        # treat this case the same as no time passing,
                        # since it should be rare and not happen twice in a row.
                        ms_passed = 0
                    timeout = max(0, timeout - ms_passed)
                continue
            else:
                break
    except Exception:
        free(pollitems)
        raise

    results = []
    for i in range(nsockets):
        revents = pollitems[i].revents
        # for compatibility with select.poll:
        # - only return sockets with non-zero status
        # - return the fd for plain sockets
        if revents > 0:
            if pollitems[i].socket != NULL:
                s = sockets[i][0]
            else:
                s = pollitems[i].fd
            results.append((s, revents))

    free(pollitems)
    return results
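# Usage sketch (comments only): polling two sockets with the zmq_poll function
# defined above; the higher-level zmq.Poller wraps this with a dict-style
# interface. `a` and `b` are assumed to be existing sockets.
#
#     ready = zmq_poll([(a, zmq.POLLIN), (b, zmq.POLLIN | zmq.POLLOUT)], timeout=1000)
#     for sock, events in ready:
#         if events & zmq.POLLIN:
#             msg = sock.recv()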


def proxy(frontend: Socket, backend: Socket, capture: Socket = None):
    """
    Start a zeromq proxy (replacement for device).

    .. versionadded:: libzmq-3.2
    .. versionadded:: 13.0

    Parameters
    ----------
    frontend : Socket
        The Socket instance for the incoming traffic.
    backend : Socket
        The Socket instance for the outbound traffic.
    capture : Socket (optional)
        The Socket instance for capturing traffic.
    """
    rc: C.int = 0
    capture_handle: p_void
    if isinstance(capture, Socket):
        capture_handle = capture.handle
    else:
        capture_handle = NULL
    while True:
        with nogil:
            rc = zmq_proxy(frontend.handle, backend.handle, capture_handle)
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            continue
        else:
            break
    return rc
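# Illustrative sketch (comments only): a simple XSUB/XPUB forwarder built on the
# proxy() function above. The call blocks until the context is terminated.
# `ctx` is assumed to exist; the endpoints are arbitrary examples.
#
#     frontend = ctx.socket(zmq.XSUB)
#     frontend.bind("tcp://*:5559")
#     backend = ctx.socket(zmq.XPUB)
#     backend.bind("tcp://*:5560")
#     proxy(frontend, backend)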


def proxy_steerable(
    frontend: Socket,
    backend: Socket,
    capture: Socket = None,
    control: Socket = None,
):
    """
    Start a zeromq proxy with control flow.

    .. versionadded:: libzmq-4.1
    .. versionadded:: 18.0

    Parameters
    ----------
    frontend : Socket
        The Socket instance for the incoming traffic.
    backend : Socket
        The Socket instance for the outbound traffic.
    capture : Socket (optional)
        The Socket instance for capturing traffic.
    control : Socket (optional)
        The Socket instance for control flow.
    """
    rc: C.int = 0
    capture_handle: p_void
    if isinstance(capture, Socket):
        capture_handle = capture.handle
    else:
        capture_handle = NULL
    if isinstance(control, Socket):
        control_handle = control.handle
    else:
        control_handle = NULL
    while True:
        with nogil:
            rc = zmq_proxy_steerable(
                frontend.handle, backend.handle, capture_handle, control_handle
            )
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            continue
        else:
            break
    return rc


# monitored queue - like proxy (predates libzmq proxy)
# but supports ROUTER-ROUTER devices
@cfunc
@inline
@nogil
def _mq_relay(
    in_socket: p_void,
    out_socket: p_void,
    side_socket: p_void,
    msg: zmq_msg_t,
    side_msg: zmq_msg_t,
    id_msg: zmq_msg_t,
    swap_ids: bint,
) -> C.int:
    rc: C.int
    flags: C.int
    flagsz = declare(size_t)
    more = declare(int)
    flagsz = sizeof(int)

    if swap_ids:  # both router, must send second identity first
        # recv two ids into msg, id_msg
        rc = zmq_msg_recv(address(msg), in_socket, 0)
        if rc < 0:
            return rc

        rc = zmq_msg_recv(address(id_msg), in_socket, 0)
        if rc < 0:
            return rc

        # send second id (id_msg) first
        # !!!! always send a copy before the original !!!!
        rc = zmq_msg_copy(address(side_msg), address(id_msg))
        if rc < 0:
            return rc
        rc = zmq_msg_send(address(side_msg), out_socket, ZMQ_SNDMORE)
        if rc < 0:
            return rc
        rc = zmq_msg_send(address(id_msg), side_socket, ZMQ_SNDMORE)
        if rc < 0:
            return rc
        # send first id (msg) second
        rc = zmq_msg_copy(address(side_msg), address(msg))
        if rc < 0:
            return rc
        rc = zmq_msg_send(address(side_msg), out_socket, ZMQ_SNDMORE)
        if rc < 0:
            return rc
        rc = zmq_msg_send(address(msg), side_socket, ZMQ_SNDMORE)
        if rc < 0:
            return rc
    while True:
        rc = zmq_msg_recv(address(msg), in_socket, 0)
        if rc < 0:
            return rc
        # assert (rc == 0)
        rc = zmq_getsockopt(in_socket, ZMQ_RCVMORE, address(more), address(flagsz))
        if rc < 0:
            return rc
        flags = 0
        if more:
            flags |= ZMQ_SNDMORE

        rc = zmq_msg_copy(address(side_msg), address(msg))
        if rc < 0:
            return rc
        if flags:
            rc = zmq_msg_send(address(side_msg), out_socket, flags)
            if rc < 0:
                return rc
            # only SNDMORE for side-socket
            rc = zmq_msg_send(address(msg), side_socket, ZMQ_SNDMORE)
            if rc < 0:
                return rc
        else:
            rc = zmq_msg_send(address(side_msg), out_socket, 0)
            if rc < 0:
                return rc
            rc = zmq_msg_send(address(msg), side_socket, 0)
            if rc < 0:
                return rc
            break
    return rc


@cfunc
@inline
@nogil
def _mq_inline(
    in_socket: p_void,
    out_socket: p_void,
    side_socket: p_void,
    in_msg_ptr: pointer(zmq_msg_t),
    out_msg_ptr: pointer(zmq_msg_t),
    swap_ids: bint,
) -> C.int:
    """
    inner C function for monitored_queue
    """

    msg: zmq_msg_t = declare(zmq_msg_t)
    rc: C.int = zmq_msg_init(address(msg))
    if rc < 0:
        return rc
    id_msg = declare(zmq_msg_t)
    rc = zmq_msg_init(address(id_msg))
    if rc < 0:
        return rc
    side_msg = declare(zmq_msg_t)
    rc = zmq_msg_init(address(side_msg))
    if rc < 0:
        return rc

    items = declare(zmq_pollitem_t[2])
    items[0].socket = in_socket
    items[0].events = ZMQ_POLLIN
    items[0].fd = items[0].revents = 0
    items[1].socket = out_socket
    items[1].events = ZMQ_POLLIN
    items[1].fd = items[1].revents = 0

    while True:
        # wait for the next message to process
        rc = zmq_poll_c(address(items[0]), 2, -1)
        if rc < 0:
            return rc
        if items[0].revents & ZMQ_POLLIN:
            # send in_prefix to side socket
            rc = zmq_msg_copy(address(side_msg), in_msg_ptr)
            if rc < 0:
                return rc
            rc = zmq_msg_send(address(side_msg), side_socket, ZMQ_SNDMORE)
            if rc < 0:
                return rc
            # relay the rest of the message
            rc = _mq_relay(
                in_socket, out_socket, side_socket, msg, side_msg, id_msg, swap_ids
            )
            if rc < 0:
                return rc
        if items[1].revents & ZMQ_POLLIN:
            # send out_prefix to side socket
            rc = zmq_msg_copy(address(side_msg), out_msg_ptr)
            if rc < 0:
                return rc
            rc = zmq_msg_send(address(side_msg), side_socket, ZMQ_SNDMORE)
            if rc < 0:
                return rc
            # relay the rest of the message
            rc = _mq_relay(
                out_socket, in_socket, side_socket, msg, side_msg, id_msg, swap_ids
            )
            if rc < 0:
                return rc
    return rc


def monitored_queue(
    in_socket: Socket,
    out_socket: Socket,
    mon_socket: Socket,
    in_prefix: bytes = b'in',
    out_prefix: bytes = b'out',
):
    """
    Start a monitored queue device.

    A monitored queue is very similar to the zmq.proxy device (monitored queue came first).

    Differences from zmq.proxy:

    - monitored_queue supports both in and out being ROUTER sockets
      (via swapping IDENTITY prefixes).
    - monitor messages are prefixed, making in and out messages distinguishable.

    Parameters
    ----------
    in_socket : zmq.Socket
        One of the sockets to the Queue. Its messages will be prefixed with
        'in'.
    out_socket : zmq.Socket
        One of the sockets to the Queue. Its messages will be prefixed with
        'out'. The only difference between in/out socket is this prefix.
    mon_socket : zmq.Socket
        This socket sends out every message received by each of the others
        with an in/out prefix specifying which one it was.
    in_prefix : bytes
        Prefix added to broadcast messages from in_socket.
    out_prefix : bytes
        Prefix added to broadcast messages from out_socket.
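
    Examples
    --------
    A minimal sketch (endpoints are illustrative), relaying between a ROUTER
    and a DEALER while broadcasting every relayed message on a PUB socket::

        import zmq
        from zmq.devices import monitored_queue

        ctx = zmq.Context.instance()
        ins = ctx.socket(zmq.ROUTER)
        ins.bind("tcp://127.0.0.1:5555")
        outs = ctx.socket(zmq.DEALER)
        outs.bind("tcp://127.0.0.1:5556")
        mons = ctx.socket(zmq.PUB)
        mons.bind("tcp://127.0.0.1:5557")

        # blocks, publishing each relayed message on mons with a
        # b'in' or b'out' prefix
        monitored_queue(ins, outs, mons)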
    """
    ins: p_void = in_socket.handle
    outs: p_void = out_socket.handle
    mons: p_void = mon_socket.handle
    in_msg = declare(zmq_msg_t)
    out_msg = declare(zmq_msg_t)
    swap_ids: bint
    msg_c: p_void = NULL
    msg_c_len = declare(Py_ssize_t)
    rc: C.int

    # force swap_ids if both ROUTERs
    swap_ids = in_socket.type == ZMQ_ROUTER and out_socket.type == ZMQ_ROUTER

    # build zmq_msg objects from str prefixes
    msg_c_len = _asbuffer(in_prefix, address(msg_c))
    rc = zmq_msg_init_size(address(in_msg), msg_c_len)
    _check_rc(rc)

    memcpy(zmq_msg_data(address(in_msg)), msg_c, zmq_msg_size(address(in_msg)))

    msg_c_len = _asbuffer(out_prefix, address(msg_c))

    rc = zmq_msg_init_size(address(out_msg), msg_c_len)
    _check_rc(rc)

    while True:
        with nogil:
            memcpy(
                zmq_msg_data(address(out_msg)), msg_c, zmq_msg_size(address(out_msg))
            )
            rc = _mq_inline(
                ins, outs, mons, address(in_msg), address(out_msg), swap_ids
            )
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            continue
        else:
            break
    return rc


__all__ = [
    'IPC_PATH_MAX_LEN',
    'Context',
    'Socket',
    'Frame',
    'has',
    'curve_keypair',
    'curve_public',
    'zmq_version_info',
    'zmq_errno',
    'zmq_poll',
    'strerror',
    'proxy',
    'proxy_steerable',
]
pyzmq-26.4.0/zmq/backend/cython/constant_enums.pxi000066400000000000000000000166121477374370200222730ustar00rootroot00000000000000cdef extern from "zmq.h" nogil:
    enum: PYZMQ_DRAFT_API
    enum: ZMQ_VERSION
    enum: ZMQ_VERSION_MAJOR
    enum: ZMQ_VERSION_MINOR
    enum: ZMQ_VERSION_PATCH
    enum: ZMQ_IO_THREADS
    enum: ZMQ_MAX_SOCKETS
    enum: ZMQ_SOCKET_LIMIT
    enum: ZMQ_THREAD_PRIORITY
    enum: ZMQ_THREAD_SCHED_POLICY
    enum: ZMQ_MAX_MSGSZ
    enum: ZMQ_MSG_T_SIZE
    enum: ZMQ_THREAD_AFFINITY_CPU_ADD
    enum: ZMQ_THREAD_AFFINITY_CPU_REMOVE
    enum: ZMQ_THREAD_NAME_PREFIX
    enum: ZMQ_STREAMER
    enum: ZMQ_FORWARDER
    enum: ZMQ_QUEUE
    enum: ZMQ_EAGAIN "EAGAIN"
    enum: ZMQ_EFAULT "EFAULT"
    enum: ZMQ_EINVAL "EINVAL"
    enum: ZMQ_ENOTSUP "ENOTSUP"
    enum: ZMQ_EPROTONOSUPPORT "EPROTONOSUPPORT"
    enum: ZMQ_ENOBUFS "ENOBUFS"
    enum: ZMQ_ENETDOWN "ENETDOWN"
    enum: ZMQ_EADDRINUSE "EADDRINUSE"
    enum: ZMQ_EADDRNOTAVAIL "EADDRNOTAVAIL"
    enum: ZMQ_ECONNREFUSED "ECONNREFUSED"
    enum: ZMQ_EINPROGRESS "EINPROGRESS"
    enum: ZMQ_ENOTSOCK "ENOTSOCK"
    enum: ZMQ_EMSGSIZE "EMSGSIZE"
    enum: ZMQ_EAFNOSUPPORT "EAFNOSUPPORT"
    enum: ZMQ_ENETUNREACH "ENETUNREACH"
    enum: ZMQ_ECONNABORTED "ECONNABORTED"
    enum: ZMQ_ECONNRESET "ECONNRESET"
    enum: ZMQ_ENOTCONN "ENOTCONN"
    enum: ZMQ_ETIMEDOUT "ETIMEDOUT"
    enum: ZMQ_EHOSTUNREACH "EHOSTUNREACH"
    enum: ZMQ_ENETRESET "ENETRESET"
    enum: ZMQ_EFSM "EFSM"
    enum: ZMQ_ENOCOMPATPROTO "ENOCOMPATPROTO"
    enum: ZMQ_ETERM "ETERM"
    enum: ZMQ_EMTHREAD "EMTHREAD"
    enum: ZMQ_PROTOCOL_ERROR_WS_UNSPECIFIED
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_UNSPECIFIED
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_INVALID_SEQUENCE
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_KEY_EXCHANGE
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_UNSPECIFIED
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_MESSAGE
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_INITIATE
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_READY
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_WELCOME
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_INVALID_METADATA
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC
    enum: ZMQ_PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH
    enum: ZMQ_PROTOCOL_ERROR_ZAP_UNSPECIFIED
    enum: ZMQ_PROTOCOL_ERROR_ZAP_MALFORMED_REPLY
    enum: ZMQ_PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID
    enum: ZMQ_PROTOCOL_ERROR_ZAP_BAD_VERSION
    enum: ZMQ_PROTOCOL_ERROR_ZAP_INVALID_STATUS_CODE
    enum: ZMQ_PROTOCOL_ERROR_ZAP_INVALID_METADATA
    enum: ZMQ_EVENT_CONNECTED
    enum: ZMQ_EVENT_CONNECT_DELAYED
    enum: ZMQ_EVENT_CONNECT_RETRIED
    enum: ZMQ_EVENT_LISTENING
    enum: ZMQ_EVENT_BIND_FAILED
    enum: ZMQ_EVENT_ACCEPTED
    enum: ZMQ_EVENT_ACCEPT_FAILED
    enum: ZMQ_EVENT_CLOSED
    enum: ZMQ_EVENT_CLOSE_FAILED
    enum: ZMQ_EVENT_DISCONNECTED
    enum: ZMQ_EVENT_MONITOR_STOPPED
    enum: ZMQ_EVENT_HANDSHAKE_FAILED_NO_DETAIL
    enum: ZMQ_EVENT_HANDSHAKE_SUCCEEDED
    enum: ZMQ_EVENT_HANDSHAKE_FAILED_PROTOCOL
    enum: ZMQ_EVENT_HANDSHAKE_FAILED_AUTH
    enum: ZMQ_EVENT_ALL_V1
    enum: ZMQ_EVENT_ALL
    enum: ZMQ_EVENT_PIPES_STATS
    enum: ZMQ_EVENT_ALL_V2
    enum: ZMQ_DONTWAIT
    enum: ZMQ_SNDMORE
    enum: ZMQ_NOBLOCK
    enum: ZMQ_MORE
    enum: ZMQ_SHARED
    enum: ZMQ_SRCFD
    enum: ZMQ_NORM_FIXED
    enum: ZMQ_NORM_CC
    enum: ZMQ_NORM_CCL
    enum: ZMQ_NORM_CCE
    enum: ZMQ_NORM_CCE_ECNONLY
    enum: ZMQ_POLLIN
    enum: ZMQ_POLLOUT
    enum: ZMQ_POLLERR
    enum: ZMQ_POLLPRI
    enum: ZMQ_RECONNECT_STOP_CONN_REFUSED
    enum: ZMQ_RECONNECT_STOP_HANDSHAKE_FAILED
    enum: ZMQ_RECONNECT_STOP_AFTER_DISCONNECT
    enum: ZMQ_NOTIFY_CONNECT
    enum: ZMQ_NOTIFY_DISCONNECT
    enum: ZMQ_NULL
    enum: ZMQ_PLAIN
    enum: ZMQ_CURVE
    enum: ZMQ_GSSAPI
    enum: ZMQ_HWM
    enum: ZMQ_AFFINITY
    enum: ZMQ_ROUTING_ID
    enum: ZMQ_SUBSCRIBE
    enum: ZMQ_UNSUBSCRIBE
    enum: ZMQ_RATE
    enum: ZMQ_RECOVERY_IVL
    enum: ZMQ_SNDBUF
    enum: ZMQ_RCVBUF
    enum: ZMQ_RCVMORE
    enum: ZMQ_FD
    enum: ZMQ_EVENTS
    enum: ZMQ_TYPE
    enum: ZMQ_LINGER
    enum: ZMQ_RECONNECT_IVL
    enum: ZMQ_BACKLOG
    enum: ZMQ_RECONNECT_IVL_MAX
    enum: ZMQ_MAXMSGSIZE
    enum: ZMQ_SNDHWM
    enum: ZMQ_RCVHWM
    enum: ZMQ_MULTICAST_HOPS
    enum: ZMQ_RCVTIMEO
    enum: ZMQ_SNDTIMEO
    enum: ZMQ_LAST_ENDPOINT
    enum: ZMQ_ROUTER_MANDATORY
    enum: ZMQ_TCP_KEEPALIVE
    enum: ZMQ_TCP_KEEPALIVE_CNT
    enum: ZMQ_TCP_KEEPALIVE_IDLE
    enum: ZMQ_TCP_KEEPALIVE_INTVL
    enum: ZMQ_IMMEDIATE
    enum: ZMQ_XPUB_VERBOSE
    enum: ZMQ_ROUTER_RAW
    enum: ZMQ_IPV6
    enum: ZMQ_MECHANISM
    enum: ZMQ_PLAIN_SERVER
    enum: ZMQ_PLAIN_USERNAME
    enum: ZMQ_PLAIN_PASSWORD
    enum: ZMQ_CURVE_SERVER
    enum: ZMQ_CURVE_PUBLICKEY
    enum: ZMQ_CURVE_SECRETKEY
    enum: ZMQ_CURVE_SERVERKEY
    enum: ZMQ_PROBE_ROUTER
    enum: ZMQ_REQ_CORRELATE
    enum: ZMQ_REQ_RELAXED
    enum: ZMQ_CONFLATE
    enum: ZMQ_ZAP_DOMAIN
    enum: ZMQ_ROUTER_HANDOVER
    enum: ZMQ_TOS
    enum: ZMQ_CONNECT_ROUTING_ID
    enum: ZMQ_GSSAPI_SERVER
    enum: ZMQ_GSSAPI_PRINCIPAL
    enum: ZMQ_GSSAPI_SERVICE_PRINCIPAL
    enum: ZMQ_GSSAPI_PLAINTEXT
    enum: ZMQ_HANDSHAKE_IVL
    enum: ZMQ_SOCKS_PROXY
    enum: ZMQ_XPUB_NODROP
    enum: ZMQ_BLOCKY
    enum: ZMQ_XPUB_MANUAL
    enum: ZMQ_XPUB_WELCOME_MSG
    enum: ZMQ_STREAM_NOTIFY
    enum: ZMQ_INVERT_MATCHING
    enum: ZMQ_HEARTBEAT_IVL
    enum: ZMQ_HEARTBEAT_TTL
    enum: ZMQ_HEARTBEAT_TIMEOUT
    enum: ZMQ_XPUB_VERBOSER
    enum: ZMQ_CONNECT_TIMEOUT
    enum: ZMQ_TCP_MAXRT
    enum: ZMQ_THREAD_SAFE
    enum: ZMQ_MULTICAST_MAXTPDU
    enum: ZMQ_VMCI_BUFFER_SIZE
    enum: ZMQ_VMCI_BUFFER_MIN_SIZE
    enum: ZMQ_VMCI_BUFFER_MAX_SIZE
    enum: ZMQ_VMCI_CONNECT_TIMEOUT
    enum: ZMQ_USE_FD
    enum: ZMQ_GSSAPI_PRINCIPAL_NAMETYPE
    enum: ZMQ_GSSAPI_SERVICE_PRINCIPAL_NAMETYPE
    enum: ZMQ_BINDTODEVICE
    enum: ZMQ_IDENTITY
    enum: ZMQ_CONNECT_RID
    enum: ZMQ_TCP_ACCEPT_FILTER
    enum: ZMQ_IPC_FILTER_PID
    enum: ZMQ_IPC_FILTER_UID
    enum: ZMQ_IPC_FILTER_GID
    enum: ZMQ_IPV4ONLY
    enum: ZMQ_DELAY_ATTACH_ON_CONNECT
    enum: ZMQ_FAIL_UNROUTABLE
    enum: ZMQ_ROUTER_BEHAVIOR
    enum: ZMQ_ZAP_ENFORCE_DOMAIN
    enum: ZMQ_LOOPBACK_FASTPATH
    enum: ZMQ_METADATA
    enum: ZMQ_MULTICAST_LOOP
    enum: ZMQ_ROUTER_NOTIFY
    enum: ZMQ_XPUB_MANUAL_LAST_VALUE
    enum: ZMQ_SOCKS_USERNAME
    enum: ZMQ_SOCKS_PASSWORD
    enum: ZMQ_IN_BATCH_SIZE
    enum: ZMQ_OUT_BATCH_SIZE
    enum: ZMQ_WSS_KEY_PEM
    enum: ZMQ_WSS_CERT_PEM
    enum: ZMQ_WSS_TRUST_PEM
    enum: ZMQ_WSS_HOSTNAME
    enum: ZMQ_WSS_TRUST_SYSTEM
    enum: ZMQ_ONLY_FIRST_SUBSCRIBE
    enum: ZMQ_RECONNECT_STOP
    enum: ZMQ_HELLO_MSG
    enum: ZMQ_DISCONNECT_MSG
    enum: ZMQ_PRIORITY
    enum: ZMQ_BUSY_POLL
    enum: ZMQ_HICCUP_MSG
    enum: ZMQ_XSUB_VERBOSE_UNSUBSCRIBE
    enum: ZMQ_TOPICS_COUNT
    enum: ZMQ_NORM_MODE
    enum: ZMQ_NORM_UNICAST_NACK
    enum: ZMQ_NORM_BUFFER_SIZE
    enum: ZMQ_NORM_SEGMENT_SIZE
    enum: ZMQ_NORM_BLOCK_SIZE
    enum: ZMQ_NORM_NUM_PARITY
    enum: ZMQ_NORM_NUM_AUTOPARITY
    enum: ZMQ_NORM_PUSH
    enum: ZMQ_PAIR
    enum: ZMQ_PUB
    enum: ZMQ_SUB
    enum: ZMQ_REQ
    enum: ZMQ_REP
    enum: ZMQ_DEALER
    enum: ZMQ_ROUTER
    enum: ZMQ_PULL
    enum: ZMQ_PUSH
    enum: ZMQ_XPUB
    enum: ZMQ_XSUB
    enum: ZMQ_STREAM
    enum: ZMQ_XREQ
    enum: ZMQ_XREP
    enum: ZMQ_SERVER
    enum: ZMQ_CLIENT
    enum: ZMQ_RADIO
    enum: ZMQ_DISH
    enum: ZMQ_GATHER
    enum: ZMQ_SCATTER
    enum: ZMQ_DGRAM
    enum: ZMQ_PEER
    enum: ZMQ_CHANNEL
pyzmq-26.4.0/zmq/backend/cython/libzmq.pxd000066400000000000000000000106121477374370200205160ustar00rootroot00000000000000"""All the C imports for 0MQ"""

#
#    Copyright (c) 2010 Brian E. Granger & Min Ragan-Kelley
#
#    This file is part of pyzmq.
#
#    pyzmq is free software; you can redistribute it and/or modify it under
#    the terms of the Lesser GNU General Public License as published by
#    the Free Software Foundation; either version 3 of the License, or
#    (at your option) any later version.
#
#    pyzmq is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    Lesser GNU General Public License for more details.
#
#    You should have received a copy of the Lesser GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Import the C header files
#-----------------------------------------------------------------------------

# common includes, such as zmq compat, pyversion_compat
# make sure we load pyversion compat in every Cython module
cdef extern from "pyversion_compat.h":
    pass

# were it not for Windows,
# we could cimport these from libc.stdint
cdef extern from "zmq_compat.h":
    ctypedef signed long long int64_t "pyzmq_int64_t"
    ctypedef unsigned int uint32_t "pyzmq_uint32_t"

include "constant_enums.pxi"

cdef extern from "zmq.h" nogil:

    void _zmq_version "zmq_version"(int *major, int *minor, int *patch)
    
    ctypedef int fd_t "ZMQ_FD_T"
    
    enum: errno
    const char *zmq_strerror (int errnum)
    int zmq_errno()

    void *zmq_ctx_new ()
    int zmq_ctx_destroy (void *context)
    int zmq_ctx_set (void *context, int option, int optval)
    int zmq_ctx_get (void *context, int option)
    void *zmq_init (int io_threads)
    int zmq_term (void *context)
    
    # blackbox def for zmq_msg_t
    ctypedef void * zmq_msg_t "zmq_msg_t"
    
    ctypedef void zmq_free_fn(void *data, void *hint)
    
    int zmq_msg_init (zmq_msg_t *msg)
    int zmq_msg_init_size (zmq_msg_t *msg, size_t size)
    int zmq_msg_init_data (zmq_msg_t *msg, void *data,
        size_t size, zmq_free_fn *ffn, void *hint)
    int zmq_msg_send (zmq_msg_t *msg, void *s, int flags)
    int zmq_msg_recv (zmq_msg_t *msg, void *s, int flags)
    int zmq_msg_close (zmq_msg_t *msg)
    int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src)
    int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src)
    void *zmq_msg_data (zmq_msg_t *msg)
    size_t zmq_msg_size (zmq_msg_t *msg)
    int zmq_msg_more (zmq_msg_t *msg)
    int zmq_msg_get (zmq_msg_t *msg, int option)
    int zmq_msg_set (zmq_msg_t *msg, int option, int optval)
    const char *zmq_msg_gets (zmq_msg_t *msg, const char *property)
    int zmq_has (const char *capability)

    void *zmq_socket (void *context, int type)
    int zmq_close (void *s)
    int zmq_setsockopt (void *s, int option, void *optval, size_t optvallen)
    int zmq_getsockopt (void *s, int option, void *optval, size_t *optvallen)
    int zmq_bind (void *s, char *addr)
    int zmq_connect (void *s, char *addr)
    int zmq_unbind (void *s, char *addr)
    int zmq_disconnect (void *s, char *addr)

    int zmq_socket_monitor (void *s, char *addr, int flags)
    
    # send/recv
    int zmq_send (void *s, const void *buf, size_t n, int flags)
    int zmq_recv (void *s, void *buf, size_t n, int flags)

    ctypedef struct zmq_pollitem_t:
        void *socket
        fd_t fd
        short events
        short revents

    int zmq_poll (zmq_pollitem_t *items, int nitems, long timeout)

    int zmq_proxy (void *frontend, void *backend, void *capture)
    int zmq_proxy_steerable (void *frontend,
                             void *backend,
                             void *capture,
                             void *control)

    int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key)
    int zmq_curve_public (char *z85_public_key, char *z85_secret_key)

    # 4.2 draft
    int zmq_join (void *s, const char *group)
    int zmq_leave (void *s, const char *group)

    int zmq_msg_set_routing_id(zmq_msg_t *msg, uint32_t routing_id)
    uint32_t zmq_msg_routing_id(zmq_msg_t *msg)
    int zmq_msg_set_group(zmq_msg_t *msg, const char *group)
    const char *zmq_msg_group(zmq_msg_t *msg)
pyzmq-26.4.0/zmq/backend/select.py000066400000000000000000000015701477374370200170330ustar00rootroot00000000000000"""Import basic exposure of libzmq C API as a backend"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from importlib import import_module
from typing import Dict

public_api = [
    'Context',
    'Socket',
    'Frame',
    'Message',
    'proxy',
    'proxy_steerable',
    'zmq_poll',
    'strerror',
    'zmq_errno',
    'has',
    'curve_keypair',
    'curve_public',
    'zmq_version_info',
    'IPC_PATH_MAX_LEN',
]


def select_backend(name: str) -> Dict:
    """Select the pyzmq backend"""
    try:
        mod = import_module(name)
    except ImportError:
        raise
    except Exception as e:
        raise ImportError(f"Importing {name} failed with {e}") from e
    ns = {
        # private API
        'monitored_queue': mod.monitored_queue,
    }
    ns.update({key: getattr(mod, key) for key in public_api})
    return ns
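

# For example (a usage sketch; availability depends on how pyzmq was built),
# the Cython backend can be selected explicitly with:
#
#     ns = select_backend("zmq.backend.cython")
#     Context = ns["Context"]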
pyzmq-26.4.0/zmq/constants.py000066400000000000000000000672651477374370200162160ustar00rootroot00000000000000"""zmq constants as enums"""

from __future__ import annotations

import errno
import sys
from enum import Enum, IntEnum, IntFlag

_HAUSNUMERO = 156384712


class Errno(IntEnum):
    """libzmq error codes

    .. versionadded:: 23
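
    These values match ``ZMQError.errno``, so they can be used when handling
    errors (a minimal sketch, assuming an already-created ``socket``)::

        try:
            msg = socket.recv(zmq.DONTWAIT)
        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                pass  # no message ready yet
            else:
                raise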
    """

    EAGAIN = errno.EAGAIN
    EFAULT = errno.EFAULT
    EINVAL = errno.EINVAL

    if sys.platform.startswith("win"):
        # Windows: libzmq uses errno.h
        # while Python errno prefers WSA* variants
        # many of these were introduced to errno.h in vs2010
        # ref: https://github.com/python/cpython/blob/3.9/Modules/errnomodule.c#L10-L37
        # source: https://docs.microsoft.com/en-us/cpp/c-runtime-library/errno-constants
        ENOTSUP = 129
        EPROTONOSUPPORT = 135
        ENOBUFS = 119
        ENETDOWN = 116
        EADDRINUSE = 100
        EADDRNOTAVAIL = 101
        ECONNREFUSED = 107
        EINPROGRESS = 112
        ENOTSOCK = 128
        EMSGSIZE = 115
        EAFNOSUPPORT = 102
        ENETUNREACH = 118
        ECONNABORTED = 106
        ECONNRESET = 108
        ENOTCONN = 126
        ETIMEDOUT = 138
        EHOSTUNREACH = 110
        ENETRESET = 117

    else:
        ENOTSUP = getattr(errno, "ENOTSUP", _HAUSNUMERO + 1)
        EPROTONOSUPPORT = getattr(errno, "EPROTONOSUPPORT", _HAUSNUMERO + 2)
        ENOBUFS = getattr(errno, "ENOBUFS", _HAUSNUMERO + 3)
        ENETDOWN = getattr(errno, "ENETDOWN", _HAUSNUMERO + 4)
        EADDRINUSE = getattr(errno, "EADDRINUSE", _HAUSNUMERO + 5)
        EADDRNOTAVAIL = getattr(errno, "EADDRNOTAVAIL", _HAUSNUMERO + 6)
        ECONNREFUSED = getattr(errno, "ECONNREFUSED", _HAUSNUMERO + 7)
        EINPROGRESS = getattr(errno, "EINPROGRESS", _HAUSNUMERO + 8)
        ENOTSOCK = getattr(errno, "ENOTSOCK", _HAUSNUMERO + 9)
        EMSGSIZE = getattr(errno, "EMSGSIZE", _HAUSNUMERO + 10)
        EAFNOSUPPORT = getattr(errno, "EAFNOSUPPORT", _HAUSNUMERO + 11)
        ENETUNREACH = getattr(errno, "ENETUNREACH", _HAUSNUMERO + 12)
        ECONNABORTED = getattr(errno, "ECONNABORTED", _HAUSNUMERO + 13)
        ECONNRESET = getattr(errno, "ECONNRESET", _HAUSNUMERO + 14)
        ENOTCONN = getattr(errno, "ENOTCONN", _HAUSNUMERO + 15)
        ETIMEDOUT = getattr(errno, "ETIMEDOUT", _HAUSNUMERO + 16)
        EHOSTUNREACH = getattr(errno, "EHOSTUNREACH", _HAUSNUMERO + 17)
        ENETRESET = getattr(errno, "ENETRESET", _HAUSNUMERO + 18)

    # Native 0MQ error codes
    EFSM = _HAUSNUMERO + 51
    ENOCOMPATPROTO = _HAUSNUMERO + 52
    ETERM = _HAUSNUMERO + 53
    EMTHREAD = _HAUSNUMERO + 54


class ContextOption(IntEnum):
    """Options for Context.get/set

    .. versionadded:: 23
    """

    IO_THREADS = 1
    MAX_SOCKETS = 2
    SOCKET_LIMIT = 3
    THREAD_PRIORITY = 3
    THREAD_SCHED_POLICY = 4
    MAX_MSGSZ = 5
    MSG_T_SIZE = 6
    THREAD_AFFINITY_CPU_ADD = 7
    THREAD_AFFINITY_CPU_REMOVE = 8
    THREAD_NAME_PREFIX = 9


class SocketType(IntEnum):
    """zmq socket types

    .. versionadded:: 23
    """

    PAIR = 0
    PUB = 1
    SUB = 2
    REQ = 3
    REP = 4
    DEALER = 5
    ROUTER = 6
    PULL = 7
    PUSH = 8
    XPUB = 9
    XSUB = 10
    STREAM = 11

    # deprecated aliases
    XREQ = DEALER
    XREP = ROUTER

    # DRAFT socket types
    SERVER = 12
    CLIENT = 13
    RADIO = 14
    DISH = 15
    GATHER = 16
    SCATTER = 17
    DGRAM = 18
    PEER = 19
    CHANNEL = 20


class _OptType(Enum):
    int = 'int'
    int64 = 'int64'
    bytes = 'bytes'
    fd = 'fd'


class SocketOption(IntEnum):
    """Options for Socket.get/set

    .. versionadded:: 23
    """

    _opt_type: _OptType

    def __new__(cls, value: int, opt_type: _OptType = _OptType.int):
        """Attach option type as `._opt_type`"""
        obj = int.__new__(cls, value)
        obj._value_ = value
        obj._opt_type = opt_type
        return obj
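
    # _opt_type records how each option's value is represented
    # (int, int64, bytes, or fd); e.g. SUBSCRIBE below is declared
    # with _OptType.bytes.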

    HWM = 1
    AFFINITY = 4, _OptType.int64
    ROUTING_ID = 5, _OptType.bytes
    SUBSCRIBE = 6, _OptType.bytes
    UNSUBSCRIBE = 7, _OptType.bytes
    RATE = 8
    RECOVERY_IVL = 9
    SNDBUF = 11
    RCVBUF = 12
    RCVMORE = 13
    FD = 14, _OptType.fd
    EVENTS = 15
    TYPE = 16
    LINGER = 17
    RECONNECT_IVL = 18
    BACKLOG = 19
    RECONNECT_IVL_MAX = 21
    MAXMSGSIZE = 22, _OptType.int64
    SNDHWM = 23
    RCVHWM = 24
    MULTICAST_HOPS = 25
    RCVTIMEO = 27
    SNDTIMEO = 28
    LAST_ENDPOINT = 32, _OptType.bytes
    ROUTER_MANDATORY = 33
    TCP_KEEPALIVE = 34
    TCP_KEEPALIVE_CNT = 35
    TCP_KEEPALIVE_IDLE = 36
    TCP_KEEPALIVE_INTVL = 37
    IMMEDIATE = 39
    XPUB_VERBOSE = 40
    ROUTER_RAW = 41
    IPV6 = 42
    MECHANISM = 43
    PLAIN_SERVER = 44
    PLAIN_USERNAME = 45, _OptType.bytes
    PLAIN_PASSWORD = 46, _OptType.bytes
    CURVE_SERVER = 47
    CURVE_PUBLICKEY = 48, _OptType.bytes
    CURVE_SECRETKEY = 49, _OptType.bytes
    CURVE_SERVERKEY = 50, _OptType.bytes
    PROBE_ROUTER = 51
    REQ_CORRELATE = 52
    REQ_RELAXED = 53
    CONFLATE = 54
    ZAP_DOMAIN = 55, _OptType.bytes
    ROUTER_HANDOVER = 56
    TOS = 57
    CONNECT_ROUTING_ID = 61, _OptType.bytes
    GSSAPI_SERVER = 62
    GSSAPI_PRINCIPAL = 63, _OptType.bytes
    GSSAPI_SERVICE_PRINCIPAL = 64, _OptType.bytes
    GSSAPI_PLAINTEXT = 65
    HANDSHAKE_IVL = 66
    SOCKS_PROXY = 68, _OptType.bytes
    XPUB_NODROP = 69
    BLOCKY = 70
    XPUB_MANUAL = 71
    XPUB_WELCOME_MSG = 72, _OptType.bytes
    STREAM_NOTIFY = 73
    INVERT_MATCHING = 74
    HEARTBEAT_IVL = 75
    HEARTBEAT_TTL = 76
    HEARTBEAT_TIMEOUT = 77
    XPUB_VERBOSER = 78
    CONNECT_TIMEOUT = 79
    TCP_MAXRT = 80
    THREAD_SAFE = 81
    MULTICAST_MAXTPDU = 84
    VMCI_BUFFER_SIZE = 85, _OptType.int64
    VMCI_BUFFER_MIN_SIZE = 86, _OptType.int64
    VMCI_BUFFER_MAX_SIZE = 87, _OptType.int64
    VMCI_CONNECT_TIMEOUT = 88
    USE_FD = 89
    GSSAPI_PRINCIPAL_NAMETYPE = 90
    GSSAPI_SERVICE_PRINCIPAL_NAMETYPE = 91
    BINDTODEVICE = 92, _OptType.bytes

    # Deprecated options and aliases
    # must not use name-assignment, must have the same value
    IDENTITY = ROUTING_ID
    CONNECT_RID = CONNECT_ROUTING_ID
    TCP_ACCEPT_FILTER = 38, _OptType.bytes
    IPC_FILTER_PID = 58
    IPC_FILTER_UID = 59
    IPC_FILTER_GID = 60
    IPV4ONLY = 31
    DELAY_ATTACH_ON_CONNECT = IMMEDIATE
    FAIL_UNROUTABLE = ROUTER_MANDATORY
    ROUTER_BEHAVIOR = ROUTER_MANDATORY

    # Draft socket options
    ZAP_ENFORCE_DOMAIN = 93
    LOOPBACK_FASTPATH = 94
    METADATA = 95, _OptType.bytes
    MULTICAST_LOOP = 96
    ROUTER_NOTIFY = 97
    XPUB_MANUAL_LAST_VALUE = 98
    SOCKS_USERNAME = 99, _OptType.bytes
    SOCKS_PASSWORD = 100, _OptType.bytes
    IN_BATCH_SIZE = 101
    OUT_BATCH_SIZE = 102
    WSS_KEY_PEM = 103, _OptType.bytes
    WSS_CERT_PEM = 104, _OptType.bytes
    WSS_TRUST_PEM = 105, _OptType.bytes
    WSS_HOSTNAME = 106, _OptType.bytes
    WSS_TRUST_SYSTEM = 107
    ONLY_FIRST_SUBSCRIBE = 108
    RECONNECT_STOP = 109
    HELLO_MSG = 110, _OptType.bytes
    DISCONNECT_MSG = 111, _OptType.bytes
    PRIORITY = 112
    # 4.3.5
    BUSY_POLL = 113
    HICCUP_MSG = 114, _OptType.bytes
    XSUB_VERBOSE_UNSUBSCRIBE = 115
    TOPICS_COUNT = 116
    NORM_MODE = 117
    NORM_UNICAST_NACK = 118
    NORM_BUFFER_SIZE = 119
    NORM_SEGMENT_SIZE = 120
    NORM_BLOCK_SIZE = 121
    NORM_NUM_PARITY = 122
    NORM_NUM_AUTOPARITY = 123
    NORM_PUSH = 124


class MessageOption(IntEnum):
    """Options on zmq.Frame objects

    .. versionadded:: 23
    """

    MORE = 1
    SHARED = 3
    # Deprecated message options
    SRCFD = 2


class Flag(IntFlag):
    """Send/recv flags

    .. versionadded:: 23
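
    For example, a multipart message can be sent by passing ``SNDMORE`` for
    every frame but the last (a minimal sketch, assuming a connected
    ``socket``)::

        socket.send(b"header", zmq.SNDMORE)
        socket.send(b"body")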
    """

    DONTWAIT = 1
    SNDMORE = 2
    NOBLOCK = DONTWAIT


class RouterNotify(IntEnum):
    """Values for zmq.ROUTER_NOTIFY socket option

    .. versionadded:: 26
    .. versionadded:: libzmq-4.3.0 (draft)
    """

    @staticmethod
    def _global_name(name):
        return f"NOTIFY_{name}"

    CONNECT = 1
    DISCONNECT = 2


class NormMode(IntEnum):
    """Values for zmq.NORM_MODE socket option

    .. versionadded:: 26
    .. versionadded:: libzmq-4.3.5 (draft)
    """

    @staticmethod
    def _global_name(name):
        return f"NORM_{name}"

    FIXED = 0
    CC = 1
    CCL = 2
    CCE = 3
    CCE_ECNONLY = 4


class SecurityMechanism(IntEnum):
    """Security mechanisms (as returned by ``socket.get(zmq.MECHANISM)``)

    .. versionadded:: 23
    """

    NULL = 0
    PLAIN = 1
    CURVE = 2
    GSSAPI = 3


class ReconnectStop(IntEnum):
    """Select behavior for socket.reconnect_stop

    .. versionadded:: 25
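
    These values can be combined (a minimal sketch; this is a draft option,
    so it requires a libzmq built with draft support)::

        socket.reconnect_stop = (
            zmq.RECONNECT_STOP_CONN_REFUSED | zmq.RECONNECT_STOP_HANDSHAKE_FAILED
        )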
    """

    @staticmethod
    def _global_name(name):
        return f"RECONNECT_STOP_{name}"

    CONN_REFUSED = 0x1
    HANDSHAKE_FAILED = 0x2
    AFTER_DISCONNECT = 0x4


class Event(IntFlag):
    """Socket monitoring events

    .. versionadded:: 23
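
    Since this is an ``IntFlag``, a value received from a socket monitor can
    be tested with bitwise operations (a minimal sketch; ``event_value`` is
    assumed to come from a monitor socket)::

        events = zmq.Event(event_value)
        if events & zmq.Event.DISCONNECTED:
            ...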
    """

    @staticmethod
    def _global_name(name):
        if name.startswith("PROTOCOL_ERROR_"):
            return name
        else:
            # add EVENT_ prefix
            return "EVENT_" + name

    PROTOCOL_ERROR_WS_UNSPECIFIED = 0x30000000
    PROTOCOL_ERROR_ZMTP_UNSPECIFIED = 0x10000000
    PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND = 0x10000001
    PROTOCOL_ERROR_ZMTP_INVALID_SEQUENCE = 0x10000002
    PROTOCOL_ERROR_ZMTP_KEY_EXCHANGE = 0x10000003
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_UNSPECIFIED = 0x10000011
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_MESSAGE = 0x10000012
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO = 0x10000013
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_INITIATE = 0x10000014
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR = 0x10000015
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_READY = 0x10000016
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_WELCOME = 0x10000017
    PROTOCOL_ERROR_ZMTP_INVALID_METADATA = 0x10000018

    PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC = 0x11000001
    PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH = 0x11000002
    PROTOCOL_ERROR_ZAP_UNSPECIFIED = 0x20000000
    PROTOCOL_ERROR_ZAP_MALFORMED_REPLY = 0x20000001
    PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID = 0x20000002
    PROTOCOL_ERROR_ZAP_BAD_VERSION = 0x20000003
    PROTOCOL_ERROR_ZAP_INVALID_STATUS_CODE = 0x20000004
    PROTOCOL_ERROR_ZAP_INVALID_METADATA = 0x20000005

    # define event types _after_ overlapping protocol error masks
    CONNECTED = 0x0001
    CONNECT_DELAYED = 0x0002
    CONNECT_RETRIED = 0x0004
    LISTENING = 0x0008
    BIND_FAILED = 0x0010
    ACCEPTED = 0x0020
    ACCEPT_FAILED = 0x0040
    CLOSED = 0x0080
    CLOSE_FAILED = 0x0100
    DISCONNECTED = 0x0200
    MONITOR_STOPPED = 0x0400

    HANDSHAKE_FAILED_NO_DETAIL = 0x0800
    HANDSHAKE_SUCCEEDED = 0x1000
    HANDSHAKE_FAILED_PROTOCOL = 0x2000
    HANDSHAKE_FAILED_AUTH = 0x4000

    ALL_V1 = 0xFFFF
    ALL = ALL_V1

    # DRAFT Socket monitoring events
    PIPES_STATS = 0x10000
    ALL_V2 = ALL_V1 | PIPES_STATS


class PollEvent(IntFlag):
    """Which events to poll for in poll methods

    .. versionadded:: 23
    """

    POLLIN = 1
    POLLOUT = 2
    POLLERR = 4
    POLLPRI = 8


class DeviceType(IntEnum):
    """Device type constants for zmq.device

    .. versionadded:: 23
    """

    STREAMER = 1
    FORWARDER = 2
    QUEUE = 3


# AUTOGENERATED_BELOW_HERE


IO_THREADS: int = ContextOption.IO_THREADS
MAX_SOCKETS: int = ContextOption.MAX_SOCKETS
SOCKET_LIMIT: int = ContextOption.SOCKET_LIMIT
THREAD_PRIORITY: int = ContextOption.THREAD_PRIORITY
THREAD_SCHED_POLICY: int = ContextOption.THREAD_SCHED_POLICY
MAX_MSGSZ: int = ContextOption.MAX_MSGSZ
MSG_T_SIZE: int = ContextOption.MSG_T_SIZE
THREAD_AFFINITY_CPU_ADD: int = ContextOption.THREAD_AFFINITY_CPU_ADD
THREAD_AFFINITY_CPU_REMOVE: int = ContextOption.THREAD_AFFINITY_CPU_REMOVE
THREAD_NAME_PREFIX: int = ContextOption.THREAD_NAME_PREFIX
STREAMER: int = DeviceType.STREAMER
FORWARDER: int = DeviceType.FORWARDER
QUEUE: int = DeviceType.QUEUE
EAGAIN: int = Errno.EAGAIN
EFAULT: int = Errno.EFAULT
EINVAL: int = Errno.EINVAL
ENOTSUP: int = Errno.ENOTSUP
EPROTONOSUPPORT: int = Errno.EPROTONOSUPPORT
ENOBUFS: int = Errno.ENOBUFS
ENETDOWN: int = Errno.ENETDOWN
EADDRINUSE: int = Errno.EADDRINUSE
EADDRNOTAVAIL: int = Errno.EADDRNOTAVAIL
ECONNREFUSED: int = Errno.ECONNREFUSED
EINPROGRESS: int = Errno.EINPROGRESS
ENOTSOCK: int = Errno.ENOTSOCK
EMSGSIZE: int = Errno.EMSGSIZE
EAFNOSUPPORT: int = Errno.EAFNOSUPPORT
ENETUNREACH: int = Errno.ENETUNREACH
ECONNABORTED: int = Errno.ECONNABORTED
ECONNRESET: int = Errno.ECONNRESET
ENOTCONN: int = Errno.ENOTCONN
ETIMEDOUT: int = Errno.ETIMEDOUT
EHOSTUNREACH: int = Errno.EHOSTUNREACH
ENETRESET: int = Errno.ENETRESET
EFSM: int = Errno.EFSM
ENOCOMPATPROTO: int = Errno.ENOCOMPATPROTO
ETERM: int = Errno.ETERM
EMTHREAD: int = Errno.EMTHREAD
PROTOCOL_ERROR_WS_UNSPECIFIED: int = Event.PROTOCOL_ERROR_WS_UNSPECIFIED
PROTOCOL_ERROR_ZMTP_UNSPECIFIED: int = Event.PROTOCOL_ERROR_ZMTP_UNSPECIFIED
PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND: int = (
    Event.PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND
)
PROTOCOL_ERROR_ZMTP_INVALID_SEQUENCE: int = Event.PROTOCOL_ERROR_ZMTP_INVALID_SEQUENCE
PROTOCOL_ERROR_ZMTP_KEY_EXCHANGE: int = Event.PROTOCOL_ERROR_ZMTP_KEY_EXCHANGE
PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_UNSPECIFIED: int = (
    Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_UNSPECIFIED
)
PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_MESSAGE: int = (
    Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_MESSAGE
)
PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO: int = (
    Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO
)
PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_INITIATE: int = (
    Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_INITIATE
)
PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR: int = (
    Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR
)
PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_READY: int = (
    Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_READY
)
PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_WELCOME: int = (
    Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_WELCOME
)
PROTOCOL_ERROR_ZMTP_INVALID_METADATA: int = Event.PROTOCOL_ERROR_ZMTP_INVALID_METADATA
PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC: int = Event.PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC
PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH: int = (
    Event.PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH
)
PROTOCOL_ERROR_ZAP_UNSPECIFIED: int = Event.PROTOCOL_ERROR_ZAP_UNSPECIFIED
PROTOCOL_ERROR_ZAP_MALFORMED_REPLY: int = Event.PROTOCOL_ERROR_ZAP_MALFORMED_REPLY
PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID: int = Event.PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID
PROTOCOL_ERROR_ZAP_BAD_VERSION: int = Event.PROTOCOL_ERROR_ZAP_BAD_VERSION
PROTOCOL_ERROR_ZAP_INVALID_STATUS_CODE: int = (
    Event.PROTOCOL_ERROR_ZAP_INVALID_STATUS_CODE
)
PROTOCOL_ERROR_ZAP_INVALID_METADATA: int = Event.PROTOCOL_ERROR_ZAP_INVALID_METADATA
EVENT_CONNECTED: int = Event.CONNECTED
EVENT_CONNECT_DELAYED: int = Event.CONNECT_DELAYED
EVENT_CONNECT_RETRIED: int = Event.CONNECT_RETRIED
EVENT_LISTENING: int = Event.LISTENING
EVENT_BIND_FAILED: int = Event.BIND_FAILED
EVENT_ACCEPTED: int = Event.ACCEPTED
EVENT_ACCEPT_FAILED: int = Event.ACCEPT_FAILED
EVENT_CLOSED: int = Event.CLOSED
EVENT_CLOSE_FAILED: int = Event.CLOSE_FAILED
EVENT_DISCONNECTED: int = Event.DISCONNECTED
EVENT_MONITOR_STOPPED: int = Event.MONITOR_STOPPED
EVENT_HANDSHAKE_FAILED_NO_DETAIL: int = Event.HANDSHAKE_FAILED_NO_DETAIL
EVENT_HANDSHAKE_SUCCEEDED: int = Event.HANDSHAKE_SUCCEEDED
EVENT_HANDSHAKE_FAILED_PROTOCOL: int = Event.HANDSHAKE_FAILED_PROTOCOL
EVENT_HANDSHAKE_FAILED_AUTH: int = Event.HANDSHAKE_FAILED_AUTH
EVENT_ALL_V1: int = Event.ALL_V1
EVENT_ALL: int = Event.ALL
EVENT_PIPES_STATS: int = Event.PIPES_STATS
EVENT_ALL_V2: int = Event.ALL_V2
DONTWAIT: int = Flag.DONTWAIT
SNDMORE: int = Flag.SNDMORE
NOBLOCK: int = Flag.NOBLOCK
MORE: int = MessageOption.MORE
SHARED: int = MessageOption.SHARED
SRCFD: int = MessageOption.SRCFD
NORM_FIXED: int = NormMode.FIXED
NORM_CC: int = NormMode.CC
NORM_CCL: int = NormMode.CCL
NORM_CCE: int = NormMode.CCE
NORM_CCE_ECNONLY: int = NormMode.CCE_ECNONLY
POLLIN: int = PollEvent.POLLIN
POLLOUT: int = PollEvent.POLLOUT
POLLERR: int = PollEvent.POLLERR
POLLPRI: int = PollEvent.POLLPRI
RECONNECT_STOP_CONN_REFUSED: int = ReconnectStop.CONN_REFUSED
RECONNECT_STOP_HANDSHAKE_FAILED: int = ReconnectStop.HANDSHAKE_FAILED
RECONNECT_STOP_AFTER_DISCONNECT: int = ReconnectStop.AFTER_DISCONNECT
NOTIFY_CONNECT: int = RouterNotify.CONNECT
NOTIFY_DISCONNECT: int = RouterNotify.DISCONNECT
NULL: int = SecurityMechanism.NULL
PLAIN: int = SecurityMechanism.PLAIN
CURVE: int = SecurityMechanism.CURVE
GSSAPI: int = SecurityMechanism.GSSAPI
HWM: int = SocketOption.HWM
AFFINITY: int = SocketOption.AFFINITY
ROUTING_ID: int = SocketOption.ROUTING_ID
SUBSCRIBE: int = SocketOption.SUBSCRIBE
UNSUBSCRIBE: int = SocketOption.UNSUBSCRIBE
RATE: int = SocketOption.RATE
RECOVERY_IVL: int = SocketOption.RECOVERY_IVL
SNDBUF: int = SocketOption.SNDBUF
RCVBUF: int = SocketOption.RCVBUF
RCVMORE: int = SocketOption.RCVMORE
FD: int = SocketOption.FD
EVENTS: int = SocketOption.EVENTS
TYPE: int = SocketOption.TYPE
LINGER: int = SocketOption.LINGER
RECONNECT_IVL: int = SocketOption.RECONNECT_IVL
BACKLOG: int = SocketOption.BACKLOG
RECONNECT_IVL_MAX: int = SocketOption.RECONNECT_IVL_MAX
MAXMSGSIZE: int = SocketOption.MAXMSGSIZE
SNDHWM: int = SocketOption.SNDHWM
RCVHWM: int = SocketOption.RCVHWM
MULTICAST_HOPS: int = SocketOption.MULTICAST_HOPS
RCVTIMEO: int = SocketOption.RCVTIMEO
SNDTIMEO: int = SocketOption.SNDTIMEO
LAST_ENDPOINT: int = SocketOption.LAST_ENDPOINT
ROUTER_MANDATORY: int = SocketOption.ROUTER_MANDATORY
TCP_KEEPALIVE: int = SocketOption.TCP_KEEPALIVE
TCP_KEEPALIVE_CNT: int = SocketOption.TCP_KEEPALIVE_CNT
TCP_KEEPALIVE_IDLE: int = SocketOption.TCP_KEEPALIVE_IDLE
TCP_KEEPALIVE_INTVL: int = SocketOption.TCP_KEEPALIVE_INTVL
IMMEDIATE: int = SocketOption.IMMEDIATE
XPUB_VERBOSE: int = SocketOption.XPUB_VERBOSE
ROUTER_RAW: int = SocketOption.ROUTER_RAW
IPV6: int = SocketOption.IPV6
MECHANISM: int = SocketOption.MECHANISM
PLAIN_SERVER: int = SocketOption.PLAIN_SERVER
PLAIN_USERNAME: int = SocketOption.PLAIN_USERNAME
PLAIN_PASSWORD: int = SocketOption.PLAIN_PASSWORD
CURVE_SERVER: int = SocketOption.CURVE_SERVER
CURVE_PUBLICKEY: int = SocketOption.CURVE_PUBLICKEY
CURVE_SECRETKEY: int = SocketOption.CURVE_SECRETKEY
CURVE_SERVERKEY: int = SocketOption.CURVE_SERVERKEY
PROBE_ROUTER: int = SocketOption.PROBE_ROUTER
REQ_CORRELATE: int = SocketOption.REQ_CORRELATE
REQ_RELAXED: int = SocketOption.REQ_RELAXED
CONFLATE: int = SocketOption.CONFLATE
ZAP_DOMAIN: int = SocketOption.ZAP_DOMAIN
ROUTER_HANDOVER: int = SocketOption.ROUTER_HANDOVER
TOS: int = SocketOption.TOS
CONNECT_ROUTING_ID: int = SocketOption.CONNECT_ROUTING_ID
GSSAPI_SERVER: int = SocketOption.GSSAPI_SERVER
GSSAPI_PRINCIPAL: int = SocketOption.GSSAPI_PRINCIPAL
GSSAPI_SERVICE_PRINCIPAL: int = SocketOption.GSSAPI_SERVICE_PRINCIPAL
GSSAPI_PLAINTEXT: int = SocketOption.GSSAPI_PLAINTEXT
HANDSHAKE_IVL: int = SocketOption.HANDSHAKE_IVL
SOCKS_PROXY: int = SocketOption.SOCKS_PROXY
XPUB_NODROP: int = SocketOption.XPUB_NODROP
BLOCKY: int = SocketOption.BLOCKY
XPUB_MANUAL: int = SocketOption.XPUB_MANUAL
XPUB_WELCOME_MSG: int = SocketOption.XPUB_WELCOME_MSG
STREAM_NOTIFY: int = SocketOption.STREAM_NOTIFY
INVERT_MATCHING: int = SocketOption.INVERT_MATCHING
HEARTBEAT_IVL: int = SocketOption.HEARTBEAT_IVL
HEARTBEAT_TTL: int = SocketOption.HEARTBEAT_TTL
HEARTBEAT_TIMEOUT: int = SocketOption.HEARTBEAT_TIMEOUT
XPUB_VERBOSER: int = SocketOption.XPUB_VERBOSER
CONNECT_TIMEOUT: int = SocketOption.CONNECT_TIMEOUT
TCP_MAXRT: int = SocketOption.TCP_MAXRT
THREAD_SAFE: int = SocketOption.THREAD_SAFE
MULTICAST_MAXTPDU: int = SocketOption.MULTICAST_MAXTPDU
VMCI_BUFFER_SIZE: int = SocketOption.VMCI_BUFFER_SIZE
VMCI_BUFFER_MIN_SIZE: int = SocketOption.VMCI_BUFFER_MIN_SIZE
VMCI_BUFFER_MAX_SIZE: int = SocketOption.VMCI_BUFFER_MAX_SIZE
VMCI_CONNECT_TIMEOUT: int = SocketOption.VMCI_CONNECT_TIMEOUT
USE_FD: int = SocketOption.USE_FD
GSSAPI_PRINCIPAL_NAMETYPE: int = SocketOption.GSSAPI_PRINCIPAL_NAMETYPE
GSSAPI_SERVICE_PRINCIPAL_NAMETYPE: int = SocketOption.GSSAPI_SERVICE_PRINCIPAL_NAMETYPE
BINDTODEVICE: int = SocketOption.BINDTODEVICE
IDENTITY: int = SocketOption.IDENTITY
CONNECT_RID: int = SocketOption.CONNECT_RID
TCP_ACCEPT_FILTER: int = SocketOption.TCP_ACCEPT_FILTER
IPC_FILTER_PID: int = SocketOption.IPC_FILTER_PID
IPC_FILTER_UID: int = SocketOption.IPC_FILTER_UID
IPC_FILTER_GID: int = SocketOption.IPC_FILTER_GID
IPV4ONLY: int = SocketOption.IPV4ONLY
DELAY_ATTACH_ON_CONNECT: int = SocketOption.DELAY_ATTACH_ON_CONNECT
FAIL_UNROUTABLE: int = SocketOption.FAIL_UNROUTABLE
ROUTER_BEHAVIOR: int = SocketOption.ROUTER_BEHAVIOR
ZAP_ENFORCE_DOMAIN: int = SocketOption.ZAP_ENFORCE_DOMAIN
LOOPBACK_FASTPATH: int = SocketOption.LOOPBACK_FASTPATH
METADATA: int = SocketOption.METADATA
MULTICAST_LOOP: int = SocketOption.MULTICAST_LOOP
ROUTER_NOTIFY: int = SocketOption.ROUTER_NOTIFY
XPUB_MANUAL_LAST_VALUE: int = SocketOption.XPUB_MANUAL_LAST_VALUE
SOCKS_USERNAME: int = SocketOption.SOCKS_USERNAME
SOCKS_PASSWORD: int = SocketOption.SOCKS_PASSWORD
IN_BATCH_SIZE: int = SocketOption.IN_BATCH_SIZE
OUT_BATCH_SIZE: int = SocketOption.OUT_BATCH_SIZE
WSS_KEY_PEM: int = SocketOption.WSS_KEY_PEM
WSS_CERT_PEM: int = SocketOption.WSS_CERT_PEM
WSS_TRUST_PEM: int = SocketOption.WSS_TRUST_PEM
WSS_HOSTNAME: int = SocketOption.WSS_HOSTNAME
WSS_TRUST_SYSTEM: int = SocketOption.WSS_TRUST_SYSTEM
ONLY_FIRST_SUBSCRIBE: int = SocketOption.ONLY_FIRST_SUBSCRIBE
RECONNECT_STOP: int = SocketOption.RECONNECT_STOP
HELLO_MSG: int = SocketOption.HELLO_MSG
DISCONNECT_MSG: int = SocketOption.DISCONNECT_MSG
PRIORITY: int = SocketOption.PRIORITY
BUSY_POLL: int = SocketOption.BUSY_POLL
HICCUP_MSG: int = SocketOption.HICCUP_MSG
XSUB_VERBOSE_UNSUBSCRIBE: int = SocketOption.XSUB_VERBOSE_UNSUBSCRIBE
TOPICS_COUNT: int = SocketOption.TOPICS_COUNT
NORM_MODE: int = SocketOption.NORM_MODE
NORM_UNICAST_NACK: int = SocketOption.NORM_UNICAST_NACK
NORM_BUFFER_SIZE: int = SocketOption.NORM_BUFFER_SIZE
NORM_SEGMENT_SIZE: int = SocketOption.NORM_SEGMENT_SIZE
NORM_BLOCK_SIZE: int = SocketOption.NORM_BLOCK_SIZE
NORM_NUM_PARITY: int = SocketOption.NORM_NUM_PARITY
NORM_NUM_AUTOPARITY: int = SocketOption.NORM_NUM_AUTOPARITY
NORM_PUSH: int = SocketOption.NORM_PUSH
PAIR: int = SocketType.PAIR
PUB: int = SocketType.PUB
SUB: int = SocketType.SUB
REQ: int = SocketType.REQ
REP: int = SocketType.REP
DEALER: int = SocketType.DEALER
ROUTER: int = SocketType.ROUTER
PULL: int = SocketType.PULL
PUSH: int = SocketType.PUSH
XPUB: int = SocketType.XPUB
XSUB: int = SocketType.XSUB
STREAM: int = SocketType.STREAM
XREQ: int = SocketType.XREQ
XREP: int = SocketType.XREP
SERVER: int = SocketType.SERVER
CLIENT: int = SocketType.CLIENT
RADIO: int = SocketType.RADIO
DISH: int = SocketType.DISH
GATHER: int = SocketType.GATHER
SCATTER: int = SocketType.SCATTER
DGRAM: int = SocketType.DGRAM
PEER: int = SocketType.PEER
CHANNEL: int = SocketType.CHANNEL

__all__: list[str] = [
    "ContextOption",
    "IO_THREADS",
    "MAX_SOCKETS",
    "SOCKET_LIMIT",
    "THREAD_PRIORITY",
    "THREAD_SCHED_POLICY",
    "MAX_MSGSZ",
    "MSG_T_SIZE",
    "THREAD_AFFINITY_CPU_ADD",
    "THREAD_AFFINITY_CPU_REMOVE",
    "THREAD_NAME_PREFIX",
    "DeviceType",
    "STREAMER",
    "FORWARDER",
    "QUEUE",
    "Enum",
    "Errno",
    "EAGAIN",
    "EFAULT",
    "EINVAL",
    "ENOTSUP",
    "EPROTONOSUPPORT",
    "ENOBUFS",
    "ENETDOWN",
    "EADDRINUSE",
    "EADDRNOTAVAIL",
    "ECONNREFUSED",
    "EINPROGRESS",
    "ENOTSOCK",
    "EMSGSIZE",
    "EAFNOSUPPORT",
    "ENETUNREACH",
    "ECONNABORTED",
    "ECONNRESET",
    "ENOTCONN",
    "ETIMEDOUT",
    "EHOSTUNREACH",
    "ENETRESET",
    "EFSM",
    "ENOCOMPATPROTO",
    "ETERM",
    "EMTHREAD",
    "Event",
    "PROTOCOL_ERROR_WS_UNSPECIFIED",
    "PROTOCOL_ERROR_ZMTP_UNSPECIFIED",
    "PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND",
    "PROTOCOL_ERROR_ZMTP_INVALID_SEQUENCE",
    "PROTOCOL_ERROR_ZMTP_KEY_EXCHANGE",
    "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_UNSPECIFIED",
    "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_MESSAGE",
    "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO",
    "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_INITIATE",
    "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR",
    "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_READY",
    "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_WELCOME",
    "PROTOCOL_ERROR_ZMTP_INVALID_METADATA",
    "PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC",
    "PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH",
    "PROTOCOL_ERROR_ZAP_UNSPECIFIED",
    "PROTOCOL_ERROR_ZAP_MALFORMED_REPLY",
    "PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID",
    "PROTOCOL_ERROR_ZAP_BAD_VERSION",
    "PROTOCOL_ERROR_ZAP_INVALID_STATUS_CODE",
    "PROTOCOL_ERROR_ZAP_INVALID_METADATA",
    "EVENT_CONNECTED",
    "EVENT_CONNECT_DELAYED",
    "EVENT_CONNECT_RETRIED",
    "EVENT_LISTENING",
    "EVENT_BIND_FAILED",
    "EVENT_ACCEPTED",
    "EVENT_ACCEPT_FAILED",
    "EVENT_CLOSED",
    "EVENT_CLOSE_FAILED",
    "EVENT_DISCONNECTED",
    "EVENT_MONITOR_STOPPED",
    "EVENT_HANDSHAKE_FAILED_NO_DETAIL",
    "EVENT_HANDSHAKE_SUCCEEDED",
    "EVENT_HANDSHAKE_FAILED_PROTOCOL",
    "EVENT_HANDSHAKE_FAILED_AUTH",
    "EVENT_ALL_V1",
    "EVENT_ALL",
    "EVENT_PIPES_STATS",
    "EVENT_ALL_V2",
    "Flag",
    "DONTWAIT",
    "SNDMORE",
    "NOBLOCK",
    "IntEnum",
    "IntFlag",
    "MessageOption",
    "MORE",
    "SHARED",
    "SRCFD",
    "NormMode",
    "NORM_FIXED",
    "NORM_CC",
    "NORM_CCL",
    "NORM_CCE",
    "NORM_CCE_ECNONLY",
    "PollEvent",
    "POLLIN",
    "POLLOUT",
    "POLLERR",
    "POLLPRI",
    "ReconnectStop",
    "RECONNECT_STOP_CONN_REFUSED",
    "RECONNECT_STOP_HANDSHAKE_FAILED",
    "RECONNECT_STOP_AFTER_DISCONNECT",
    "RouterNotify",
    "NOTIFY_CONNECT",
    "NOTIFY_DISCONNECT",
    "SecurityMechanism",
    "NULL",
    "PLAIN",
    "CURVE",
    "GSSAPI",
    "SocketOption",
    "HWM",
    "AFFINITY",
    "ROUTING_ID",
    "SUBSCRIBE",
    "UNSUBSCRIBE",
    "RATE",
    "RECOVERY_IVL",
    "SNDBUF",
    "RCVBUF",
    "RCVMORE",
    "FD",
    "EVENTS",
    "TYPE",
    "LINGER",
    "RECONNECT_IVL",
    "BACKLOG",
    "RECONNECT_IVL_MAX",
    "MAXMSGSIZE",
    "SNDHWM",
    "RCVHWM",
    "MULTICAST_HOPS",
    "RCVTIMEO",
    "SNDTIMEO",
    "LAST_ENDPOINT",
    "ROUTER_MANDATORY",
    "TCP_KEEPALIVE",
    "TCP_KEEPALIVE_CNT",
    "TCP_KEEPALIVE_IDLE",
    "TCP_KEEPALIVE_INTVL",
    "IMMEDIATE",
    "XPUB_VERBOSE",
    "ROUTER_RAW",
    "IPV6",
    "MECHANISM",
    "PLAIN_SERVER",
    "PLAIN_USERNAME",
    "PLAIN_PASSWORD",
    "CURVE_SERVER",
    "CURVE_PUBLICKEY",
    "CURVE_SECRETKEY",
    "CURVE_SERVERKEY",
    "PROBE_ROUTER",
    "REQ_CORRELATE",
    "REQ_RELAXED",
    "CONFLATE",
    "ZAP_DOMAIN",
    "ROUTER_HANDOVER",
    "TOS",
    "CONNECT_ROUTING_ID",
    "GSSAPI_SERVER",
    "GSSAPI_PRINCIPAL",
    "GSSAPI_SERVICE_PRINCIPAL",
    "GSSAPI_PLAINTEXT",
    "HANDSHAKE_IVL",
    "SOCKS_PROXY",
    "XPUB_NODROP",
    "BLOCKY",
    "XPUB_MANUAL",
    "XPUB_WELCOME_MSG",
    "STREAM_NOTIFY",
    "INVERT_MATCHING",
    "HEARTBEAT_IVL",
    "HEARTBEAT_TTL",
    "HEARTBEAT_TIMEOUT",
    "XPUB_VERBOSER",
    "CONNECT_TIMEOUT",
    "TCP_MAXRT",
    "THREAD_SAFE",
    "MULTICAST_MAXTPDU",
    "VMCI_BUFFER_SIZE",
    "VMCI_BUFFER_MIN_SIZE",
    "VMCI_BUFFER_MAX_SIZE",
    "VMCI_CONNECT_TIMEOUT",
    "USE_FD",
    "GSSAPI_PRINCIPAL_NAMETYPE",
    "GSSAPI_SERVICE_PRINCIPAL_NAMETYPE",
    "BINDTODEVICE",
    "IDENTITY",
    "CONNECT_RID",
    "TCP_ACCEPT_FILTER",
    "IPC_FILTER_PID",
    "IPC_FILTER_UID",
    "IPC_FILTER_GID",
    "IPV4ONLY",
    "DELAY_ATTACH_ON_CONNECT",
    "FAIL_UNROUTABLE",
    "ROUTER_BEHAVIOR",
    "ZAP_ENFORCE_DOMAIN",
    "LOOPBACK_FASTPATH",
    "METADATA",
    "MULTICAST_LOOP",
    "ROUTER_NOTIFY",
    "XPUB_MANUAL_LAST_VALUE",
    "SOCKS_USERNAME",
    "SOCKS_PASSWORD",
    "IN_BATCH_SIZE",
    "OUT_BATCH_SIZE",
    "WSS_KEY_PEM",
    "WSS_CERT_PEM",
    "WSS_TRUST_PEM",
    "WSS_HOSTNAME",
    "WSS_TRUST_SYSTEM",
    "ONLY_FIRST_SUBSCRIBE",
    "RECONNECT_STOP",
    "HELLO_MSG",
    "DISCONNECT_MSG",
    "PRIORITY",
    "BUSY_POLL",
    "HICCUP_MSG",
    "XSUB_VERBOSE_UNSUBSCRIBE",
    "TOPICS_COUNT",
    "NORM_MODE",
    "NORM_UNICAST_NACK",
    "NORM_BUFFER_SIZE",
    "NORM_SEGMENT_SIZE",
    "NORM_BLOCK_SIZE",
    "NORM_NUM_PARITY",
    "NORM_NUM_AUTOPARITY",
    "NORM_PUSH",
    "SocketType",
    "PAIR",
    "PUB",
    "SUB",
    "REQ",
    "REP",
    "DEALER",
    "ROUTER",
    "PULL",
    "PUSH",
    "XPUB",
    "XSUB",
    "STREAM",
    "XREQ",
    "XREP",
    "SERVER",
    "CLIENT",
    "RADIO",
    "DISH",
    "GATHER",
    "SCATTER",
    "DGRAM",
    "PEER",
    "CHANNEL",
]
pyzmq-26.4.0/zmq/decorators.py000066400000000000000000000117531477374370200163360ustar00rootroot00000000000000"""Decorators for running functions with context/sockets.

.. versionadded:: 15.3

Like using Contexts and Sockets as context managers, but with decorator syntax.
Contexts and sockets are closed at the end of the function.

For example::

    from zmq.decorators import context, socket

    @context()
    @socket(zmq.PUSH)
    def work(ctx, push):
        ...
"""

from __future__ import annotations

# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.

__all__ = (
    'context',
    'socket',
)

from functools import wraps

import zmq


class _Decorator:
    '''The mini decorator factory'''

    def __init__(self, target=None):
        self._target = target

    def __call__(self, *dec_args, **dec_kwargs):
        """
        The main logic of the decorator

        Here is how those arguments work::

            @out_decorator(*dec_args, **dec_kwargs)
            def func(*wrap_args, **wrap_kwargs):
                ...

        And in the ``wrapper``, we simply create the ``self._target`` instance
        via ``with``::

            target = self.get_target(*args, **kwargs)
            with target(*dec_args, **dec_kwargs) as obj:
                ...

        """
        kw_name, dec_args, dec_kwargs = self.process_decorator_args(
            *dec_args, **dec_kwargs
        )

        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                target = self.get_target(*args, **kwargs)

                with target(*dec_args, **dec_kwargs) as obj:
                    # insert our object into args
                    if kw_name and kw_name not in kwargs:
                        kwargs[kw_name] = obj
                    elif kw_name and kw_name in kwargs:
                        raise TypeError(
                            f"{func.__name__}() got multiple values for"
                            f" argument '{kw_name}'"
                        )
                    else:
                        args = args + (obj,)

                    return func(*args, **kwargs)

            return wrapper

        return decorator

    def get_target(self, *args, **kwargs):
        """Return the target function

        Allows modifying args/kwargs to be passed.
        """
        return self._target

    def process_decorator_args(self, *args, **kwargs):
        """Process args passed to the decorator.

        args not consumed by the decorator will be passed to the target factory
        (Context/Socket constructor).
        """
        kw_name = None

        if isinstance(kwargs.get('name'), str):
            kw_name = kwargs.pop('name')
        elif len(args) >= 1 and isinstance(args[0], str):
            kw_name = args[0]
            args = args[1:]

        return kw_name, args, kwargs


class _ContextDecorator(_Decorator):
    """Decorator subclass for Contexts"""

    def __init__(self):
        super().__init__(zmq.Context)


class _SocketDecorator(_Decorator):
    """Decorator subclass for sockets

    Gets the context from other args.
    """

    def process_decorator_args(self, *args, **kwargs):
        """Also grab context_name out of kwargs"""
        kw_name, args, kwargs = super().process_decorator_args(*args, **kwargs)
        self.context_name = kwargs.pop('context_name', 'context')
        return kw_name, args, kwargs

    def get_target(self, *args, **kwargs):
        """Get context, based on call-time args"""
        context = self._get_context(*args, **kwargs)
        return context.socket

    def _get_context(self, *args, **kwargs):
        """
        Find the ``zmq.Context`` from ``args`` and ``kwargs`` at call time.

        First, if there is a keyword argument named ``context`` (or whatever
        ``context_name`` was set to) and it is a ``zmq.Context`` instance,
        we take it.

        Second, we check all the ``args`` and take the first ``zmq.Context``
        instance found.

        Finally, we fall back to the default Context, ``zmq.Context.instance()``.

        :return: a ``zmq.Context`` instance
        """
        if self.context_name in kwargs:
            ctx = kwargs[self.context_name]

            if isinstance(ctx, zmq.Context):
                return ctx

        for arg in args:
            if isinstance(arg, zmq.Context):
                return arg
        # not specified by any decorator
        return zmq.Context.instance()


def context(*args, **kwargs):
    """Decorator for adding a Context to a function.

    Usage::

        @context()
        def foo(ctx):
            ...

    .. versionadded:: 15.3

    :param str name: the keyword argument passed to decorated function
    """
    return _ContextDecorator()(*args, **kwargs)


def socket(*args, **kwargs):
    """Decorator for adding a socket to a function.

    Usage::

        @socket(zmq.PUSH)
        def foo(push):
            ...
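
    With explicit names and ``context_name`` (a minimal sketch)::

        @context(name='ctx')
        @socket(zmq.PUSH, name='push', context_name='ctx')
        def work(ctx, push):
            ...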

    .. versionadded:: 15.3

    :param str name: the keyword argument passed to decorated function
    :param str context_name: the keyword only argument to identify context
                             object
    """
    return _SocketDecorator()(*args, **kwargs)
pyzmq-26.4.0/zmq/devices/000077500000000000000000000000001477374370200152325ustar00rootroot00000000000000pyzmq-26.4.0/zmq/devices/__init__.py000066400000000000000000000014011477374370200173370ustar00rootroot00000000000000"""0MQ Device classes for running in background threads or processes."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from __future__ import annotations

from zmq import DeviceType, proxy
from zmq.devices import (
    basedevice,
    monitoredqueue,
    monitoredqueuedevice,
    proxydevice,
    proxysteerabledevice,
)
from zmq.devices.basedevice import *
from zmq.devices.monitoredqueue import *
from zmq.devices.monitoredqueuedevice import *
from zmq.devices.proxydevice import *
from zmq.devices.proxysteerabledevice import *

__all__ = []
for submod in (
    basedevice,
    proxydevice,
    proxysteerabledevice,
    monitoredqueue,
    monitoredqueuedevice,
):
    __all__.extend(submod.__all__)  # type: ignore
pyzmq-26.4.0/zmq/devices/basedevice.py000066400000000000000000000225061477374370200177030ustar00rootroot00000000000000"""Classes for running 0MQ Devices in the background."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import time
from multiprocessing import Process
from threading import Thread
from typing import Any, Callable, List, Optional, Tuple

import zmq
from zmq import ENOTSOCK, ETERM, PUSH, QUEUE, Context, ZMQBindError, ZMQError, proxy


class Device:
    """A 0MQ Device to be run in the background.

    You do not pass Socket instances to this, but rather Socket types::

        Device(device_type, in_socket_type, out_socket_type)

    For instance::

        dev = Device(zmq.QUEUE, zmq.DEALER, zmq.ROUTER)

    Similar to zmq.device, but socket types instead of sockets themselves are
    passed, and the sockets are created in the work thread, to avoid issues
    with thread safety. As a result, additional bind_{in|out} and
    connect_{in|out} methods and setsockopt_{in|out} allow users to specify
    connections for the sockets.

    Parameters
    ----------
    device_type : int
        The 0MQ Device type
    {in|out}_type : int
        zmq socket types, to be passed later to context.socket(). e.g.
        zmq.PUB, zmq.SUB, zmq.REQ. If out_type is < 0, then in_socket is used
        for both in_socket and out_socket.

    Methods
    -------
    bind_{in|out}(iface)
        passthrough for ``{in|out}_socket.bind(iface)``, to be called in the thread
    connect_{in|out}(iface)
        passthrough for ``{in|out}_socket.connect(iface)``, to be called in the
        thread
    setsockopt_{in|out}(opt, value)
        passthrough for ``{in|out}_socket.setsockopt(opt, value)``, to be called in
        the thread

    Attributes
    ----------
    daemon : bool
        Sets whether the thread should be run as a daemon.
        Default is True, because if it is False, the thread will not
        exit unless it is killed.
    context_factory : callable
        This is a class attribute.
        Function for creating the Context. This will be Context.instance
        in ThreadDevices, and Context in ProcessDevices.  The only reason
        it is not instance() in ProcessDevices is that there may be a stale
        Context instance already initialized, and the forked environment
        should *never* try to use it.
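
    Examples
    --------
    A typical pattern binds both sides and runs the device in the background
    (a minimal sketch using the ``ThreadDevice`` subclass; endpoints are
    illustrative)::

        dev = ThreadDevice(zmq.QUEUE, zmq.ROUTER, zmq.DEALER)
        dev.bind_in("tcp://127.0.0.1:5555")
        dev.bind_out("tcp://127.0.0.1:5556")
        dev.start()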
    """

    context_factory: Callable[[], zmq.Context] = Context.instance
    """Callable that returns a context. Typically either Context.instance or Context,
    depending on whether the device should share the global instance or not.
    """

    daemon: bool
    device_type: int
    in_type: int
    out_type: int

    _in_binds: List[str]
    _in_connects: List[str]
    _in_sockopts: List[Tuple[int, Any]]
    _out_binds: List[str]
    _out_connects: List[str]
    _out_sockopts: List[Tuple[int, Any]]
    _random_addrs: List[str]
    _sockets: List[zmq.Socket]

    def __init__(
        self,
        device_type: int = QUEUE,
        in_type: Optional[int] = None,
        out_type: Optional[int] = None,
    ) -> None:
        self.device_type = device_type
        if in_type is None:
            raise TypeError("in_type must be specified")
        if out_type is None:
            raise TypeError("out_type must be specified")
        self.in_type = in_type
        self.out_type = out_type
        self._in_binds = []
        self._in_connects = []
        self._in_sockopts = []
        self._out_binds = []
        self._out_connects = []
        self._out_sockopts = []
        self._random_addrs = []
        self.daemon = True
        self.done = False
        self._sockets = []

    def bind_in(self, addr: str) -> None:
        """Enqueue ZMQ address for binding on in_socket.

        See zmq.Socket.bind for details.
        """
        self._in_binds.append(addr)

    def bind_in_to_random_port(self, addr: str, *args, **kwargs) -> int:
        """Enqueue a random port on the given interface for binding on
        in_socket.

        See zmq.Socket.bind_to_random_port for details.

        .. versionadded:: 18.0
        """
        port = self._reserve_random_port(addr, *args, **kwargs)

        self.bind_in(f'{addr}:{port}')

        return port

    def connect_in(self, addr: str) -> None:
        """Enqueue ZMQ address for connecting on in_socket.

        See zmq.Socket.connect for details.
        """
        self._in_connects.append(addr)

    def setsockopt_in(self, opt: int, value: Any) -> None:
        """Enqueue setsockopt(opt, value) for in_socket

        See zmq.Socket.setsockopt for details.
        """
        self._in_sockopts.append((opt, value))

    def bind_out(self, addr: str) -> None:
        """Enqueue ZMQ address for binding on out_socket.

        See zmq.Socket.bind for details.
        """
        self._out_binds.append(addr)

    def bind_out_to_random_port(self, addr: str, *args, **kwargs) -> int:
        """Enqueue a random port on the given interface for binding on
        out_socket.

        See zmq.Socket.bind_to_random_port for details.

        .. versionadded:: 18.0
        """
        port = self._reserve_random_port(addr, *args, **kwargs)

        self.bind_out(f'{addr}:{port}')

        return port

    def connect_out(self, addr: str):
        """Enqueue ZMQ address for connecting on out_socket.

        See zmq.Socket.connect for details.
        """
        self._out_connects.append(addr)

    def setsockopt_out(self, opt: int, value: Any):
        """Enqueue setsockopt(opt, value) for out_socket

        See zmq.Socket.setsockopt for details.
        """
        self._out_sockopts.append((opt, value))

    def _reserve_random_port(self, addr: str, *args, **kwargs) -> int:
        with Context() as ctx:
            with ctx.socket(PUSH) as binder:
                for i in range(5):
                    port = binder.bind_to_random_port(addr, *args, **kwargs)

                    new_addr = f'{addr}:{port}'

                    if new_addr in self._random_addrs:
                        continue
                    else:
                        break
                else:
                    raise ZMQBindError("Could not reserve random port.")

                self._random_addrs.append(new_addr)

        return port

    def _setup_sockets(self) -> Tuple[zmq.Socket, zmq.Socket]:
        ctx: zmq.Context[zmq.Socket] = self.context_factory()  # type: ignore
        self._context = ctx

        # create the sockets
        ins = ctx.socket(self.in_type)
        self._sockets.append(ins)
        if self.out_type < 0:
            outs = ins
        else:
            outs = ctx.socket(self.out_type)
            self._sockets.append(outs)

        # set sockopts (must be done first, in case of zmq.IDENTITY)
        for opt, value in self._in_sockopts:
            ins.setsockopt(opt, value)
        for opt, value in self._out_sockopts:
            outs.setsockopt(opt, value)

        for iface in self._in_binds:
            ins.bind(iface)
        for iface in self._out_binds:
            outs.bind(iface)

        for iface in self._in_connects:
            ins.connect(iface)
        for iface in self._out_connects:
            outs.connect(iface)

        return ins, outs

    def run_device(self) -> None:
        """The runner method.

        Do not call me directly; instead call ``self.start()``, just like a Thread.
        """
        ins, outs = self._setup_sockets()
        proxy(ins, outs)

    def _close_sockets(self):
        """Cleanup sockets we created"""
        for s in self._sockets:
            if s and not s.closed:
                s.close()

    def run(self) -> None:
        """wrap run_device in try/catch ETERM"""
        try:
            self.run_device()
        except ZMQError as e:
            if e.errno in {ETERM, ENOTSOCK}:
                # silence ETERM, ENOTSOCK errors, because this should be a clean shutdown
                pass
            else:
                raise
        finally:
            self.done = True
            self._close_sockets()

    def start(self) -> None:
        """Start the device. Override me in subclass for other launchers."""
        return self.run()

    def join(self, timeout: Optional[float] = None) -> None:
        """wait for me to finish, like Thread.join.

        Reimplemented appropriately by subclasses."""
        tic = time.monotonic()
        toc = tic
        while not self.done and not (timeout is not None and toc - tic > timeout):
            time.sleep(0.001)
            toc = time.monotonic()


class BackgroundDevice(Device):
    """Base class for launching Devices in background processes and threads."""

    launcher: Any = None
    _launch_class: Any = None

    def start(self) -> None:
        self.launcher = self._launch_class(target=self.run)
        self.launcher.daemon = self.daemon
        return self.launcher.start()

    def join(self, timeout: Optional[float] = None) -> None:
        return self.launcher.join(timeout=timeout)


class ThreadDevice(BackgroundDevice):
    """A Device that will be run in a background Thread.

    See Device for details.
    """

    _launch_class = Thread


class ProcessDevice(BackgroundDevice):
    """A Device that will be run in a background Process.

    See Device for details.
    """

    _launch_class = Process
    context_factory = Context
    """Callable that returns a context. Typically either Context.instance or Context,
    depending on whether the device should share the global instance or not.
    """


__all__ = ['Device', 'ThreadDevice', 'ProcessDevice']
pyzmq-26.4.0/zmq/devices/monitoredqueue.py000066400000000000000000000024161477374370200206540ustar00rootroot00000000000000"""pure Python monitored_queue function

For use when Cython extension is unavailable (PyPy).

Authors
-------
* MinRK
"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from typing import Callable

import zmq
from zmq.backend import monitored_queue as _backend_mq


def _relay(ins, outs, sides, prefix, swap_ids):
    msg = ins.recv_multipart()
    if swap_ids:
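        # when both endpoints are ROUTERs, the receiving ROUTER has prepended
        # the sender's identity, so the first two frames are (sender_id, dest_id);
        # swap them so the outgoing ROUTER routes to the intended destination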
        msg[:2] = msg[:2][::-1]
    outs.send_multipart(msg)
    sides.send_multipart([prefix] + msg)


def _monitored_queue(
    in_socket, out_socket, mon_socket, in_prefix=b'in', out_prefix=b'out'
):
    swap_ids = in_socket.type == zmq.ROUTER and out_socket.type == zmq.ROUTER

    poller = zmq.Poller()
    poller.register(in_socket, zmq.POLLIN)
    poller.register(out_socket, zmq.POLLIN)
    while True:
        events = dict(poller.poll())
        if in_socket in events:
            _relay(in_socket, out_socket, mon_socket, in_prefix, swap_ids)
        if out_socket in events:
            _relay(out_socket, in_socket, mon_socket, out_prefix, swap_ids)


monitored_queue: Callable
if _backend_mq is not None:
    monitored_queue = _backend_mq  # type: ignore
else:
    # backend has no monitored_queue
    monitored_queue = _monitored_queue


__all__ = ['monitored_queue']
pyzmq-26.4.0/zmq/devices/monitoredqueuedevice.py000066400000000000000000000036111477374370200220320ustar00rootroot00000000000000"""MonitoredQueue classes and functions."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from zmq import PUB
from zmq.devices.monitoredqueue import monitored_queue
from zmq.devices.proxydevice import ProcessProxy, Proxy, ProxyBase, ThreadProxy


class MonitoredQueueBase(ProxyBase):
    """Base class for overriding methods."""

    _in_prefix = b''
    _out_prefix = b''

    def __init__(
        self, in_type, out_type, mon_type=PUB, in_prefix=b'in', out_prefix=b'out'
    ):
        ProxyBase.__init__(self, in_type=in_type, out_type=out_type, mon_type=mon_type)

        self._in_prefix = in_prefix
        self._out_prefix = out_prefix

    def run_device(self):
        ins, outs, mons = self._setup_sockets()
        monitored_queue(ins, outs, mons, self._in_prefix, self._out_prefix)


class MonitoredQueue(MonitoredQueueBase, Proxy):
    """Class for running monitored_queue in the background.

    See zmq.devices.Device for most of the spec. MonitoredQueue differs from Proxy
    only in that it adds a ``prefix`` to messages sent on the monitor socket,
    with a different prefix for each direction.

    MQ also supports ROUTER on both sides, which zmq.proxy does not.

    If a message arrives on ``in_sock``, it will be prefixed with ``in_prefix`` on the monitor socket.
    If it arrives on ``out_sock``, it will be prefixed with ``out_prefix``.

    A PUB socket is the most logical choice for the mon_socket, but it is not required.
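
    For example (a rough sketch; the endpoints are illustrative)::

        mq = ThreadMonitoredQueue(zmq.ROUTER, zmq.DEALER, zmq.PUB,
                                  in_prefix=b'in', out_prefix=b'out')
        mq.bind_in('tcp://127.0.0.1:5555')
        mq.bind_out('tcp://127.0.0.1:5556')
        mq.bind_mon('tcp://127.0.0.1:5557')
        mq.start()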
    """


class ThreadMonitoredQueue(MonitoredQueueBase, ThreadProxy):
    """Run zmq.monitored_queue in a background thread.

    See MonitoredQueue and Proxy for details.
    """


class ProcessMonitoredQueue(MonitoredQueueBase, ProcessProxy):
    """Run zmq.monitored_queue in a separate process.

    See MonitoredQueue and Proxy for details.
    """


__all__ = ['MonitoredQueue', 'ThreadMonitoredQueue', 'ProcessMonitoredQueue']
pyzmq-26.4.0/zmq/devices/proxydevice.py000066400000000000000000000054331477374370200201520ustar00rootroot00000000000000"""Proxy classes and functions."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import zmq
from zmq.devices.basedevice import Device, ProcessDevice, ThreadDevice


class ProxyBase:
    """Base class for overriding methods."""

    def __init__(self, in_type, out_type, mon_type=zmq.PUB):
        Device.__init__(self, in_type=in_type, out_type=out_type)
        self.mon_type = mon_type
        self._mon_binds = []
        self._mon_connects = []
        self._mon_sockopts = []

    def bind_mon(self, addr):
        """Enqueue ZMQ address for binding on mon_socket.

        See zmq.Socket.bind for details.
        """
        self._mon_binds.append(addr)

    def bind_mon_to_random_port(self, addr, *args, **kwargs):
        """Enqueue a random port on the given interface for binding on
        mon_socket.

        See zmq.Socket.bind_to_random_port for details.

        .. versionadded:: 18.0
        """
        port = self._reserve_random_port(addr, *args, **kwargs)

        self.bind_mon(f'{addr}:{port}')

        return port

    def connect_mon(self, addr):
        """Enqueue ZMQ address for connecting on mon_socket.

        See zmq.Socket.connect for details.
        """
        self._mon_connects.append(addr)

    def setsockopt_mon(self, opt, value):
        """Enqueue setsockopt(opt, value) for mon_socket

        See zmq.Socket.setsockopt for details.
        """
        self._mon_sockopts.append((opt, value))

    def _setup_sockets(self):
        ins, outs = Device._setup_sockets(self)
        ctx = self._context
        mons = ctx.socket(self.mon_type)
        self._sockets.append(mons)

        # set sockopts (must be done first, in case of zmq.IDENTITY)
        for opt, value in self._mon_sockopts:
            mons.setsockopt(opt, value)

        for iface in self._mon_binds:
            mons.bind(iface)

        for iface in self._mon_connects:
            mons.connect(iface)

        return ins, outs, mons

    def run_device(self):
        ins, outs, mons = self._setup_sockets()
        zmq.proxy(ins, outs, mons)


class Proxy(ProxyBase, Device):
    """Threadsafe Proxy object.

    See zmq.devices.Device for most of the spec. This subclass adds a
    _mon version of each _{in|out} method, for configuring the
    monitor socket.

    A Proxy is a 3-socket ZMQ Device that functions just like a
    QUEUE, except each message is also sent out on the monitor socket.

    A PUB socket is the most logical choice for the mon_socket, but it is not required.
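
    For example (a rough sketch; the endpoints are illustrative)::

        p = ThreadProxy(zmq.PULL, zmq.PUSH, zmq.PUB)
        p.bind_in('tcp://127.0.0.1:6000')
        p.bind_out('tcp://127.0.0.1:6001')
        p.bind_mon('tcp://127.0.0.1:6002')
        p.start()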
    """


class ThreadProxy(ProxyBase, ThreadDevice):
    """Proxy in a Thread. See Proxy for more."""


class ProcessProxy(ProxyBase, ProcessDevice):
    """Proxy in a Process. See Proxy for more."""


__all__ = [
    'Proxy',
    'ThreadProxy',
    'ProcessProxy',
]
pyzmq-26.4.0/zmq/devices/proxysteerabledevice.py000066400000000000000000000062061477374370200220400ustar00rootroot00000000000000"""Classes for running a steerable ZMQ proxy"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import zmq
from zmq.devices.proxydevice import ProcessProxy, Proxy, ThreadProxy


class ProxySteerableBase:
    """Base class for overriding methods."""

    def __init__(self, in_type, out_type, mon_type=zmq.PUB, ctrl_type=None):
        super().__init__(in_type=in_type, out_type=out_type, mon_type=mon_type)
        self.ctrl_type = ctrl_type
        self._ctrl_binds = []
        self._ctrl_connects = []
        self._ctrl_sockopts = []

    def bind_ctrl(self, addr):
        """Enqueue ZMQ address for binding on ctrl_socket.

        See zmq.Socket.bind for details.
        """
        self._ctrl_binds.append(addr)

    def bind_ctrl_to_random_port(self, addr, *args, **kwargs):
        """Enqueue a random port on the given interface for binding on
        ctrl_socket.

        See zmq.Socket.bind_to_random_port for details.
        """
        port = self._reserve_random_port(addr, *args, **kwargs)

        self.bind_ctrl(f'{addr}:{port}')

        return port

    def connect_ctrl(self, addr):
        """Enqueue ZMQ address for connecting on ctrl_socket.

        See zmq.Socket.connect for details.
        """
        self._ctrl_connects.append(addr)

    def setsockopt_ctrl(self, opt, value):
        """Enqueue setsockopt(opt, value) for ctrl_socket

        See zmq.Socket.setsockopt for details.
        """
        self._ctrl_sockopts.append((opt, value))

    def _setup_sockets(self):
        ins, outs, mons = super()._setup_sockets()
        ctx = self._context
        ctrls = ctx.socket(self.ctrl_type)
        self._sockets.append(ctrls)

        for opt, value in self._ctrl_sockopts:
            ctrls.setsockopt(opt, value)

        for iface in self._ctrl_binds:
            ctrls.bind(iface)

        for iface in self._ctrl_connects:
            ctrls.connect(iface)

        return ins, outs, mons, ctrls

    def run_device(self):
        ins, outs, mons, ctrls = self._setup_sockets()
        zmq.proxy_steerable(ins, outs, mons, ctrls)


class ProxySteerable(ProxySteerableBase, Proxy):
    """Class for running a steerable proxy in the background.

    See zmq.devices.Proxy for most of the spec.  If the control socket is not
    NULL, the proxy supports control flow commands sent over that socket.

    If PAUSE is received on this socket, the proxy suspends its activities. If
    RESUME is received, it goes on. If TERMINATE is received, it terminates
    smoothly.  If the control socket is NULL, the proxy behaves exactly as if
    zmq.devices.Proxy had been used.

    This subclass adds a _ctrl version of each _{in|out}
    method, for configuring the control socket.
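
    For example (a rough sketch; the endpoints are illustrative)::

        p = ThreadProxySteerable(zmq.PULL, zmq.PUSH, zmq.PUB, zmq.PAIR)
        p.bind_in('tcp://127.0.0.1:7000')
        p.bind_out('tcp://127.0.0.1:7001')
        p.bind_mon('tcp://127.0.0.1:7002')
        p.bind_ctrl('tcp://127.0.0.1:7003')
        p.start()

        # steer the running proxy from another PAIR socket
        ctrl = zmq.Context.instance().socket(zmq.PAIR)
        ctrl.connect('tcp://127.0.0.1:7003')
        ctrl.send(b'PAUSE')
        ctrl.send(b'RESUME')
        ctrl.send(b'TERMINATE')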

    .. versionadded:: libzmq-4.1
    .. versionadded:: 18.0
    """


class ThreadProxySteerable(ProxySteerableBase, ThreadProxy):
    """ProxySteerable in a Thread. See ProxySteerable for details."""


class ProcessProxySteerable(ProxySteerableBase, ProcessProxy):
    """ProxySteerable in a Process. See ProxySteerable for details."""


__all__ = [
    'ProxySteerable',
    'ThreadProxySteerable',
    'ProcessProxySteerable',
]
pyzmq-26.4.0/zmq/error.py000066400000000000000000000123371477374370200153210ustar00rootroot00000000000000"""0MQ Error classes and functions."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

from errno import EINTR


class ZMQBaseError(Exception):
    """Base exception class for 0MQ errors in Python."""


class ZMQError(ZMQBaseError):
    """Wrap an errno style error.

    Parameters
    ----------
    errno : int
        The ZMQ errno or None.  If None, then ``zmq_errno()`` is called and
        used.
    msg : str
        Description of the error or None.
    """

    errno: int | None = None
    strerror: str

    def __init__(self, errno: int | None = None, msg: str | None = None):
        """Wrap an errno style error.

        Parameters
        ----------
        errno : int
            The ZMQ errno or None.  If None, then ``zmq_errno()`` is called and
            used.
        msg : str
            Description of the error or None.
        """
        from zmq.backend import strerror, zmq_errno

        if errno is None:
            errno = zmq_errno()
        if isinstance(errno, int):
            self.errno = errno
            if msg is None:
                self.strerror = strerror(errno)
            else:
                self.strerror = msg
        else:
            if msg is None:
                self.strerror = str(errno)
            else:
                self.strerror = msg
        # flush signals, because there could be a SIGINT
        # waiting to pounce, resulting in uncaught exceptions.
        # Doing this here means getting SIGINT during a blocking
        # libzmq call will raise a *catchable* KeyboardInterrupt
        # PyErr_CheckSignals()

    def __str__(self) -> str:
        return self.strerror

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{str(self)}')"


class ZMQBindError(ZMQBaseError):
    """An error for ``Socket.bind_to_random_port()``.

    See Also
    --------
    .Socket.bind_to_random_port
    """


class NotDone(ZMQBaseError):
    """Raised when timeout is reached while waiting for 0MQ to finish with a Message

    See Also
    --------
    .MessageTracker.wait : object for tracking when ZeroMQ is done
    """


class ContextTerminated(ZMQError):
    """Wrapper for zmq.ETERM

    .. versionadded:: 13.0
    """

    def __init__(self, errno="ignored", msg="ignored"):
        from zmq import ETERM

        super().__init__(ETERM)


class Again(ZMQError):
    """Wrapper for zmq.EAGAIN

    .. versionadded:: 13.0
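
    A typical handling pattern (illustrative; assumes ``sock`` is an existing
    zmq.Socket)::

        try:
            msg = sock.recv(zmq.NOBLOCK)
        except zmq.Again:
            msg = None  # nothing is available right now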
    """

    def __init__(self, errno="ignored", msg="ignored"):
        from zmq import EAGAIN

        super().__init__(EAGAIN)


class InterruptedSystemCall(ZMQError, InterruptedError):
    """Wrapper for EINTR

    This exception should be caught internally in pyzmq
    to retry system calls, and not propagate to the user.

    .. versionadded:: 14.7
    """

    errno = EINTR
    strerror: str

    def __init__(self, errno="ignored", msg="ignored"):
        super().__init__(EINTR)

    def __str__(self):
        s = super().__str__()
        return s + ": This call should have been retried. Please report this to pyzmq."


def _check_rc(rc, errno=None, error_without_errno=True):
    """internal utility for checking zmq return condition

    and raising the appropriate Exception class
    """
    if rc == -1:
        if errno is None:
            from zmq.backend import zmq_errno

            errno = zmq_errno()
        if errno == 0 and not error_without_errno:
            return
        from zmq import EAGAIN, ETERM

        if errno == EINTR:
            raise InterruptedSystemCall(errno)
        elif errno == EAGAIN:
            raise Again(errno)
        elif errno == ETERM:
            raise ContextTerminated(errno)
        else:
            raise ZMQError(errno)


_zmq_version_info = None
_zmq_version = None


class ZMQVersionError(NotImplementedError):
    """Raised when a feature is not provided by the linked version of libzmq.

    .. versionadded:: 14.2
    """

    min_version = None

    def __init__(self, min_version: str, msg: str = "Feature"):
        global _zmq_version
        if _zmq_version is None:
            from zmq import zmq_version

            _zmq_version = zmq_version()
        self.msg = msg
        self.min_version = min_version
        self.version = _zmq_version

    def __repr__(self):
        return f"ZMQVersionError('{str(self)}')"

    def __str__(self):
        return f"{self.msg} requires libzmq >= {self.min_version}, have {self.version}"


def _check_version(
    min_version_info: tuple[int] | tuple[int, int] | tuple[int, int, int],
    msg: str = "Feature",
):
    """Check for libzmq

    raises ZMQVersionError if current zmq version is not at least min_version

    min_version_info is a tuple of integers, and will be compared against zmq.zmq_version_info().
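
    For example (illustrative)::

        _check_version((4, 1), "zmq.proxy_steerable")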
    """
    global _zmq_version_info
    if _zmq_version_info is None:
        from zmq import zmq_version_info

        _zmq_version_info = zmq_version_info()
    if _zmq_version_info < min_version_info:
        min_version = ".".join(str(v) for v in min_version_info)
        raise ZMQVersionError(min_version, msg)


__all__ = [
    "ZMQBaseError",
    "ZMQBindError",
    "ZMQError",
    "NotDone",
    "ContextTerminated",
    "InterruptedSystemCall",
    "Again",
    "ZMQVersionError",
]
pyzmq-26.4.0/zmq/eventloop/000077500000000000000000000000001477374370200156235ustar00rootroot00000000000000pyzmq-26.4.0/zmq/eventloop/__init__.py000066400000000000000000000001471477374370200177360ustar00rootroot00000000000000"""Tornado eventloop integration for pyzmq"""

from tornado.ioloop import IOLoop

__all__ = ['IOLoop']
pyzmq-26.4.0/zmq/eventloop/_deprecated.py000066400000000000000000000144531477374370200204430ustar00rootroot00000000000000"""tornado IOLoop API with zmq compatibility

If you have tornado ≥ 3.0, this is a subclass of tornado's IOLoop,
otherwise we ship a minimal subset of tornado in zmq.eventloop.minitornado.

The minimal shipped version of tornado's IOLoop does not include
support for concurrent futures - this will only be available if you
have tornado ≥ 3.0.
"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import time
import warnings
from typing import Tuple

from zmq import ETERM, POLLERR, POLLIN, POLLOUT, Poller, ZMQError

tornado_version: Tuple = ()
try:
    import tornado

    tornado_version = tornado.version_info
except (ImportError, AttributeError):
    pass

from .minitornado.ioloop import PeriodicCallback, PollIOLoop
from .minitornado.log import gen_log


class DelayedCallback(PeriodicCallback):
    """Schedules the given callback to be called once.

    The callback is called once, after callback_time milliseconds.

    `start` must be called after the DelayedCallback is created.

    The timeout is calculated from when `start` is called.
    """

    def __init__(self, callback, callback_time, io_loop=None):
        # PeriodicCallback requires callback_time to be positive
        warnings.warn(
            """DelayedCallback is deprecated.
        Use loop.add_timeout instead.""",
            DeprecationWarning,
        )
        callback_time = max(callback_time, 1e-3)
        super().__init__(callback, callback_time, io_loop)

    def start(self):
        """Starts the timer."""
        self._running = True
        self._firstrun = True
        self._next_timeout = time.time() + self.callback_time / 1000.0
        self.io_loop.add_timeout(self._next_timeout, self._run)

    def _run(self):
        if not self._running:
            return
        self._running = False
        try:
            self.callback()
        except Exception:
            gen_log.error("Error in delayed callback", exc_info=True)


class ZMQPoller:
    """A poller that can be used in the tornado IOLoop.

    This simply wraps a regular zmq.Poller, scaling the timeout
    by 1000 so that this wrapper accepts timeouts in seconds (as tornado expects)
    while the underlying zmq.Poller receives milliseconds.
    """

    def __init__(self):
        self._poller = Poller()

    @staticmethod
    def _map_events(events):
        """translate IOLoop.READ/WRITE/ERROR event masks into zmq.POLLIN/OUT/ERR"""
        z_events = 0
        if events & IOLoop.READ:
            z_events |= POLLIN
        if events & IOLoop.WRITE:
            z_events |= POLLOUT
        if events & IOLoop.ERROR:
            z_events |= POLLERR
        return z_events

    @staticmethod
    def _remap_events(z_events):
        """translate zmq.POLLIN/OUT/ERR event masks into IOLoop.READ/WRITE/ERROR"""
        events = 0
        if z_events & POLLIN:
            events |= IOLoop.READ
        if z_events & POLLOUT:
            events |= IOLoop.WRITE
        if z_events & POLLERR:
            events |= IOLoop.ERROR
        return events

    def register(self, fd, events):
        return self._poller.register(fd, self._map_events(events))

    def modify(self, fd, events):
        return self._poller.modify(fd, self._map_events(events))

    def unregister(self, fd):
        return self._poller.unregister(fd)

    def poll(self, timeout):
        """poll in seconds rather than milliseconds.

        Event masks will be IOLoop.READ/WRITE/ERROR
        """
        z_events = self._poller.poll(1000 * timeout)
        return [(fd, self._remap_events(evt)) for (fd, evt) in z_events]

    def close(self):
        pass


class ZMQIOLoop(PollIOLoop):
    """ZMQ subclass of tornado's IOLoop

    Minor modifications, so that .current/.instance return self
    """

    _zmq_impl = ZMQPoller

    def initialize(self, impl=None, **kwargs):
        impl = self._zmq_impl() if impl is None else impl
        super().initialize(impl=impl, **kwargs)

    @classmethod
    def instance(cls, *args, **kwargs):
        """Returns a global `IOLoop` instance.

        Most applications have a single, global `IOLoop` running on the
        main thread.  Use this method to get this instance from
        another thread.  To get the current thread's `IOLoop`, use `current()`.
        """
        # install ZMQIOLoop as the active IOLoop implementation
        # when using tornado 3
        if tornado_version >= (3,):
            PollIOLoop.configure(cls)
        loop = PollIOLoop.instance(*args, **kwargs)
        if not isinstance(loop, cls):
            warnings.warn(
                f"IOLoop.current expected instance of {cls!r}, got {loop!r}",
                RuntimeWarning,
                stacklevel=2,
            )
        return loop

    @classmethod
    def current(cls, *args, **kwargs):
        """Returns the current thread’s IOLoop."""
        # install ZMQIOLoop as the active IOLoop implementation
        # when using tornado 3
        if tornado_version >= (3,):
            PollIOLoop.configure(cls)
        loop = PollIOLoop.current(*args, **kwargs)
        if not isinstance(loop, cls):
            warnings.warn(
                f"IOLoop.current expected instance of {cls!r}, got {loop!r}",
                RuntimeWarning,
                stacklevel=2,
            )
        return loop

    def start(self):
        try:
            super().start()
        except ZMQError as e:
            if e.errno == ETERM:
                # quietly return on ETERM
                pass
            else:
                raise


# public API name
IOLoop = ZMQIOLoop


def install():
    """set the tornado IOLoop instance with the pyzmq IOLoop.

    After calling this function, tornado's IOLoop.instance() and pyzmq's
    IOLoop.instance() will return the same object.

    An assertion error will be raised if tornado's IOLoop has already been
    initialized to something other than the pyzmq IOLoop prior to calling
    this function.
    """
    from tornado import ioloop

    # check if tornado's IOLoop is already initialized to something other
    # than the pyzmq IOLoop instance:
    assert (
        not ioloop.IOLoop.initialized()
    ) or ioloop.IOLoop.instance() is IOLoop.instance(), (
        "tornado IOLoop already initialized"
    )

    if tornado_version >= (3,):
        # tornado 3 has an official API for registering new defaults, yay!
        ioloop.IOLoop.configure(ZMQIOLoop)
    else:
        # we have to set the global instance explicitly
        ioloop.IOLoop._instance = IOLoop.instance()
pyzmq-26.4.0/zmq/eventloop/future.py000066400000000000000000000050641477374370200175140ustar00rootroot00000000000000"""Future-returning APIs for tornado coroutines.

.. seealso::

    :mod:`zmq.asyncio`

"""

# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

import asyncio
import warnings
from typing import Any

from tornado.concurrent import Future
from tornado.ioloop import IOLoop

import zmq as _zmq
from zmq._future import _AsyncPoller, _AsyncSocket


class CancelledError(Exception):
    pass


class _TornadoFuture(Future):
    """Subclass Tornado Future, reinstating cancellation."""

    def cancel(self):
        if self.done():
            return False
        self.set_exception(CancelledError())
        return True

    def cancelled(self):
        return self.done() and isinstance(self.exception(), CancelledError)


class _CancellableTornadoTimeout:
    def __init__(self, loop, timeout):
        self.loop = loop
        self.timeout = timeout

    def cancel(self):
        self.loop.remove_timeout(self.timeout)


# mixin for tornado/asyncio compatibility


class _AsyncTornado:
    _Future: type[asyncio.Future] = _TornadoFuture
    _READ = IOLoop.READ
    _WRITE = IOLoop.WRITE

    def _default_loop(self):
        return IOLoop.current()

    def _call_later(self, delay, callback):
        io_loop = self._get_loop()
        timeout = io_loop.call_later(delay, callback)
        return _CancellableTornadoTimeout(io_loop, timeout)


class Poller(_AsyncTornado, _AsyncPoller):
    def _watch_raw_socket(self, loop, socket, evt, f):
        """Schedule callback for a raw socket"""
        loop.add_handler(socket, lambda *args: f(), evt)

    def _unwatch_raw_sockets(self, loop, *sockets):
        """Unschedule callback for a raw socket"""
        for socket in sockets:
            loop.remove_handler(socket)


class Socket(_AsyncTornado, _AsyncSocket):
    _poller_class = Poller


Poller._socket_class = Socket


class Context(_zmq.Context[Socket]):
    # avoid sharing instance with base Context class
    _instance = None

    io_loop = None

    @staticmethod
    def _socket_class(self, socket_type):
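        # _socket_class is normally the Socket class itself; defining it as a
        # staticmethod with an explicit ``self`` (the context) parameter keeps
        # the same (context, socket_type) call signature while returning this
        # module's tornado-Future-based Socket.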
        return Socket(self, socket_type)

    def __init__(self: Context, *args: Any, **kwargs: Any) -> None:
        io_loop = kwargs.pop('io_loop', None)
        if io_loop is not None:
            warnings.warn(
                f"{self.__class__.__name__}(io_loop) argument is deprecated in pyzmq 22.2."
                " The currently active loop will always be used.",
                DeprecationWarning,
                stacklevel=2,
            )
        super().__init__(*args, **kwargs)  # type: ignore
pyzmq-26.4.0/zmq/eventloop/ioloop.py000066400000000000000000000013761477374370200175050ustar00rootroot00000000000000"""tornado IOLoop API with zmq compatibility

This module is deprecated in pyzmq 17.
To use zmq with tornado,
eventloop integration is no longer required
and tornado itself should be used.
"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import warnings


def _deprecated():
    warnings.warn(
        "zmq.eventloop.ioloop is deprecated in pyzmq 17."
        " pyzmq now works with default tornado and asyncio eventloops.",
        DeprecationWarning,
        stacklevel=3,
    )


_deprecated()

from tornado.ioloop import *  # noqa
from tornado.ioloop import IOLoop

ZMQIOLoop = IOLoop


def install():
    """DEPRECATED

    pyzmq 17 no longer needs any special integration for tornado.
    """
    _deprecated()
pyzmq-26.4.0/zmq/eventloop/zmqstream.py000066400000000000000000000550011477374370200202210ustar00rootroot00000000000000# Derived from iostream.py from tornado 1.0, Copyright 2009 Facebook
# Used under Apache License Version 2.0
#
# Modifications are Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
"""A utility class for event-based messaging on a zmq socket using tornado.

.. seealso::

    - :mod:`zmq.asyncio`
    - :mod:`zmq.eventloop.future`
"""

from __future__ import annotations

import asyncio
import pickle
import warnings
from queue import Queue
from typing import Any, Awaitable, Callable, Literal, Sequence, cast, overload

from tornado.ioloop import IOLoop
from tornado.log import gen_log

import zmq
import zmq._future
from zmq import POLLIN, POLLOUT
from zmq.utils import jsonapi


class ZMQStream:
    """A utility class to register callbacks when a zmq socket sends and receives

    For use with tornado IOLoop.

    There are three main methods:

    * **on_recv(callback, copy=True):**
        register a callback to be run every time the socket has something to receive
    * **on_send(callback):**
        register a callback to be run every time you call send
    * **send_multipart(msg, flags=0, copy=True, callback=None):**
        perform a send that will trigger the callback;
        if callback is passed, on_send is also called.

        There are also send(), send_string(), send_json(), and send_pyobj().

    Two other methods deactivate the callbacks:

    * **stop_on_recv():**
        turn off the recv callback
    * **stop_on_send():**
        turn off the send callback

    which simply call ``on_recv(None)`` and ``on_send(None)``, respectively.

    The entire socket interface, excluding direct recv methods, is also
    provided, primarily through direct-linking the methods.
    e.g.

    >>> stream.bind is stream.socket.bind
    True
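
    A minimal usage sketch (assumes a running tornado ``IOLoop``; the endpoint
    is illustrative)::

        sock = zmq.Context.instance().socket(zmq.ROUTER)
        sock.bind('tcp://127.0.0.1:5555')
        stream = ZMQStream(sock)

        def echo(msg):
            stream.send_multipart(msg)

        stream.on_recv(echo)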


    .. versionadded:: 25

        send/recv callbacks can be coroutines.

    .. versionchanged:: 25

        ZMQStreams only support base zmq.Socket classes (this has always been true, but not enforced).
        If ZMQStreams are created with e.g. async Socket subclasses,
        a RuntimeWarning will be shown,
        and the socket cast back to the default zmq.Socket
        before connecting events.

        Previously, using async sockets (or any zmq.Socket subclass) would result in undefined behavior for the
        arguments passed to callback functions.
        Now, the callback functions reliably get the return value of the base `zmq.Socket` send/recv_multipart methods
        (the list of message frames).
    """

    socket: zmq.Socket
    io_loop: IOLoop
    poller: zmq.Poller
    _send_queue: Queue
    _recv_callback: Callable | None
    _send_callback: Callable | None
    _close_callback: Callable | None
    _state: int = 0
    _flushed: bool = False
    _recv_copy: bool = False
    _fd: int

    def __init__(self, socket: zmq.Socket, io_loop: IOLoop | None = None):
        if isinstance(socket, zmq._future._AsyncSocket):
            warnings.warn(
                f"""ZMQStream only supports the base zmq.Socket class.

                Use zmq.Socket(shadow=other_socket)
                or `ctx.socket(zmq.{socket._type_name}, socket_class=zmq.Socket)`
                to create a base zmq.Socket object,
                no matter what other kind of socket your Context creates.
                """,
                RuntimeWarning,
                stacklevel=2,
            )
            # shadow back to base zmq.Socket,
            # otherwise callbacks like `on_recv` will get the wrong types.
            socket = zmq.Socket(shadow=socket)
        self.socket = socket

        # IOLoop.current() is deprecated if called outside the event loop;
        # that means ZMQStream should be created inside a running event loop
        # (or be given an explicit io_loop).
        self.io_loop = io_loop or IOLoop.current()
        self.poller = zmq.Poller()
        self._fd = cast(int, self.socket.FD)

        self._send_queue = Queue()
        self._recv_callback = None
        self._send_callback = None
        self._close_callback = None
        self._recv_copy = False
        self._flushed = False

        self._state = 0
        self._init_io_state()

        # shortcircuit some socket methods
        self.bind = self.socket.bind
        self.bind_to_random_port = self.socket.bind_to_random_port
        self.connect = self.socket.connect
        self.setsockopt = self.socket.setsockopt
        self.getsockopt = self.socket.getsockopt
        self.setsockopt_string = self.socket.setsockopt_string
        self.getsockopt_string = self.socket.getsockopt_string
        self.setsockopt_unicode = self.socket.setsockopt_unicode
        self.getsockopt_unicode = self.socket.getsockopt_unicode

    def stop_on_recv(self):
        """Disable callback and automatic receiving."""
        return self.on_recv(None)

    def stop_on_send(self):
        """Disable callback on sending."""
        return self.on_send(None)

    def stop_on_err(self):
        """DEPRECATED, does nothing"""
        gen_log.warn("on_err does nothing, and will be removed")

    def on_err(self, callback: Callable):
        """DEPRECATED, does nothing"""
        gen_log.warn("on_err does nothing, and will be removed")

    @overload
    def on_recv(
        self,
        callback: Callable[[list[bytes]], Any],
    ) -> None: ...

    @overload
    def on_recv(
        self,
        callback: Callable[[list[bytes]], Any],
        copy: Literal[True],
    ) -> None: ...

    @overload
    def on_recv(
        self,
        callback: Callable[[list[zmq.Frame]], Any],
        copy: Literal[False],
    ) -> None: ...

    @overload
    def on_recv(
        self,
        callback: Callable[[list[zmq.Frame]], Any] | Callable[[list[bytes]], Any],
        copy: bool = ...,
    ): ...

    def on_recv(
        self,
        callback: Callable[[list[zmq.Frame]], Any] | Callable[[list[bytes]], Any],
        copy: bool = True,
    ) -> None:
        """Register a callback for when a message is ready to recv.

        There can be only one callback registered at a time, so each
        call to `on_recv` replaces previously registered callbacks.

        on_recv(None) disables recv event polling.

        Use on_recv_stream(callback) instead, to register a callback that will receive
        both this ZMQStream and the message, instead of just the message.

        Parameters
        ----------

        callback : callable
            callback must take exactly one argument, which will be a
            list, as returned by socket.recv_multipart()
            if callback is None, recv callbacks are disabled.
        copy : bool
            copy is passed directly to recv, so if copy is False,
            callback will receive Message objects. If copy is True,
            then callback will receive bytes/str objects.

        Returns : None
        """

        self._check_closed()
        assert callback is None or callable(callback)
        self._recv_callback = callback
        self._recv_copy = copy
        if callback is None:
            self._drop_io_state(zmq.POLLIN)
        else:
            self._add_io_state(zmq.POLLIN)

    @overload
    def on_recv_stream(
        self,
        callback: Callable[[ZMQStream, list[bytes]], Any],
    ) -> None: ...

    @overload
    def on_recv_stream(
        self,
        callback: Callable[[ZMQStream, list[bytes]], Any],
        copy: Literal[True],
    ) -> None: ...

    @overload
    def on_recv_stream(
        self,
        callback: Callable[[ZMQStream, list[zmq.Frame]], Any],
        copy: Literal[False],
    ) -> None: ...

    @overload
    def on_recv_stream(
        self,
        callback: (
            Callable[[ZMQStream, list[zmq.Frame]], Any]
            | Callable[[ZMQStream, list[bytes]], Any]
        ),
        copy: bool = ...,
    ): ...

    def on_recv_stream(
        self,
        callback: (
            Callable[[ZMQStream, list[zmq.Frame]], Any]
            | Callable[[ZMQStream, list[bytes]], Any]
        ),
        copy: bool = True,
    ):
        """Same as on_recv, but callback will get this stream as first argument

        callback must take exactly two arguments, as it will be called as::

            callback(stream, msg)

        Useful when a single callback should be used with multiple streams.
        """
        if callback is None:
            self.stop_on_recv()
        else:

            def stream_callback(msg):
                return callback(self, msg)

            self.on_recv(stream_callback, copy=copy)

    def on_send(
        self, callback: Callable[[Sequence[Any], zmq.MessageTracker | None], Any]
    ):
        """Register a callback to be called on each send

        There will be two arguments::

            callback(msg, status)

        * `msg` will be the list of sendable objects that was just sent
        * `status` will be the return result of socket.send_multipart(msg) -
          MessageTracker or None.

        Non-copying sends return a MessageTracker object whose
        `done` attribute will be True when the send is complete.
        This allows users to track when an object is safe to write to
        again.

        The second argument will always be None if copy=True
        on the send.

        Use on_send_stream(callback) to register a callback that will be passed
        this ZMQStream as the first argument, in addition to the other two.

        on_send(None) unregisters the send callback.

        Parameters
        ----------

        callback : callable
            callback must take exactly two arguments, which will be
            the message being sent (always a list),
            and the return result of socket.send_multipart(msg) -
            MessageTracker or None.

            if callback is None, send callbacks are disabled.
        """

        self._check_closed()
        assert callback is None or callable(callback)
        self._send_callback = callback

    def on_send_stream(
        self,
        callback: Callable[[ZMQStream, Sequence[Any], zmq.MessageTracker | None], Any],
    ):
        """Same as on_send, but callback will get this stream as first argument

        Callback will be passed three arguments::

            callback(stream, msg, status)

        Useful when a single callback should be used with multiple streams.
        """
        if callback is None:
            self.stop_on_send()
        else:
            self.on_send(lambda msg, status: callback(self, msg, status))

    def send(self, msg, flags=0, copy=True, track=False, callback=None, **kwargs):
        """Send a message, optionally also register a new callback for sends.
        See zmq.socket.send for details.
        """
        return self.send_multipart(
            [msg], flags=flags, copy=copy, track=track, callback=callback, **kwargs
        )

    def send_multipart(
        self,
        msg: Sequence[Any],
        flags: int = 0,
        copy: bool = True,
        track: bool = False,
        callback: Callable | None = None,
        **kwargs: Any,
    ) -> None:
        """Send a multipart message, optionally also register a new callback for sends.
        See zmq.socket.send_multipart for details.
        """
        kwargs.update(dict(flags=flags, copy=copy, track=track))
        self._send_queue.put((msg, kwargs))
        callback = callback or self._send_callback
        if callback is not None:
            self.on_send(callback)
        else:
            # noop callback
            self.on_send(lambda *args: None)
        self._add_io_state(zmq.POLLOUT)

    def send_string(
        self,
        u: str,
        flags: int = 0,
        encoding: str = 'utf-8',
        callback: Callable | None = None,
        **kwargs: Any,
    ):
        """Send a unicode message with an encoding.
        See zmq.socket.send_unicode for details.
        """
        if not isinstance(u, str):
            raise TypeError("unicode/str objects only")
        return self.send(u.encode(encoding), flags=flags, callback=callback, **kwargs)

    send_unicode = send_string

    def send_json(
        self,
        obj: Any,
        flags: int = 0,
        callback: Callable | None = None,
        **kwargs: Any,
    ):
        """Send json-serialized version of an object.
        See zmq.socket.send_json for details.
        """
        msg = jsonapi.dumps(obj)
        return self.send(msg, flags=flags, callback=callback, **kwargs)

    def send_pyobj(
        self,
        obj: Any,
        flags: int = 0,
        protocol: int = -1,
        callback: Callable | None = None,
        **kwargs: Any,
    ):
        """Send a Python object as a message using pickle to serialize.

        See zmq.Socket.send_pyobj for details.
        """
        msg = pickle.dumps(obj, protocol)
        return self.send(msg, flags, callback=callback, **kwargs)

    def _finish_flush(self):
        """callback for unsetting _flushed flag."""
        self._flushed = False

    def flush(self, flag: int = zmq.POLLIN | zmq.POLLOUT, limit: int | None = None):
        """Flush pending messages.

        This method safely handles all pending incoming and/or outgoing messages,
        bypassing the inner loop, passing them to the registered callbacks.

        A limit can be specified, to prevent blocking under high load.

        flush will return the first time ANY of these conditions are met:
            * No more events matching the flag are pending.
            * the total number of events handled reaches the limit.

        Note that if ``flag & POLLIN != 0``, recv events will be flushed even if no callback
        is registered, unlike normal IOLoop operation. This allows flush to be
        used to remove *and ignore* incoming messages.

        Parameters
        ----------
        flag : int
            default=POLLIN|POLLOUT
            0MQ poll flags.
            If flag & POLLIN,  recv events will be flushed.
            If flag & POLLOUT, send events will be flushed.
            Both flags can be set at once, which is the default.
        limit : None or int, optional
            The maximum number of messages to send or receive.
            Both send and recv count against this limit.

        Returns
        -------
        int :
            count of events handled (both send and recv)
        """
        self._check_closed()
        # unset self._flushed, so callbacks will execute, in case flush has
        # already been called this iteration
        already_flushed = self._flushed
        self._flushed = False
        # initialize counters
        count = 0

        def update_flag():
            """Update the poll flag, to prevent registering POLLOUT events
            if we don't have pending sends."""
            return flag & zmq.POLLIN | (self.sending() and flag & zmq.POLLOUT)

        flag = update_flag()
        if not flag:
            # nothing to do
            return 0
        self.poller.register(self.socket, flag)
        events = self.poller.poll(0)
        while events and (not limit or count < limit):
            s, event = events[0]
            if event & POLLIN:  # receiving
                self._handle_recv()
                count += 1
                if self.socket is None:
                    # break if socket was closed during callback
                    break
            if event & POLLOUT and self.sending():
                self._handle_send()
                count += 1
                if self.socket is None:
                    # break if socket was closed during callback
                    break

            flag = update_flag()
            if flag:
                self.poller.register(self.socket, flag)
                events = self.poller.poll(0)
            else:
                events = []
        if count:  # only bypass loop if we actually flushed something
            # skip send/recv callbacks this iteration
            self._flushed = True
            # reregister them at the end of the loop
            if not already_flushed:  # don't need to do it again
                self.io_loop.add_callback(self._finish_flush)
        elif already_flushed:
            self._flushed = True

        # update ioloop poll state, which may have changed
        self._rebuild_io_state()
        return count

    def set_close_callback(self, callback: Callable | None):
        """Call the given callback when the stream is closed."""
        self._close_callback = callback

    def close(self, linger: int | None = None) -> None:
        """Close this stream."""
        if self.socket is not None:
            if self.socket.closed:
                # fallback on raw fd for closed sockets
                # hopefully this happened promptly after close,
                # otherwise somebody else may have the FD
                warnings.warn(
                    f"Unregistering FD {self._fd} after closing socket. "
                    "This could result in unregistering handlers for the wrong socket. "
                    "Please use stream.close() instead of closing the socket directly.",
                    stacklevel=2,
                )
                self.io_loop.remove_handler(self._fd)
            else:
                self.io_loop.remove_handler(self.socket)
                self.socket.close(linger)
            self.socket = None  # type: ignore
            if self._close_callback:
                self._run_callback(self._close_callback)

    def receiving(self) -> bool:
        """Returns True if we are currently receiving from the stream."""
        return self._recv_callback is not None

    def sending(self) -> bool:
        """Returns True if we are currently sending to the stream."""
        return not self._send_queue.empty()

    def closed(self) -> bool:
        if self.socket is None:
            return True
        if self.socket.closed:
            # underlying socket has been closed, but not by us!
            # trigger our cleanup
            self.close()
            return True
        return False

    def _run_callback(self, callback, *args, **kwargs):
        """Wrap running callbacks in try/except to allow us to
        close our socket."""
        try:
            f = callback(*args, **kwargs)
            if isinstance(f, Awaitable):
                f = asyncio.ensure_future(f)
            else:
                f = None
        except Exception:
            gen_log.error("Uncaught exception in ZMQStream callback", exc_info=True)
            # Re-raise the exception so that IOLoop.handle_callback_exception
            # can see it and log the error
            raise

        if f is not None:
            # handle async callbacks
            def _log_error(f):
                try:
                    f.result()
                except Exception:
                    gen_log.error(
                        "Uncaught exception in ZMQStream callback", exc_info=True
                    )

            f.add_done_callback(_log_error)

    def _handle_events(self, fd, events):
        """This method is the actual handler for IOLoop, that gets called whenever
        an event on my socket is posted. It dispatches to _handle_recv, etc."""
        if not self.socket:
            gen_log.warning("Got events for closed stream %s", self)
            return
        try:
            zmq_events = self.socket.EVENTS
        except zmq.ContextTerminated:
            gen_log.warning("Got events for stream %s after terminating context", self)
            # trigger close check, this will unregister callbacks
            self.closed()
            return
        except zmq.ZMQError as e:
            # run close check
            # shadow sockets may have been closed elsewhere,
            # which should show up as ENOTSOCK here
            if self.closed():
                gen_log.warning(
                    "Got events for stream %s attached to closed socket: %s", self, e
                )
            else:
                gen_log.error("Error getting events for %s: %s", self, e)
            return
        try:
            # dispatch events:
            if zmq_events & zmq.POLLIN and self.receiving():
                self._handle_recv()
                if not self.socket:
                    return
            if zmq_events & zmq.POLLOUT and self.sending():
                self._handle_send()
                if not self.socket:
                    return

            # rebuild the poll state
            self._rebuild_io_state()
        except Exception:
            gen_log.error("Uncaught exception in zmqstream callback", exc_info=True)
            raise

    def _handle_recv(self):
        """Handle a recv event."""
        if self._flushed:
            return
        try:
            msg = self.socket.recv_multipart(zmq.NOBLOCK, copy=self._recv_copy)
        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                # state changed since poll event
                pass
            else:
                raise
        else:
            if self._recv_callback:
                callback = self._recv_callback
                self._run_callback(callback, msg)

    def _handle_send(self):
        """Handle a send event."""
        if self._flushed:
            return
        if not self.sending():
            gen_log.error("Shouldn't have handled a send event")
            return

        msg, kwargs = self._send_queue.get()
        try:
            status = self.socket.send_multipart(msg, **kwargs)
        except zmq.ZMQError as e:
            gen_log.error("SEND Error: %s", e)
            status = e
        if self._send_callback:
            callback = self._send_callback
            self._run_callback(callback, msg, status)

    def _check_closed(self):
        if not self.socket:
            raise OSError("Stream is closed")

    def _rebuild_io_state(self):
        """rebuild io state based on self.sending() and receiving()"""
        if self.socket is None:
            return
        state = 0
        if self.receiving():
            state |= zmq.POLLIN
        if self.sending():
            state |= zmq.POLLOUT

        self._state = state
        self._update_handler(state)

    def _add_io_state(self, state):
        """Add io_state to poller."""
        self._state = self._state | state
        self._update_handler(self._state)

    def _drop_io_state(self, state):
        """Stop poller from watching an io_state."""
        self._state = self._state & (~state)
        self._update_handler(self._state)

    def _update_handler(self, state):
        """Update IOLoop handler with state."""
        if self.socket is None:
            return

        if state & self.socket.events:
            # events still exist that haven't been processed
            # explicitly schedule handling to avoid missing events due to edge-triggered FDs
            self.io_loop.add_callback(lambda: self._handle_events(self.socket, 0))

    def _init_io_state(self):
        """initialize the ioloop event handler"""
        self.io_loop.add_handler(self.socket, self._handle_events, self.io_loop.READ)
pyzmq-26.4.0/zmq/green/000077500000000000000000000000001477374370200147105ustar00rootroot00000000000000pyzmq-26.4.0/zmq/green/__init__.py000066400000000000000000000025271477374370200170270ustar00rootroot00000000000000# -----------------------------------------------------------------------------
#  Copyright (C) 2011-2012 Travis Cline
#
#  This file is part of pyzmq
#  It is adapted from upstream project zeromq_gevent under the New BSD License
#
#  Distributed under the terms of the New BSD License.  The full license is in
#  the file LICENSE.BSD, distributed as part of this software.
# -----------------------------------------------------------------------------

"""zmq.green - gevent compatibility with zeromq.

Usage
-----

Instead of importing zmq directly, do so in the following manner:

::

    import zmq.green as zmq


Any calls that would have blocked the current thread will now only block the
current green thread.

This compatibility is accomplished by ensuring the nonblocking flag is set
before any blocking operation and the ØMQ file descriptor is polled internally
to trigger needed events.
"""

from __future__ import annotations

from typing import List

import zmq as _zmq
from zmq import *
from zmq.green.core import _Context, _Socket
from zmq.green.poll import _Poller

Context = _Context  # type: ignore
Socket = _Socket  # type: ignore
Poller = _Poller  # type: ignore

from zmq.green.device import device  # type: ignore

__all__: list[str] = []
# adding `__all__` to __init__.pyi gets mypy all confused
__all__.extend(_zmq.__all__)  # type: ignore
pyzmq-26.4.0/zmq/green/core.py000066400000000000000000000262671477374370200162270ustar00rootroot00000000000000# -----------------------------------------------------------------------------
#  Copyright (C) 2011-2012 Travis Cline
#
#  This file is part of pyzmq
#  It is adapted from upstream project zeromq_gevent under the New BSD License
#
#  Distributed under the terms of the New BSD License.  The full license is in
#  the file LICENSE.BSD, distributed as part of this software.
# -----------------------------------------------------------------------------

"""This module wraps the :class:`Socket` and :class:`Context` found in :mod:`pyzmq ` to be non blocking"""

from __future__ import annotations

import sys
import time
import warnings

import gevent
from gevent.event import AsyncResult
from gevent.hub import get_hub

import zmq
from zmq import Context as _original_Context
from zmq import Socket as _original_Socket

from .poll import _Poller

if hasattr(zmq, 'RCVTIMEO'):
    TIMEOS: tuple = (zmq.RCVTIMEO, zmq.SNDTIMEO)
else:
    TIMEOS = ()


def _stop(evt):
    """simple wrapper for stopping an Event, allowing for method rename in gevent 1.0"""
    try:
        evt.stop()
    except AttributeError:
        # gevent<1.0 compat
        evt.cancel()


class _Socket(_original_Socket):
    """Green version of :class:`zmq.Socket`

    The following methods are overridden:

        * send
        * recv

    To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
    is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.

    The `__state_changed` method is triggered when the zmq.FD for the socket is
    marked as readable and triggers the necessary read and write events (which
    are waited for in the recv and send methods).

    Some double underscore prefixes are used to minimize pollution of
    :class:`zmq.Socket`'s namespace.
    """

    __in_send_multipart = False
    __in_recv_multipart = False
    __writable = None
    __readable = None
    _state_event = None
    _gevent_bug_timeout = 11.6  # timeout for not trusting gevent
    _debug_gevent = False  # turn on if you think gevent is missing events
    _poller_class = _Poller
    _repr_cls = "zmq.green.Socket"

    def __init__(self, *a, **kw):
        super().__init__(*a, **kw)
        self.__in_send_multipart = False
        self.__in_recv_multipart = False
        self.__setup_events()

    def __del__(self):
        self.close()

    def close(self, linger=None):
        super().close(linger)
        self.__cleanup_events()

    def __cleanup_events(self):
        # close the _state_event event, keeps the number of active file descriptors down
        if getattr(self, '_state_event', None):
            _stop(self._state_event)
            self._state_event = None
        # if the socket has entered a close state resume any waiting greenlets
        self.__writable.set()
        self.__readable.set()

    def __setup_events(self):
        self.__readable = AsyncResult()
        self.__writable = AsyncResult()
        self.__readable.set()
        self.__writable.set()

        try:
            self._state_event = get_hub().loop.io(
                self.getsockopt(zmq.FD), 1
            )  # read state watcher
            self._state_event.start(self.__state_changed)
        except AttributeError:
            # for gevent<1.0 compatibility
            from gevent.core import read_event

            self._state_event = read_event(
                self.getsockopt(zmq.FD), self.__state_changed, persist=True
            )

    def __state_changed(self, event=None, _evtype=None):
        if self.closed:
            self.__cleanup_events()
            return
        try:
            # avoid triggering __state_changed from inside __state_changed
            events = super().getsockopt(zmq.EVENTS)
        except zmq.ZMQError as exc:
            self.__writable.set_exception(exc)
            self.__readable.set_exception(exc)
        else:
            if events & zmq.POLLOUT:
                self.__writable.set()
            if events & zmq.POLLIN:
                self.__readable.set()

    def _wait_write(self):
        assert self.__writable.ready(), "Only one greenlet can be waiting on this event"
        self.__writable = AsyncResult()
        # timeout is because libzmq cannot be trusted to properly signal a new send event:
        # this is effectively a maximum poll interval of _gevent_bug_timeout seconds
        tic = time.time()
        dt = self._gevent_bug_timeout
        if dt:
            timeout = gevent.Timeout(seconds=dt)
        else:
            timeout = None
        try:
            if timeout:
                timeout.start()
            self.__writable.get(block=True)
        except gevent.Timeout as t:
            if t is not timeout:
                raise
            toc = time.time()
            # gevent bug: get can raise timeout even on clean return
            # don't display zmq bug warning for gevent bug (this is getting ridiculous)
            if (
                self._debug_gevent
                and timeout
                and toc - tic > dt
                and self.getsockopt(zmq.EVENTS) & zmq.POLLOUT
            ):
                print(
                    f"BUG: gevent may have missed a libzmq send event on {self.FD}!",
                    file=sys.stderr,
                )
        finally:
            if timeout:
                timeout.close()
            self.__writable.set()

    def _wait_read(self):
        assert self.__readable.ready(), "Only one greenlet can be waiting on this event"
        self.__readable = AsyncResult()
        # timeout is because libzmq cannot always be trusted to play nice with libevent.
        # I can only confirm that this actually happens for send, but let's be symmetrical
        # with our dirty hacks.
        # this is effectively a maximum poll interval of _gevent_bug_timeout seconds
        tic = time.time()
        dt = self._gevent_bug_timeout
        if dt:
            timeout = gevent.Timeout(seconds=dt)
        else:
            timeout = None
        try:
            if timeout:
                timeout.start()
            self.__readable.get(block=True)
        except gevent.Timeout as t:
            if t is not timeout:
                raise
            toc = time.time()
            # gevent bug: get can raise timeout even on clean return
            # don't display zmq bug warning for gevent bug (this is getting ridiculous)
            if (
                self._debug_gevent
                and timeout
                and toc - tic > dt
                and self.getsockopt(zmq.EVENTS) & zmq.POLLIN
            ):
                print(
                    f"BUG: gevent may have missed a libzmq recv event on {self.FD}!",
                    file=sys.stderr,
                )
        finally:
            if timeout:
                timeout.close()
            self.__readable.set()

    def send(self, data, flags=0, copy=True, track=False, **kwargs):
        """send, which will only block current greenlet

        state_changed always fires exactly once (success or fail) at the
        end of this method.
        """

        # if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
        if flags & zmq.NOBLOCK:
            try:
                msg = super().send(data, flags, copy, track, **kwargs)
            finally:
                if not self.__in_send_multipart:
                    self.__state_changed()
            return msg
        # ensure the zmq.NOBLOCK flag is part of flags
        flags |= zmq.NOBLOCK
        while True:  # Attempt to complete this operation indefinitely, blocking the current greenlet
            try:
                # attempt the actual call
                msg = super().send(data, flags, copy, track)
            except zmq.ZMQError as e:
                # if the raised ZMQError is not EAGAIN, reraise
                if e.errno != zmq.EAGAIN:
                    if not self.__in_send_multipart:
                        self.__state_changed()
                    raise
            else:
                if not self.__in_send_multipart:
                    self.__state_changed()
                return msg
            # defer to the event loop until we're notified the socket is writable
            self._wait_write()

    def recv(self, flags=0, copy=True, track=False):
        """recv, which will only block current greenlet

        state_changed always fires exactly once (success or fail) at the
        end of this method.
        """
        if flags & zmq.NOBLOCK:
            try:
                msg = super().recv(flags, copy, track)
            finally:
                if not self.__in_recv_multipart:
                    self.__state_changed()
            return msg

        flags |= zmq.NOBLOCK
        while True:
            try:
                msg = super().recv(flags, copy, track)
            except zmq.ZMQError as e:
                if e.errno != zmq.EAGAIN:
                    if not self.__in_recv_multipart:
                        self.__state_changed()
                    raise
            else:
                if not self.__in_recv_multipart:
                    self.__state_changed()
                return msg
            self._wait_read()

    def recv_into(self, buffer, /, *, nbytes=0, flags=0):
        """recv_into, which will only block current greenlet"""
        if flags & zmq.DONTWAIT:
            return super().recv_into(buffer, nbytes=nbytes, flags=flags)
        flags |= zmq.DONTWAIT
        while True:
            try:
                recvd = super().recv_into(buffer, nbytes=nbytes, flags=flags)
            except zmq.ZMQError as e:
                if e.errno != zmq.EAGAIN:
                    self.__state_changed()
                    raise
            else:
                self.__state_changed()
                return recvd
            self._wait_read()

    def send_multipart(self, *args, **kwargs):
        """wrap send_multipart to prevent state_changed on each partial send"""
        self.__in_send_multipart = True
        try:
            msg = super().send_multipart(*args, **kwargs)
        finally:
            self.__in_send_multipart = False
            self.__state_changed()
        return msg

    def recv_multipart(self, *args, **kwargs):
        """wrap recv_multipart to prevent state_changed on each partial recv"""
        self.__in_recv_multipart = True
        try:
            msg = super().recv_multipart(*args, **kwargs)
        finally:
            self.__in_recv_multipart = False
            self.__state_changed()
        return msg

    def get(self, opt):
        """trigger state_changed on getsockopt(EVENTS)"""
        if opt in TIMEOS:
            warnings.warn(
                "TIMEO socket options have no effect in zmq.green", UserWarning
            )
        optval = super().get(opt)
        if opt == zmq.EVENTS:
            self.__state_changed()
        return optval

    def set(self, opt, val):
        """set socket option"""
        if opt in TIMEOS:
            warnings.warn(
                "TIMEO socket options have no effect in zmq.green", UserWarning
            )
        return super().set(opt, val)


class _Context(_original_Context[_Socket]):
    """Replacement for :class:`zmq.Context`

    Ensures that the greened Socket above is used in calls to `socket`.
    """

    _socket_class = _Socket
    _repr_cls = "zmq.green.Context"

    # avoid sharing instance with base Context class
    _instance = None
pyzmq-26.4.0/zmq/green/device.py000066400000000000000000000017221477374370200165230ustar00rootroot00000000000000# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

import zmq
from zmq.green import Poller


def device(device_type, isocket, osocket):
    """Start a zeromq device (gevent-compatible).

    Unlike the true zmq.device, this does not release the GIL.

    Parameters
    ----------
    device_type : (QUEUE, FORWARDER, STREAMER)
        The type of device to start (ignored).
    isocket : Socket
        The Socket instance for the incoming traffic.
    osocket : Socket
        The Socket instance for the outbound traffic.
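
    A minimal QUEUE device sketch (endpoints are illustrative)::

        import zmq
        import zmq.green

        ctx = zmq.green.Context.instance()
        frontend = ctx.socket(zmq.ROUTER)
        frontend.bind("tcp://*:5559")
        backend = ctx.socket(zmq.DEALER)
        backend.bind("tcp://*:5560")
        zmq.green.device(zmq.QUEUE, frontend, backend)  # loops forever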
    """
    p = Poller()
    if osocket == -1:
        osocket = isocket
    p.register(isocket, zmq.POLLIN)
    p.register(osocket, zmq.POLLIN)

    while True:
        events = dict(p.poll())
        if isocket in events:
            osocket.send_multipart(isocket.recv_multipart())
        if osocket in events:
            isocket.send_multipart(osocket.recv_multipart())
pyzmq-26.4.0/zmq/green/eventloop/000077500000000000000000000000001477374370200167235ustar00rootroot00000000000000pyzmq-26.4.0/zmq/green/eventloop/__init__.py000066400000000000000000000001041477374370200210270ustar00rootroot00000000000000from zmq.green.eventloop.ioloop import IOLoop

__all__ = ['IOLoop']
pyzmq-26.4.0/zmq/green/eventloop/ioloop.py000066400000000000000000000000531477374370200205740ustar00rootroot00000000000000from zmq.eventloop.ioloop import *  # noqa
pyzmq-26.4.0/zmq/green/eventloop/zmqstream.py000066400000000000000000000004431477374370200213210ustar00rootroot00000000000000from zmq.eventloop import zmqstream
from zmq.green.eventloop.ioloop import IOLoop


class ZMQStream(zmqstream.ZMQStream):
    def __init__(self, socket, io_loop=None):
        io_loop = io_loop or IOLoop.instance()
        super().__init__(socket, io_loop=io_loop)


__all__ = ["ZMQStream"]
pyzmq-26.4.0/zmq/green/poll.py000066400000000000000000000056641477374370200162430ustar00rootroot00000000000000from __future__ import annotations

import gevent
from gevent import select

import zmq
from zmq import Poller as _original_Poller


class _Poller(_original_Poller):
    """Replacement for :class:`zmq.Poller`

    Ensures that blocking calls to :meth:`poll` only suspend the current
    greenlet instead of blocking the whole thread.
    """

    _gevent_bug_timeout = 1.33  # minimum poll interval, for working around gevent bug

    def _get_descriptors(self):
        """Returns three elements tuple with socket descriptors ready
        for gevent.select.select
        """
        rlist = []
        wlist = []
        xlist = []

        for socket, flags in self.sockets:
            if isinstance(socket, zmq.Socket):
                rlist.append(socket.getsockopt(zmq.FD))
                continue
            elif isinstance(socket, int):
                fd = socket
            elif hasattr(socket, 'fileno'):
                try:
                    fd = int(socket.fileno())
                except Exception:
                    raise ValueError('fileno() must return a valid integer fd')
            else:
                raise TypeError(
                    'Socket must be a 0MQ socket, an integer fd '
                    f'or have a fileno() method: {socket!r}'
                )

            if flags & zmq.POLLIN:
                rlist.append(fd)
            if flags & zmq.POLLOUT:
                wlist.append(fd)
            if flags & zmq.POLLERR:
                xlist.append(fd)

        return (rlist, wlist, xlist)

    def poll(self, timeout=-1):
        """Overridden method to ensure that the green version of
        Poller is used.

        Behaves the same as :meth:`zmq.core.Poller.poll`
        """

        if timeout is None:
            timeout = -1

        if timeout < 0:
            timeout = -1

        rlist = None
        wlist = None
        xlist = None

        if timeout > 0:
            tout = gevent.Timeout.start_new(timeout / 1000.0)
        else:
            tout = None

        try:
            # Loop until timeout or events available
            rlist, wlist, xlist = self._get_descriptors()
            while True:
                events = super().poll(0)
                if events or timeout == 0:
                    return events

                # wait for activity on sockets in a green way
                # set a minimum poll frequency,
                # because gevent < 1.0 cannot be trusted to catch edge-triggered FD events
                _bug_timeout = gevent.Timeout.start_new(self._gevent_bug_timeout)
                try:
                    select.select(rlist, wlist, xlist)
                except gevent.Timeout as t:
                    if t is not _bug_timeout:
                        raise
                finally:
                    _bug_timeout.cancel()

        except gevent.Timeout as t:
            if t is not tout:
                raise
            return []
        finally:
            if timeout > 0:
                tout.cancel()
pyzmq-26.4.0/zmq/log/000077500000000000000000000000001477374370200143715ustar00rootroot00000000000000pyzmq-26.4.0/zmq/log/__init__.py000066400000000000000000000000001477374370200164700ustar00rootroot00000000000000pyzmq-26.4.0/zmq/log/__main__.py000066400000000000000000000076451477374370200164770ustar00rootroot00000000000000"""pyzmq log watcher.

Easily view log messages published by the PUBHandler in zmq.log.handlers

Designed to be run as an executable module - try this to see options:
    python -m zmq.log -h

Subscribes to the '' (empty string) topic by default which means it will work
out-of-the-box with a PUBHandler object instantiated with default settings.
If you change the root topic with PUBHandler.setRootTopic() you must pass
the value to this script with the --topic argument.

Note that the default formats for the PUBHandler object selectively include
the log level in the message. This creates redundancy in this script as it
always prints the topic of the message, which includes the log level.
Consider overriding the default formats with PUBHandler.setFormatter() to
avoid this issue.
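
For example, to watch a PUBHandler publishing at an illustrative address,
with timestamps and colored output::

    python -m zmq.log tcp://127.0.0.1:12345 --timestamp --color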

"""

# encoding: utf-8

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import argparse
from datetime import datetime
from typing import Dict

import zmq

parser = argparse.ArgumentParser('ZMQ Log Watcher')
parser.add_argument('zmq_pub_url', type=str, help='URL to a ZMQ publisher socket.')
parser.add_argument(
    '-t',
    '--topic',
    type=str,
    default='',
    help='Only receive messages that start with this topic.',
)
parser.add_argument(
    '--timestamp', action='store_true', help='Append local time to the log messages.'
)
parser.add_argument(
    '--separator',
    type=str,
    default=' | ',
    help='String to print between topic and message.',
)
parser.add_argument(
    '--dateformat',
    type=str,
    default='%Y-%d-%m %H:%M',
    help='Set alternative date format for use with --timestamp.',
)
parser.add_argument(
    '--align',
    action='store_true',
    default=False,
    help='Try to align messages by the width of their topics.',
)
parser.add_argument(
    '--color',
    action='store_true',
    default=False,
    help='Color the output based on the error level. Requires the colorama module.',
)
args = parser.parse_args()


if args.color:
    import colorama

    colorama.init()
    colors = {
        'DEBUG': colorama.Fore.LIGHTCYAN_EX,
        'INFO': colorama.Fore.LIGHTWHITE_EX,
        'WARNING': colorama.Fore.YELLOW,
        'ERROR': colorama.Fore.LIGHTRED_EX,
        'CRITICAL': colorama.Fore.LIGHTRED_EX,
        '__RESET__': colorama.Fore.RESET,
    }
else:
    colors = {}


ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.subscribe(args.topic.encode("utf8"))
sub.connect(args.zmq_pub_url)

topic_widths: Dict[int, int] = {}

while True:
    try:
        if sub.poll(10, zmq.POLLIN):
            topic, msg = sub.recv_multipart()
            topics = topic.decode('utf8').strip().split('.')

            if args.align:
                topics.extend(' ' for extra in range(len(topics), len(topic_widths)))
                aligned_parts = []
                for key, part in enumerate(topics):
                    topic_widths[key] = max(len(part), topic_widths.get(key, 0))
                    fmt = ''.join(('{:<', str(topic_widths[key]), '}'))
                    aligned_parts.append(fmt.format(part))

            if len(topics) == 1:
                level = topics[0]
            else:
                level = topics[1]

            fields = {
                'msg': msg.decode('utf8').strip(),
                'ts': (
                    datetime.now().strftime(args.dateformat) + ' '
                    if args.timestamp
                    else ''
                ),
                'aligned': (
                    '.'.join(aligned_parts)
                    if args.align
                    else topic.decode('utf8').strip()
                ),
                'color': colors.get(level, ''),
                'color_rst': colors.get('__RESET__', ''),
                'sep': args.separator,
            }
            print('{ts}{color}{aligned}{sep}{msg}{color_rst}'.format(**fields))
    except KeyboardInterrupt:
        break

sub.disconnect(args.zmq_pub_url)
if args.color:
    print(colorama.Fore.RESET)
pyzmq-26.4.0/zmq/log/handlers.py000066400000000000000000000160741477374370200165530ustar00rootroot00000000000000"""pyzmq logging handlers.

This mainly defines the PUBHandler object for publishing logging messages over
a zmq.PUB socket.

The PUBHandler can be used with the regular logging module, as in::

    >>> import logging
    >>> handler = PUBHandler('tcp://127.0.0.1:12345')
    >>> handler.root_topic = 'foo'
    >>> logger = logging.getLogger('foobar')
    >>> logger.setLevel(logging.DEBUG)
    >>> logger.addHandler(handler)

Or using ``dictConfig``, as in::

    >>> from logging.config import dictConfig
    >>> socket = Context.instance().socket(PUB)
    >>> socket.connect('tcp://127.0.0.1:12345')
    >>> dictConfig({
    >>>     'version': 1,
    >>>     'handlers': {
    >>>         'zmq': {
    >>>             'class': 'zmq.log.handlers.PUBHandler',
    >>>             'level': logging.DEBUG,
    >>>             'root_topic': 'foo',
    >>>             'interface_or_socket': socket
    >>>         }
    >>>     },
    >>>     'root': {
    >>>         'level': 'DEBUG',
    >>>         'handlers': ['zmq'],
    >>>     }
    >>> })


After this point, all messages logged by ``logger`` will be published on the
PUB socket.
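
A minimal subscriber sketch that prints everything published by the handler
(assuming the handler bound the illustrative address above)::

    import zmq

    sub = zmq.Context.instance().socket(zmq.SUB)
    sub.connect('tcp://127.0.0.1:12345')
    sub.subscribe(b'')  # receive all topics
    while True:
        topic, message = sub.recv_multipart()
        print(topic.decode('utf8'), message.decode('utf8'))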

Code adapted from StarCluster:

    https://github.com/jtriley/StarCluster/blob/StarCluster-0.91/starcluster/logger.py
"""

from __future__ import annotations

import logging
from copy import copy

import zmq

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.


TOPIC_DELIM = "::"  # delimiter for splitting topics on the receiving end.


class PUBHandler(logging.Handler):
    """A basic logging handler that emits log messages through a PUB socket.

    Takes a PUB socket already bound to interfaces or an interface to bind to.

    Example::

        sock = context.socket(zmq.PUB)
        sock.bind('inproc://log')
        handler = PUBHandler(sock)

    Or::

        handler = PUBHandler('inproc://log')

    These are equivalent.

    Log messages handled by this handler are broadcast with ZMQ topics:
    ``this.root_topic`` comes first, followed by the log level
    (DEBUG, INFO, etc.), followed by any additional subtopics specified in the
    message by ``log.debug("subtopic.subsub::the real message")``.
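
    For example, with ``root_topic = 'foo'``, a call to
    ``log.debug("bar::hello")`` is published under the ZMQ topic
    ``foo.DEBUG.bar``, with the formatted record as the message body.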
    """

    ctx: zmq.Context
    socket: zmq.Socket

    def __init__(
        self,
        interface_or_socket: str | zmq.Socket,
        context: zmq.Context | None = None,
        root_topic: str = '',
    ) -> None:
        logging.Handler.__init__(self)
        self.root_topic = root_topic
        self.formatters = {
            logging.DEBUG: logging.Formatter(
                "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"
            ),
            logging.INFO: logging.Formatter("%(message)s\n"),
            logging.WARN: logging.Formatter(
                "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"
            ),
            logging.ERROR: logging.Formatter(
                "%(levelname)s %(filename)s:%(lineno)d - %(message)s - %(exc_info)s\n"
            ),
            logging.CRITICAL: logging.Formatter(
                "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"
            ),
        }
        if isinstance(interface_or_socket, zmq.Socket):
            self.socket = interface_or_socket
            self.ctx = self.socket.context
        else:
            self.ctx = context or zmq.Context()
            self.socket = self.ctx.socket(zmq.PUB)
            self.socket.bind(interface_or_socket)

    @property
    def root_topic(self) -> str:
        return self._root_topic

    @root_topic.setter
    def root_topic(self, value: str):
        self.setRootTopic(value)

    def setRootTopic(self, root_topic: str):
        """Set the root topic for this handler.

        This value is prepended to all messages published by this handler, and it
        defaults to the empty string ''. When you subscribe to this socket, you must
        set your subscription to an empty string, or to at least the first letter of
        the binary representation of this string to ensure you receive any messages
        from this handler.

        If you use the default empty string root topic, messages will begin with
        the binary representation of the log level string (INFO, WARN, etc.).
        Note that ZMQ SUB sockets can have multiple subscriptions.
        """
        if isinstance(root_topic, bytes):
            root_topic = root_topic.decode("utf8")
        self._root_topic = root_topic

    def setFormatter(self, fmt, level=logging.NOTSET):
        """Set the Formatter for this handler.

        If no level is provided, the same format is used for all levels. This
        will overwrite all selective formatters set in the object constructor.
        """
        if level == logging.NOTSET:
            for fmt_level in self.formatters.keys():
                self.formatters[fmt_level] = fmt
        else:
            self.formatters[level] = fmt

    def format(self, record):
        """Format a record."""
        return self.formatters[record.levelno].format(record)

    def emit(self, record):
        """Emit a log message on my socket."""

        # LogRecord.getMessage explicitly allows msg to be anything _castable_ to a str
        try:
            topic, msg = str(record.msg).split(TOPIC_DELIM, 1)
        except ValueError:
            topic = ""
        else:
            # copy to avoid mutating LogRecord in-place
            record = copy(record)
            record.msg = msg

        try:
            bmsg = self.format(record).encode("utf8")
        except Exception:
            self.handleError(record)
            return

        topic_list = []

        if self.root_topic:
            topic_list.append(self.root_topic)

        topic_list.append(record.levelname)

        if topic:
            topic_list.append(topic)

        btopic = '.'.join(topic_list).encode("utf8", "replace")

        self.socket.send_multipart([btopic, bmsg])


class TopicLogger(logging.Logger):
    """A simple wrapper that takes an additional argument to log methods.

    All the regular methods exist, but instead of one msg argument, two
    arguments: topic, msg are passed.

    That is::

        logger.debug('msg')

    Would become::

        logger.debug('topic.sub', 'msg')
    """

    def log(self, level, topic, msg, *args, **kwargs):
        """Log 'msg % args' with level and topic.

        To pass exception information, use the keyword argument exc_info
        with a True value::

            logger.log(level, "zmq.fun", "We have a %s",
                    "mysterious problem", exc_info=1)
        """
        logging.Logger.log(self, level, f'{topic}{TOPIC_DELIM}{msg}', *args, **kwargs)


# Generate the methods of TopicLogger, since they are just adding a
# topic prefix to a message.
for name in "debug warn warning error critical fatal".split():
    try:
        meth = getattr(logging.Logger, name)
    except AttributeError:
        # some methods are missing, e.g. Logger.warn was removed from Python 3.13
        continue
    setattr(
        TopicLogger,
        name,
        # bind `meth` via a default argument so each generated method keeps its
        # own Logger method (a plain closure would leave them all calling the
        # last `meth` from the loop); these level-less methods take
        # (topic, msg), matching the class docstring above
        lambda self, topic, msg, *args, _meth=meth, **kwargs: _meth(
            self, topic + TOPIC_DELIM + msg, *args, **kwargs
        ),
    )
pyzmq-26.4.0/zmq/py.typed000066400000000000000000000000001477374370200152750ustar00rootroot00000000000000pyzmq-26.4.0/zmq/ssh/000077500000000000000000000000001477374370200144055ustar00rootroot00000000000000pyzmq-26.4.0/zmq/ssh/__init__.py000066400000000000000000000000351477374370200165140ustar00rootroot00000000000000from zmq.ssh.tunnel import *
pyzmq-26.4.0/zmq/ssh/forward.py000066400000000000000000000063661477374370200164360ustar00rootroot00000000000000#
# This file is adapted from a paramiko demo, and thus licensed under LGPL 2.1.
# Original Copyright (C) 2003-2007  Robey Pointer 
# Edits Copyright (C) 2010 The IPython Team
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA  02111-1301  USA.

"""
Sample script showing how to do local port forwarding over paramiko.

This script connects to the requested SSH server and sets up local port
forwarding (the openssh -L option) from a local port through a tunneled
connection to a destination reachable from the SSH server machine.
"""

import logging
import select
import socketserver

logger = logging.getLogger('ssh')


class ForwardServer(socketserver.ThreadingTCPServer):
    daemon_threads = True
    allow_reuse_address = True


class Handler(socketserver.BaseRequestHandler):
    def handle(self):
        try:
            chan = self.ssh_transport.open_channel(
                'direct-tcpip',
                (self.chain_host, self.chain_port),
                self.request.getpeername(),
            )
        except Exception as e:
            logger.debug(
                'Incoming request to %s:%d failed: %r',
                self.chain_host,
                self.chain_port,
                e,
            )
            return
        if chan is None:
            logger.debug(
                'Incoming request to %s:%d was rejected by the SSH server.',
                self.chain_host,
                self.chain_port,
            )
            return

        logger.debug(
            f'Connected!  Tunnel open {self.request.getpeername()!r} -> {chan.getpeername()!r} -> {(self.chain_host, self.chain_port)!r}'
        )
        while True:
            r, w, x = select.select([self.request, chan], [], [])
            if self.request in r:
                data = self.request.recv(1024)
                if len(data) == 0:
                    break
                chan.send(data)
            if chan in r:
                data = chan.recv(1024)
                if len(data) == 0:
                    break
                self.request.send(data)
        chan.close()
        self.request.close()
        logger.debug('Tunnel closed ')


def forward_tunnel(local_port, remote_host, remote_port, transport):
    # this is a little convoluted, but lets me configure things for the Handler
    # object.  (SocketServer doesn't give Handlers any way to access the outer
    # server normally.)
    class SubHandler(Handler):
        chain_host = remote_host
        chain_port = remote_port
        ssh_transport = transport

    ForwardServer(('127.0.0.1', local_port), SubHandler).serve_forever()


__all__ = ['forward_tunnel']
pyzmq-26.4.0/zmq/ssh/tunnel.py000066400000000000000000000320551477374370200162710ustar00rootroot00000000000000"""Basic ssh tunnel utilities, and convenience functions for tunneling
zeromq connections.
"""

# Copyright (C) 2010-2011  IPython Development Team
# Copyright (C) 2011- PyZMQ Developers
#
# Redistributed from IPython under the terms of the BSD License.

import atexit
import os
import re
import signal
import socket
import sys
import warnings
from getpass import getpass, getuser
from multiprocessing import Process

try:
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DeprecationWarning)
        import paramiko

        SSHException = paramiko.ssh_exception.SSHException
except ImportError:
    paramiko = None  # type: ignore

    class SSHException(Exception):  # type: ignore
        pass

else:
    from .forward import forward_tunnel

try:
    import pexpect
except ImportError:
    pexpect = None


class MaxRetryExceeded(Exception):
    pass


def select_random_ports(n):
    """Select and return n random ports that are available."""
    ports = []
    sockets = []
    for i in range(n):
        sock = socket.socket()
        sock.bind(('', 0))
        ports.append(sock.getsockname()[1])
        sockets.append(sock)
    for sock in sockets:
        sock.close()
    return ports


# -----------------------------------------------------------------------------
# Check for passwordless login
# -----------------------------------------------------------------------------
_password_pat = re.compile(rb'pass(word|phrase)', re.IGNORECASE)


def try_passwordless_ssh(server, keyfile, paramiko=None):
    """Attempt to make an ssh connection without a password.
    This is mainly used for requiring password input only once
    when many tunnels may be connected to the same server.

    If paramiko is None, the default for the platform is chosen.
    """
    if paramiko is None:
        paramiko = sys.platform == 'win32'
    if not paramiko:
        f = _try_passwordless_openssh
    else:
        f = _try_passwordless_paramiko
    return f(server, keyfile)


def _try_passwordless_openssh(server, keyfile):
    """Try passwordless login with shell ssh command."""
    if pexpect is None:
        raise ImportError("pexpect unavailable, use paramiko")
    cmd = 'ssh -f ' + server
    if keyfile:
        cmd += ' -i ' + keyfile
    cmd += ' exit'

    # pop SSH_ASKPASS from env
    env = os.environ.copy()
    env.pop('SSH_ASKPASS', None)

    ssh_newkey = 'Are you sure you want to continue connecting'
    p = pexpect.spawn(cmd, env=env)

    MAX_RETRY = 10

    for _ in range(MAX_RETRY):
        try:
            i = p.expect([ssh_newkey, _password_pat], timeout=0.1)
            if i == 0:
                raise SSHException(
                    'The authenticity of the host can\'t be established.'
                )
        except pexpect.TIMEOUT:
            continue
        except pexpect.EOF:
            return True
        else:
            return False

    raise MaxRetryExceeded(f"Failed after {MAX_RETRY} attempts")


def _try_passwordless_paramiko(server, keyfile):
    """Try passwordless login with paramiko."""
    if paramiko is None:
        msg = "Paramiko unavailable, "
        if sys.platform == 'win32':
            msg += "Paramiko is required for ssh tunneled connections on Windows."
        else:
            msg += "use OpenSSH."
        raise ImportError(msg)
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    known_hosts = os.path.expanduser("~/.ssh/known_hosts")
    try:
        client.load_host_keys(known_hosts)
    except FileNotFoundError:
        pass

    policy_name = os.environ.get("PYZMQ_PARAMIKO_HOST_KEY_POLICY", None)
    if policy_name:
        policy = getattr(paramiko, f"{policy_name}Policy")
        client.set_missing_host_key_policy(policy())
    try:
        client.connect(
            server, port, username=username, key_filename=keyfile, look_for_keys=True
        )
    except paramiko.AuthenticationException:
        return False
    else:
        client.close()
        return True


def tunnel_connection(
    socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60
):
    """Connect a socket to an address via an ssh tunnel.

    This is a wrapper for socket.connect(addr), when addr is not accessible
    from the local machine.  It simply creates an ssh tunnel using the remaining args,
    and calls socket.connect('tcp://localhost:lport') where lport is the randomly
    selected local port of the tunnel.
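
    A sketch (the address and server are illustrative)::

        import zmq
        from zmq.ssh import tunnel_connection

        ctx = zmq.Context.instance()
        sock = ctx.socket(zmq.REQ)
        tunnel = tunnel_connection(
            sock, "tcp://10.0.0.5:5555", "user@gateway.example.com"
        )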

    """
    new_url, tunnel = open_tunnel(
        addr,
        server,
        keyfile=keyfile,
        password=password,
        paramiko=paramiko,
        timeout=timeout,
    )
    socket.connect(new_url)
    return tunnel


def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
    """Open a tunneled connection from a 0MQ url.

    For use inside tunnel_connection.

    Returns
    -------

    (url, tunnel) : (str, object)
        The 0MQ url that has been forwarded, and the tunnel object
    """

    lport = select_random_ports(1)[0]
    transport, addr = addr.split('://')
    ip, rport = addr.split(':')
    rport = int(rport)
    if paramiko is None:
        paramiko = sys.platform == 'win32'
    if paramiko:
        tunnelf = paramiko_tunnel
    else:
        tunnelf = openssh_tunnel

    tunnel = tunnelf(
        lport,
        rport,
        server,
        remoteip=ip,
        keyfile=keyfile,
        password=password,
        timeout=timeout,
    )
    return f'tcp://127.0.0.1:{lport}', tunnel


def openssh_tunnel(
    lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60
):
    """Create an ssh tunnel using command-line ssh that connects port lport
    on this machine to localhost:rport on server.  The tunnel
    will automatically close when not in use, remaining open
    for a minimum of timeout seconds for an initial connection.

    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
    as seen from `server`.

    keyfile and password may be specified, but ssh config is checked for defaults.

    Parameters
    ----------

    lport : int
        local port for connecting to the tunnel from this machine.
    rport : int
        port on the remote machine to connect to.
    server : str
        The ssh server to connect to. The full ssh server string will be parsed.
        user@server:port
    remoteip : str [Default: 127.0.0.1]
        The remote ip, specifying the destination of the tunnel.
        Default is localhost, which means that the tunnel would redirect
        localhost:lport on this machine to localhost:rport on the *server*.

    keyfile : str; path to private key file
        This specifies a key to be used in ssh login, default None.
        Regular default ssh keys will be used without specifying this argument.
    password : str;
        Your ssh password to the ssh server. Note that if this is left None,
        you will be prompted for it if passwordless key based login is unavailable.
    timeout : int [default: 60]
        The time (in seconds) after which no activity will result in the tunnel
        closing.  This prevents orphaned tunnels from running forever.
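
    A sketch (ports and server are illustrative)::

        # connections to localhost:5555 are forwarded to port 5555
        # on the machine running the ssh server
        openssh_tunnel(5555, 5555, "user@gateway.example.com")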
    """
    if pexpect is None:
        raise ImportError("pexpect unavailable, use paramiko_tunnel")
    ssh = "ssh "
    if keyfile:
        ssh += "-i " + keyfile

    if ':' in server:
        server, port = server.split(':')
        ssh += f" -p {port}"

    cmd = f"{ssh} -O check {server}"
    (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
    if not exitstatus:
        pid = int(output[output.find(b"(pid=") + 5 : output.find(b")")])
        cmd = f"{ssh} -O forward -L 127.0.0.1:{lport}:{remoteip}:{rport} {server}"
        (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
        if not exitstatus:
            atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1))
            return pid
    cmd = f"{ssh} -f -S none -L 127.0.0.1:{lport}:{remoteip}:{rport} {server} sleep {timeout}"

    # pop SSH_ASKPASS from env
    env = os.environ.copy()
    env.pop('SSH_ASKPASS', None)

    ssh_newkey = 'Are you sure you want to continue connecting'
    tunnel = pexpect.spawn(cmd, env=env)
    failed = False
    MAX_RETRY = 10
    for _ in range(MAX_RETRY):
        try:
            i = tunnel.expect([ssh_newkey, _password_pat], timeout=0.1)
            if i == 0:
                raise SSHException(
                    'The authenticity of the host can\'t be established.'
                )
        except pexpect.TIMEOUT:
            continue
        except pexpect.EOF:
            if tunnel.exitstatus:
                print(tunnel.exitstatus)
                print(tunnel.before)
                print(tunnel.after)
                raise RuntimeError(f"tunnel '{cmd}' failed to start")
            else:
                return tunnel.pid
        else:
            if failed:
                print("Password rejected, try again")
                password = None
            if password is None:
                password = getpass(f"{server}'s password: ")
            tunnel.sendline(password)
            failed = True
    raise MaxRetryExceeded(f"Failed after {MAX_RETRY} attempts")


def _stop_tunnel(cmd):
    pexpect.run(cmd)


def _split_server(server):
    if '@' in server:
        username, server = server.split('@', 1)
    else:
        username = getuser()
    if ':' in server:
        server, port = server.split(':')
        port = int(port)
    else:
        port = 22
    return username, server, port


def paramiko_tunnel(
    lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60
):
    """launch a tunner with paramiko in a subprocess. This should only be used
    when shell ssh is unavailable (e.g. Windows).

    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
    as seen from `server`.

    If you are familiar with ssh tunnels, this creates the tunnel:

    ssh server -L localhost:lport:remoteip:rport

    keyfile and password may be specified, but ssh config is checked for defaults.


    Parameters
    ----------

    lport : int
        local port for connecting to the tunnel from this machine.
    rport : int
        port on the remote machine to connect to.
    server : str
        The ssh server to connect to. The full ssh server string will be parsed.
        user@server:port
    remoteip : str [Default: 127.0.0.1]
        The remote ip, specifying the destination of the tunnel.
        Default is localhost, which means that the tunnel would redirect
        localhost:lport on this machine to localhost:rport on the *server*.

    keyfile : str; path to private key file
        This specifies a key to be used in ssh login, default None.
        Regular default ssh keys will be used without specifying this argument.
    password : str;
        Your ssh password to the ssh server. Note that if this is left None,
        you will be prompted for it if passwordless key based login is unavailable.
    timeout : int [default: 60]
        The time (in seconds) after which no activity will result in the tunnel
        closing.  This prevents orphaned tunnels from running forever.

    """
    if paramiko is None:
        raise ImportError("Paramiko not available")

    if password is None:
        if not _try_passwordless_paramiko(server, keyfile):
            password = getpass(f"{server}'s password: ")

    p = Process(
        target=_paramiko_tunnel,
        args=(lport, rport, server, remoteip),
        kwargs=dict(keyfile=keyfile, password=password),
    )
    p.daemon = True
    p.start()
    return p


def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
    """Function for actually starting a paramiko tunnel, to be passed
    to multiprocessing.Process(target=this), and not called directly.
    """
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.WarningPolicy())

    try:
        client.connect(
            server,
            port,
            username=username,
            key_filename=keyfile,
            look_for_keys=True,
            password=password,
        )
    #    except paramiko.AuthenticationException:
    #        if password is None:
    #            password = getpass("%s@%s's password: "%(username, server))
    #            client.connect(server, port, username=username, password=password)
    #        else:
    #            raise
    except Exception as e:
        print(f'*** Failed to connect to {server}:{port}: {e!r}')
        sys.exit(1)

    # Don't let SIGINT kill the tunnel subprocess
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    try:
        forward_tunnel(lport, remoteip, rport, client.get_transport())
    except KeyboardInterrupt:
        print('SIGINT: Port forwarding stopped cleanly')
        sys.exit(0)
    except Exception as e:
        print(f"Port forwarding stopped uncleanly: {e}")
        sys.exit(255)


if sys.platform == 'win32':
    ssh_tunnel = paramiko_tunnel
else:
    ssh_tunnel = openssh_tunnel


__all__ = [
    'tunnel_connection',
    'ssh_tunnel',
    'openssh_tunnel',
    'paramiko_tunnel',
    'try_passwordless_ssh',
]
pyzmq-26.4.0/zmq/sugar/000077500000000000000000000000001477374370200147315ustar00rootroot00000000000000pyzmq-26.4.0/zmq/sugar/__init__.py000066400000000000000000000020451477374370200170430ustar00rootroot00000000000000"""pure-Python sugar wrappers for core 0MQ objects."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from __future__ import annotations

from zmq import error
from zmq.backend import proxy
from zmq.constants import DeviceType
from zmq.sugar import context, frame, poll, socket, tracker, version


def device(device_type: DeviceType, frontend: socket.Socket, backend: socket.Socket):
    """Deprecated alias for zmq.proxy

    .. deprecated:: libzmq-3.2
    .. deprecated:: 13.0
    """

    return proxy(frontend, backend)


__all__ = ["device"]
for submod in (context, error, frame, poll, socket, tracker, version):
    __all__.extend(submod.__all__)

from zmq.error import *  # noqa
from zmq.sugar.context import *  # noqa
from zmq.sugar.frame import *  # noqa
from zmq.sugar.poll import *  # noqa
from zmq.sugar.socket import *  # noqa

# deprecated:
from zmq.sugar.stopwatch import Stopwatch  # noqa
from zmq.sugar.tracker import *  # noqa
from zmq.sugar.version import *  # noqa

__all__.append('Stopwatch')
pyzmq-26.4.0/zmq/sugar/__init__.pyi000066400000000000000000000003331477374370200172120ustar00rootroot00000000000000from zmq.error import *

from . import constants as constants
from .constants import *
from .context import *
from .frame import *
from .poll import *
from .socket import *
from .tracker import *
from .version import *
pyzmq-26.4.0/zmq/sugar/attrsettr.py000066400000000000000000000051161477374370200173420ustar00rootroot00000000000000"""Mixin for mapping set/getattr to self.set/get"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

import errno
from typing import TypeVar, Union

from .. import constants

T = TypeVar("T")
OptValT = Union[str, bytes, int]


class AttributeSetter:
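    """Mixin mapping attribute access to zmq ``get``/``set`` option calls.

    A sketch of what this enables on a subclass such as a Socket
    (option names are looked up, uppercased, in ``zmq.constants``)::

        sock.linger = 0          # same as sock.set(zmq.LINGER, 0)
        timeout = sock.rcvtimeo  # same as sock.get(zmq.RCVTIMEO)
    """
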
    def __setattr__(self, key: str, value: OptValT) -> None:
        """set zmq options by attribute"""

        if key in self.__dict__:
            object.__setattr__(self, key, value)
            return
        # regular setattr only allowed for class-defined attributes
        for cls in self.__class__.mro():
            if key in cls.__dict__ or key in getattr(cls, "__annotations__", {}):
                object.__setattr__(self, key, value)
                return

        upper_key = key.upper()
        try:
            opt = getattr(constants, upper_key)
        except AttributeError:
            raise AttributeError(
                f"{self.__class__.__name__} has no such option: {upper_key}"
            )
        else:
            self._set_attr_opt(upper_key, opt, value)

    def _set_attr_opt(self, name: str, opt: int, value: OptValT) -> None:
        """override if setattr should do something other than call self.set"""
        self.set(opt, value)

    def __getattr__(self, key: str) -> OptValT:
        """get zmq options by attribute"""
        upper_key = key.upper()
        try:
            opt = getattr(constants, upper_key)
        except AttributeError:
            raise AttributeError(
                f"{self.__class__.__name__} has no such option: {upper_key}"
            ) from None
        else:
            from zmq import ZMQError

            try:
                return self._get_attr_opt(upper_key, opt)
            except ZMQError as e:
                # EINVAL will be raised on access for write-only attributes.
                # Turn that into an AttributeError
                # necessary for mocking
                if e.errno in {errno.EINVAL, errno.EFAULT}:
                    raise AttributeError(f"{key} attribute is write-only")
                else:
                    raise

    def _get_attr_opt(self, name, opt) -> OptValT:
        """override if getattr should do something other than call self.get"""
        return self.get(opt)

    def get(self, opt: int) -> OptValT:
        """Override in subclass"""
        raise NotImplementedError("override in subclass")

    def set(self, opt: int, val: OptValT) -> None:
        """Override in subclass"""
        raise NotImplementedError("override in subclass")


__all__ = ['AttributeSetter']
pyzmq-26.4.0/zmq/sugar/context.py000066400000000000000000000343571477374370200170030ustar00rootroot00000000000000"""Python bindings for 0MQ."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from __future__ import annotations

import atexit
import os
from threading import Lock
from typing import Any, Callable, Generic, TypeVar, overload
from warnings import warn
from weakref import WeakSet

import zmq
from zmq._typing import TypeAlias
from zmq.backend import Context as ContextBase
from zmq.constants import ContextOption, Errno, SocketOption
from zmq.error import ZMQError
from zmq.utils.interop import cast_int_addr

from .attrsettr import AttributeSetter, OptValT
from .socket import Socket, SyncSocket

# notice when exiting, to avoid triggering term on exit
_exiting = False


def _notice_atexit() -> None:
    global _exiting
    _exiting = True


atexit.register(_notice_atexit)

_ContextType = TypeVar('_ContextType', bound='Context')
_SocketType = TypeVar('_SocketType', bound='Socket', covariant=True)


class Context(ContextBase, AttributeSetter, Generic[_SocketType]):
    """Create a zmq Context

    A zmq Context creates sockets via its ``ctx.socket`` method.
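
    For example::

        ctx = zmq.Context()
        sock = ctx.socket(zmq.PUSH)
        # ... use the socket ...
        sock.close()
        ctx.term()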

    .. versionchanged:: 24

        When using a Context as a context manager (``with zmq.Context()``),
        or deleting a context without closing it first,
        ``ctx.destroy()`` is called,
        closing any leftover sockets,
        instead of ``ctx.term()``, which requires sockets to be closed first.

        This prevents hangs caused by `ctx.term()` if sockets are left open,
        but means that unclean destruction of contexts
        (with sockets left open) is not safe
        if sockets are managed in other threads.

    .. versionadded:: 25

        Contexts can now be shadowed by passing another Context.
        This helps in creating an async copy of a sync context or vice versa::

            ctx = zmq.Context(async_ctx)

        Which previously had to be::

            ctx = zmq.Context.shadow(async_ctx.underlying)
    """

    sockopts: dict[int, Any]
    _instance: Any = None
    _instance_lock = Lock()
    _instance_pid: int | None = None
    _shadow = False
    _shadow_obj = None
    _warn_destroy_close = False
    _sockets: WeakSet
    # mypy doesn't like a default value here
    _socket_class: type[_SocketType] = Socket  # type: ignore

    @overload
    def __init__(self: SyncContext, io_threads: int = 1): ...

    @overload
    def __init__(self: SyncContext, io_threads: Context, /): ...

    @overload
    def __init__(self: SyncContext, *, shadow: Context | int): ...

    def __init__(
        self: SyncContext,
        io_threads: int | Context = 1,
        shadow: Context | int = 0,
    ) -> None:
        if isinstance(io_threads, Context):
            # allow positional shadow `zmq.Context(zmq.asyncio.Context())`
            # by treating a Context passed as io_threads as the shadow target
            shadow = io_threads
            io_threads = 1

        shadow_address: int = 0
        if shadow:
            self._shadow = True
            # hold a reference to the shadow object
            self._shadow_obj = shadow
            if not isinstance(shadow, int):
                try:
                    shadow = shadow.underlying
                except AttributeError:
                    pass
            shadow_address = cast_int_addr(shadow)
        else:
            self._shadow = False
        super().__init__(io_threads=io_threads, shadow=shadow_address)
        self.sockopts = {}
        self._sockets = WeakSet()

    def __del__(self) -> None:
        """Deleting a Context without closing it destroys it and all sockets.

        .. versionchanged:: 24
            Switch from threadsafe `term()` which hangs in the event of open sockets
            to less safe `destroy()` which
            warns about any leftover sockets and closes them.
        """

        # Calling locals() here conceals issue #1167 on Windows CPython 3.5.4.
        locals()

        if not self._shadow and not _exiting and not self.closed:
            self._warn_destroy_close = True
            if warn is not None and getattr(self, "_sockets", None) is not None:
                # warn can be None during process teardown
                warn(
                    f"Unclosed context {self}",
                    ResourceWarning,
                    stacklevel=2,
                    source=self,
                )
            self.destroy()

    _repr_cls = "zmq.Context"

    def __repr__(self) -> str:
        cls = self.__class__
        # look up _repr_cls on exact class, not inherited
        _repr_cls = cls.__dict__.get("_repr_cls", None)
        if _repr_cls is None:
            _repr_cls = f"{cls.__module__}.{cls.__name__}"

        closed = ' closed' if self.closed else ''
        if getattr(self, "_sockets", None):
            n_sockets = len(self._sockets)
            s = 's' if n_sockets > 1 else ''
            sockets = f"{n_sockets} socket{s}"
        else:
            sockets = ""
        return f"<{_repr_cls}({sockets}) at {hex(id(self))}{closed}>"

    def __enter__(self: _ContextType) -> _ContextType:
        return self

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        # warn about any leftover sockets before closing them
        self._warn_destroy_close = True
        self.destroy()

    def __copy__(self: _ContextType, memo: Any = None) -> _ContextType:
        """Copying a Context creates a shadow copy"""
        return self.__class__.shadow(self.underlying)

    __deepcopy__ = __copy__

    @classmethod
    def shadow(cls: type[_ContextType], address: int | zmq.Context) -> _ContextType:
        """Shadow an existing libzmq context

        address is a zmq.Context or an integer (or FFI pointer)
        representing the address of the libzmq context.

        .. versionadded:: 14.1

        .. versionadded:: 25
            Support for shadowing `zmq.Context` objects,
            instead of just integer addresses.
        """
        return cls(shadow=address)

    @classmethod
    def shadow_pyczmq(cls: type[_ContextType], ctx: Any) -> _ContextType:
        """Shadow an existing pyczmq context

        ctx is the FFI `zctx_t *` pointer

        .. versionadded:: 14.1
        """
        from pyczmq import zctx  # type: ignore

        from zmq.utils.interop import cast_int_addr

        underlying = zctx.underlying(ctx)
        address = cast_int_addr(underlying)
        return cls(shadow=address)

    # static method copied from tornado IOLoop.instance
    @classmethod
    def instance(cls: type[_ContextType], io_threads: int = 1) -> _ContextType:
        """Returns a global Context instance.

        Most single-process applications have a single, global Context.
        Use this method instead of passing around Context instances
        throughout your code.

        A common pattern for classes that depend on Contexts is to use
        a default argument to enable programs with multiple Contexts
        but not require the argument for simpler applications::

            class MyClass(object):
                def __init__(self, context=None):
                    self.context = context or Context.instance()

        .. versionchanged:: 18.1

            When called in a subprocess after forking,
            a new global instance is created instead of inheriting
            a Context that won't work from the parent process.
        """
        if (
            cls._instance is None
            or cls._instance_pid != os.getpid()
            or cls._instance.closed
        ):
            with cls._instance_lock:
                if (
                    cls._instance is None
                    or cls._instance_pid != os.getpid()
                    or cls._instance.closed
                ):
                    cls._instance = cls(io_threads=io_threads)
                    cls._instance_pid = os.getpid()
        return cls._instance

    def term(self) -> None:
        """Close or terminate the context.

        Context termination is performed in the following steps:

        - Any blocking operations currently in progress on sockets open within context shall
          raise :class:`zmq.ContextTerminated`.
          With the exception of socket.close(), any further operations on sockets open within this context
          shall raise :class:`zmq.ContextTerminated`.
        - After interrupting all blocking calls, term shall block until the following conditions are satisfied:
            - All sockets open within context have been closed.
            - For each socket within context, all messages sent on the socket have either been
              physically transferred to a network peer,
              or the socket's linger period set with the zmq.LINGER socket option has expired.

        For further details regarding socket linger behaviour refer to libzmq documentation for ZMQ_LINGER.

        This can be called to close the context by hand. If this is not called,
        the context will automatically be closed when it is garbage collected,
        in which case you may see a ResourceWarning about the unclosed context.
        """
        super().term()

    # -------------------------------------------------------------------------
    # Hooks for ctxopt completion
    # -------------------------------------------------------------------------

    def __dir__(self) -> list[str]:
        keys = dir(self.__class__)
        keys.extend(ContextOption.__members__)
        return keys

    # -------------------------------------------------------------------------
    # Creating Sockets
    # -------------------------------------------------------------------------

    def _add_socket(self, socket: Any) -> None:
        """Add a weakref to a socket for Context.destroy / reference counting"""
        self._sockets.add(socket)

    def _rm_socket(self, socket: Any) -> None:
        """Remove a socket for Context.destroy / reference counting"""
        # allow _sockets to be None in case of process teardown
        if getattr(self, "_sockets", None) is not None:
            self._sockets.discard(socket)

    def destroy(self, linger: int | None = None) -> None:
        """Close all sockets associated with this context and then terminate
        the context.

        .. warning::

            destroy involves calling :meth:`Socket.close`, which is **NOT** threadsafe.
            If there are active sockets in other threads, this must not be called.

        Parameters
        ----------

        linger : int, optional
            If specified, set LINGER on sockets prior to closing them.
        """
        if self.closed:
            return

        sockets: list[_SocketType] = list(getattr(self, "_sockets", None) or [])
        for s in sockets:
            if s and not s.closed:
                if self._warn_destroy_close and warn is not None:
                    # warn can be None during process teardown
                    warn(
                        f"Destroying context with unclosed socket {s}",
                        ResourceWarning,
                        stacklevel=3,
                        source=s,
                    )
                if linger is not None:
                    s.setsockopt(SocketOption.LINGER, linger)
                s.close()

        self.term()
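
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # destroy(linger=0) closes any sockets still open on this context without
    # waiting for undelivered messages, then terminates the context.
    #
    #     ctx = zmq.Context()
    #     push = ctx.socket(zmq.PUSH)
    #     push.connect("tcp://127.0.0.1:5555")  # hypothetical endpoint
    #     ctx.destroy(linger=0)                 # set LINGER=0, close sockets, terminate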

    def socket(
        self: _ContextType,
        socket_type: int,
        socket_class: Callable[[_ContextType, int], _SocketType] | None = None,
        **kwargs: Any,
    ) -> _SocketType:
        """Create a Socket associated with this Context.

        Parameters
        ----------
        socket_type : int
            The socket type, which can be any of the 0MQ socket types:
            REQ, REP, PUB, SUB, PAIR, DEALER, ROUTER, PULL, PUSH, etc.

        socket_class: zmq.Socket
            The socket class to instantiate, if different from the default for this Context.
            e.g. for creating an asyncio socket attached to a default Context or vice versa.

            .. versionadded:: 25

        kwargs:
            will be passed to the __init__ method of the socket class.
        """
        if self.closed:
            raise ZMQError(Errno.ENOTSUP)
        if socket_class is None:
            socket_class = self._socket_class
        s: _SocketType = (
            socket_class(  # set PYTHONTRACEMALLOC=2 to get the calling frame
                self, socket_type, **kwargs
            )
        )
        for opt, value in self.sockopts.items():
            try:
                s.setsockopt(opt, value)
            except ZMQError:
                # ignore ZMQErrors, which are likely for socket options
                # that do not apply to a particular socket type, e.g.
                # SUBSCRIBE for non-SUB sockets.
                pass
        self._add_socket(s)
        return s
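
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # typical socket creation; sockets are context managers, so ``with``
    # closes them on exit. The endpoint below is only an example.
    #
    #     ctx = zmq.Context.instance()
    #     with ctx.socket(zmq.REQ) as req:
    #         req.connect("tcp://127.0.0.1:5555")
    #         ...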

    def setsockopt(self, opt: int, value: Any) -> None:
        """set default socket options for new sockets created by this Context

        .. versionadded:: 13.0
        """
        self.sockopts[opt] = value

    def getsockopt(self, opt: int) -> OptValT:
        """get default socket options for new sockets created by this Context

        .. versionadded:: 13.0
        """
        return self.sockopts[opt]
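
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # defaults set on the context are applied to sockets created afterwards.
    #
    #     ctx = zmq.Context()
    #     ctx.setsockopt(zmq.LINGER, 0)
    #     s = ctx.socket(zmq.DEALER)             # LINGER=0 applied at creation
    #     assert s.getsockopt(zmq.LINGER) == 0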

    def _set_attr_opt(self, name: str, opt: int, value: OptValT) -> None:
        """set default sockopts as attributes"""
        if name in ContextOption.__members__:
            return self.set(opt, value)
        elif name in SocketOption.__members__:
            self.sockopts[opt] = value
        else:
            raise AttributeError(f"No such context or socket option: {name}")

    def _get_attr_opt(self, name: str, opt: int) -> OptValT:
        """get default sockopts as attributes"""
        if name in ContextOption.__members__:
            return self.get(opt)
        else:
            if opt not in self.sockopts:
                raise AttributeError(name)
            else:
                return self.sockopts[opt]

    def __delattr__(self, key: str) -> None:
        """delete default sockopts as attributes"""
        if key in self.__dict__:
            self.__dict__.pop(key)
            return
        key = key.upper()
        try:
            opt = getattr(SocketOption, key)
        except AttributeError:
            raise AttributeError(f"No such socket option: {key!r}")
        else:
            if opt not in self.sockopts:
                raise AttributeError(key)
            else:
                del self.sockopts[opt]


SyncContext: TypeAlias = Context[SyncSocket]


__all__ = ['Context', 'SyncContext']
pyzmq-26.4.0/zmq/sugar/frame.py000066400000000000000000000102501477374370200163730ustar00rootroot00000000000000"""0MQ Frame pure Python methods."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import zmq
from zmq.backend import Frame as FrameBase

from .attrsettr import AttributeSetter


def _draft(v, feature):
    zmq.error._check_version(v, feature)
    if not zmq.DRAFT_API:
        raise RuntimeError(
            f"libzmq and pyzmq must be built with draft support for {feature}"
        )


class Frame(FrameBase, AttributeSetter):
    """
    A zmq message Frame class for non-copying send/recvs and access to message properties.

    A ``zmq.Frame`` wraps an underlying ``zmq_msg_t``.

    Message *properties* can be accessed by treating a Frame like a dictionary (``frame["User-Id"]``).

    .. versionadded:: 14.4, libzmq 4

        Frames created by ``recv(copy=False)`` can be used to access message properties and attributes,
        such as the CURVE User-Id.

        For example::

            frames = socket.recv_multipart(copy=False)
            user_id = frames[0]["User-Id"]

    This class is used if you want to do non-copying send and recvs.
    When you pass a chunk of bytes to this class, e.g. ``Frame(buf)``, the
    ref-count of `buf` is increased by two: once because the Frame saves `buf` as
    an instance attribute and another because a ZMQ message is created that
    points to the buffer of `buf`. This second ref-count increase makes sure
    that `buf` lives until all messages that use it have been sent.
    Once 0MQ sends all the messages and it doesn't need the buffer of ``buf``,
    0MQ will call ``Py_DECREF(s)``.

    Parameters
    ----------

    data : object, optional
        any object that provides the buffer interface will be used to
        construct the 0MQ message data.
    track : bool
        whether a MessageTracker_ should be created to track this object.
        Tracking a message has a cost at creation, because it creates a threadsafe
        Event object.
    copy : bool
        default: use copy_threshold
        Whether to create a copy of the data to pass to libzmq
        or share the memory with libzmq.
        If unspecified, copy_threshold is used.
    copy_threshold: int
        default: :const:`zmq.COPY_THRESHOLD`
        If copy is unspecified, messages smaller than this many bytes
        will be copied and messages larger than this will be shared with libzmq.
    """

    def __getitem__(self, key):
        # map Frame['User-Id'] to Frame.get('User-Id')
        return self.get(key)

    def __repr__(self):
        """Return the str form of the message."""
        nbytes = len(self)
        msg_suffix = ""
        if nbytes > 16:
            msg_bytes = bytes(memoryview(self.buffer)[:12])
            if nbytes >= 1e9:
                unit = "GB"
                n = nbytes // 1e9
            elif nbytes >= 1e6:
                unit = "MB"
                n = nbytes // 1e6
            elif nbytes >= 1e3:
                unit = "kB"
                n = nbytes // 1e3
            else:
                unit = "B"
                n = nbytes
            msg_suffix = f'...{n:.0f}{unit}'
        else:
            msg_bytes = self.bytes

        _module = self.__class__.__module__
        if _module == "zmq.sugar.frame":
            _module = "zmq"
        return f"<{_module}.{self.__class__.__name__}({msg_bytes!r}{msg_suffix})>"

    @property
    def group(self):
        """The RADIO-DISH group of the message.

        Requires libzmq >= 4.2 and pyzmq built with draft APIs enabled.

        .. versionadded:: 17
        """
        _draft((4, 2), "RADIO-DISH")
        return self.get('group')

    @group.setter
    def group(self, group):
        _draft((4, 2), "RADIO-DISH")
        self.set('group', group)

    @property
    def routing_id(self):
        """The CLIENT-SERVER routing id of the message.

        Requires libzmq >= 4.2 and pyzmq built with draft APIs enabled.

        .. versionadded:: 17
        """
        _draft((4, 2), "CLIENT-SERVER")
        return self.get('routing_id')

    @routing_id.setter
    def routing_id(self, routing_id):
        _draft((4, 2), "CLIENT-SERVER")
        self.set('routing_id', routing_id)
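
    # Illustrative sketch (editor's addition, not part of upstream pyzmq).
    # group/routing_id require a draft-enabled libzmq and pyzmq build
    # (zmq.DRAFT_API is True); `server` is an assumed zmq.SERVER socket.
    #
    #     request = server.recv(copy=False)        # zmq.Frame
    #     reply = zmq.Frame(b"pong")
    #     reply.routing_id = request.routing_id    # address the same CLIENT peer
    #     server.send(reply, copy=False)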


# keep deprecated alias
Message = Frame
__all__ = ['Frame', 'Message']
pyzmq-26.4.0/zmq/sugar/poll.py000066400000000000000000000131701477374370200162530ustar00rootroot00000000000000"""0MQ polling related functions and classes."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from __future__ import annotations

from typing import Any

from zmq.backend import zmq_poll
from zmq.constants import POLLERR, POLLIN, POLLOUT

# -----------------------------------------------------------------------------
# Polling related methods
# -----------------------------------------------------------------------------


class Poller:
    """A stateful poll interface that mirrors Python's built-in poll."""

    sockets: list[tuple[Any, int]]
    _map: dict

    def __init__(self) -> None:
        self.sockets = []
        self._map = {}

    def __contains__(self, socket: Any) -> bool:
        return socket in self._map

    def register(self, socket: Any, flags: int = POLLIN | POLLOUT):
        """p.register(socket, flags=POLLIN|POLLOUT)

        Register a 0MQ socket or native fd for I/O monitoring.

        register(s,0) is equivalent to unregister(s).

        Parameters
        ----------
        socket : zmq.Socket or native socket
            A zmq.Socket or any Python object having a ``fileno()``
            method that returns a valid file descriptor.
        flags : int
            The events to watch for.  Can be POLLIN, POLLOUT or POLLIN|POLLOUT.
            If `flags=0`, socket will be unregistered.
        """
        if flags:
            if socket in self._map:
                idx = self._map[socket]
                self.sockets[idx] = (socket, flags)
            else:
                idx = len(self.sockets)
                self.sockets.append((socket, flags))
                self._map[socket] = idx
        elif socket in self._map:
            # unregister sockets registered with no events
            self.unregister(socket)
        else:
            # ignore new sockets with no events
            pass

    def modify(self, socket, flags=POLLIN | POLLOUT):
        """Modify the flags for an already registered 0MQ socket or native fd."""
        self.register(socket, flags)

    def unregister(self, socket: Any):
        """Remove a 0MQ socket or native fd for I/O monitoring.

        Parameters
        ----------
        socket : Socket
            The socket instance to stop polling.
        """
        idx = self._map.pop(socket)
        self.sockets.pop(idx)
        # shift indices after deletion
        for socket, flags in self.sockets[idx:]:
            self._map[socket] -= 1

    def poll(self, timeout: int | None = None) -> list[tuple[Any, int]]:
        """Poll the registered 0MQ or native fds for I/O.

        If there are currently events ready to be processed, this function will return immediately.
        Otherwise, this function will return as soon as the first event is available or after timeout
        milliseconds have elapsed.

        Parameters
        ----------
        timeout : int
            The timeout in milliseconds. If None, no `timeout` (infinite). This
            is in milliseconds to be compatible with ``select.poll()``.

        Returns
        -------
        events : list
            The list of events that are ready to be processed.
            This is a list of tuples of the form ``(socket, event_mask)``, where the 0MQ Socket
            or integer fd is the first element, and the poll event mask (POLLIN, POLLOUT) is the second.
            It is common to call ``events = dict(poller.poll())``,
            which turns the list of tuples into a mapping of ``socket : event_mask``.
        """
        if timeout is None or timeout < 0:
            timeout = -1
        elif isinstance(timeout, float):
            timeout = int(timeout)
        return zmq_poll(self.sockets, timeout=timeout)
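
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # a typical poll loop over two assumed sockets `sub` and `push`.
    #
    #     poller = zmq.Poller()
    #     poller.register(sub, zmq.POLLIN)
    #     poller.register(push, zmq.POLLOUT)
    #     events = dict(poller.poll(timeout=1000))   # milliseconds
    #     if events.get(sub, 0) & zmq.POLLIN:
    #         msg = sub.recv_multipart()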


def select(
    rlist: list, wlist: list, xlist: list, timeout: float | None = None
) -> tuple[list, list, list]:
    """select(rlist, wlist, xlist, timeout=None) -> (rlist, wlist, xlist)

    Return the result of poll as lists of sockets ready for r/w/exception.

    This has the same interface as Python's built-in ``select.select()`` function.

    Parameters
    ----------
    timeout : float, optional
        The timeout in seconds. If None, no timeout (infinite). This is in seconds to be
        compatible with ``select.select()``.
    rlist : list
        sockets/FDs to be polled for read events
    wlist : list
        sockets/FDs to be polled for write events
    xlist : list
        sockets/FDs to be polled for error events

    Returns
    -------
    rlist: list
        list of sockets or FDs that are readable
    wlist: list
        list of sockets or FDs that are writable
    xlist: list
        list of sockets or FDs that had error events (rare)
    """
    if timeout is None:
        timeout = -1
    # Convert from sec -> ms for zmq_poll.
    # zmq_poll accepts 3.x style timeout in ms
    timeout = int(timeout * 1000.0)
    if timeout < 0:
        timeout = -1
    sockets = []
    for s in set(rlist + wlist + xlist):
        flags = 0
        if s in rlist:
            flags |= POLLIN
        if s in wlist:
            flags |= POLLOUT
        if s in xlist:
            flags |= POLLERR
        sockets.append((s, flags))
    return_sockets = zmq_poll(sockets, timeout)
    rlist, wlist, xlist = [], [], []
    for s, flags in return_sockets:
        if flags & POLLIN:
            rlist.append(s)
        if flags & POLLOUT:
            wlist.append(s)
        if flags & POLLERR:
            xlist.append(s)
    return rlist, wlist, xlist
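
# Illustrative sketch (editor's addition, not part of upstream pyzmq):
# zmq.select mirrors select.select, but the timeout is in seconds;
# `sub` and `push` are assumed zmq sockets.
#
#     readable, writable, _ = zmq.select([sub], [push], [], timeout=1.0)
#     if sub in readable:
#         msg = sub.recv_multipart()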


# -----------------------------------------------------------------------------
# Symbols to export
# -----------------------------------------------------------------------------

__all__ = ['Poller', 'select']
pyzmq-26.4.0/zmq/sugar/socket.py000066400000000000000000001050421477374370200165750ustar00rootroot00000000000000"""0MQ Socket pure Python methods."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from __future__ import annotations

import errno
import pickle
import random
import sys
from typing import (
    Any,
    Callable,
    Generic,
    List,
    Literal,
    Sequence,
    TypeVar,
    Union,
    cast,
    overload,
)
from warnings import warn

import zmq
from zmq._typing import TypeAlias
from zmq.backend import Socket as SocketBase
from zmq.error import ZMQBindError, ZMQError
from zmq.utils import jsonapi
from zmq.utils.interop import cast_int_addr

from ..constants import SocketOption, SocketType, _OptType
from .attrsettr import AttributeSetter
from .poll import Poller

try:
    DEFAULT_PROTOCOL = pickle.DEFAULT_PROTOCOL
except AttributeError:
    DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL

_SocketType = TypeVar("_SocketType", bound="Socket")

_JSONType: TypeAlias = "int | str | bool | list[_JSONType] | dict[str, _JSONType]"


class _SocketContext(Generic[_SocketType]):
    """Context Manager for socket bind/unbind"""

    socket: _SocketType
    kind: str
    addr: str

    def __repr__(self):
        return f""

    def __init__(
        self: _SocketContext[_SocketType], socket: _SocketType, kind: str, addr: str
    ):
        assert kind in {"bind", "connect"}
        self.socket = socket
        self.kind = kind
        self.addr = addr

    def __enter__(self: _SocketContext[_SocketType]) -> _SocketType:
        return self.socket

    def __exit__(self, *args):
        if self.socket.closed:
            return
        if self.kind == "bind":
            self.socket.unbind(self.addr)
        elif self.kind == "connect":
            self.socket.disconnect(self.addr)


SocketReturnType = TypeVar("SocketReturnType")


class Socket(SocketBase, AttributeSetter, Generic[SocketReturnType]):
    """The ZMQ socket object

    To create a Socket, first create a Context::

        ctx = zmq.Context.instance()

    then call ``ctx.socket(socket_type)``::

        s = ctx.socket(zmq.ROUTER)

    .. versionadded:: 25

        Sockets can now be shadowed by passing another Socket.
        This helps in creating an async copy of a sync socket or vice versa::

            s = zmq.Socket(async_socket)

        Which previously had to be::

            s = zmq.Socket.shadow(async_socket.underlying)
    """

    _shadow = False
    _shadow_obj = None
    _monitor_socket = None
    _type_name = 'UNKNOWN'

    @overload
    def __init__(
        self: Socket[bytes],
        ctx_or_socket: zmq.Context,
        socket_type: int,
        *,
        copy_threshold: int | None = None,
    ): ...

    @overload
    def __init__(
        self: Socket[bytes],
        *,
        shadow: Socket | int,
        copy_threshold: int | None = None,
    ): ...

    @overload
    def __init__(
        self: Socket[bytes],
        ctx_or_socket: Socket,
    ): ...

    def __init__(
        self: Socket[bytes],
        ctx_or_socket: zmq.Context | Socket | None = None,
        socket_type: int = 0,
        *,
        shadow: Socket | int = 0,
        copy_threshold: int | None = None,
    ):
        shadow_context: zmq.Context | None = None
        if isinstance(ctx_or_socket, zmq.Socket):
            # positional Socket(other_socket)
            shadow = ctx_or_socket
            ctx_or_socket = None

        shadow_address: int = 0

        if shadow:
            self._shadow = True
            # hold a reference to the shadow object
            self._shadow_obj = shadow
            if not isinstance(shadow, int):
                if isinstance(shadow, zmq.Socket):
                    shadow_context = shadow.context
                try:
                    shadow = cast(int, shadow.underlying)
                except AttributeError:
                    pass
            shadow_address = cast_int_addr(shadow)
        else:
            self._shadow = False

        super().__init__(
            ctx_or_socket,
            socket_type,
            shadow=shadow_address,
            copy_threshold=copy_threshold,
        )
        if self._shadow_obj and shadow_context:
            # keep self.context reference if shadowing a Socket object
            self.context = shadow_context

        try:
            socket_type = cast(int, self.get(zmq.TYPE))
        except Exception:
            pass
        else:
            try:
                self.__dict__["type"] = stype = SocketType(socket_type)
            except ValueError:
                self._type_name = str(socket_type)
            else:
                self._type_name = stype.name

    def __del__(self):
        if not self._shadow and not self.closed:
            if warn is not None:
                # warn can be None during process teardown
                warn(
                    f"Unclosed socket {self}",
                    ResourceWarning,
                    stacklevel=2,
                    source=self,
                )
            self.close()

    _repr_cls = "zmq.Socket"

    def __repr__(self):
        cls = self.__class__
        # look up _repr_cls on exact class, not inherited
        _repr_cls = cls.__dict__.get("_repr_cls", None)
        if _repr_cls is None:
            _repr_cls = f"{cls.__module__}.{cls.__name__}"

        closed = ' closed' if self._closed else ''

        return f"<{_repr_cls}(zmq.{self._type_name}) at {hex(id(self))}{closed}>"

    # socket as context manager:
    def __enter__(self: _SocketType) -> _SocketType:
        """Sockets are context managers

        .. versionadded:: 14.4
        """
        return self

    def __exit__(self, *args, **kwargs):
        self.close()

    # -------------------------------------------------------------------------
    # Socket creation
    # -------------------------------------------------------------------------

    def __copy__(self: _SocketType, memo=None) -> _SocketType:
        """Copying a Socket creates a shadow copy"""
        return self.__class__.shadow(self.underlying)

    __deepcopy__ = __copy__

    @classmethod
    def shadow(cls: type[_SocketType], address: int | zmq.Socket) -> _SocketType:
        """Shadow an existing libzmq socket

        address is a zmq.Socket or an integer (or FFI pointer)
        representing the address of the libzmq socket.

        .. versionadded:: 14.1

        .. versionadded:: 25
            Support for shadowing `zmq.Socket` objects,
            instead of just integer addresses.
        """
        return cls(shadow=address)
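
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # shadowing is the standard way to get an asyncio view of an existing
    # sync socket (or vice versa); `sync_sock` is an assumed zmq.Socket.
    #
    #     import zmq.asyncio
    #     async_sock = zmq.asyncio.Socket.shadow(sync_sock)              # pyzmq >= 25
    #     async_sock = zmq.asyncio.Socket.shadow(sync_sock.underlying)   # older style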

    def close(self, linger=None) -> None:
        """
        Close the socket.

        If linger is specified, LINGER sockopt will be set prior to closing.

        Note: closing a zmq Socket may not close the underlying sockets
        if there are undelivered messages.
        Only after all messages are delivered or discarded by reaching the socket's LINGER timeout
        (default: forever)
        will the underlying sockets be closed.

        This can be called to close the socket by hand. If this is not
        called, the socket will automatically be closed when it is
        garbage collected,
        in which case you may see a ResourceWarning about the unclosed socket.
        """
        if self.context:
            self.context._rm_socket(self)
        super().close(linger=linger)

    # -------------------------------------------------------------------------
    # Connect/Bind context managers
    # -------------------------------------------------------------------------

    def _connect_cm(self: _SocketType, addr: str) -> _SocketContext[_SocketType]:
        """Context manager to disconnect on exit

        .. versionadded:: 20.0
        """
        return _SocketContext(self, 'connect', addr)

    def _bind_cm(self: _SocketType, addr: str) -> _SocketContext[_SocketType]:
        """Context manager to unbind on exit

        .. versionadded:: 20.0
        """
        try:
            # retrieve last_endpoint
            # to support binding on random ports via
            # `socket.bind('tcp://127.0.0.1:0')`
            addr = cast(bytes, self.get(zmq.LAST_ENDPOINT)).decode("utf8")
        except (AttributeError, ZMQError, UnicodeDecodeError):
            pass
        return _SocketContext(self, 'bind', addr)

    def bind(self: _SocketType, addr: str) -> _SocketContext[_SocketType]:
        """s.bind(addr)

        Bind the socket to an address.

        This causes the socket to listen on a network port. Sockets on the
        other side of this connection will use ``Socket.connect(addr)`` to
        connect to this socket.

        Returns a context manager which will call unbind on exit.

        .. versionadded:: 20.0
            Can be used as a context manager.

        .. versionadded:: 26.0
            binding to port 0 can be used as a context manager
            for binding to a random port.
            The URL can be retrieved as `socket.last_endpoint`.

        Parameters
        ----------
        addr : str
            The address string. This has the form 'protocol://interface:port',
            for example 'tcp://127.0.0.1:5555'. Protocols supported include
            tcp, udp, pgm, epgm, inproc and ipc. If the address is unicode, it is
            encoded to utf-8 first.

        """
        try:
            super().bind(addr)
        except ZMQError as e:
            e.strerror += f" (addr={addr!r})"
            raise
        return self._bind_cm(addr)

    def connect(self: _SocketType, addr: str) -> _SocketContext[_SocketType]:
        """s.connect(addr)

        Connect to a remote 0MQ socket.

        Returns a context manager which will call disconnect on exit.

        .. versionadded:: 20.0
            Can be used as a context manager.

        Parameters
        ----------
        addr : str
            The address string. This has the form 'protocol://interface:port',
            for example 'tcp://127.0.0.1:5555'. Protocols supported are
            tcp, udp, pgm, inproc and ipc. If the address is unicode, it is
            encoded to utf-8 first.

        """
        try:
            super().connect(addr)
        except ZMQError as e:
            e.strerror += f" (addr={addr!r})"
            raise
        return self._connect_cm(addr)
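
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # bind()/connect() return context managers that unbind/disconnect on exit;
    # binding port 0 picks a free port, recorded in ``last_endpoint``.
    # `ctx` is an assumed zmq.Context.
    #
    #     with ctx.socket(zmq.PULL) as pull, pull.bind("tcp://127.0.0.1:0"):
    #         url = pull.last_endpoint.decode()   # e.g. "tcp://127.0.0.1:49321"
    #         ...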

    # -------------------------------------------------------------------------
    # Deprecated aliases
    # -------------------------------------------------------------------------

    @property
    def socket_type(self) -> int:
        warn("Socket.socket_type is deprecated, use Socket.type", DeprecationWarning)
        return cast(int, self.type)

    # -------------------------------------------------------------------------
    # Hooks for sockopt completion
    # -------------------------------------------------------------------------

    def __dir__(self):
        keys = dir(self.__class__)
        keys.extend(SocketOption.__members__)
        return keys

    # -------------------------------------------------------------------------
    # Getting/Setting options
    # -------------------------------------------------------------------------
    setsockopt = SocketBase.set
    getsockopt = SocketBase.get

    def __setattr__(self, key, value):
        """Override to allow setting zmq.[UN]SUBSCRIBE even though we have a subscribe method"""
        if key in self.__dict__:
            object.__setattr__(self, key, value)
            return
        _key = key.lower()
        if _key in ('subscribe', 'unsubscribe'):
            if isinstance(value, str):
                value = value.encode('utf8')
            if _key == 'subscribe':
                self.set(zmq.SUBSCRIBE, value)
            else:
                self.set(zmq.UNSUBSCRIBE, value)
            return
        super().__setattr__(key, value)

    def fileno(self) -> int:
        """Return edge-triggered file descriptor for this socket.

        This is a read-only edge-triggered file descriptor for both read and write events on this socket.
        It is important that all available events be consumed when an event is detected,
        otherwise the read event will not trigger again.

        .. versionadded:: 17.0
        """
        return self.FD

    def subscribe(self, topic: str | bytes) -> None:
        """Subscribe to a topic

        Only for SUB sockets.

        .. versionadded:: 15.3
        """
        if isinstance(topic, str):
            topic = topic.encode('utf8')
        self.set(zmq.SUBSCRIBE, topic)

    def unsubscribe(self, topic: str | bytes) -> None:
        """Unsubscribe from a topic

        Only for SUB sockets.

        .. versionadded:: 15.3
        """
        if isinstance(topic, str):
            topic = topic.encode('utf8')
        self.set(zmq.UNSUBSCRIBE, topic)
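
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # subscription management on a SUB socket; str topics are encoded to
    # utf8 automatically. `ctx` and the endpoint are assumed.
    #
    #     sub = ctx.socket(zmq.SUB)
    #     sub.connect("tcp://127.0.0.1:5556")
    #     sub.subscribe("weather.")     # receive only topics starting with b"weather."
    #     ...
    #     sub.unsubscribe("weather.")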

    def set_string(self, option: int, optval: str, encoding='utf-8') -> None:
        """Set socket options with a unicode object.

        This is simply a wrapper for setsockopt to protect from encoding ambiguity.

        See the 0MQ documentation for details on specific options.

        Parameters
        ----------
        option : int
            The name of the option to set. Can be any of: SUBSCRIBE,
            UNSUBSCRIBE, IDENTITY
        optval : str
            The value of the option to set.
        encoding : str
            The encoding to be used, default is utf8
        """
        if not isinstance(optval, str):
            raise TypeError(f"strings only, not {type(optval)}: {optval!r}")
        return self.set(option, optval.encode(encoding))

    setsockopt_unicode = setsockopt_string = set_string

    def get_string(self, option: int, encoding='utf-8') -> str:
        """Get the value of a socket option.

        See the 0MQ documentation for details on specific options.

        Parameters
        ----------
        option : int
            The option to retrieve.

        Returns
        -------
        optval : str
            The value of the option as a unicode string.
        """
        if SocketOption(option)._opt_type != _OptType.bytes:
            raise TypeError(f"option {option} will not return a string to be decoded")
        return cast(bytes, self.get(option)).decode(encoding)

    getsockopt_unicode = getsockopt_string = get_string

    def bind_to_random_port(
        self: _SocketType,
        addr: str,
        min_port: int = 49152,
        max_port: int = 65536,
        max_tries: int = 100,
    ) -> int:
        """Bind this socket to a random port in a range.

        If the port range is unspecified, the system will choose the port.

        Parameters
        ----------
        addr : str
            The address string without the port to pass to ``Socket.bind()``.
        min_port : int, optional
            The minimum port in the range of ports to try (inclusive).
        max_port : int, optional
            The maximum port in the range of ports to try (exclusive).
        max_tries : int, optional
            The maximum number of bind attempts to make.

        Returns
        -------
        port : int
            The port the socket was bound to.

        Raises
        ------
        ZMQBindError
            if `max_tries` reached before successful bind
        """
        if min_port == 49152 and max_port == 65536:
            # if LAST_ENDPOINT is supported, and min_port / max_port weren't specified,
            # we can bind to port 0 and let the OS do the work
            self.bind(f"{addr}:*")
            url = cast(bytes, self.last_endpoint).decode('ascii', 'replace')
            _, port_s = url.rsplit(':', 1)
            return int(port_s)

        for i in range(max_tries):
            try:
                port = random.randrange(min_port, max_port)
                self.bind(f'{addr}:{port}')
            except ZMQError as exception:
                en = exception.errno
                if en == zmq.EADDRINUSE:
                    continue
                elif sys.platform == 'win32' and en == errno.EACCES:
                    continue
                else:
                    raise
            else:
                return port
        raise ZMQBindError("Could not bind socket to random port.")
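
    # Illustrative sketch (editor's addition, not part of upstream pyzmq);
    # `ctx` is an assumed zmq.Context.
    #
    #     rep = ctx.socket(zmq.REP)
    #     port = rep.bind_to_random_port("tcp://127.0.0.1")   # note: no port in addr
    #     print(f"listening on tcp://127.0.0.1:{port}")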

    def get_hwm(self) -> int:
        """Get the High Water Mark.

        On libzmq ≥ 3, this gets SNDHWM if available, otherwise RCVHWM
        """
        # return sndhwm, fallback on rcvhwm
        try:
            return cast(int, self.get(zmq.SNDHWM))
        except zmq.ZMQError:
            pass

        return cast(int, self.get(zmq.RCVHWM))

    def set_hwm(self, value: int) -> None:
        """Set the High Water Mark.

        On libzmq ≥ 3, this sets both SNDHWM and RCVHWM


        .. warning::

            New values only take effect for subsequent socket
            bind/connects.
        """
        raised = None
        try:
            self.sndhwm = value
        except Exception as e:
            raised = e
        try:
            self.rcvhwm = value
        except Exception as e:
            raised = e

        if raised:
            raise raised

    hwm = property(
        get_hwm,
        set_hwm,
        None,
        """Property for High Water Mark.

        Setting hwm sets both SNDHWM and RCVHWM as appropriate.
        It gets SNDHWM if available, otherwise RCVHWM.
        """,
    )

    # -------------------------------------------------------------------------
    # Sending and receiving messages
    # -------------------------------------------------------------------------

    @overload
    def send(
        self,
        data: Any,
        flags: int = ...,
        copy: bool = ...,
        *,
        track: Literal[True],
        routing_id: int | None = ...,
        group: str | None = ...,
    ) -> zmq.MessageTracker: ...

    @overload
    def send(
        self,
        data: Any,
        flags: int = ...,
        copy: bool = ...,
        *,
        track: Literal[False],
        routing_id: int | None = ...,
        group: str | None = ...,
    ) -> None: ...

    @overload
    def send(
        self,
        data: Any,
        flags: int = ...,
        *,
        copy: bool = ...,
        routing_id: int | None = ...,
        group: str | None = ...,
    ) -> None: ...

    @overload
    def send(
        self,
        data: Any,
        flags: int = ...,
        copy: bool = ...,
        track: bool = ...,
        routing_id: int | None = ...,
        group: str | None = ...,
    ) -> zmq.MessageTracker | None: ...

    def send(
        self,
        data: Any,
        flags: int = 0,
        copy: bool = True,
        track: bool = False,
        routing_id: int | None = None,
        group: str | None = None,
    ) -> zmq.MessageTracker | None:
        """Send a single zmq message frame on this socket.

        This queues the message to be sent by the IO thread at a later time.

        With flags=NOBLOCK, this raises :class:`ZMQError` if the queue is full;
        otherwise, this waits until space is available.
        See :class:`Poller` for more general non-blocking I/O.

        Parameters
        ----------
        data : bytes, Frame, memoryview
            The content of the message. This can be any object that provides
            the Python buffer API (i.e. `memoryview(data)` can be called).
        flags : int
            0, NOBLOCK, SNDMORE, or NOBLOCK|SNDMORE.
        copy : bool
            Should the message be sent in a copying or non-copying manner.
        track : bool
            Should the message be tracked for notification that ZMQ has
            finished with it? (ignored if copy=True)
        routing_id : int
            For use with SERVER sockets
        group : str
            For use with RADIO sockets

        Returns
        -------
        None : if `copy` or not track
            None if message was sent, raises an exception otherwise.
        MessageTracker : if track and not copy
            a MessageTracker object, whose `done` property will
            be False until the send is completed.

        Raises
        ------
        TypeError
            If a unicode object is passed
        ValueError
            If `track=True`, but an untracked Frame is passed.
        ZMQError
            If the send does not succeed for any reason (including
            if NOBLOCK is set and the outgoing queue is full).


        .. versionchanged:: 17.0

            DRAFT support for routing_id and group arguments.
        """
        if routing_id is not None:
            if not isinstance(data, zmq.Frame):
                data = zmq.Frame(
                    data,
                    track=track,
                    copy=copy or None,
                    copy_threshold=self.copy_threshold,
                )
            data.routing_id = routing_id
        if group is not None:
            if not isinstance(data, zmq.Frame):
                data = zmq.Frame(
                    data,
                    track=track,
                    copy=copy or None,
                    copy_threshold=self.copy_threshold,
                )
            data.group = group
        return super().send(data, flags=flags, copy=copy, track=track)

    def send_multipart(
        self,
        msg_parts: Sequence,
        flags: int = 0,
        copy: bool = True,
        track: bool = False,
        **kwargs,
    ):
        """Send a sequence of buffers as a multipart message.

        The zmq.SNDMORE flag is added to all msg parts before the last.

        Parameters
        ----------
        msg_parts : iterable
            A sequence of objects to send as a multipart message. Each element
            can be any sendable object (Frame, bytes, buffer-providers)
        flags : int, optional
            Any valid flags for :func:`Socket.send`.
            SNDMORE is added automatically for frames before the last.
        copy : bool, optional
            Should the frame(s) be sent in a copying or non-copying manner.
            If copy=False, frames smaller than self.copy_threshold bytes
            will be copied anyway.
        track : bool, optional
            Should the frame(s) be tracked for notification that ZMQ has
            finished with it (ignored if copy=True).

        Returns
        -------
        None : if copy or not track
        MessageTracker : if track and not copy
            a MessageTracker object, whose `done` property will
            be False until the last send is completed.
        """
        # typecheck parts before sending:
        for i, msg in enumerate(msg_parts):
            if isinstance(msg, (zmq.Frame, bytes, memoryview)):
                continue
            try:
                memoryview(msg)
            except Exception:
                rmsg = repr(msg)
                if len(rmsg) > 32:
                    rmsg = rmsg[:32] + '...'
                raise TypeError(
                    f"Frame {i} ({rmsg}) does not support the buffer interface."
                )
        for msg in msg_parts[:-1]:
            self.send(msg, zmq.SNDMORE | flags, copy=copy, track=track)
        # Send the last part without the extra SNDMORE flag.
        return self.send(msg_parts[-1], flags, copy=copy, track=track)

    @overload
    def recv_multipart(
        self, flags: int = ..., *, copy: Literal[True], track: bool = ...
    ) -> list[bytes]: ...

    @overload
    def recv_multipart(
        self, flags: int = ..., *, copy: Literal[False], track: bool = ...
    ) -> list[zmq.Frame]: ...

    @overload
    def recv_multipart(self, flags: int = ..., *, track: bool = ...) -> list[bytes]: ...

    @overload
    def recv_multipart(
        self, flags: int = 0, copy: bool = True, track: bool = False
    ) -> list[zmq.Frame] | list[bytes]: ...

    def recv_multipart(
        self, flags: int = 0, copy: bool = True, track: bool = False
    ) -> list[zmq.Frame] | list[bytes]:
        """Receive a multipart message as a list of bytes or Frame objects

        Parameters
        ----------
        flags : int, optional
            Any valid flags for :func:`Socket.recv`.
        copy : bool, optional
            Should the message frame(s) be received in a copying or non-copying manner?
            If False a Frame object is returned for each part, if True a copy of
            the bytes is made for each frame.
        track : bool, optional
            Should the message frame(s) be tracked for notification that ZMQ has
            finished with it? (ignored if copy=True)

        Returns
        -------
        msg_parts : list
            A list of frames in the multipart message; either Frames or bytes,
            depending on `copy`.

        Raises
        ------
        ZMQError
            for any of the reasons :func:`~Socket.recv` might fail
        """
        parts = [self.recv(flags, copy=copy, track=track)]
        # have first part already, only loop while more to receive
        while self.getsockopt(zmq.RCVMORE):
            part = self.recv(flags, copy=copy, track=track)
            parts.append(part)
        # cast List[Union] to Union[List]
        # how do we get mypy to recognize that return type is invariant on `copy`?
        return cast(Union[List[zmq.Frame], List[bytes]], parts)
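
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # multipart frames arrive together or not at all; `push` and `pull`
    # are assumed connected PUSH/PULL sockets.
    #
    #     push.send_multipart([b"header", b"body"])
    #     header, body = pull.recv_multipart()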

    def _deserialize(
        self,
        recvd: bytes,
        load: Callable[[bytes], Any],
    ) -> Any:
        """Deserialize a received message

        Override in subclass (e.g. Futures) if recvd is not the raw bytes.

        The default implementation expects bytes and returns the deserialized message immediately.

        Parameters
        ----------

        load: callable
            Callable that deserializes bytes
        recvd:
            The object returned by self.recv

        """
        return load(recvd)

    def send_serialized(self, msg, serialize, flags=0, copy=True, **kwargs):
        """Send a message with a custom serialization function.

        .. versionadded:: 17

        Parameters
        ----------
        msg : The message to be sent. Can be any object serializable by `serialize`.
        serialize : callable
            The serialization function to use.
            serialize(msg) should return an iterable of sendable message frames
            (e.g. bytes objects), which will be passed to send_multipart.
        flags : int, optional
            Any valid flags for :func:`Socket.send`.
        copy : bool, optional
            Whether to copy the frames.

        """
        frames = serialize(msg)
        return self.send_multipart(frames, flags=flags, copy=copy, **kwargs)

    def recv_serialized(self, deserialize, flags=0, copy=True):
        """Receive a message with a custom deserialization function.

        .. versionadded:: 17

        Parameters
        ----------
        deserialize : callable
            The deserialization function to use.
            deserialize will be called with one argument: the list of frames
            returned by recv_multipart() and can return any object.
        flags : int, optional
            Any valid flags for :func:`Socket.recv`.
        copy : bool, optional
            Whether to recv bytes or Frame objects.

        Returns
        -------
        obj : object
            The object returned by the deserialization function.

        Raises
        ------
        ZMQError
            for any of the reasons :func:`~Socket.recv` might fail
        """
        frames = self.recv_multipart(flags=flags, copy=copy)
        return self._deserialize(frames, deserialize)
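
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # custom (de)serialization hooks; serialize() must return a list of
    # sendable frames. `push` and `pull` are assumed connected sockets.
    #
    #     import json
    #
    #     def serialize(obj):
    #         return [b"json", json.dumps(obj).encode("utf8")]
    #
    #     def deserialize(frames):
    #         assert frames[0] == b"json"
    #         return json.loads(frames[1])
    #
    #     push.send_serialized({"x": 1}, serialize)
    #     obj = pull.recv_serialized(deserialize)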

    def send_string(
        self,
        u: str,
        flags: int = 0,
        copy: bool = True,
        encoding: str = 'utf-8',
        **kwargs,
    ) -> zmq.Frame | None:
        """Send a Python unicode string as a message with an encoding.

        0MQ communicates with raw bytes, so you must encode/decode
        text (str) around 0MQ.

        Parameters
        ----------
        u : str
            The unicode string to send.
        flags : int, optional
            Any valid flags for :func:`Socket.send`.
        encoding : str
            The encoding to be used
        """
        if not isinstance(u, str):
            raise TypeError("str objects only")
        return self.send(u.encode(encoding), flags=flags, copy=copy, **kwargs)

    send_unicode = send_string

    def recv_string(self, flags: int = 0, encoding: str = 'utf-8') -> str:
        """Receive a unicode string, as sent by send_string.

        Parameters
        ----------
        flags : int
            Any valid flags for :func:`Socket.recv`.
        encoding : str
            The encoding to be used

        Returns
        -------
        s : str
            The Python unicode string that arrives as encoded bytes.

        Raises
        ------
        ZMQError
            for any of the reasons :func:`Socket.recv` might fail
        """
        msg = self.recv(flags=flags)
        return self._deserialize(msg, lambda buf: buf.decode(encoding))

    recv_unicode = recv_string
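
    # Illustrative sketch (editor's addition, not part of upstream pyzmq);
    # `req` and `rep` are assumed connected REQ/REP sockets.
    #
    #     req.send_string("ping")     # encoded to utf-8 on the wire
    #     reply = rep.recv_string()   # decoded back to str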

    def send_pyobj(
        self, obj: Any, flags: int = 0, protocol: int = DEFAULT_PROTOCOL, **kwargs
    ) -> zmq.Frame | None:
        """
        Send a Python object as a message using pickle to serialize.

        .. warning::

            Never deserialize an untrusted message with pickle,
            which can involve arbitrary code execution.
            Make sure to authenticate the sources of messages
            before unpickling them, e.g. with transport-level security
            (e.g. CURVE, ZAP, or IPC permissions)
            or signed messages.

        Parameters
        ----------
        obj : Python object
            The Python object to send.
        flags : int
            Any valid flags for :func:`Socket.send`.
        protocol : int
            The pickle protocol number to use. The default is pickle.DEFAULT_PROTOCOL
            where defined, and pickle.HIGHEST_PROTOCOL elsewhere.
        """
        msg = pickle.dumps(obj, protocol)
        return self.send(msg, flags=flags, **kwargs)

    def recv_pyobj(self, flags: int = 0) -> Any:
        """
        Receive a Python object as a message using UNSAFE pickle to deserialize.

        .. warning::

            Never deserialize an untrusted message with pickle,
            which can involve arbitrary code execution.
            Make sure to authenticate the sources of messages
            before unpickling them, e.g. with transport-level security
            (such as CURVE or IPC permissions)
            or authenticating messages themselves before deserializing.

        Parameters
        ----------
        flags : int
            Any valid flags for :func:`Socket.recv`.

        Returns
        -------
        obj : Python object
            The Python object that arrives as a message.

        Raises
        ------
        ZMQError
            for any of the reasons :func:`~Socket.recv` might fail
        """
        msg = self.recv(flags)
        return self._deserialize(msg, pickle.loads)
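
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # pickle round-trip between *trusted, authenticated* peers only;
    # `push` and `pull` are assumed connected sockets.
    #
    #     push.send_pyobj({"answer": 42})
    #     obj = pull.recv_pyobj()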

    def send_json(self, obj: Any, flags: int = 0, **kwargs) -> None:
        """Send a Python object as a message using json to serialize.

        Keyword arguments are passed on to json.dumps

        Parameters
        ----------
        obj : Python object
            The Python object to send
        flags : int
            Any valid flags for :func:`Socket.send`
        """
        send_kwargs = {}
        for key in ('routing_id', 'group'):
            if key in kwargs:
                send_kwargs[key] = kwargs.pop(key)
        msg = jsonapi.dumps(obj, **kwargs)
        return self.send(msg, flags=flags, **send_kwargs)

    def recv_json(self, flags: int = 0, **kwargs) -> _JSONType:
        """Receive a Python object as a message using json to serialize.

        Keyword arguments are passed on to json.loads

        Parameters
        ----------
        flags : int
            Any valid flags for :func:`Socket.recv`.

        Returns
        -------
        obj : Python object
            The Python object that arrives as a message.

        Raises
        ------
        ZMQError
            for any of the reasons :func:`~Socket.recv` might fail
        """
        msg = self.recv(flags)
        return self._deserialize(msg, lambda buf: jsonapi.loads(buf, **kwargs))
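
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # extra keyword arguments are forwarded to json.dumps / json.loads;
    # `req` and `rep` are assumed connected REQ/REP sockets.
    #
    #     req.send_json({"op": "sum", "args": [1, 2]}, sort_keys=True)
    #     reply = rep.recv_json()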

    _poller_class = Poller

    def poll(self, timeout: int | None = None, flags: int = zmq.POLLIN) -> int:
        """Poll the socket for events.

        See :class:`Poller` to wait for multiple sockets at once.

        Parameters
        ----------
        timeout : int
            The timeout (in milliseconds) to wait for an event. If unspecified
            (or specified None), will wait forever for an event.
        flags : int
            default: POLLIN.
            POLLIN, POLLOUT, or POLLIN|POLLOUT. The event flags to poll for.

        Returns
        -------
        event_mask : int
            The poll event mask (POLLIN, POLLOUT),
            0 if the timeout was reached without an event.
        """

        if self.closed:
            raise ZMQError(zmq.ENOTSUP)

        p = self._poller_class()
        p.register(self, flags)
        evts = dict(p.poll(timeout))
        # return 0 if no events, otherwise return event bitfield
        return evts.get(self, 0)
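
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # poll a single socket with a timeout instead of blocking in recv();
    # `sock` is an assumed socket with pending traffic.
    #
    #     if sock.poll(timeout=100) & zmq.POLLIN:   # wait up to 100 ms
    #         msg = sock.recv(zmq.NOBLOCK)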

    def get_monitor_socket(
        self: _SocketType, events: int | None = None, addr: str | None = None
    ) -> _SocketType:
        """Return a connected PAIR socket ready to receive the event notifications.

        .. versionadded:: libzmq-4.0
        .. versionadded:: 14.0

        Parameters
        ----------
        events : int
            default: `zmq.EVENT_ALL`
            The bitmask defining which events are wanted.
        addr : str
            The optional endpoint for the monitoring sockets.

        Returns
        -------
        socket : zmq.Socket
            The PAIR socket, connected and ready to receive messages.
        """
        # safe-guard, method only available on libzmq >= 4
        if zmq.zmq_version_info() < (4,):
            raise NotImplementedError(
                f"get_monitor_socket requires libzmq >= 4, have {zmq.zmq_version()}"
            )

        # if already monitoring, return existing socket
        if self._monitor_socket:
            if self._monitor_socket.closed:
                self._monitor_socket = None
            else:
                return self._monitor_socket

        if addr is None:
            # create endpoint name from internal fd
            addr = f"inproc://monitor.s-{self.FD}"
        if events is None:
            # use all events
            events = zmq.EVENT_ALL
        # attach monitoring socket
        self.monitor(addr, events)
        # create new PAIR socket and connect it
        self._monitor_socket = self.context.socket(zmq.PAIR)
        self._monitor_socket.connect(addr)
        return self._monitor_socket
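
    # Illustrative sketch (editor's addition, not part of upstream pyzmq):
    # read connection events from the monitor PAIR socket with the helper
    # in zmq.utils.monitor; `sock` is an assumed socket.
    #
    #     from zmq.utils.monitor import recv_monitor_message
    #
    #     monitor = sock.get_monitor_socket()
    #     while monitor.poll(1000):                # milliseconds
    #         evt = recv_monitor_message(monitor)
    #         print(evt["event"], evt["endpoint"])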

    def disable_monitor(self) -> None:
        """Shutdown the PAIR socket (created using get_monitor_socket)
        that is serving socket events.

        .. versionadded:: 14.4
        """
        self._monitor_socket = None
        self.monitor(None, 0)


SyncSocket: TypeAlias = Socket[bytes]

__all__ = ['Socket', 'SyncSocket']
pyzmq-26.4.0/zmq/sugar/stopwatch.py000066400000000000000000000016471477374370200173270ustar00rootroot00000000000000"""Deprecated Stopwatch implementation"""

# Copyright (c) PyZMQ Development Team.
# Distributed under the terms of the Modified BSD License.


class Stopwatch:
    """Deprecated zmq.Stopwatch implementation

    You can use Python's builtin timers (time.monotonic, etc.).
    """

    def __init__(self):
        import warnings

        warnings.warn(
            "zmq.Stopwatch is deprecated. Use stdlib time.monotonic and friends instead",
            DeprecationWarning,
            stacklevel=2,
        )
        self._start = 0
        import time

        try:
            self._monotonic = time.monotonic
        except AttributeError:
            self._monotonic = time.time

    def start(self):
        """Start the counter"""
        self._start = self._monotonic()

    def stop(self):
        """Return time since start in microseconds"""
        stop = self._monotonic()
        return int(1e6 * (stop - self._start))
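
# Illustrative sketch (editor's addition, not part of upstream pyzmq):
# the recommended stdlib replacement for zmq.Stopwatch.
#
#     import time
#     tic = time.monotonic()
#     ...
#     elapsed_us = int(1e6 * (time.monotonic() - tic))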
pyzmq-26.4.0/zmq/sugar/tracker.py000066400000000000000000000070231477374370200167400ustar00rootroot00000000000000"""Tracker for zero-copy messages with 0MQ."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from __future__ import annotations

import time
from threading import Event

from zmq.backend import Frame
from zmq.error import NotDone


class MessageTracker:
    """A class for tracking if 0MQ is done using one or more messages.

    When you send a 0MQ message, it is not sent immediately. The 0MQ IO thread
    sends the message at some later time. Often you want to know when 0MQ has
    actually sent the message though. This is complicated by the fact that
    a single 0MQ message can be sent multiple times using different sockets.
    This class allows you to track all of the 0MQ usages of a message.

    Parameters
    ----------
    towatch : Event, MessageTracker, zmq.Frame
        The objects to track. This class can track the low-level
        Events used by the Message class, other MessageTrackers or
        actual Messages.
    """

    events: set[Event]
    peers: set[MessageTracker]

    def __init__(self, *towatch: MessageTracker | Event | Frame):
        """Create a message tracker to track a set of messages.

        Parameters
        ----------
        *towatch : tuple of Event, MessageTracker, Message instances.
            The list of objects to track. This class can track the low-level
            Events used by the Message class, other MessageTrackers or
            actual Messages.
        """
        self.events = set()
        self.peers = set()
        for obj in towatch:
            if isinstance(obj, Event):
                self.events.add(obj)
            elif isinstance(obj, MessageTracker):
                self.peers.add(obj)
            elif isinstance(obj, Frame):
                if not obj.tracker:
                    raise ValueError("Not a tracked message")
                self.peers.add(obj.tracker)
            else:
                raise TypeError(f"Require Events or Message Frames, not {type(obj)}")

    @property
    def done(self):
        """Is 0MQ completely done with the message(s) being tracked?"""
        for evt in self.events:
            if not evt.is_set():
                return False
        for pm in self.peers:
            if not pm.done:
                return False
        return True

    def wait(self, timeout: float | int = -1):
        """Wait for 0MQ to be done with the message or until `timeout`.

        Parameters
        ----------
        timeout : float
            default: -1, which means wait forever.
            Maximum time in seconds to wait before raising NotDone.

        Returns
        -------
        None
            if done before `timeout`

        Raises
        ------
        NotDone
            if `timeout` reached before I am done.
        """
        tic = time.time()
        remaining: float
        if timeout is False or timeout < 0:
            remaining = 3600 * 24 * 7  # a week
        else:
            remaining = timeout
        for evt in self.events:
            if remaining < 0:
                raise NotDone
            evt.wait(timeout=remaining)
            if not evt.is_set():
                raise NotDone
            toc = time.time()
            remaining -= toc - tic
            tic = toc

        for peer in self.peers:
            if remaining < 0:
                raise NotDone
            peer.wait(timeout=remaining)
            toc = time.time()
            remaining -= toc - tic
            tic = toc


_FINISHED_TRACKER = MessageTracker()

__all__ = ['MessageTracker', '_FINISHED_TRACKER']
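

# ----------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module): how a tracker is
# obtained from a zero-copy send and then waited on.  The socket types,
# endpoint, buffer size, and timeout below are illustrative assumptions.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    import zmq

    ctx = zmq.Context.instance()
    sender = ctx.socket(zmq.PAIR)
    receiver = ctx.socket(zmq.PAIR)
    port = sender.bind_to_random_port("tcp://127.0.0.1")
    receiver.connect(f"tcp://127.0.0.1:{port}")

    # copy=False + track=True returns a MessageTracker for the pinned buffer;
    # use a buffer larger than zmq.COPY_THRESHOLD so it is actually zero-copied
    buf = bytearray(b"x" * 100_000)
    tracker = sender.send(buf, copy=False, track=True)
    assert isinstance(tracker, MessageTracker)

    receiver.recv()
    try:
        tracker.wait(timeout=2)  # block until libzmq releases `buf`
        print("libzmq done with buffer:", tracker.done)
    except NotDone:
        print("libzmq still holds a reference to the buffer")
    finally:
        sender.close(linger=0)
        receiver.close(linger=0)
        ctx.term()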
pyzmq-26.4.0/zmq/sugar/version.py000066400000000000000000000031241477374370200167700ustar00rootroot00000000000000"""PyZMQ and 0MQ version functions."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

import re
from typing import Match, cast

from zmq.backend import zmq_version_info

__version__: str = "26.4.0"
_version_pat = re.compile(r"(\d+)\.(\d+)\.(\d+)(.*)")
_match = cast(Match, _version_pat.match(__version__))
_version_groups = _match.groups()

VERSION_MAJOR = int(_version_groups[0])
VERSION_MINOR = int(_version_groups[1])
VERSION_PATCH = int(_version_groups[2])
VERSION_EXTRA = _version_groups[3].lstrip(".")

version_info: tuple[int, int, int] | tuple[int, int, int, float] = (
    VERSION_MAJOR,
    VERSION_MINOR,
    VERSION_PATCH,
)

if VERSION_EXTRA:
    version_info = (
        VERSION_MAJOR,
        VERSION_MINOR,
        VERSION_PATCH,
        float('inf'),
    )

__revision__: str = ''


def pyzmq_version() -> str:
    """return the version of pyzmq as a string"""
    if __revision__:
        return '+'.join([__version__, __revision__[:6]])
    else:
        return __version__


def pyzmq_version_info() -> tuple[int, int, int] | tuple[int, int, int, float]:
    """return the pyzmq version as a tuple of at least three numbers

    If pyzmq is a development version, `inf` will be appended after the third integer.
    """
    return version_info


def zmq_version() -> str:
    """return the version of libzmq as a string"""
    return "{}.{}.{}".format(*zmq_version_info())


__all__ = [
    'zmq_version',
    'zmq_version_info',
    'pyzmq_version',
    'pyzmq_version_info',
    '__version__',
    '__revision__',
]
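

# Editorial usage sketch (not part of the original module): feature-gating on
# the running pyzmq / libzmq versions with the helpers defined above.
if __name__ == "__main__":
    print("pyzmq", pyzmq_version(), "built against libzmq", zmq_version())
    if pyzmq_version_info() >= (26,):
        print("version_info tuples compare naturally:", version_info)
    if zmq_version_info() >= (4, 3):
        print("running against libzmq >= 4.3")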
pyzmq-26.4.0/zmq/tests/000077500000000000000000000000001477374370200147525ustar00rootroot00000000000000pyzmq-26.4.0/zmq/tests/__init__.py000066400000000000000000000174041477374370200170710ustar00rootroot00000000000000# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.

import os
import platform
import signal
import sys
import time
import warnings
from functools import partial
from threading import Thread
from typing import List
from unittest import SkipTest, TestCase

from pytest import mark

import zmq
from zmq.utils import jsonapi

try:
    import gevent

    from zmq import green as gzmq

    have_gevent = True
except ImportError:
    have_gevent = False


PYPY = platform.python_implementation() == 'PyPy'

# -----------------------------------------------------------------------------
# skip decorators (directly from unittest)
# -----------------------------------------------------------------------------
warnings.warn(
    "zmq.tests is deprecated in pyzmq 25, we recommend managing your own contexts and sockets.",
    DeprecationWarning,
)


def _id(x):
    return x


skip_pypy = mark.skipif(PYPY, reason="Doesn't work on PyPy")
require_zmq_4 = mark.skipif(zmq.zmq_version_info() < (4,), reason="requires zmq >= 4")

# -----------------------------------------------------------------------------
# Base test class
# -----------------------------------------------------------------------------


def term_context(ctx, timeout):
    """Terminate a context with a timeout"""
    t = Thread(target=ctx.term)
    t.daemon = True
    t.start()
    t.join(timeout=timeout)
    if t.is_alive():
        # reset Context.instance, so the failure to term doesn't corrupt subsequent tests
        zmq.sugar.context.Context._instance = None
        raise RuntimeError(
            "context could not terminate, open sockets likely remain in test"
        )


class BaseZMQTestCase(TestCase):
    green = False
    teardown_timeout = 10
    test_timeout_seconds = int(os.environ.get("ZMQ_TEST_TIMEOUT") or 60)
    sockets: List[zmq.Socket]

    @property
    def _is_pyzmq_test(self):
        return self.__class__.__module__.split(".", 1)[0] == __name__.split(".", 1)[0]

    @property
    def _should_test_timeout(self):
        return (
            self._is_pyzmq_test
            and hasattr(signal, 'SIGALRM')
            and self.test_timeout_seconds
        )

    @property
    def Context(self):
        if self.green:
            return gzmq.Context
        else:
            return zmq.Context

    def socket(self, socket_type):
        s = self.context.socket(socket_type)
        self.sockets.append(s)
        return s

    def _alarm_timeout(self, timeout, *args):
        raise TimeoutError(f"Test did not complete in {timeout} seconds")

    def setUp(self):
        super().setUp()
        if self.green and not have_gevent:
            raise SkipTest("requires gevent")

        self.context = self.Context.instance()
        self.sockets = []
        if self._should_test_timeout:
            # use SIGALRM to avoid test hangs
            signal.signal(
                signal.SIGALRM, partial(self._alarm_timeout, self.test_timeout_seconds)
            )
            signal.alarm(self.test_timeout_seconds)

    def tearDown(self):
        if self._should_test_timeout:
            # cancel the timeout alarm, if there was one
            signal.alarm(0)
        contexts = {self.context}
        while self.sockets:
            sock = self.sockets.pop()
            contexts.add(sock.context)  # in case additional contexts are created
            sock.close(0)
        for ctx in contexts:
            try:
                term_context(ctx, self.teardown_timeout)
            except Exception:
                # reset Context.instance, so the failure to term doesn't corrupt subsequent tests
                zmq.sugar.context.Context._instance = None
                raise

        super().tearDown()

    def create_bound_pair(
        self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'
    ):
        """Create a bound socket pair using a random port."""
        s1 = self.context.socket(type1)
        s1.setsockopt(zmq.LINGER, 0)
        port = s1.bind_to_random_port(interface)
        s2 = self.context.socket(type2)
        s2.setsockopt(zmq.LINGER, 0)
        s2.connect(f'{interface}:{port}')
        self.sockets.extend([s1, s2])
        return s1, s2

    def ping_pong(self, s1, s2, msg):
        s1.send(msg)
        msg2 = s2.recv()
        s2.send(msg2)
        msg3 = s1.recv()
        return msg3

    def ping_pong_json(self, s1, s2, o):
        if jsonapi.jsonmod is None:
            raise SkipTest("No json library")
        s1.send_json(o)
        o2 = s2.recv_json()
        s2.send_json(o2)
        o3 = s1.recv_json()
        return o3

    def ping_pong_pyobj(self, s1, s2, o):
        s1.send_pyobj(o)
        o2 = s2.recv_pyobj()
        s2.send_pyobj(o2)
        o3 = s1.recv_pyobj()
        return o3

    def assertRaisesErrno(self, errno, func, *args, **kwargs):
        try:
            func(*args, **kwargs)
        except zmq.ZMQError as e:
            self.assertEqual(
                e.errno,
                errno,
                f"wrong error raised, expected '{zmq.ZMQError(errno)}' \
got '{zmq.ZMQError(e.errno)}'",
            )
        else:
            self.fail("Function did not raise any error")

    def _select_recv(self, multipart, socket, **kwargs):
        """call recv[_multipart] in a way that raises if there is nothing to receive"""
        # zmq 3.1 has a bug, where poll can return false positives,
        # so we wait a little bit just in case
        # See LIBZMQ-280 on JIRA
        time.sleep(0.1)

        r, w, x = zmq.select([socket], [], [], timeout=kwargs.pop('timeout', 5))
        assert len(r) > 0, "Should have received a message"
        kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0)

        recv = socket.recv_multipart if multipart else socket.recv
        return recv(**kwargs)

    def recv(self, socket, **kwargs):
        """call recv in a way that raises if there is nothing to receive"""
        return self._select_recv(False, socket, **kwargs)

    def recv_multipart(self, socket, **kwargs):
        """call recv_multipart in a way that raises if there is nothing to receive"""
        return self._select_recv(True, socket, **kwargs)


class PollZMQTestCase(BaseZMQTestCase):
    pass


class GreenTest:
    """Mixin for making green versions of test classes"""

    green = True
    teardown_timeout = 10

    def assertRaisesErrno(self, errno, func, *args, **kwargs):
        if errno == zmq.EAGAIN:
            raise SkipTest("Skipping because we're green.")
        try:
            func(*args, **kwargs)
        except zmq.ZMQError:
            e = sys.exc_info()[1]
            self.assertEqual(
                e.errno,
                errno,
                f"wrong error raised, expected '{zmq.ZMQError(errno)}' \
got '{zmq.ZMQError(e.errno)}'",
            )
        else:
            self.fail("Function did not raise any error")

    def tearDown(self):
        if self._should_test_timeout:
            # cancel the timeout alarm, if there was one
            signal.alarm(0)
        contexts = {self.context}
        while self.sockets:
            sock = self.sockets.pop()
            contexts.add(sock.context)  # in case additional contexts are created
            sock.close()
        try:
            gevent.joinall(
                [gevent.spawn(ctx.term) for ctx in contexts],
                timeout=self.teardown_timeout,
                raise_error=True,
            )
        except gevent.Timeout:
            raise RuntimeError(
                "context could not terminate, open sockets likely remain in test"
            )

    def skip_green(self):
        raise SkipTest("Skipping because we are green")


def skip_green(f):
    def skipping_test(self, *args, **kwargs):
        if self.green:
            raise SkipTest("Skipping because we are green")
        else:
            return f(self, *args, **kwargs)

    return skipping_test
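

# ----------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module): how a downstream
# project might have used this (now deprecated) helper before pyzmq 25.
# The test class and method names are illustrative assumptions.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    import unittest

    class PairPingPongTest(BaseZMQTestCase):
        def test_ping_pong(self):
            s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
            assert self.ping_pong(s1, s2, b"ping") == b"ping"

    unittest.main()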
pyzmq-26.4.0/zmq/utils/000077500000000000000000000000001477374370200147505ustar00rootroot00000000000000pyzmq-26.4.0/zmq/utils/__init__.py000066400000000000000000000000001477374370200170470ustar00rootroot00000000000000pyzmq-26.4.0/zmq/utils/garbage.py000066400000000000000000000137541477374370200167240ustar00rootroot00000000000000"""Garbage collection thread for representing zmq refcount of Python objects
used in zero-copy sends.
"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import atexit
import struct
import warnings
from collections import namedtuple
from os import getpid
from threading import Event, Lock, Thread

import zmq

gcref = namedtuple('gcref', ['obj', 'event'])


class GarbageCollectorThread(Thread):
    """Thread in which garbage collection actually happens."""

    def __init__(self, gc):
        super().__init__()
        self.gc = gc
        self.daemon = True
        self.pid = getpid()
        self.ready = Event()

    def run(self):
        # detect fork at beginning of the thread
        if getpid is None or getpid() != self.pid:
            self.ready.set()
            return
        try:
            s = self.gc.context.socket(zmq.PULL)
            s.linger = 0
            s.bind(self.gc.url)
        finally:
            self.ready.set()

        while True:
            # detect fork
            if getpid is None or getpid() != self.pid:
                return
            msg = s.recv()
            if msg == b'DIE':
                break
            fmt = 'L' if len(msg) == 4 else 'Q'
            key = struct.unpack(fmt, msg)[0]
            tup = self.gc.refs.pop(key, None)
            if tup and tup.event:
                tup.event.set()
            del tup
        s.close()


class GarbageCollector:
    """PyZMQ Garbage Collector

    Used for representing the reference held by libzmq during zero-copy sends.
    This object holds a dictionary, keyed by Python id,
    of the Python objects whose memory is currently in use by zeromq.

    When zeromq is done with the memory, it sends a message on an inproc PUSH socket
    containing the packed size_t (32 or 64-bit unsigned int),
    which is the key in the dict.
    When the PULL socket in the gc thread receives that message,
    the reference is popped from the dict,
    and any tracker events that should be signaled fire.
    """

    refs = None
    _context = None
    _lock = None
    url = "inproc://pyzmq.gc.01"

    def __init__(self, context=None):
        super().__init__()
        self.refs = {}
        self.pid = None
        self.thread = None
        self._context = context
        self._lock = Lock()
        self._stay_down = False
        self._push = None
        self._push_mutex = None
        atexit.register(self._atexit)

    @property
    def context(self):
        if self._context is None:
            if Thread.__module__.startswith('gevent'):
                # gevent has monkey-patched Thread, use green Context
                from zmq import green

                self._context = green.Context()
            else:
                self._context = zmq.Context()
        return self._context

    @context.setter
    def context(self, ctx):
        if self.is_alive():
            if self.refs:
                warnings.warn(
                    "Replacing gc context while gc is running", RuntimeWarning
                )
            self.stop()
        self._context = ctx

    def _atexit(self):
        """atexit callback

        sets _stay_down flag so that gc doesn't try to start up again in other atexit handlers
        """
        self._stay_down = True
        self.stop()

    def stop(self):
        """stop the garbage-collection thread"""
        if not self.is_alive():
            return
        self._stop()

    def _clear(self):
        """Clear state

        called after stop or when setting up a new subprocess
        """
        self._push = None
        self._push_mutex = None
        self.thread = None
        self.refs.clear()
        self.context = None

    def _stop(self):
        push = self.context.socket(zmq.PUSH)
        push.connect(self.url)
        push.send(b'DIE')
        push.close()
        if self._push:
            self._push.close()
        self.thread.join()
        self.context.term()
        self._clear()

    @property
    def _push_socket(self):
        """The PUSH socket for use in the zmq message destructor callback."""
        if getattr(self, "_stay_down", False):
            raise RuntimeError("zmq gc socket requested during shutdown")
        if not self.is_alive() or self._push is None:
            self._push = self.context.socket(zmq.PUSH)
            self._push.connect(self.url)
        return self._push

    def start(self):
        """Start a new garbage collection thread.

        Creates a new zmq Context used for garbage collection.
        Under most circumstances, this will only be called once per process.
        """
        if self.thread is not None and self.pid != getpid():
            # It's re-starting, must free earlier thread's context
            # since a fork probably broke it
            self._clear()
        self.pid = getpid()
        self.refs = {}
        self.thread = GarbageCollectorThread(self)
        self.thread.start()
        self.thread.ready.wait()

    def is_alive(self):
        """Is the garbage collection thread currently running?

        Includes checks for process shutdown or fork.
        """
        if (
            getpid is None
            or getpid() != self.pid
            or self.thread is None
            or not self.thread.is_alive()
        ):
            return False
        return True

    def store(self, obj, event=None):
        """store an object and (optionally) event for zero-copy"""
        if not self.is_alive():
            if self._stay_down:
                return 0
            # safely start the gc thread
            # use lock and double check,
            # so we don't start multiple threads
            with self._lock:
                if not self.is_alive():
                    self.start()
        tup = gcref(obj, event)
        theid = id(tup)
        self.refs[theid] = tup
        return theid

    def __del__(self):
        if not self.is_alive():
            return
        try:
            self.stop()
        except Exception as e:
            raise (e)


gc = GarbageCollector()
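

# ----------------------------------------------------------------------------
# Editorial sketch (not part of the original module) of the notification
# protocol described in the GarbageCollector docstring: store() registers a
# buffer + Event, and a packed key arriving on the inproc PULL socket releases
# it.  Here the message libzmq would normally send is simulated by hand.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    done = Event()
    buf = bytearray(b"pinned by libzmq")
    key = gc.store(buf, done)          # starts the gc thread on first use

    push = gc.context.socket(zmq.PUSH)
    push.connect(gc.url)
    push.send(struct.pack('Q', key))   # what the zmq free-fn callback sends
    push.close()

    done.wait(timeout=1)
    print("reference released:", done.is_set())
    gc.stop()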
pyzmq-26.4.0/zmq/utils/getpid_compat.h000066400000000000000000000001641477374370200177410ustar00rootroot00000000000000#pragma once
#ifdef _WIN32
    #include <process.h>
    #define getpid _getpid
#else
    #include <unistd.h>
#endif
pyzmq-26.4.0/zmq/utils/interop.py000066400000000000000000000012551477374370200170050ustar00rootroot00000000000000"""Utils for interoperability with other libraries.

Just CFFI pointer casting for now.
"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from typing import Any


def cast_int_addr(n: Any) -> int:
    """Cast an address to a Python int

    This could be a Python integer or a CFFI pointer
    """
    if isinstance(n, int):
        return n
    try:
        import cffi  # type: ignore
    except ImportError:
        pass
    else:
        # from pyzmq, this is an FFI void *
        ffi = cffi.FFI()
        if isinstance(n, ffi.CData):
            return int(ffi.cast("size_t", n))

    raise ValueError(f"Cannot cast {n!r} to int")
pyzmq-26.4.0/zmq/utils/ipcmaxlen.h000066400000000000000000000010121477374370200170730ustar00rootroot00000000000000/*

Platform-independent detection of IPC path max length

Copyright (c) 2012 Godefroid Chapelle

Distributed under the terms of the New BSD License.  The full license is in
the file LICENSE.BSD, distributed as part of this software.
 */

#pragma once

#if defined(HAVE_SYS_UN_H)
#if defined _MSC_VER
#include <afunix.h>
#else
#include <sys/un.h>
#endif
int get_ipc_path_max_len(void) {
    struct sockaddr_un *dummy;
    return sizeof(dummy->sun_path) - 1;
}
#else
int get_ipc_path_max_len(void) {
    return 0;
}
#endif
pyzmq-26.4.0/zmq/utils/jsonapi.py000066400000000000000000000020011477374370200167560ustar00rootroot00000000000000"""JSON serialize to/from utf8 bytes

.. versionchanged:: 22.2
    Remove optional imports of different JSON implementations.
    Now that we require recent Python, unconditionally use the standard library.
    Custom JSON libraries can be used via custom serialization functions.
"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

import json
from typing import Any

# backward-compatibility, unused
jsonmod = json


def dumps(o: Any, **kwargs) -> bytes:
    """Serialize object to JSON bytes (utf-8).

    Keyword arguments are passed along to :py:func:`json.dumps`.
    """
    return json.dumps(o, **kwargs).encode("utf8")


def loads(s: bytes | str, **kwargs) -> dict | list | str | int | float:
    """Load object from JSON bytes (utf-8).

    Keyword arguments are passed along to :py:func:`json.loads`.
    """
    if isinstance(s, bytes):
        s = s.decode("utf8")
    return json.loads(s, **kwargs)


__all__ = ['dumps', 'loads']
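

# Editorial usage sketch (not part of the original module): round-tripping a
# payload the way send_json/recv_json do, with keyword args forwarded to json.
if __name__ == "__main__":
    wire = dumps({"op": "ping", "seq": 1}, sort_keys=True)
    assert isinstance(wire, bytes)
    assert loads(wire) == {"op": "ping", "seq": 1}
    print(wire.decode("utf8"))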
pyzmq-26.4.0/zmq/utils/monitor.py000066400000000000000000000063271477374370200170210ustar00rootroot00000000000000"""Module holding utility and convenience functions for zmq event monitoring."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

from __future__ import annotations

import struct
from typing import Awaitable, TypedDict, overload

import zmq
import zmq.asyncio
from zmq.error import _check_version


class _MonitorMessage(TypedDict):
    event: int
    value: int
    endpoint: bytes


def parse_monitor_message(msg: list[bytes]) -> _MonitorMessage:
    """decode zmq_monitor event messages.

    Parameters
    ----------
    msg : list(bytes)
        zmq multipart message that has arrived on a monitor PAIR socket.

        First frame is::

            16 bit event id
            32 bit event value
            no padding

        Second frame is the endpoint as a bytestring

    Returns
    -------
    event : dict
        event description as dict with the keys `event`, `value`, and `endpoint`.
    """
    if len(msg) != 2 or len(msg[0]) != 6:
        raise RuntimeError(f"Invalid event message format: {msg}")
    event_id, value = struct.unpack("=hi", msg[0])
    event: _MonitorMessage = {
        'event': zmq.Event(event_id),
        'value': zmq.Event(value),
        'endpoint': msg[1],
    }
    return event


async def _parse_monitor_msg_async(
    awaitable_msg: Awaitable[list[bytes]],
) -> _MonitorMessage:
    """Like parse_monitor_msg, but awaitable

    Given awaitable message, return awaitable for the parsed monitor message.
    """

    msg = await awaitable_msg
    # 4.0-style event API
    return parse_monitor_message(msg)


@overload
def recv_monitor_message(
    socket: zmq.asyncio.Socket,
    flags: int = 0,
) -> Awaitable[_MonitorMessage]: ...


@overload
def recv_monitor_message(
    socket: zmq.Socket[bytes],
    flags: int = 0,
) -> _MonitorMessage: ...


def recv_monitor_message(
    socket: zmq.Socket,
    flags: int = 0,
) -> _MonitorMessage | Awaitable[_MonitorMessage]:
    """Receive and decode the given raw message from the monitoring socket and return a dict.

    Requires libzmq ≥ 4.0

    The returned dict will have the following entries:
      event : int
        the event id as described in `libzmq.zmq_socket_monitor`
      value : int
        the event value associated with the event, see `libzmq.zmq_socket_monitor`
      endpoint : bytes
        the affected endpoint

    .. versionchanged:: 23.1
        Support for async sockets added.
        When called with a async socket,
        returns an awaitable for the monitor message.

    Parameters
    ----------
    socket : zmq.Socket
        The PAIR socket (created by other.get_monitor_socket()) on which to recv the message
    flags : int
        standard zmq recv flags

    Returns
    -------
    event : dict
        event description as dict with the keys `event`, `value`, and `endpoint`.
    """

    _check_version((4, 0), 'libzmq event API')
    # will always return a list
    msg = socket.recv_multipart(flags)

    # transparently handle asyncio socket,
    # returns a Future instead of a dict
    if isinstance(msg, Awaitable):
        return _parse_monitor_msg_async(msg)

    # 4.0-style event API
    return parse_monitor_message(msg)


__all__ = ['parse_monitor_message', 'recv_monitor_message']
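

# ----------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module): watch a REQ socket
# connect by reading events from its monitor PAIR socket.  The endpoint and
# socket types here are illustrative assumptions.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    ctx = zmq.Context.instance()
    rep = ctx.socket(zmq.REP)
    port = rep.bind_to_random_port("tcp://127.0.0.1")

    req = ctx.socket(zmq.REQ)
    monitor = req.get_monitor_socket()      # PAIR socket delivering events
    req.connect(f"tcp://127.0.0.1:{port}")

    while True:
        evt = recv_monitor_message(monitor)
        print(evt['event'].name, evt['endpoint'])
        if evt['event'] == zmq.EVENT_CONNECTED:
            break

    req.disable_monitor()
    monitor.close()
    for sock in (req, rep):
        sock.close(linger=0)
    ctx.term()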
pyzmq-26.4.0/zmq/utils/mutex.h000066400000000000000000000031561477374370200162700ustar00rootroot00000000000000/*
* simplified from mutex.c from Foundation Library, in the Public Domain
* https://github.com/rampantpixels/foundation_lib/blob/master/foundation/mutex.c
*
* This file is Copyright (C) PyZMQ Developers
* Distributed under the terms of the Modified BSD License.
*
*/

#pragma once

#include <stdlib.h>

#if defined(_WIN32)
#  include <windows.h>
#else
#  include <pthread.h>
#endif

typedef struct {
#if defined(_WIN32)
    CRITICAL_SECTION csection;
#else
    pthread_mutex_t  mutex;
#endif
} mutex_t;


static void
_mutex_initialize(mutex_t* mutex) {
#if defined(_WIN32)
    InitializeCriticalSectionAndSpinCount(&mutex->csection, 4000);
#else
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mutex->mutex, &attr);
    pthread_mutexattr_destroy(&attr);
#endif
}

static void
_mutex_finalize(mutex_t* mutex) {
#if defined(_WIN32)
    DeleteCriticalSection(&mutex->csection);
#else
    pthread_mutex_destroy(&mutex->mutex);
#endif
}

mutex_t*
mutex_allocate(void) {
    mutex_t* mutex = (mutex_t*)malloc(sizeof(mutex_t));
    _mutex_initialize(mutex);
    return mutex;
}

void
mutex_deallocate(mutex_t* mutex) {
    if (!mutex)
        return;
    _mutex_finalize(mutex);
    free(mutex);
}

int
mutex_lock(mutex_t* mutex) {
#if defined(_WIN32)
    EnterCriticalSection(&mutex->csection);
    return 0;
#else
    return pthread_mutex_lock(&mutex->mutex);
#endif
}

int
mutex_unlock(mutex_t* mutex) {
#if defined(_WIN32)
    LeaveCriticalSection(&mutex->csection);
    return 0;
#else
    return pthread_mutex_unlock(&mutex->mutex);
#endif
}
pyzmq-26.4.0/zmq/utils/pyversion_compat.h000066400000000000000000000004341477374370200205230ustar00rootroot00000000000000#include "Python.h"

// default to Python's own target Windows version(s)
// override by setting WINVER, _WIN32_WINNT, (maybe also NTDDI_VERSION?) macros
#ifdef Py_WINVER
#ifndef WINVER
#define WINVER Py_WINVER
#endif
#ifndef _WIN32_WINNT
#define _WIN32_WINNT Py_WINVER
#endif
#endif
pyzmq-26.4.0/zmq/utils/strtypes.py000066400000000000000000000025401477374370200172200ustar00rootroot00000000000000"""Declare basic string types unambiguously for various Python versions.

Authors
-------
* MinRK
"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import warnings

bytes = bytes
unicode = str
basestring = (str,)


def cast_bytes(s, encoding='utf8', errors='strict'):
    """cast unicode or bytes to bytes"""
    warnings.warn(
        "zmq.utils.strtypes is deprecated in pyzmq 23.",
        DeprecationWarning,
        stacklevel=2,
    )
    if isinstance(s, bytes):
        return s
    elif isinstance(s, str):
        return s.encode(encoding, errors)
    else:
        raise TypeError(f"Expected unicode or bytes, got {s!r}")


def cast_unicode(s, encoding='utf8', errors='strict'):
    """cast bytes or unicode to unicode"""
    warnings.warn(
        "zmq.utils.strtypes is deprecated in pyzmq 23.",
        DeprecationWarning,
        stacklevel=2,
    )
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    elif isinstance(s, str):
        return s
    else:
        raise TypeError(f"Expected unicode or bytes, got {s!r}")


# give short 'b' alias for cast_bytes, so that b(...) calls can stand in
# for b'...' literals (and 'u' likewise for cast_unicode)
b = asbytes = cast_bytes
u = cast_unicode

__all__ = [
    'asbytes',
    'bytes',
    'unicode',
    'basestring',
    'b',
    'u',
    'cast_bytes',
    'cast_unicode',
]
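

# Editorial usage sketch (not part of the original module): the deprecated
# casts are simple encode/decode round-trips (and warn on use).
if __name__ == "__main__":
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        assert cast_bytes("héllo") == "héllo".encode("utf8")
        assert cast_unicode(b"h\xc3\xa9llo") == "héllo"
    print("round-trips ok")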
pyzmq-26.4.0/zmq/utils/win32.py000066400000000000000000000115141477374370200162660ustar00rootroot00000000000000"""Win32 compatibility utilities."""

# -----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
# -----------------------------------------------------------------------------
from __future__ import annotations

import os
from typing import Any, Callable


class allow_interrupt:
    """Utility for fixing CTRL-C events on Windows.

    On Windows, the Python interpreter intercepts CTRL-C events in order to
    translate them into ``KeyboardInterrupt`` exceptions.  It (presumably)
    does this by setting a flag in its "console control handler" and
    checking it later at a convenient location in the interpreter.

    However, when the Python interpreter is blocked waiting for the ZMQ
    poll operation to complete, it must wait for ZMQ's ``select()``
    operation to complete before translating the CTRL-C event into the
    ``KeyboardInterrupt`` exception.

    The only way to fix this seems to be to add our own "console control
    handler" and perform some application-defined operation that will
    unblock the ZMQ polling operation in order to force ZMQ to pass control
    back to the Python interpreter.

    This context manager performs all that Windows-y stuff, providing you
    with a hook that is called when a CTRL-C event is intercepted.  This
    hook allows you to unblock your ZMQ poll operation immediately, which
    will then result in the expected ``KeyboardInterrupt`` exception.

    Without this context manager, your ZMQ-based application will not
    respond normally to CTRL-C events on Windows.  If a CTRL-C event occurs
    while blocked on ZMQ socket polling, the translation to a
    ``KeyboardInterrupt`` exception will be delayed until the I/O completes
    and control returns to the Python interpreter (this may never happen if
    you use an infinite timeout).

    A no-op implementation is provided on non-Win32 systems so that the
    application does not have to use it conditionally.

    Example usage:

    .. sourcecode:: python

       def stop_my_application():
           ...  # e.g. send on a PAIR socket to unblock the poll below

       with allow_interrupt(stop_my_application):
           ...  # main polling loop

    In a typical ZMQ application, you would use the "self pipe trick" to
    send message to a ``PAIR`` socket in order to interrupt your blocking
    socket polling operation.

    In a Tornado event loop, you can use the ``IOLoop.stop`` method to
    unblock your I/O loop.
    """

    def __init__(self, action: Callable[[], Any] | None = None) -> None:
        """Translate ``action`` into a CTRL-C handler.

        ``action`` is a callable that takes no arguments and returns no
        value (returned value is ignored).  It must *NEVER* raise an
        exception.

        If unspecified, a no-op will be used.
        """
        if os.name != "nt":
            return
        self._init_action(action)

    def _init_action(self, action):
        from ctypes import WINFUNCTYPE, windll
        from ctypes.wintypes import BOOL, DWORD

        kernel32 = windll.LoadLibrary('kernel32')

        # 
        PHANDLER_ROUTINE = WINFUNCTYPE(BOOL, DWORD)
        SetConsoleCtrlHandler = self._SetConsoleCtrlHandler = (
            kernel32.SetConsoleCtrlHandler
        )
        SetConsoleCtrlHandler.argtypes = (PHANDLER_ROUTINE, BOOL)
        SetConsoleCtrlHandler.restype = BOOL

        if action is None:

            def action():
                return None

        self.action = action

        @PHANDLER_ROUTINE
        def handle(event):
            if event == 0:  # CTRL_C_EVENT
                action()
                # Typical C implementations would return 1 to indicate that
                # the event was processed and other control handlers in the
                # stack should not be executed.  However, that would
                # prevent the Python interpreter's handler from translating
                # CTRL-C to a `KeyboardInterrupt` exception, so we pretend
                # that we didn't handle it.
            return 0

        self.handle = handle

    def __enter__(self):
        """Install the custom CTRL-C handler."""
        if os.name != "nt":
            return
        result = self._SetConsoleCtrlHandler(self.handle, 1)
        if result == 0:
            # Have standard library automatically call `GetLastError()` and
            # `FormatMessage()` into a nice exception object :-)
            raise OSError()

    def __exit__(self, *args):
        """Remove the custom CTRL-C handler."""
        if os.name != "nt":
            return
        result = self._SetConsoleCtrlHandler(self.handle, 0)
        if result == 0:
            # Have standard library automatically call `GetLastError()` and
            # `FormatMessage()` into a nice exception object :-)
            raise OSError()
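

# ----------------------------------------------------------------------------
# Editorial sketch (not part of the original module) of the "self pipe trick"
# mentioned in the docstring: CTRL-C wakes a blocking poll by writing to a
# PAIR socket.  The inproc endpoint name and 10s demo timeout are assumptions.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    import zmq

    ctx = zmq.Context.instance()
    alarm = ctx.socket(zmq.PAIR)
    alarm.bind("inproc://ctrl-c-demo")
    waker = ctx.socket(zmq.PAIR)
    waker.connect("inproc://ctrl-c-demo")

    def interrupt_polling():
        waker.send(b"")              # unblock the poll below

    poller = zmq.Poller()
    poller.register(alarm, zmq.POLLIN)
    try:
        with allow_interrupt(interrupt_polling):
            print("polling for 10s; press CTRL-C to interrupt")
            poller.poll(timeout=10_000)
    except KeyboardInterrupt:
        print("interrupted")
    finally:
        for sock in (alarm, waker):
            sock.close(linger=0)
        ctx.term()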
pyzmq-26.4.0/zmq/utils/z85.py000066400000000000000000000034561477374370200157600ustar00rootroot00000000000000"""Python implementation of Z85 85-bit encoding

Z85 encoding is a plaintext encoding for a bytestring interpreted as 32bit integers.
Since the chunks are 32bit, a bytestring must be a multiple of 4 bytes.
See ZMQ RFC 32 for details.


"""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations

import struct

# Z85CHARS is the base 85 symbol table
Z85CHARS = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.-:+=^!/*?&<>()[]{}@%$#"
# Z85MAP maps integers in [0,84] to the appropriate character in Z85CHARS
Z85MAP = {c: idx for idx, c in enumerate(Z85CHARS)}

_85s = [85**i for i in range(5)][::-1]


def encode(rawbytes):
    """encode raw bytes into Z85"""
    # accepts only byte sequences whose length is a multiple of 4
    if len(rawbytes) % 4:
        raise ValueError(f"length must be multiple of 4, not {len(rawbytes)}")

    nvalues = len(rawbytes) // 4
    values = struct.unpack(f'>{nvalues:d}I', rawbytes)
    encoded = []
    for v in values:
        for offset in _85s:
            encoded.append(Z85CHARS[(v // offset) % 85])

    return bytes(encoded)


def decode(z85bytes):
    """decode Z85 bytes to raw bytes, accepts ASCII string"""
    if isinstance(z85bytes, str):
        try:
            z85bytes = z85bytes.encode('ascii')
        except UnicodeEncodeError:
            raise ValueError('string argument should contain only ASCII characters')

    if len(z85bytes) % 5:
        raise ValueError(f"Z85 length must be multiple of 5, not {len(z85bytes)}")

    nvalues = len(z85bytes) // 5
    values = []
    for i in range(0, len(z85bytes), 5):
        value = 0
        for j, offset in enumerate(_85s):
            value += Z85MAP[z85bytes[i + j]] * offset
        values.append(value)
    return struct.pack(f'>{nvalues:d}I', *values)
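

# Editorial usage sketch (not part of the original module): a 32-byte buffer
# (the size of a CURVE key) round-trips through 40 Z85 characters.
if __name__ == "__main__":
    raw = bytes(range(32))
    z = encode(raw)
    assert len(z) == 40          # every 4 raw bytes become 5 Z85 characters
    assert decode(z) == raw
    assert decode(z.decode('ascii')) == raw   # str input is accepted too
    print(z.decode('ascii'))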
pyzmq-26.4.0/zmq/utils/zmq_compat.h000066400000000000000000000044431477374370200173000ustar00rootroot00000000000000//-----------------------------------------------------------------------------
//  Copyright (c) 2010 Brian Granger, Min Ragan-Kelley
//
//  Distributed under the terms of the New BSD License.  The full license is in
//  the file LICENSE.BSD, distributed as part of this software.
//-----------------------------------------------------------------------------

#pragma once

#if defined(_MSC_VER)
#define pyzmq_int64_t __int64
#define pyzmq_uint32_t unsigned __int32
#else
#include 
#define pyzmq_int64_t int64_t
#define pyzmq_uint32_t uint32_t
#endif


#include "zmq.h"

#define _missing (-1)

#if (ZMQ_VERSION >= 40303)
    // libzmq >= 4.3.3 defines zmq_fd_t for us
    #define ZMQ_FD_T zmq_fd_t
#else
    #ifdef _WIN32
        #if defined(_MSC_VER) && _MSC_VER <= 1400
            #define ZMQ_FD_T UINT_PTR
        #else
            #define ZMQ_FD_T SOCKET
        #endif
    #else
        #define ZMQ_FD_T int
    #endif
#endif

#if (ZMQ_VERSION >= 40200)
    // Nothing to remove
#else
    #define zmq_curve_public(z85_public_key, z85_secret_key) _missing
#endif

// use unambiguous aliases for zmq_send/recv functions

#if ZMQ_VERSION_MAJOR >= 4
// nothing to remove
    #if ZMQ_VERSION_MAJOR == 4 && ZMQ_VERSION_MINOR == 0
        // zmq 4.1 deprecates zmq_utils.h
        // we only get zmq_curve_keypair from it
        #include "zmq_utils.h"
    #endif
#else
    #define zmq_curve_keypair(z85_public_key, z85_secret_key) _missing
#endif

// libzmq 4.2 draft API
#ifdef ZMQ_BUILD_DRAFT_API
    #if ZMQ_VERSION >= 40200
        #define PYZMQ_DRAFT_42
    #endif
#endif
#ifndef PYZMQ_DRAFT_42
    #define zmq_join(s, group) _missing
    #define zmq_leave(s, group) _missing
    #define zmq_msg_set_routing_id(msg, routing_id) _missing
    #define zmq_msg_routing_id(msg) 0
    #define zmq_msg_set_group(msg, group) _missing
    #define zmq_msg_group(msg) NULL
#endif

#if ZMQ_VERSION >= 40100
// nothing to remove
#else
    #define zmq_msg_gets(msg, prop) _missing
    #define zmq_has(capability) _missing
    #define zmq_proxy_steerable(in, out, mon, ctrl) _missing
#endif

// 3.x deprecations - these symbols haven't been removed,
// but let's protect against their planned removal
#define zmq_device(device_type, isocket, osocket) _missing
#define zmq_init(io_threads) ((void*)NULL)
#define zmq_term zmq_ctx_destroy
pyzmq-26.4.0/zmqversion.py000066400000000000000000000076761477374370200156100ustar00rootroot00000000000000"""A simply script to scrape zmq.h for the zeromq version.
This is similar to the version.sh script in a zeromq source dir, but
it searches for an installed header, rather than in the current dir.
"""

# Copyright (c) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import os
import re
import sys
import traceback
from configparser import ConfigParser
from warnings import warn

pjoin = os.path.join

MAJOR_PAT = '^#define +ZMQ_VERSION_MAJOR +[0-9]+$'
MINOR_PAT = '^#define +ZMQ_VERSION_MINOR +[0-9]+$'
PATCH_PAT = '^#define +ZMQ_VERSION_PATCH +[0-9]+$'


def include_dirs_from_path():
    """Check the exec path for include dirs."""
    include_dirs = []
    for p in os.environ['PATH'].split(os.path.pathsep):
        if p.endswith('/'):
            p = p[:-1]
        if p.endswith('bin'):
            include_dirs.append(p[:-3] + 'include')
    return include_dirs


def default_include_dirs():
    """Default to just /usr/local/include:/usr/include"""
    return ['/usr/local/include', '/usr/include']


def find_zmq_version():
    """check setup.cfg, then /usr/local/include, then /usr/include for zmq.h.
    Then scrape zmq.h for the version tuple.

    Returns
    -------
        ((major,minor,patch), "/path/to/zmq.h")"""
    include_dirs = []

    if os.path.exists('setup.cfg'):
        cfg = ConfigParser()
        cfg.read('setup.cfg')
        if 'build_ext' in cfg.sections():
            items = cfg.items('build_ext')
            for name, val in items:
                if name == 'include_dirs':
                    include_dirs = val.split(os.path.pathsep)

    if not include_dirs:
        include_dirs = default_include_dirs()

    for include in include_dirs:
        zmq_h = pjoin(include, 'zmq.h')
        if os.path.isfile(zmq_h):
            with open(zmq_h) as f:
                contents = f.read()
        else:
            continue

        line = re.findall(MAJOR_PAT, contents, re.MULTILINE)[0]
        major = int(re.findall('[0-9]+', line)[0])
        line = re.findall(MINOR_PAT, contents, re.MULTILINE)[0]
        minor = int(re.findall('[0-9]+', line)[0])
        line = re.findall(PATCH_PAT, contents, re.MULTILINE)[0]
        patch = int(re.findall('[0-9]+', line)[0])
        return ((major, minor, patch), zmq_h)

    raise OSError("Couldn't find zmq.h")


def ver_str(version):
    """version tuple as string"""
    return '.'.join(map(str, version))


def check_zmq_version(min_version):
    """Check that zmq.h has an appropriate version."""
    sv = ver_str(min_version)
    try:
        found, zmq_h = find_zmq_version()
        sf = ver_str(found)
        if found < min_version:
            print(f"This pyzmq requires zeromq >= {sv}")
            print(f"but it appears you are building against {zmq_h}")
            print(f"which has zeromq {sf}")
            sys.exit(1)
    except OSError:
        msg = '\n'.join(
            [
                "Couldn't find zmq.h to check for version compatibility.",
                "If you see 'undeclared identifier' errors, your ZeroMQ is likely too old.",
                f"This pyzmq requires zeromq >= {sv}",
            ]
        )
        warn(msg)
    except IndexError:
        msg = '\n'.join(
            [
                "Couldn't find ZMQ_VERSION macros in zmq.h to check for version compatibility.",
                "This probably means that you have ZeroMQ <= 2.0.9",
                "If you see 'undeclared identifier' errors, your ZeroMQ is likely too old.",
                f"This pyzmq requires zeromq >= {sv}",
            ]
        )
        warn(msg)
    except Exception:
        traceback.print_exc()
        msg = '\n'.join(
            [
                "Unexpected Error checking for zmq version.",
                "If you see 'undeclared identifier' errors, your ZeroMQ is likely too old.",
                f"This pyzmq requires zeromq >= {sv}",
            ]
        )
        warn(msg)


if __name__ == '__main__':
    v, h = find_zmq_version()
    print(h)
    print(ver_str(v))